Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in | 159
-rw-r--r--  erts/doc/src/absform.xml | 2
-rw-r--r--  erts/doc/src/erl.xml | 18
-rw-r--r--  erts/doc/src/erl_ext_dist.xml | 15
-rw-r--r--  erts/doc/src/erl_nif.xml | 327
-rw-r--r--  erts/doc/src/erl_tracer.xml | 4
-rw-r--r--  erts/doc/src/erlang.xml | 285
-rw-r--r--  erts/doc/src/erts_alloc.xml | 2
-rw-r--r--  erts/doc/src/notes.xml | 14
-rw-r--r--  erts/doc/src/zlib.xml | 12
-rw-r--r--  erts/emulator/Makefile.in | 78
-rw-r--r--  erts/emulator/beam/atom.c | 46
-rw-r--r--  erts/emulator/beam/atom.h | 6
-rw-r--r--  erts/emulator/beam/atom.names | 36
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 547
-rw-r--r--  erts/emulator/beam/beam_bp.c | 355
-rw-r--r--  erts/emulator/beam/beam_bp.h | 46
-rw-r--r--  erts/emulator/beam/beam_debug.c | 493
-rw-r--r--  erts/emulator/beam/beam_emu.c | 683
-rw-r--r--  erts/emulator/beam/beam_load.c | 551
-rw-r--r--  erts/emulator/beam/beam_load.h | 48
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 18
-rw-r--r--  erts/emulator/beam/bif.c | 404
-rw-r--r--  erts/emulator/beam/bif.h | 97
-rw-r--r--  erts/emulator/beam/bif.tab | 55
-rw-r--r--  erts/emulator/beam/binary.c | 34
-rw-r--r--  erts/emulator/beam/break.c | 41
-rw-r--r--  erts/emulator/beam/code_ix.h | 76
-rw-r--r--  erts/emulator/beam/copy.c | 146
-rw-r--r--  erts/emulator/beam/dist.c | 38
-rw-r--r--  erts/emulator/beam/dist.h | 6
-rw-r--r--  erts/emulator/beam/dtrace-wrapper.h | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 56
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 11
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 68
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 6
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 53
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 65
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 279
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 14
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 14
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 15
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 228
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 310
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 344
-rw-r--r--  erts/emulator/beam/erl_binary.h | 232
-rw-r--r--  erts/emulator/beam/erl_bits.c | 3
-rw-r--r--  erts/emulator/beam/erl_bits.h | 17
-rw-r--r--  erts/emulator/beam/erl_db.c | 48
-rw-r--r--  erts/emulator/beam/erl_db.h | 4
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 680
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 17
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 40
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 58
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 45
-rw-r--r--  erts/emulator/beam/erl_debug.c | 2
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 82
-rw-r--r--  erts/emulator/beam/erl_driver.h | 16
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 19
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 98
-rw-r--r--  erts/emulator/beam/erl_fun.c | 27
-rw-r--r--  erts/emulator/beam/erl_fun.h | 6
-rw-r--r--  erts/emulator/beam/erl_gc.c | 384
-rw-r--r--  erts/emulator/beam/erl_gc.h | 107
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 12
-rw-r--r--  erts/emulator/beam/erl_init.c | 16
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 25
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 6
-rw-r--r--  erts/emulator/beam/erl_map.c | 75
-rw-r--r--  erts/emulator/beam/erl_math.c | 13
-rw-r--r--  erts/emulator/beam/erl_message.c | 32
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 76
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 21
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 14
-rw-r--r--  erts/emulator/beam/erl_msacc.h | 10
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.c | 180
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h | 332
-rw-r--r--  erts/emulator/beam/erl_nif.c | 1695
-rw-r--r--  erts/emulator/beam/erl_nif.h | 48
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 10
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 37
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 17
-rw-r--r--  erts/emulator/beam/erl_port.h | 2
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 2
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 8
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 11
-rw-r--r--  erts/emulator/beam/erl_process.c | 1884
-rw-r--r--  erts/emulator/beam/erl_process.h | 116
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 7
-rw-r--r--  erts/emulator/beam/erl_process_dict.h | 1
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 7
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 56
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 6
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 22
-rw-r--r--  erts/emulator/beam/erl_smp.h | 4
-rw-r--r--  erts/emulator/beam/erl_term.c | 40
-rw-r--r--  erts/emulator/beam/erl_term.h | 248
-rw-r--r--  erts/emulator/beam/erl_threads.h | 33
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 19
-rw-r--r--  erts/emulator/beam/erl_trace.c | 60
-rw-r--r--  erts/emulator/beam/erl_trace.h | 16
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 98
-rw-r--r--  erts/emulator/beam/erl_utils.h | 1
-rw-r--r--  erts/emulator/beam/erl_vm.h | 16
-rw-r--r--  erts/emulator/beam/error.h | 91
-rw-r--r--  erts/emulator/beam/export.c | 44
-rw-r--r--  erts/emulator/beam/export.h | 18
-rw-r--r--  erts/emulator/beam/external.c | 105
-rw-r--r--  erts/emulator/beam/global.h | 381
-rw-r--r--  erts/emulator/beam/io.c | 125
-rw-r--r--  erts/emulator/beam/module.c | 61
-rw-r--r--  erts/emulator/beam/module.h | 8
-rw-r--r--  erts/emulator/beam/ops.tab | 17
-rw-r--r--  erts/emulator/beam/register.c | 12
-rw-r--r--  erts/emulator/beam/sys.h | 75
-rw-r--r--  erts/emulator/beam/utils.c | 452
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 11
-rw-r--r--  erts/emulator/drivers/common/gzio.c | 2
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 4
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 86
-rw-r--r--  erts/emulator/drivers/unix/sig_drv.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64.c | 40
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 12
-rw-r--r--  erts/emulator/hipe/hipe_arch.h | 12
-rw-r--r--  erts/emulator/hipe/hipe_arm.c | 193
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 4
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 1127
-rw-r--r--  erts/emulator/hipe/hipe_bif0.h | 17
-rw-r--r--  erts/emulator/hipe/hipe_bif0.tab | 18
-rw-r--r--  erts/emulator/hipe/hipe_bif1.c | 8
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 10
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 38
-rw-r--r--  erts/emulator/hipe/hipe_debug.c | 16
-rw-r--r--  erts/emulator/hipe/hipe_gc.c | 63
-rw-r--r--  erts/emulator/hipe/hipe_load.c | 106
-rw-r--r--  erts/emulator/hipe/hipe_load.h | 48
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 3
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 34
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.h | 8
-rw-r--r--  erts/emulator/hipe/hipe_module.c | 35
-rw-r--r--  erts/emulator/hipe/hipe_module.h | 45
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 35
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.h | 20
-rw-r--r--  erts/emulator/hipe/hipe_ppc.c | 178
-rw-r--r--  erts/emulator/hipe/hipe_ppc_bifs.m4 | 4
-rw-r--r--  erts/emulator/hipe/hipe_risc_gc.h | 16
-rw-r--r--  erts/emulator/hipe/hipe_risc_glue.h | 18
-rw-r--r--  erts/emulator/hipe/hipe_risc_stack.c | 16
-rw-r--r--  erts/emulator/hipe/hipe_sparc.c | 113
-rw-r--r--  erts/emulator/hipe/hipe_sparc_bifs.m4 | 4
-rw-r--r--  erts/emulator/hipe/hipe_stack.c | 112
-rw-r--r--  erts/emulator/hipe/hipe_stack.h | 56
-rw-r--r--  erts/emulator/hipe/hipe_x86.c | 118
-rw-r--r--  erts/emulator/hipe/hipe_x86_bifs.m4 | 12
-rw-r--r--  erts/emulator/hipe/hipe_x86_gc.h | 35
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.h | 21
-rw-r--r--  erts/emulator/hipe/hipe_x86_signal.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_x86_stack.c | 23
-rw-r--r--  erts/emulator/internal_doc/DelayedDealloc.md | 18
-rw-r--r--  erts/emulator/internal_doc/PortSignals.md | 2
-rw-r--r--  erts/emulator/internal_doc/SuperCarrier.md | 2
-rw-r--r--  erts/emulator/internal_doc/ThreadProgress.md | 8
-rw-r--r--  erts/emulator/internal_doc/Tracing.md | 2
-rw-r--r--  erts/emulator/pcre/pcre_exec.c | 2
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 963
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 18
-rw-r--r--  erts/emulator/sys/common/erl_mmap.c | 10
-rw-r--r--  erts/emulator/sys/common/erl_mmap.h | 15
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 4
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 1
-rw-r--r--  erts/emulator/sys/unix/erl_child_setup.h | 2
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 4
-rw-r--r--  erts/emulator/sys/unix/sys.c | 404
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 6
-rw-r--r--  erts/emulator/sys/win32/sys.c | 12
-rw-r--r--  erts/emulator/test/Makefile | 2
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/testcase_driver.h | 2
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 47
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/bs_match_int_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/code_SUITE.erl | 457
-rw-r--r--  erts/emulator/test/code_SUITE_data/call_purged_fun_tester.erl | 186
-rw-r--r--  erts/emulator/test/ddll_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/dirty_bif_SUITE.erl | 583
-rw-r--r--  erts/emulator/test/dirty_bif_SUITE_data/.gitignore | 0
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE.erl | 10
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/chkio_drv.c | 14
-rw-r--r--  erts/emulator/test/erl_link_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/estone_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/float_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 35
-rw-r--r--  erts/emulator/test/gc_SUITE.erl | 103
-rw-r--r--  erts/emulator/test/hash_SUITE.erl | 44
-rw-r--r--  erts/emulator/test/hibernate_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/hipe_SUITE.erl | 57
-rw-r--r--  erts/emulator/test/map_SUITE.erl | 90
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 12
-rw-r--r--  erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c | 2
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 550
-rw-r--r--  erts/emulator/test/nif_SUITE_data/Makefile.src | 8
-rw-r--r--  erts/emulator/test/nif_SUITE_data/hipe_compiled.erl | 6
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 941
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_0/README | 5
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_drv_nif.h | 48
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif.h | 206
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif_api_funcs.h | 257
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_4/README | 6
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_drv_nif.h | 48
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif.h | 237
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif_api_funcs.h | 503
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.1.2_0.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.1.2_4.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.2.2_0.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.2.2_4.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.3.2_0.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.3.2_4.c | 4
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.c | 19
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.erl | 29
-rw-r--r--  erts/emulator/test/nif_SUITE_data/testcase_driver.h | 2
-rw-r--r--  erts/emulator/test/nif_SUITE_data/tester.c | 6
-rw-r--r--  erts/emulator/test/node_container_SUITE.erl | 43
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl | 86
-rw-r--r--  erts/emulator/test/old_scheduler_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/os_signal_SUITE.erl | 354
-rw-r--r--  erts/emulator/test/os_signal_SUITE_data/Makefile.src | 6
-rw-r--r--  erts/emulator/test/os_signal_SUITE_data/os_signal_nif.c | 66
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/port_SUITE_data/port_test.c | 2
-rw-r--r--  erts/emulator/test/port_bif_SUITE_data/port_test.c | 2
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 30
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl | 131
-rw-r--r--  erts/emulator/test/system_info_SUITE.erl | 72
-rw-r--r--  erts/emulator/test/time_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c | 9
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl | 118
-rw-r--r--  erts/emulator/test/trace_nif_SUITE.erl | 12
-rw-r--r--  erts/emulator/test/trace_nif_SUITE_data/trace_nif.c | 13
-rw-r--r--  erts/emulator/test/tracer_SUITE_data/tracer_test.c | 2
-rw-r--r--  erts/emulator/test/z_SUITE.erl | 20
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 27
-rwxr-xr-x  erts/emulator/utils/make_preload | 26
-rwxr-xr-x  erts/emulator/utils/make_tables | 227
-rw-r--r--  erts/epmd/test/epmd_SUITE.erl | 2
-rw-r--r--  erts/etc/common/escript.c | 40
-rw-r--r--  erts/etc/common/inet_gethost.c | 2
-rw-r--r--  erts/etc/unix/README | 2
-rw-r--r--  erts/etc/unix/etp-commands.in | 401
-rw-r--r--  erts/etc/unix/run_erl.c | 6
-rw-r--r--  erts/example/matrix_nif.c | 2
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h | 2
-rw-r--r--  erts/include/internal/gcc/ethr_dw_atomic.h | 2
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam | bin 55784 -> 55820 bytes
-rw-r--r--  erts/preloaded/ebin/erl_tracer.beam | bin 2188 -> 2160 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 105124 -> 106104 bytes
-rw-r--r--  erts/preloaded/ebin/erts_code_purger.beam | bin 12072 -> 11456 bytes
-rw-r--r--  erts/preloaded/ebin/erts_dirty_process_code_checker.beam | bin 2128 -> 2092 bytes
-rw-r--r--  erts/preloaded/ebin/erts_internal.beam | bin 11140 -> 11072 bytes
-rw-r--r--  erts/preloaded/ebin/erts_literal_area_collector.beam | bin 3288 -> 3264 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam | bin 50044 -> 49996 bytes
-rw-r--r--  erts/preloaded/ebin/otp_ring0.beam | bin 1432 -> 1404 bytes
-rw-r--r--  erts/preloaded/ebin/prim_eval.beam | bin 1448 -> 1428 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam | bin 44748 -> 44408 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 76472 -> 76440 bytes
-rw-r--r--  erts/preloaded/ebin/prim_zip.beam | bin 23136 -> 23112 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam | bin 14124 -> 14288 bytes
-rw-r--r--  erts/preloaded/src/Makefile | 2
-rw-r--r--  erts/preloaded/src/erlang.erl | 113
-rw-r--r--  erts/preloaded/src/erts_code_purger.erl | 340
-rw-r--r--  erts/preloaded/src/erts_dirty_process_code_checker.erl | 3
-rw-r--r--  erts/preloaded/src/erts_internal.erl | 64
-rw-r--r--  erts/preloaded/src/init.erl | 8
-rw-r--r--  erts/preloaded/src/zlib.erl | 41
-rw-r--r--  erts/vsn.mk | 2
279 files changed, 17701 insertions, 8053 deletions
diff --git a/erts/configure.in b/erts/configure.in
index ad9a66126f..1c4a3e90ef 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -121,7 +121,6 @@ AS_HELP_STRING([--enable-bootstrap-only],
enable_hipe=no
enable_sctp=no
enable_dirty_schedulers=no
- enable_new_purge=no
fi
])
@@ -140,12 +139,12 @@ AS_HELP_STRING([--enable-dirty-schedulers], [enable dirty scheduler support]),
*) enable_dirty_schedulers=yes ;;
esac ], enable_dirty_schedulers=default)
-AC_ARG_ENABLE(new-purge-strategy,
-AS_HELP_STRING([--enable-new-purge-strategy], [enable new code purge strategy]),
+AC_ARG_ENABLE(dirty-schedulers-test,
+AS_HELP_STRING([--enable-dirty-schedulers-test], [enable dirty scheduler test (for debugging purposes)]),
[ case "$enableval" in
- no) enable_new_purge=no ;;
- *) enable_new_purge=yes ;;
- esac ], enable_new_purge=default)
+ yes) enable_dirty_schedulers_test=yes ;;
+ *) enable_dirty_schedulers_test=no ;;
+ esac ], enable_dirty_schedulers_test=no)
AC_ARG_ENABLE(smp-support,
AS_HELP_STRING([--enable-smp-support], [enable smp support])
@@ -541,6 +540,7 @@ if test "x$GCC" = xyes; then
# Treat certain GCC warnings as errors
LM_TRY_ENABLE_CFLAG([-Werror=return-type], [WERRORFLAGS])
LM_TRY_ENABLE_CFLAG([-Werror=implicit], [WERRORFLAGS])
+ LM_TRY_ENABLE_CFLAG([-Werror=undef], [WERRORFLAGS])
# until the emulator can handle this, I suggest we turn it off!
#WFLAGS="-Wall -Wshadow -Wcast-qual -Wmissing-declarations"
@@ -1046,8 +1046,7 @@ case $ERTS_BUILD_SMP_EMU-$enable_dirty_schedulers in
yes-yes)
DIRTY_SCHEDULER_SUPPORT=yes;;
yes-default)
- ## Maybe yes for OTP 19...
- DIRTY_SCHEDULER_SUPPORT=no;;
+ DIRTY_SCHEDULER_SUPPORT=yes;;
no-default)
DIRTY_SCHEDULER_SUPPORT=no;;
no-yes)
@@ -1058,26 +1057,21 @@ esac
AC_MSG_RESULT($DIRTY_SCHEDULER_SUPPORT)
AC_SUBST(DIRTY_SCHEDULER_SUPPORT)
-AC_MSG_CHECKING(whether the new code purge strategy should be enabled)
-case $enable_new_purge-$enable_dirty_schedulers in
- yes-*)
- AC_MSG_RESULT(yes)
- enable_new_purge=yes;;
- default-yes)
- AC_MSG_RESULT(yes; forced by dirty scheduler support)
- enable_new_purge=yes;;
- no-yes)
- AC_MSG_ERROR([Dirty schedulers enabled, but new code purge strategy disabled]);;
- *)
- AC_MSG_RESULT(no)
- enable_new_purge=no;;
-esac
+DIRTY_SCHEDULER_TEST=$enable_dirty_schedulers_test
+test $DIRTY_SCHEDULER_SUPPORT = yes || DIRTY_SCHEDULER_TEST=no
+AC_SUBST(DIRTY_SCHEDULER_TEST)
+test $DIRTY_SCHEDULER_TEST != yes || {
+ test -f "$ERL_TOP/erts/CONF_INFO" || echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> $ERL_TOP/erts/CONF_INFO <<EOF
-if test $enable_new_purge = yes; then
- AC_DEFINE(ERTS_NEW_PURGE_STRATEGY, 1, [Define if you want to use the new code purge strategy])
-fi
-NEW_PURGE_STRATEGY=$enable_new_purge
-AC_SUBST(NEW_PURGE_STRATEGY)
+ WARNING:
+ Dirty Scheduler Test has been enabled. This
+ feature is for debugging purposes only.
+ Poor performance as well as strange system
+ characteristics is expected!
+
+EOF
+}
if test $ERTS_BUILD_SMP_EMU = yes; then
@@ -1410,6 +1404,8 @@ AC_ARG_ENABLE(builtin-zlib,
Z_LIB=
if test "x$enable_builtin_zlib" = "xyes"; then
+ AC_DEFINE(HAVE_ZLIB_INFLATEGETDICTIONARY, 1,
+ [Define if your zlib version defines inflateGetDictionary.])
AC_MSG_NOTICE([Using our own built-in zlib source])
else
AC_MSG_CHECKING(for zlib 1.2.5 or higher)
@@ -1436,6 +1432,11 @@ error
AC_MSG_RESULT(no)
])
LIBS=$zlib_save_LIBS
+
+AC_MSG_CHECKING(for zlib inflateGetDictionary presence)
+AC_SEARCH_LIBS(inflateGetDictionary, [z],
+ AC_DEFINE(HAVE_ZLIB_INFLATEGETDICTIONARY, 1,
+ [Define if your zlib version defines inflateGetDictionary.]))
fi
AC_SUBST(Z_LIB)
@@ -1910,7 +1911,25 @@ case X$erl_xcomp_bigendian in
*) AC_MSG_ERROR([Bad erl_xcomp_bigendian value: $erl_xcomp_bigendian]);;
esac
-AC_C_BIGENDIAN
+AC_C_BIGENDIAN(
+ [
+ AC_DEFINE([WORDS_BIGENDIAN], [1], [Define if big-endian])
+ AC_DEFINE([ERTS_ENDIANNESS], [1], [Define > 0 if big-endian < 0 if little-endian, or 0 if unknown])
+ ],
+ [
+ AC_DEFINE([ERTS_ENDIANNESS], [-1], [Define > 0 if big-endian < 0 if little-endian, or 0 if unknown])
+ ],
+ [
+ case "$erl_xcomp_bigendian" in
+ yes)
+ AC_DEFINE([ERTS_ENDIANNESS], [1], [Define > 0 if big-endian < 0 if little-endian, or 0 if unknown]);;
+ no)
+ AC_DEFINE([ERTS_ENDIANNESS], [-1], [Define > 0 if big-endian < 0 if little-endian, or 0 if unknown]);;
+ *)
+ AC_DEFINE([ERTS_ENDIANNESS], [0], [Define > 0 if big-endian < 0 if little-endian, or 0 if unknown]);;
+ esac
+ ])
+
AC_C_DOUBLE_MIDDLE_ENDIAN
dnl fdatasync syscall (Unix only)
@@ -2061,7 +2080,7 @@ AC_CHECK_FUNCS([getipnodebyname getipnodebyaddr gethostbyname2])
AC_CHECK_FUNCS([ieee_handler fpsetmask finite isnan isinf res_gethostbyname dlopen \
pread pwrite memmove strerror strerror_r strncasecmp \
- gethrtime localtime_r gmtime_r inet_pton \
+ gethrtime localtime_r gmtime_r inet_pton mprotect \
mmap mremap memcpy mallopt sbrk _sbrk __sbrk brk _brk __brk \
flockfile fstat strlcpy strlcat setsid posix2time time2posix \
setlocale nl_langinfo poll mlockall ppoll])
@@ -2767,17 +2786,29 @@ ERL_TIME_CORRECTION
AC_CHECK_PROG(M4, m4, m4)
-dnl HiPE cannot run on 64-bit without MAP_FIXED and MAP_NORESERVE
-if test X${enable_hipe} != Xno && test X$ac_cv_sizeof_void_p != X4; then
- AC_CHECK_DECLS([MAP_FIXED, MAP_NORESERVE], [], [], [#include <sys/mman.h>])
- if test X$ac_cv_have_decl_MAP_FIXED != Xyes || test X$ac_cv_have_decl_MAP_NORESERVE != Xyes; then
- if test X${enable_hipe} = Xyes; then
- AC_MSG_ERROR([HiPE on 64-bit needs MAP_FIXED and MAP_NORESERVE flags for mmap()])
- else
- enable_hipe=no
- AC_MSG_WARN([Disable HiPE due to lack of MAP_FIXED and MAP_NORESERVE flags for mmap()])
- fi
- fi
+if test X${enable_hipe} != Xno; then
+ if test X$ac_cv_sizeof_void_p != X4 && test X$ARCH = Xamd64; then
+ dnl HiPE cannot run on x86_64 without MAP_FIXED and MAP_NORESERVE
+ AC_CHECK_DECLS([MAP_FIXED, MAP_NORESERVE], [], [], [#include <sys/mman.h>])
+ if test X$ac_cv_have_decl_MAP_FIXED != Xyes || test X$ac_cv_have_decl_MAP_NORESERVE != Xyes; then
+ if test X${enable_hipe} = Xyes; then
+ AC_MSG_ERROR([HiPE on x86_64 needs MAP_FIXED and MAP_NORESERVE flags for mmap()])
+ else
+ enable_hipe=no
+ AC_MSG_WARN([Disable HiPE due to lack of MAP_FIXED and MAP_NORESERVE flags for mmap()])
+ fi
+ fi
+ else
+ dnl HiPE cannot run without mprotect()
+ if test X$ac_cv_func_mprotect != Xyes; then
+ if test X${enable_hipe} = Xyes; then
+ AC_MSG_ERROR([HiPE needs mprotect() on $ARCH])
+ else
+ enable_hipe=no
+ AC_MSG_WARN([Disable HiPE due to lack of mprotect()])
+ fi
+ fi
+ fi
fi
dnl check to auto-enable hipe here...
@@ -3426,13 +3457,8 @@ if test X${enable_hipe} = Xyes; then
AC_DEFINE(HIPE,[1],[Define to enable HiPE])
HIPE_HELPERS="xmerl syntax_tools edoc"
ENABLE_ALLOC_TYPE_VARS="$ENABLE_ALLOC_TYPE_VARS hipe"
- case "$ARCH" in
- amd64)
- # For now exec_alloc is only used for hipe on amd64
- AC_MSG_NOTICE([Enable exec_alloc for hipe code allocation])
- ENABLE_ALLOC_TYPE_VARS="$ENABLE_ALLOC_TYPE_VARS exec_alloc"
- ;;
- esac
+ AC_MSG_NOTICE([Enable exec_alloc for hipe code allocation])
+ ENABLE_ALLOC_TYPE_VARS="$ENABLE_ALLOC_TYPE_VARS exec_alloc"
fi
fi
AC_SUBST(HIPE_HELPERS)
@@ -3968,6 +3994,7 @@ dnl use "PATH/include" and "PATH/lib".
AC_SUBST(SSL_INCLUDE)
AC_SUBST(SSL_INCDIR)
AC_SUBST(SSL_LIBDIR)
+AC_SUBST(SSL_FLAGS)
AC_SUBST(SSL_CRYPTO_LIBNAME)
AC_SUBST(SSL_SSL_LIBNAME)
AC_SUBST(SSL_CC_RUNTIME_LIBRARY_PATH)
@@ -4299,8 +4326,7 @@ yes
#include <stdio.h>
#include <openssl/hmac.h>],
[
- HMAC_CTX hc;
- HMAC_CTX_init(&hc);
+ HMAC(0, 0, 0, 0, 0, 0, 0);
],
[ssl_linkable=yes],
[ssl_linkable=no])
@@ -4355,8 +4381,7 @@ dnl so it is - be adoptable
#include <stdio.h>
#include <openssl/hmac.h>],
[
- HMAC_CTX hc;
- HMAC_CTX_init(&hc);
+ HMAC(0, 0, 0, 0, 0, 0, 0);
],
[ssl_dyn_linkable=yes],
[ssl_dyn_linkable=no])
@@ -4461,12 +4486,14 @@ esac
if test "x$SSL_APP" != "x" ; then
dnl We found openssl, now check if we use kerberos 5 support
+ dnl FIXME: Do we still support platforms that have Kerberos?
AC_MSG_CHECKING(for OpenSSL kerberos 5 support)
old_CPPFLAGS=$CPPFLAGS
CPPFLAGS=$SSL_INCLUDE
AC_EGREP_CPP(^yes$,[
+#include <openssl/opensslv.h>
#include <openssl/opensslconf.h>
-#ifndef OPENSSL_NO_KRB5
+#if OPENSSL_VERSION_NUMBER < 0x1010000fL && !defined(OPENSSL_NO_KRB5)
yes
#endif
],[
@@ -4617,8 +4644,7 @@ yes) # Use standard lib locations for ssl runtime library path
#include <openssl/hmac.h>
],
[
- HMAC_CTX hc;
- HMAC_CTX_init(&hc);
+ HMAC(0, 0, 0, 0, 0, 0, 0);
],
[rpath_success=yes],
[rpath_success=no])
@@ -4657,6 +4683,31 @@ no) # Use no ssl runtime library path
esac
+AC_ARG_ENABLE(fips,
+AS_HELP_STRING([--enable-fips], [enable OpenSSL FIPS mode support])
+AS_HELP_STRING([--disable-fips], [disable OpenSSL FIPS mode support (default)]),
+[ case "$enableval" in
+ yes) enable_fips_support=yes ;;
+ *) enable_fips_support=no ;;
+ esac ], enable_fips_support=no)
+
+if test "x$enable_fips_support" = "xyes" && test "$CRYPTO_APP" != ""; then
+ saveCFLAGS="$CFLAGS"
+ saveLDFLAGS="$LDFLAGS"
+ saveLIBS="$LIBS"
+ CFLAGS="$CFLAGS $SSL_INCLUDE"
+ LDFLAGS="$LDFLAGS $SSL_LD_RUNTIME_LIBRARY_PATH -L$SSL_LIBDIR"
+ LIBS="-lcrypto"
+ AC_CHECK_FUNC([FIPS_mode_set],
+ [SSL_FLAGS="-DFIPS_SUPPORT"],
+ [SSL_FLAGS=])
+ CFLAGS="$saveCFLAGS"
+ LDFLAGS="$saveLDFLAGS"
+ LIBS="$saveLIBS"
+else
+ SSL_FLAGS=
+fi
+
#--------------------------------------------------------------------
# Os mon stuff.
#--------------------------------------------------------------------
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index ab00d47425..fe8e3b30e7 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -886,7 +886,7 @@
Rep(Fc) = <c>[Rep(C_1), ..., Rep(C_k)]</c>.</p>
<list type="bulleted">
- <item>If C is a constraint <c>is_subtype(V, T)</c> or <c>V :: T</c>,
+ <item>If C is a constraint <c>V :: T</c>,
where <c>V</c> is a type variable
and <c>T</c> is a type, then Rep(C) =
<c>{type,LINE,constraint,[{atom,LINE,is_subtype},[Rep(V),Rep(T)]]}</c>.
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 8da832ac37..29fef7348b 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -638,7 +638,7 @@
this value also applies to command-line parameters and environment
variables (see section <seealso
marker="stdlib:unicode_usage#unicode_in_environment_and_parameters">
- Unicode in Enviroment and Parameters</seealso> in the STDLIB
+ Unicode in Environment and Parameters</seealso> in the STDLIB
User's Guide).</p>
</item>
<tag><c><![CDATA[+fnu[{w|i|e}]]]></c></tag>
@@ -674,7 +674,7 @@
this value also applies to command-line parameters and environment
variables (see section <seealso
marker="stdlib:unicode_usage#unicode_in_environment_and_parameters">
- Unicode in Enviroment and Parameters</seealso> in the STDLIB
+ Unicode in Environment and Parameters</seealso> in the STDLIB
User's Guide).</p>
</item>
<tag><c><![CDATA[+fna[{w|i|e}]]]></c></tag>
@@ -695,7 +695,7 @@
this value also applies to command-line parameters and environment
variables (see section <seealso
marker="stdlib:unicode_usage#unicode_in_environment_and_parameters">
- Unicode in Enviroment and Parameters</seealso> in the STDLIB
+ Unicode in Environment and Parameters</seealso> in the STDLIB
User's Guide).</p>
</item>
<tag><c><![CDATA[+hms Size]]></c></tag>
@@ -946,9 +946,7 @@
schedulers was allowed to be unlimited, dirty CPU bound jobs would
potentially starve normal jobs.</p>
<p>This option is ignored if the emulator does not have threading
- support enabled. <em>This option is experimental</em> and
- is supported only if the emulator was configured and built with
- support for dirty schedulers enabled (it is disabled by default).</p>
+ support enabled.</p>
</item>
<tag><marker id="+SDPcpu"/><c><![CDATA[+SDPcpu
DirtyCPUSchedulersPercentage:DirtyCPUSchedulersOnlinePercentage]]></c></tag>
@@ -974,9 +972,7 @@
either order) results in 2 dirty CPU scheduler threads (50% of 4) and
1 dirty CPU scheduler thread online (25% of 4).</p>
<p>This option is ignored if the emulator does not have threading
- support enabled. <em>This option is experimental</em> and
- is supported only if the emulator was configured and built with
- support for dirty schedulers enabled (it is disabled by default).</p>
+ support enabled.</p>
</item>
<tag><marker id="+SDio"/><c><![CDATA[+SDio DirtyIOSchedulers]]></c></tag>
<item>
@@ -992,9 +988,7 @@
bound jobs on dirty I/O schedulers, these jobs might starve ordinary
jobs executing on ordinary schedulers.</p>
<p>This option is ignored if the emulator does not have threading
- support enabled. <em>This option is experimental</em> and
- is supported only if the emulator was configured and built with
- support for dirty schedulers enabled (it is disabled by default).</p>
+ support enabled.</p>
</item>
<tag><c><![CDATA[+sFlag Value]]></c></tag>
<item>
diff --git a/erts/doc/src/erl_ext_dist.xml b/erts/doc/src/erl_ext_dist.xml
index 4f799f8f34..a436a9ca74 100644
--- a/erts/doc/src/erl_ext_dist.xml
+++ b/erts/doc/src/erl_ext_dist.xml
@@ -119,16 +119,11 @@
<tcaption>Compressed Data Format when Expanded</tcaption></table>
<marker id="utf8_atoms"/>
<note>
- <p>As from ERTS 5.10 (OTP R16) support
- for UTF-8 encoded atoms has been introduced in the external format.
- However, only characters that can be encoded using Latin-1 (ISO-8859-1)
- are currently supported in atoms. The support for UTF-8 encoded atoms
- in the external format has been implemented to be able to support
- all Unicode characters in atoms in <em>some future release</em>.
- Until full Unicode support for atoms has been introduced,
- it is an <em>error</em> to pass atoms containing
- characters that cannot be encoded in Latin-1, and <em>the behavior is
- undefined</em>.</p>
+ <p>As from ERTS 9.0 (OTP 20), UTF-8 encoded atoms may contain any Unicode
+ character. Although the support for UTF-8 encoded atoms in the external
+ format is available since ERTS 5.10 (OTP R16), passing atoms that cannot
+ be encoded in Latin-1 is an <em>error</em> in versions earlier than
+ Erlang/OTP 20, and <em>the behavior is undefined</em>.</p>
<p>When distribution flag <seealso marker="erl_dist_protocol#dflags">
<c>DFLAG_UTF8_ATOMS</c></seealso> has been exchanged between both nodes
in the <seealso marker="erl_dist_protocol#distribution_handshake">
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index b5dc9037c4..b0a632d2d6 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -80,7 +80,7 @@
<code type="none">
/* niftest.c */
-#include "erl_nif.h"
+#include &lt;erl_nif.h&gt;
static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
@@ -123,7 +123,7 @@ ok
"Hello world!"</code>
<p>A better solution for a real module is to take advantage of the new
- directive <c>on load</c> (see section
+ directive <c>on_load</c> (see section
<seealso marker="doc/reference_manual:code_loading#on_load">Running a
Function When a Module is Loaded</seealso> in the Erlang Reference
Manual) to load the NIF library automatically when the module is
@@ -135,27 +135,14 @@ ok
away by the compiler, causing loading of the NIF library to fail.</p>
</note>
- <p>A loaded NIF library is tied to the Erlang module code version
- that loaded it. If the module is upgraded with a new version, the
- new Erlang code need to load its own NIF library (or maybe choose not
- to). The new code version can, however, choose to load the
- same NIF library as the old code if it wants to. Sharing the
- dynamic library means that static data defined by the library
- is shared as well. To avoid unintentionally shared static
- data, each Erlang module code can keep its own private data. This
- private data can be set when the NIF library is loaded and
- then retrieved by calling <seealso marker="#enif_priv_data">
- <c>enif_priv_data</c></seealso>.</p>
-
- <p>A NIF library cannot be loaded explicitly. A library is
- automatically unloaded when the module code that it belongs to is purged
- by the code server.</p>
+ <p>Once loaded, a NIF library is persistent. It will not be unloaded
+ until the module code version that it belongs to is purged.</p>
</description>
<section>
<title>Functionality</title>
- <p>All functions that a NIF library needs to do with Erlang are
- performed through the NIF API functions. Functions exist
+ <p>All interaction between NIF code and the Erlang runtime system is
+ performed by calling NIF API functions. Functions exist
for the following functionality:</p>
<taglist>
@@ -286,6 +273,19 @@ return term;</code>
library is postponed as long as there exist resource objects with a
destructor function in the library.</p>
</item>
+ <tag>Module upgrade and static data</tag>
+ <item>
+ <p>A loaded NIF library is tied to the Erlang module instance
+ that loaded it. If the module is upgraded, the new module instance
+ needs to load its own NIF library (or maybe choose not to). The new
+ module instance can, however, choose to load the exact same NIF library
+ as the old code if it wants to. Sharing the dynamic library means that
+ static data defined by the library is shared as well. To avoid
+ unintentionally shared static data between module instances, each Erlang
+ module version can keep its own private data. This private data can be
+ set when the NIF library is loaded and later retrieved by calling
+ <seealso marker="#enif_priv_data"><c>enif_priv_data</c></seealso>.</p>
+ </item>
<tag>Threads and concurrency</tag>
<item>
<p>A NIF is thread-safe without any explicit synchronization as
@@ -296,8 +296,8 @@ return term;</code>
synchronization. This includes terms in process-independent
environments that are shared between threads. Resource objects also
require synchronization if you treat them as mutable.</p>
- <p>The library initialization callbacks <c>load</c>, <c>reload</c>, and
- <c>upgrade</c> are all thread-safe even for shared state data.</p>
+ <p>The library initialization callbacks <c>load</c> and
+ <c>upgrade</c> are thread-safe even for shared state data.</p>
</item>
<tag><marker id="version_management"/>Version Management</tag>
<item>
@@ -402,14 +402,14 @@ return term;</code>
<tag><marker id="dirty_nifs"/>Dirty NIF</tag>
<item>
<note>
- <p><em>The dirty NIF functionality described here
- is experimental</em>. Dirty NIF support is available only when
- the emulator is configured with dirty schedulers enabled. This
- feature is disabled by default. The Erlang runtime
- without SMP support does not support dirty schedulers even when
- the dirty scheduler support is enabled. To check at runtime for
- the presence of dirty scheduler threads, code can use the
- <seealso marker="#enif_system_info">
+ <p>Dirty NIF support is available only when the emulator is
+ configured with dirty scheduler support. As of ERTS version
+ 9.0, dirty scheduler support is enabled by default on the
+ runtime system with SMP support. The Erlang runtime without
+ SMP support does <em>not</em> support dirty schedulers even
+ when the dirty scheduler support is explicitly enabled. To
+ check at runtime for the presence of dirty scheduler threads,
+ code can use the <seealso marker="#enif_system_info">
<c>enif_system_info()</c></seealso> API function.</p>
</note>
<p>A NIF that cannot be split and cannot execute in a millisecond
@@ -498,7 +498,7 @@ return term;</code>
<title>Initialization</title>
<taglist>
<tag><marker id="ERL_NIF_INIT"/><c>ERL_NIF_INIT(MODULE,
- ErlNifFunc funcs[], load, reload, upgrade, unload)</c></tag>
+ ErlNifFunc funcs[], load, NULL, upgrade, unload)</c></tag>
<item>
<p>This is the magic macro to initialize a NIF library. It
is to be evaluated in global file scope.</p>
@@ -507,11 +507,14 @@ return term;</code>
the macro.</p>
<p><c>funcs</c> is a static array of function descriptors for
all the implemented NIFs in this library.</p>
- <p><c>load</c>, <c>reload</c>, <c>upgrade</c> and <c>unload</c>
- are pointers to functions. One of <c>load</c>, <c>reload</c>, or
+ <p><c>load</c>, <c>upgrade</c> and <c>unload</c>
+ are pointers to functions. One of <c>load</c> or
<c>upgrade</c> is called to initialize the library.
<c>unload</c> is called to release the library. All are
described individually below.</p>
+ <p>The fourth argument <c>NULL</c> is ignored. It
+      was earlier used for the deprecated <c>reload</c> callback,
+      which is no longer supported as of OTP 20.</p>
<p>If compiling a NIF for static inclusion through
<c>--enable-static-nifs</c>, you must define <c>STATIC_ERLANG_NIF</c>
before the <c>ERL_NIF_INIT</c> declaration.</p>
@@ -522,7 +525,7 @@ return term;</code>
<p><c>load</c> is called when the NIF library is loaded
and no previously loaded library exists for this module.</p>
<p><c>*priv_data</c> can be set to point to some private data
- that the library needs to keep a state between NIF
+ if the library needs to keep a state between NIF
calls. <c>enif_priv_data</c> returns this pointer.
<c>*priv_data</c> is initialized to <c>NULL</c> when <c>load</c> is
called.</p>
@@ -539,7 +542,7 @@ return term;</code>
and there is old code of this module with a loaded NIF library.</p>
<p>Works as <c>load</c>, except that <c>*old_priv_data</c> already
contains the value set by the last call to <c>load</c> or
- <c>reload</c> for the old module code. <c>*priv_data</c> is
+ <c>upgrade</c> for the old module code. <c>*priv_data</c> is
initialized to <c>NULL</c> when <c>upgrade</c> is called. It is
allowed to write to both <c>*priv_data</c> and
<c>*old_priv_data.</c></p>
@@ -551,27 +554,7 @@ return term;</code>
<item>
<p><c>unload</c> is called when the module code that
the NIF library belongs to is purged as old. New code of the same
- module may or may not exist. Notice that <c>unload</c> is not
- called for a replaced library as a consequence of <c>reload</c>.</p>
- </item>
- <tag><marker id="reload"/><c>int (*reload)(ErlNifEnv* env, void**
- priv_data, ERL_NIF_TERM load_info)</c></tag>
- <item>
- <note>
- <p><em>The reload mechanism is deprecated.</em> It was only intended
- as a development feature. Do not use it as an upgrade method for
- live production systems. It can be removed in future releases.
- Ensure to pass <c>reload</c> as <c>NULL</c> to
- <seealso marker="#ERL_NIF_INIT"><c>ERL_NIF_INIT</c></seealso>
- to disable it when not used.</p>
- </note>
- <p><c>reload</c> is called when the NIF library is loaded and a
- previously loaded library already exists for this module code.</p>
- <p>Works as <c>load</c>, except that
- <c>*priv_data</c> already contains the value set by the
- previous call to <c>load</c> or <c>reload</c>.</p>
- <p>The library fails to load if <c>reload</c> returns
- anything other than <c>0</c> or if <c>reload</c> is <c>NULL</c>.</p>
+ module may or may not exist.</p>
</item>
</taglist>
</section>
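Taken together, a minimal post-reload NIF skeleton could look like the sketch below. This is a hedged illustration only: the module name mymod, the ping NIF, and the integer private state are hypothetical; note the NULL in the fourth ERL_NIF_INIT slot where the removed reload callback used to go.

    /* Hypothetical minimal NIF library "mymod" with load/upgrade/unload. */
    #include <erl_nif.h>

    static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
    {
        *priv_data = enif_alloc(sizeof(int));   /* state kept between NIF calls */
        return *priv_data == NULL;              /* 0 means success */
    }

    static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
                       ERL_NIF_TERM load_info)
    {
        *priv_data = enif_alloc(sizeof(int));   /* new instance owns its own state */
        if (*priv_data == NULL)
            return 1;
        *(int*)*priv_data = *(int*)*old_priv_data;  /* copy state from old instance */
        return 0;
    }

    static void unload(ErlNifEnv* env, void* priv_data)
    {
        enif_free(priv_data);
    }

    static ERL_NIF_TERM ping(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        return enif_make_atom(env, "pong");
    }

    static ErlNifFunc nif_funcs[] = {
        {"ping", 0, ping}
    };

    ERL_NIF_INIT(mymod, nif_funcs, load, NULL, upgrade, unload)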
@@ -659,9 +642,6 @@ typedef struct {
<p><c>flags</c> can be used to indicate that the NIF is a
<seealso marker="#dirty_nifs">dirty NIF</seealso> that is to be
executed on a dirty scheduler thread.</p>
- <p><em>The dirty NIF functionality described here is
- experimental.</em> You have to enable support for dirty
- schedulers when building OTP to try out the functionality.</p>
<p>If the dirty NIF is expected to be CPU-bound, its <c>flags</c>
field is to be set to <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c> or
<c>ERL_NIF_DIRTY_JOB_IO_BOUND</c>.</p>
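For illustration, a hypothetical function descriptor array mixing a regular NIF with a dirty CPU-bound NIF could look like this (both function names are placeholders):

    /* "quick" runs on a normal scheduler; "long_work" is flagged dirty CPU-bound. */
    static ERL_NIF_TERM quick_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
    static ERL_NIF_TERM long_work_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);

    static ErlNifFunc nif_funcs[] = {
        {"quick",     1, quick_nif,     0},
        {"long_work", 1, long_work_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
    };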
@@ -695,6 +675,18 @@ typedef struct {
<p>When receiving data from untrusted sources, use option
<c>ERL_NIF_BIN2TERM_SAFE</c>.</p>
</item>
+ <tag><marker id="ErlNifMonitor"/><c>ErlNifMonitor</c></tag>
+ <item>
+ <p>This is an opaque data type that identifies a monitor.</p>
+      <p>The NIF writer is to provide the memory for storing the
+      monitor when calling <seealso marker="#enif_monitor_process">
+      <c>enif_monitor_process</c></seealso>. The
+      address of the data is not stored by the runtime system, so
+      <c>ErlNifMonitor</c> can be used like any other data; it
+      can be copied, moved in memory, forgotten, and so on.
+ To compare two monitors, <seealso marker="#enif_compare_monitors">
+ <c>enif_compare_monitors</c></seealso> must be used.</p>
+ </item>
<tag><marker id="ErlNifPid"/><c>ErlNifPid</c></tag>
<item>
<p>A process identifier (pid). In contrast to pid terms (instances of
@@ -716,11 +708,46 @@ typedef struct {
Each resource type has a unique name and a destructor function that
is called when objects of its type are released.</p>
</item>
+ <tag><marker id="ErlNifResourceTypeInit"/><c>ErlNifResourceTypeInit</c></tag>
+ <item>
+ <code type="none">
+typedef struct {
+ ErlNifResourceDtor* dtor;
+ ErlNifResourceStop* stop;
+} ErlNifResourceTypeInit;</code>
+ <p>Initialization structure read by <seealso marker="#enif_open_resource_type_x">
+ enif_open_resource_type_x</seealso>.</p>
+ </item>
<tag><marker id="ErlNifResourceDtor"/><c>ErlNifResourceDtor</c></tag>
<item>
<code type="none">
typedef void ErlNifResourceDtor(ErlNifEnv* env, void* obj);</code>
<p>The function prototype of a resource destructor function.</p>
+ <p>The <c>obj</c> argument is a pointer to the resource. The only
+ allowed use for the resource in the destructor is to access its
+ user data one final time. The destructor is guaranteed to be the
+ last callback before the resource is deallocated.</p>
+ </item>
+ <tag><marker id="ErlNifResourceDown"/><c>ErlNifResourceDown</c></tag>
+ <item>
+ <code type="none">
+typedef void ErlNifResourceDown(ErlNifEnv* env, void* obj, const ErlNifPid* pid, const ErlNifMonitor* mon);</code>
+ <p>The function prototype of a resource down function,
+      called on behalf of <seealso marker="#enif_monitor_process">
+ enif_monitor_process</seealso>. <c>obj</c> is the resource, <c>pid</c>
+ is the identity of the monitored process that is exiting, and <c>mon</c>
+ is the identity of the monitor.
+ </p>
+ </item>
+ <tag><marker id="ErlNifResourceStop"/><c>ErlNifResourceStop</c></tag>
+ <item>
+ <code type="none">
+typedef void ErlNifResourceStop(ErlNifEnv* env, void* obj, ErlNifEvent event, int is_direct_call);</code>
+ <p>The function prototype of a resource stop function,
+      called on behalf of <seealso marker="#enif_select">
+      enif_select</seealso>. <c>obj</c> is the resource, <c>event</c> is the OS event,
+ <c>is_direct_call</c> is true if the call is made directly from <c>enif_select</c>
+ or false if it is a scheduled call (potentially from another thread).</p>
</item>
<tag><marker id="ErlNifCharEncoding"/><c>ErlNifCharEncoding</c></tag>
<item>
@@ -895,6 +922,21 @@ typedef enum {
</func>
<func>
+ <name><ret>int</ret><nametext>enif_compare_monitors(const ErlNifMonitor
+ *monitor1, const ErlNifMonitor *monitor2)</nametext></name>
+ <fsummary>Compare two monitors.</fsummary>
+ <desc>
+ <marker id="enif_compare_monitors"></marker>
+ <p>Compares two <seealso marker="#ErlNifMonitor"><c>ErlNifMonitor</c></seealso>s.
+      Can also be used to impose an artificial order on monitors
+      if one is needed.</p>
+ <p>Returns <c>0</c> if <c>monitor1</c> and <c>monitor2</c> are equal,
+ &lt; <c>0</c> if <c>monitor1</c> &lt; <c>monitor2</c>, and
+ &gt; <c>0</c> if <c>monitor1</c> &gt; <c>monitor2</c>.</p>
+ </desc>
+ </func>
+
+ <func>
<name><ret>void</ret>
<nametext>enif_cond_broadcast(ErlNifCond *cnd)</nametext></name>
<fsummary></fsummary>
@@ -1022,6 +1064,30 @@ typedef enum {
</func>
<func>
+ <name><ret>int</ret><nametext>enif_demonitor_process(ErlNifEnv* env, void* obj,
+ const ErlNifMonitor* mon)</nametext></name>
+ <fsummary>Cancel a process monitor.</fsummary>
+ <desc>
+ <marker id="enif_demonitor_process"></marker>
+ <p>Cancels a monitor created earlier with <seealso marker="#enif_monitor_process">
+ <c>enif_monitor_process</c></seealso>. Argument <c>obj</c> is a pointer
+ to the resource holding the monitor and <c>*mon</c> identifies the monitor.</p>
+ <p>Returns <c>0</c> if the monitor was successfully identified and removed.
+ Returns a non-zero value if the monitor could not be identified, which means
+ it was either</p>
+ <list type="bulleted">
+ <item>never created for this resource</item>
+ <item>already cancelled</item>
+ <item>already triggered</item>
+ <item>just about to be triggered by a concurrent thread</item>
+ </list>
+ <p>This function is only thread-safe when the emulator with SMP support
+ is used. It can only be used in a non-SMP emulator from a NIF-calling
+ thread.</p>
+ </desc>
+ </func>
+
+ <func>
<name><ret>int</ret>
<nametext>enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2)</nametext>
</name>
@@ -2137,6 +2203,36 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
</func>
<func>
+ <name><ret>int</ret><nametext>enif_monitor_process(ErlNifEnv* env, void* obj,
+ const ErlNifPid* target_pid, ErlNifMonitor* mon)</nametext></name>
+ <fsummary>Monitor a process from a resource.</fsummary>
+ <desc>
+ <marker id="enif_monitor_process"></marker>
+ <p>Starts monitoring a process from a resource. When a process is
+ monitored, a process exit results in a call to the provided
+ <seealso marker="#ErlNifResourceDown">
+ <c>down</c></seealso> callback associated with the resource type.</p>
+      <p>Argument <c>obj</c> is a pointer to the resource to hold the monitor and
+ <c>*target_pid</c> identifies the local process to be monitored.</p>
+ <p>If <c>mon</c> is not <c>NULL</c>, a successful call stores the
+ identity of the monitor in the
+ <seealso marker="#ErlNifMonitor"><c>ErlNifMonitor</c></seealso>
+ struct pointed to by <c>mon</c>. This identifier is used to refer to the
+ monitor for later removal with
+ <seealso marker="#enif_demonitor_process"><c>enif_demonitor_process</c></seealso>
+ or compare with
+ <seealso marker="#enif_compare_monitors"><c>enif_compare_monitors</c></seealso>.
+ A monitor is automatically removed when it triggers or when
+ the resource is deallocated.</p>
+ <p>Returns <c>0</c> on success, &lt; 0 if no <c>down</c> callback is
+ provided, and &gt; 0 if the process is no longer alive.</p>
+ <p>This function is only thread-safe when the emulator with SMP support
+ is used. It can only be used in a non-SMP emulator from a NIF-calling
+ thread.</p>
+ </desc>
+ </func>
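A rough sketch of how the down callback and the monitor calls fit together. All names here are hypothetical, and the resource type behind my_res_type is assumed to have already been opened with client_down registered as its down callback (registration itself is not shown); real code would also synchronize access to the monitoring flag.

    #include <erl_nif.h>

    typedef struct {
        ErlNifPid client;
        ErlNifMonitor client_mon;
        int monitoring;              /* real code would synchronize access */
    } my_res;

    static ErlNifResourceType* my_res_type;  /* opened with client_down as down callback */

    /* Matches the ErlNifResourceDown prototype. */
    static void client_down(ErlNifEnv* env, void* obj,
                            const ErlNifPid* pid, const ErlNifMonitor* mon)
    {
        my_res* res = (my_res*) obj;
        res->monitoring = 0;         /* the monitor is gone once this fires */
    }

    /* watch(Resource): monitor the calling process from the resource. */
    static ERL_NIF_TERM watch(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        my_res* res;
        if (!enif_get_resource(env, argv[0], my_res_type, (void**) &res))
            return enif_make_badarg(env);

        enif_self(env, &res->client);
        if (enif_monitor_process(env, res, &res->client, &res->client_mon) != 0)
            return enif_make_atom(env, "error");  /* no down callback, or process dead */
        res->monitoring = 1;
        return enif_make_atom(env, "ok");
    }

    /* unwatch(Resource): drop the monitor again, if it is still active. */
    static ERL_NIF_TERM unwatch(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        my_res* res;
        if (!enif_get_resource(env, argv[0], my_res_type, (void**) &res))
            return enif_make_badarg(env);

        if (res->monitoring &&
            enif_demonitor_process(env, res, &res->client_mon) == 0)
            res->monitoring = 0;
        return enif_make_atom(env, "ok");
    }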
+
+ <func>
<name><ret>ErlNifTime</ret>
<nametext>enif_monotonic_time(ErlNifTimeUnit time_unit)</nametext>
</name>
@@ -2249,14 +2345,31 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
returns <c>NULL</c> and sets <c>*tried</c> to <c>flags</c>.
It is allowed to set <c>tried</c> to <c>NULL</c>.</p>
<p>Notice that <c>enif_open_resource_type</c> is only allowed to be
- called in the three callbacks
- <seealso marker="#load"><c>load</c></seealso>,
- <seealso marker="#reload"><c>reload</c></seealso>, and
+ called in the two callbacks
+ <seealso marker="#load"><c>load</c></seealso> and
<seealso marker="#upgrade"><c>upgrade</c></seealso>.</p>
</desc>
</func>
<func>
+ <name><ret>ErlNifResourceType *</ret>
+ <nametext>enif_open_resource_type_x(ErlNifEnv* env, const char* name,
+ ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags, ErlNifResourceFlags* tried)</nametext>
+ </name>
+ <fsummary>Create or takeover a resource type.</fsummary>
+ <desc>
+ <p>Same as <seealso marker="#enif_open_resource_type"><c>enif_open_resource_type</c></seealso>
+ except is also accept a <c>stop</c> callback for resource types that are
+ used together with <seealso marker="#enif_select"><c>enif_select</c></seealso>.</p>
+ <p>Argument <c>init</c> is a pointer to an
+ <seealso marker="#ErlNifResourceTypeInit"><c>ErlNifResourceTypeInit</c></seealso>
+      structure that contains the function pointers for the destructor, down, and stop callbacks
+ for the resource type.</p>
+ </desc>
+ </func>
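A minimal sketch of opening such a resource type from the load callback, assuming a hypothetical file-descriptor resource on a Unix system where ErlNifEvent is a plain fd:

    #include <erl_nif.h>
    #include <unistd.h>

    typedef struct { int fd; } fd_res;

    static ErlNifResourceType* fd_res_type;

    static void fd_res_dtor(ErlNifEnv* env, void* obj)
    {
        /* Last callback before the resource memory is freed. */
    }

    static void fd_res_stop(ErlNifEnv* env, void* obj, ErlNifEvent event,
                            int is_direct_call)
    {
        close(event);   /* now safe to close the event object */
    }

    static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
    {
        ErlNifResourceTypeInit init = { fd_res_dtor, fd_res_stop };
        fd_res_type = enif_open_resource_type_x(env, "fd_res", &init,
                                                ERL_NIF_RT_CREATE, NULL);
        return fd_res_type == NULL;   /* 0 means success */
    }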
+
+ <func>
<name><ret>int</ret><nametext>enif_port_command(ErlNifEnv* env, const
ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg)</nametext>
</name>
@@ -2305,10 +2418,8 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<fsummary>Get the private data of a NIF library.</fsummary>
<desc>
<p>Returns the pointer to the private data that was set by
- <seealso marker="#load"><c>load</c></seealso>,
- <seealso marker="#reload"><c>reload</c></seealso>, or
+ <seealso marker="#load"><c>load</c></seealso> or
<seealso marker="#upgrade"><c>upgrade</c></seealso>.</p>
- <p>Was previously named <c>enif_get_data</c>.</p>
</desc>
</func>
@@ -2365,7 +2476,7 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<nametext>enif_release_resource(void* obj)</nametext></name>
<fsummary>Release a resource object.</fsummary>
<desc>
- <p>Removes a reference to resource object <c>obj</c>obtained from
+ <p>Removes a reference to resource object <c>obj</c> obtained from
<seealso marker="#enif_alloc_resource">
<c>enif_alloc_resource</c></seealso>.
The resource object is destructed when the last reference is removed.
@@ -2470,9 +2581,6 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
application to break up long-running work into multiple regular NIF
calls or to schedule a <seealso marker="#dirty_nifs">
dirty NIF</seealso> to execute on a dirty scheduler thread.</p>
- <p><em>The dirty NIF functionality described here is
- experimental.</em> You have to enable support for dirty
- schedulers when building OTP to try out the functionality.</p>
<taglist>
<tag><c>fun_name</c></tag>
<item>
@@ -2483,13 +2591,13 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<tag><c>flags</c></tag>
<item>
<p>Must be set to <c>0</c> for a regular NIF. If the emulator was
- built with the experimental dirty scheduler support enabled,
+ built with dirty scheduler support enabled,
<c>flags</c> can be set to either
<c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c> if the job is expected to be
CPU-bound, or <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c> for
jobs that will be I/O-bound. If dirty scheduler threads are not
available in the emulator, an attempt to schedule such a job
- results in a <c>badarg</c> exception.</p>
+ results in a <c>notsup</c> exception.</p>
</item>
<tag><c>argc</c> and <c>argv</c></tag>
<item>
@@ -2508,6 +2616,85 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
</func>
<func>
+ <name><ret>int</ret>
+ <nametext>enif_select(ErlNifEnv* env, ErlNifEvent event, enum ErlNifSelectFlags mode,
+ void* obj, const ErlNifPid* pid, ERL_NIF_TERM ref)</nametext>
+ </name>
+ <fsummary>Manage subscription on IO event.</fsummary>
+ <desc>
+ <p>This function can be used to receive asynchronous notifications
+ when OS-specific event objects become ready for either read or write operations.</p>
+ <p>Argument <c>event</c> identifies the event object. On Unix
+ systems, the functions <c>select</c>/<c>poll</c> are used. The event
+ object must be a socket, pipe or other file descriptor object that
+ <c>select</c>/<c>poll</c> can use.</p>
+ <p>Argument <c>mode</c> describes the type of events to wait for. It can be
+ <c>ERL_NIF_SELECT_READ</c>, <c>ERL_NIF_SELECT_WRITE</c> or a bitwise
+ OR combination to wait for both. It can also be <c>ERL_NIF_SELECT_STOP</c>
+      which is described further below. When a read or write event is triggered,
+ a notification message like this is sent to the process identified by
+ <c>pid</c>:</p>
+ <code type="none"><c>{select, Obj, Ref, ready_input | ready_output}</c></code>
+ <p><c>ready_input</c> or <c>ready_output</c> indicates if the event object
+ is ready for reading or writing.</p>
+ <p>Argument <c>pid</c> may be <c>NULL</c> to indicate the calling process.</p>
+ <p>Argument <c>obj</c> is a resource object obtained from
+ <seealso marker="#enif_alloc_resource"><c>enif_alloc_resource</c></seealso>.
+      The purpose of the resource object is to serve as a container for the event
+      object and to manage its state and lifetime. A handle to the resource is received
+ in the notification message as <c>Obj</c>.</p>
+ <p>Argument <c>ref</c> must be either a reference obtained from
+ <seealso marker="erlang#make_ref-0"><c>erlang:make_ref/0</c></seealso>
+ or the atom <c>undefined</c>. It will be passed as <c>Ref</c> in the notifications.
+ If a selective <c>receive</c> statement is used to wait for the notification
+ then a reference created just before the <c>receive</c> will exploit a runtime
+ optimization that bypasses all earlier received messages in the queue.</p>
+ <p>The notifications are one-shot only. To receive further notifications of the same
+ type (read or write), repeated calls to <c>enif_select</c> must be made
+ after receiving each notification.</p>
+ <p>Use <c>ERL_NIF_SELECT_STOP</c> as <c>mode</c> in order to safely
+ close an event object that has been passed to <c>enif_select</c>. The
+ <seealso marker="#ErlNifResourceStop"><c>stop</c></seealso> callback
+ of the resource <c>obj</c> will be called when it is safe to close
+ the event object. This safe way of closing event objects must be used
+ even if all notifications have been received and no further calls to
+ <c>enif_select</c> have been made.</p>
+ <p>Returns a non-negative value on success where the following bits can be set:</p>
+ <taglist>
+ <tag><c>ERL_NIF_SELECT_STOP_CALLED</c></tag>
+ <item>The stop callback was called directly by <c>enif_select</c>.</item>
+ <tag><c>ERL_NIF_SELECT_STOP_SCHEDULED</c></tag>
+ <item>The stop callback was scheduled to run on some other thread
+ or later by this thread.</item>
+ </taglist>
+      <p>Returns a negative value if the call failed, where the following bits can be set:</p>
+ <taglist>
+ <tag><c>ERL_NIF_SELECT_INVALID_EVENT</c></tag>
+ <item>Argument <c>event</c> is not a valid OS event object.</item>
+ <tag><c>ERL_NIF_SELECT_FAILED</c></tag>
+ <item>The system call failed to add the event object to the poll set.</item>
+ </taglist>
+ <note>
+    <p>Use bitwise AND to test for specific bits in the return value.
+    New significant bits may be added in future releases to give more detailed
+    information for both failed and successful calls. Do NOT use equality tests
+ like <c>==</c>, as that may cause your application to stop working.</p>
+ <p>Example:</p>
+ <code type="none">
+retval = enif_select(env, fd, ERL_NIF_SELECT_STOP, resource, NULL, ref);
+if (retval &lt; 0) {
+ /* handle error */
+}
+/* Success! */
+if (retval &amp; ERL_NIF_SELECT_STOP_CALLED) {
+ /* ... */
+}
+</code>
+ </note>
+ </desc>
+ </func>
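A usage sketch, reusing the hypothetical fd_res/fd_res_type from the previous sketch (opened with a stop callback) and with error handling abbreviated:

    #include <erl_nif.h>

    typedef struct { int fd; } fd_res;
    extern ErlNifResourceType* fd_res_type;   /* opened with a stop callback */

    /* await_input(Resource, Ref): ask for one ready_input notification.
     * The caller passes a fresh reference as Ref and then waits for
     * {select, Obj, Ref, ready_input} in a receive. */
    static ERL_NIF_TERM await_input(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        fd_res* res;
        if (!enif_get_resource(env, argv[0], fd_res_type, (void**) &res))
            return enif_make_badarg(env);

        /* One-shot: call enif_select again after each notification. */
        if (enif_select(env, res->fd, ERL_NIF_SELECT_READ, res, NULL, argv[1]) < 0)
            return enif_make_atom(env, "error");
        return enif_make_atom(env, "ok");
    }

    /* close_fd(Resource): never close() the fd directly; the stop callback
     * does that once it is safe. */
    static ERL_NIF_TERM close_fd(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        fd_res* res;
        if (!enif_get_resource(env, argv[0], fd_res_type, (void**) &res))
            return enif_make_badarg(env);

        enif_select(env, res->fd, ERL_NIF_SELECT_STOP, res, NULL,
                    enif_make_atom(env, "undefined"));
        return enif_make_atom(env, "ok");
    }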
+
+ <func>
<name><ret>ErlNifPid *</ret>
<nametext>enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)</nametext>
</name>
diff --git a/erts/doc/src/erl_tracer.xml b/erts/doc/src/erl_tracer.xml
index 83eef374ca..2681a19da0 100644
--- a/erts/doc/src/erl_tracer.xml
+++ b/erts/doc/src/erl_tracer.xml
@@ -103,7 +103,7 @@
<item>If set the tracer has been requested to include a
time stamp.</item>
<tag><c>extra</c></tag>
- <item>If set the tracepoint has included additonal data about
+ <item>If set the tracepoint has included additional data about
the trace event. What the additional data is depends on which
<c>TraceTag</c> has been triggered. The <c>extra</c> trace data
corresponds to the fifth element in the trace tuples described in
@@ -684,7 +684,7 @@ trace(_, _, _, _, _) ->
<p><c>erl_msg_tracer.c</c>:</p>
<pre>
-#include "erl_nif.h"
+#include &lt;erl_nif.h&gt;
/* NIF interface declarations */
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 112682d713..352d30f3b4 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -325,16 +325,11 @@ Z = erlang:adler32_combine(X,Y,iolist_size(Data2)).</code>
is <c>latin1</c>, one byte exists for each character
in the text representation. If <c><anno>Encoding</anno></c> is
<c>utf8</c> or
- <c>unicode</c>, the characters are encoded using UTF-8
- (that is, characters from 128 through 255 are
- encoded in two bytes).</p>
+ <c>unicode</c>, the characters are encoded using UTF-8 where
+ characters may require multiple bytes.</p>
<note>
- <p><c>atom_to_binary(<anno>Atom</anno>, latin1)</c> never
- fails, as the text representation of an atom can only
- contain characters from 0 through 255. In a future release,
- the text representation
- of atoms can be allowed to contain any Unicode character and
- <c>atom_to_binary(<anno>Atom</anno>, latin1)</c> then fails if the
+ <p>As from Erlang/OTP 20, atoms can contain any Unicode character
+ and <c>atom_to_binary(<anno>Atom</anno>, latin1)</c> may fail if the
text representation for <c><anno>Atom</anno></c> contains a Unicode
character &gt; 255.</p>
</note>
@@ -402,13 +397,11 @@ Z = erlang:adler32_combine(X,Y,iolist_size(Data2)).</code>
translation of bytes in the binary is done.
If <c><anno>Encoding</anno></c>
is <c>utf8</c> or <c>unicode</c>, the binary must contain
- valid UTF-8 sequences. Only Unicode characters up
- to 255 are allowed.</p>
+ valid UTF-8 sequences.</p>
<note>
- <p><c>binary_to_atom(<anno>Binary</anno>, utf8)</c> fails if
- the binary contains Unicode characters &gt; 255.
- In a future release, such Unicode characters can be allowed and
- <c>binary_to_atom(<anno>Binary</anno>, utf8)</c> does then not fail.
+ <p>As from Erlang/OTP 20, <c>binary_to_atom(<anno>Binary</anno>, utf8)</c>
+ is capable of encoding any Unicode character. Earlier versions would
+ fail if the binary contained Unicode characters &gt; 255.
For more information about Unicode support in atoms, see the
<seealso marker="erl_ext_dist#utf8_atoms">note on UTF-8
encoded atoms</seealso>
@@ -419,9 +412,7 @@ Z = erlang:adler32_combine(X,Y,iolist_size(Data2)).</code>
> <input>binary_to_atom(&lt;&lt;"Erlang"&gt;&gt;, latin1).</input>
'Erlang'
> <input>binary_to_atom(&lt;&lt;1024/utf8&gt;&gt;, utf8).</input>
-** exception error: bad argument
- in function binary_to_atom/2
- called as binary_to_atom(&lt;&lt;208,128&gt;&gt;,utf8)</pre>
+'Ѐ'</pre>
</desc>
</func>
@@ -733,6 +724,19 @@ hello
</func>
<func>
+ <name name="ceil" arity="1"/>
+ <fsummary>Returns the smallest integer not less than the argument</fsummary>
+ <desc>
+ <p>Returns the smallest integer not less than
+ <c><anno>Number</anno></c>.
+ For example:</p>
+ <pre>
+> <input>ceil(5.5).</input>
+6</pre>
+ <p>Allowed in guard tests.</p>
+ </desc>
+ </func>
+ <func>
<name name="check_old_code" arity="1"/>
<fsummary>Check if a module has old code.</fsummary>
<desc>
@@ -1480,6 +1484,20 @@ true</pre>
</func>
<func>
+ <name name="floor" arity="1"/>
+ <fsummary>Returns the largest integer not greater than the argument</fsummary>
+ <desc>
+ <p>Returns the largest integer not greater than
+ <c><anno>Number</anno></c>.
+ For example:</p>
+ <pre>
+> <input>floor(-10.5).</input>
+-11</pre>
+ <p>Allowed in guard tests.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="fun_info" arity="1"/>
<fsummary>Information about a fun.</fsummary>
<desc>
@@ -1667,6 +1685,12 @@ true</pre>
the form <c>{garbage_collect,
<anno>RequestId</anno>, <anno>GCResult</anno>}</c>.
</item>
+
+ <tag><c>{type, 'major' | 'minor'}</c></tag>
+        <item>Triggers garbage collection of the requested type. The default value is
+          <c>'major'</c>, which triggers a fullsweep GC.
+          The option <c>'minor'</c> is considered a hint and may lead to
+          either a minor or a major GC run.</item>
</taglist>
<p>If <c><anno>Pid</anno></c> equals <c>self()</c>, and
no <c>async</c> option has been passed, the garbage
@@ -1922,26 +1946,6 @@ os_prompt%</pre>
</func>
<func>
- <name name="hash" arity="2"/>
- <fsummary>Hash function (deprecated).</fsummary>
- <desc>
- <p>Returns a hash value for <c><anno>Term</anno></c> within the range
- <c>1..<anno>Range</anno></c>. The maximum range is 1..2^27-1.</p>
- <warning>
- <p><em>This BIF is deprecated, as the hash value can differ on
- different architectures.</em> The hash values for integer
- terms &gt; 2^27 and large binaries are
- poor. The BIF is retained for backward compatibility
- reasons (it can have been used to hash records into a file),
- but all new code is to use one of the BIFs
- <seealso marker="#phash/2"><c>erlang:phash/2</c></seealso> or
- <seealso marker="#phash2/1"><c>erlang:phash2/1,2</c></seealso>
- instead.</p>
- </warning>
- </desc>
- </func>
-
- <func>
<name name="hd" arity="1"/>
<fsummary>Head of a list.</fsummary>
<desc>
@@ -2368,10 +2372,10 @@ os_prompt%</pre>
<desc>
<p>Returns the atom whose text representation is
<c><anno>String</anno></c>.</p>
- <p><c><anno>String</anno></c> can only contain ISO-latin-1
- characters (that is, numbers &lt; 256) as the implementation does not
- allow Unicode characters equal to or above 256 in atoms.
- For more information on Unicode support in atoms, see
+ <p>As from Erlang/OTP 20, <c><anno>String</anno></c> may contain
+ any Unicode character. Earlier versions allowed only ISO-latin-1
+ characters as the implementation did not allow Unicode characters
+ above 255. For more information on Unicode support in atoms, see
<seealso marker="erl_ext_dist#utf8_atoms">note on UTF-8
encoded atoms</seealso>
in section "External Term Format" in the User's Guide.</p>
@@ -2544,13 +2548,6 @@ os_prompt%</pre>
<name name="load_nif" arity="2"/>
<fsummary>Load NIF library.</fsummary>
<desc>
- <note>
- <p>Before Erlang/OTP R14B, NIFs were an
- experimental feature. Versions before Erlang/OTP R14B can
- have different and possibly incompatible NIF semantics and
- interfaces. For example, in Erlang/OTP R13B03 the return value on
- failure was <c>{error,Reason,Text}</c>.</p>
- </note>
<p>Loads and links a dynamic library containing native
implemented functions (NIFs) for a module. <c><anno>Path</anno></c>
is a file path to the shareable object/dynamic library file minus
@@ -2580,14 +2577,22 @@ os_prompt%</pre>
<item>The library did not fulfill the requirements as a NIF
library of the calling module.
</item>
- <tag><c>load | reload | upgrade</c></tag>
+ <tag><c>load | upgrade</c></tag>
<item>The corresponding library callback was unsuccessful.
</item>
+ <tag><c>reload</c></tag>
+ <item>A NIF library is already loaded for this module instance.
+ The previously deprecated <c>reload</c> feature was removed in OTP 20.
+ </item>
<tag><c>old_code</c></tag>
<item>The call to <c>load_nif/2</c> was made from the old
code of a module that has been upgraded; this is not
allowed.
</item>
+ <tag><c>notsup</c></tag>
+      <item>Lack of support; for example, loading a NIF library for a
+      HiPE-compiled module.
+ </item>
</taglist>
</desc>
</func>
@@ -3784,9 +3789,6 @@ RealSystem = system + MissedSystem</code>
<c><anno>Term</anno></c> within the range
<c>1..<anno>Range</anno></c>. The maximum value for
<c><anno>Range</anno></c> is 2^32.</p>
- <p>This BIF can be used instead of the old deprecated BIF
- <c>erlang:hash/2</c>, as it calculates better hashes for
- all data types, but consider using <c>phash2/1,2</c> instead.</p>
</desc>
</func>
@@ -3821,10 +3823,6 @@ RealSystem = system + MissedSystem</code>
<desc>
<p>Returns a string corresponding to the text
representation of <c><anno>Pid</anno></c>.</p>
- <warning>
- <p>This BIF is intended for debugging and is not to be used
- in application programs.</p>
- </warning>
</desc>
</func>
@@ -4397,10 +4395,6 @@ RealSystem = system + MissedSystem</code>
<desc>
<p>Returns a string corresponding to the text
representation of the port identifier <c><anno>Port</anno></c>.</p>
- <warning>
- <p>This BIF is intended for debugging. It is not to be used
- in application programs.</p>
- </warning>
</desc>
</func>
@@ -4597,7 +4591,7 @@ RealSystem = system + MissedSystem</code>
<p>If the process potentially can get many messages,
you are advised to set the flag to <c>off_heap</c>. This
because a garbage collection with many messages placed on
- the heap can become extremly expensive and the process can
+ the heap can become extremely expensive and the process can
consume large amounts of memory. Performance of the
actual message passing is however generally better when not
using flag <c>off_heap</c>.</p>
@@ -6381,12 +6375,17 @@ lists:map(
<c><anno>TotalTime</anno></c> is the total time duration since
<seealso marker="#system_flag_scheduler_wall_time">
<c>scheduler_wall_time</c></seealso>
- activation. The time unit is undefined and can be subject
- to change between releases, OSs, and system restarts.
- <c>scheduler_wall_time</c> is only to be used to
- calculate relative values for scheduler-utilization.
- <c><anno>ActiveTime</anno></c> can never exceed
- <c><anno>TotalTime</anno></c>.</p>
+ activation for the specific scheduler. Note that
+ activation time can differ significantly between
+          schedulers. Currently, dirty schedulers are activated
+          at system start, while normal schedulers are activated
+ some time after the <c>scheduler_wall_time</c>
+ functionality is enabled. The time unit is undefined
+ and can be subject to change between releases, OSs,
+ and system restarts. <c>scheduler_wall_time</c> is only
+ to be used to calculate relative values for scheduler
+ utilization. <c><anno>ActiveTime</anno></c> can never
+ exceed <c><anno>TotalTime</anno></c>.</p>
<p>The definition of a busy scheduler is when it is not idle
and is not scheduling (selecting) a process or port,
that is:</p>
@@ -6404,15 +6403,37 @@ lists:map(
<c>scheduler_wall_time</c></seealso> is turned off.</p>
<p>The list of scheduler information is unsorted and can
appear in different order between calls.</p>
+      <p>As of ERTS version 9.0, dirty CPU schedulers are also
+      included in the result. That is, all scheduler threads
+      that are expected to handle CPU-bound work. If you also
+ want information about dirty I/O schedulers, use
+ <seealso marker="#statistics_scheduler_wall_time_all"><c>statistics(scheduler_wall_time_all)</c></seealso>
+ instead.</p>
+
+ <p>Normal schedulers will have scheduler identifiers in
+ the range <c>1 =&lt; <anno>SchedulerId</anno> =&lt;
+ </c><seealso marker="#system_info_schedulers"><c>erlang:system_info(schedulers)</c></seealso>.
+ Dirty CPU schedulers will have scheduler identifiers in
+ the range <c>erlang:system_info(schedulers) &lt;
+ <anno>SchedulerId</anno> =&lt; erlang:system_info(schedulers)
+ +
+ </c><seealso marker="#system_info_dirty_cpu_schedulers"><c>erlang:system_info(dirty_cpu_schedulers)</c></seealso>.
+ </p>
+ <note><p>The different types of schedulers handle
+ specific types of jobs. Every job is assigned to a specific
+ scheduler type. Jobs can migrate between different schedulers
+ of the same type, but never between schedulers of different
+          types. This has to be taken into account when
+          evaluating the returned result.</p></note>
<p>Using <c>scheduler_wall_time</c> to calculate
- scheduler-utilization:</p>
+ scheduler utilization:</p>
<pre>
> <input>erlang:system_flag(scheduler_wall_time, true).</input>
false
> <input>Ts0 = lists:sort(erlang:statistics(scheduler_wall_time)), ok.</input>
ok</pre>
<p>Some time later the user takes another snapshot and calculates
- scheduler-utilization per scheduler, for example:</p>
+ scheduler utilization per scheduler, for example:</p>
<pre>
> <input>Ts1 = lists:sort(erlang:statistics(scheduler_wall_time)), ok.</input>
ok
@@ -6427,11 +6448,32 @@ ok
{7,0.973237033077876},
{8,0.9741297293248656}]</pre>
<p>Using the same snapshots to calculate a total
- scheduler-utilization:</p>
+ scheduler utilization:</p>
<pre>
> <input>{A, T} = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {Ai,Ti}) ->
- {Ai + (A1 - A0), Ti + (T1 - T0)} end, {0, 0}, lists:zip(Ts0,Ts1)), A/T.</input>
+ {Ai + (A1 - A0), Ti + (T1 - T0)} end, {0, 0}, lists:zip(Ts0,Ts1)),
+ TotalSchedulerUtilization = A/T.</input>
+0.9769136803764825</pre>
+ <p>Total scheduler utilization will equal <c>1.0</c> when
+ all schedulers have been active all the time between the
+ two measurements.</p>
+      <p>Another (probably more useful) value is the total
+      scheduler utilization weighted against the maximum amount
+      of available CPU time:</p>
+ <pre>
+> <input>WeightedSchedulerUtilization = (TotalSchedulerUtilization
+ * (erlang:system_info(schedulers)
+ + erlang:system_info(dirty_cpu_schedulers)))
+ / erlang:system_info(logical_processors_available).</input>
0.9769136803764825</pre>
+ <p>This weighted scheduler utilization will reach <c>1.0</c>
+ when schedulers are active the same amount of time as
+      the maximum available CPU time. If more schedulers exist
+ than available logical processors, this value may
+ be greater than <c>1.0</c>.</p>
+ <p>As of ERTS version 9.0, the Erlang runtime system
+      with SMP support has, by default, more schedulers
+      than logical processors. This is due to the dirty schedulers.</p>
<note>
<p><c>scheduler_wall_time</c> is by default disabled. To
enable it, use
@@ -6442,6 +6484,31 @@ ok
<func>
<name name="statistics" arity="1" clause_i="12"/>
+      <fsummary>Information about each scheduler's work time.</fsummary>
+ <desc>
+ <marker id="statistics_scheduler_wall_time_all"></marker>
+ <p>The same as
+ <seealso marker="#statistics_scheduler_wall_time"><c>statistics(scheduler_wall_time)</c></seealso>,
+        except that it also includes information about all dirty I/O
+ schedulers.</p>
+        <p>Dirty I/O schedulers will have scheduler identifiers in
+ the range
+ <seealso marker="#system_info_schedulers"><c>erlang:system_info(schedulers)</c></seealso><c>
+ +
+ </c><seealso marker="#system_info_dirty_cpu_schedulers"><c>erlang:system_info(dirty_cpu_schedulers)</c></seealso><c> &lt;
+ <anno>SchedulerId</anno> =&lt; erlang:system_info(schedulers)
+ + erlang:system_info(dirty_cpu_schedulers)
+ +
+ </c><seealso marker="#system_info_dirty_io_schedulers"><c>erlang:system_info(dirty_io_schedulers)</c></seealso>.</p>
+ <note><p>Note that work executing on dirty I/O schedulers
+        is expected to mainly wait for I/O. That is, when you
+ get high scheduler utilization on dirty I/O schedulers,
+ CPU utilization is <em>not</em> expected to be high due to
+ this work.</p></note>
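+        <p>A minimal sketch of how this result relates to
+        <c>statistics(scheduler_wall_time)</c>, assuming a runtime system
+        built with dirty scheduler support:</p>
+        <pre>
+> <input>erlang:system_flag(scheduler_wall_time, true).</input>
+false
+> <input>length(erlang:statistics(scheduler_wall_time_all)) -
+  length(erlang:statistics(scheduler_wall_time)) =:=
+  erlang:system_info(dirty_io_schedulers).</input>
+true</pre>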
+ </desc>
+ </func>
+ <func>
+ <name name="statistics" arity="1" clause_i="13"/>
<fsummary>Information about active processes and ports.</fsummary>
<desc><marker id="statistics_total_active_tasks"></marker>
<p>Returns the total amount of active processes and ports in
@@ -6461,7 +6528,7 @@ ok
</func>
<func>
- <name name="statistics" arity="1" clause_i="13"/>
+ <name name="statistics" arity="1" clause_i="14"/>
<fsummary>Information about the run-queue lengths.</fsummary>
<desc><marker id="statistics_total_run_queue_lengths"></marker>
<p>Returns the total length of the run queues. That is, the number
@@ -6481,7 +6548,7 @@ ok
</func>
<func>
- <name name="statistics" arity="1" clause_i="14"/>
+ <name name="statistics" arity="1" clause_i="15"/>
<fsummary>Information about wall clock.</fsummary>
<desc>
<p>Returns information about wall clock. <c>wall_clock</c> can
@@ -6687,11 +6754,6 @@ ok
down to 3. Similarly, the number of dirty CPU schedulers
online increases proportionally to increases in the number of
schedulers online.</p>
- <note>
- <p>The dirty schedulers functionality is experimental.
- Enable support for dirty schedulers when building OTP to
- try out the functionality.</p>
- </note>
<p>For more information, see
<seealso marker="#system_info_dirty_cpu_schedulers">
<c>erlang:system_info(dirty_cpu_schedulers)</c></seealso> and
@@ -7194,8 +7256,8 @@ ok
</func>
<func>
- <name name="system_info" arity="1" clause_i="10"/>
- <name name="system_info" arity="1" clause_i="11"/>
+ <name name="system_info" arity="1" clause_i="12"/>
+ <name name="system_info" arity="1" clause_i="13"/>
<fsummary>Information about the CPU topology of the system.</fsummary>
<type name="cpu_topology"/>
<type name="level_entry"/>
@@ -7295,12 +7357,12 @@ ok
</func>
<func>
- <name name="system_info" arity="1" clause_i="27"/>
- <name name="system_info" arity="1" clause_i="28"/>
- <name name="system_info" arity="1" clause_i="36"/>
- <name name="system_info" arity="1" clause_i="37"/>
+ <name name="system_info" arity="1" clause_i="29"/>
+ <name name="system_info" arity="1" clause_i="30"/>
<name name="system_info" arity="1" clause_i="38"/>
<name name="system_info" arity="1" clause_i="39"/>
+ <name name="system_info" arity="1" clause_i="40"/>
+ <name name="system_info" arity="1" clause_i="41"/>
<fsummary>Information about the default process heap settings.</fsummary>
<type name="message_queue_data"/>
<type name="max_heap_size"/>
@@ -7343,12 +7405,6 @@ ok
see <seealso marker="#process_flag_max_heap_size">
<c>process_flag(max_heap_size, MaxHeapSize)</c></seealso>.</p>
</item>
- <tag><c>min_heap_size</c></tag>
- <item>
- <p>Returns <c>{min_heap_size, <anno>MinHeapSize</anno>}</c>,
- where <c><anno>MinHeapSize</anno></c> is the current
- system-wide minimum heap size for spawned processes.</p>
- </item>
<tag><marker id="system_info_message_queue_data"/>
<c>message_queue_data</c></tag>
<item>
@@ -7361,6 +7417,12 @@ ok
<seealso marker="#process_flag_message_queue_data">
<c>process_flag(message_queue_data, MQD)</c></seealso>.</p>
</item>
+ <tag><c>min_heap_size</c></tag>
+ <item>
+ <p>Returns <c>{min_heap_size, <anno>MinHeapSize</anno>}</c>,
+ where <c><anno>MinHeapSize</anno></c> is the current
+ system-wide minimum heap size for spawned processes.</p>
+ </item>
<tag><c>min_bin_vheap_size</c></tag>
<item>
<p>Returns <c>{min_bin_vheap_size,
@@ -7377,8 +7439,8 @@ ok
<name name="system_info" arity="1" clause_i="7"/>
<name name="system_info" arity="1" clause_i="8"/>
<name name="system_info" arity="1" clause_i="9"/>
- <name name="system_info" arity="1" clause_i="12"/>
- <name name="system_info" arity="1" clause_i="13"/>
+ <name name="system_info" arity="1" clause_i="10"/>
+ <name name="system_info" arity="1" clause_i="11"/>
<name name="system_info" arity="1" clause_i="14"/>
<name name="system_info" arity="1" clause_i="15"/>
<name name="system_info" arity="1" clause_i="16"/>
@@ -7392,15 +7454,15 @@ ok
<name name="system_info" arity="1" clause_i="24"/>
<name name="system_info" arity="1" clause_i="25"/>
<name name="system_info" arity="1" clause_i="26"/>
- <name name="system_info" arity="1" clause_i="29"/>
- <name name="system_info" arity="1" clause_i="30"/>
+ <name name="system_info" arity="1" clause_i="27"/>
+ <name name="system_info" arity="1" clause_i="28"/>
<name name="system_info" arity="1" clause_i="31"/>
<name name="system_info" arity="1" clause_i="32"/>
<name name="system_info" arity="1" clause_i="33"/>
<name name="system_info" arity="1" clause_i="34"/>
<name name="system_info" arity="1" clause_i="35"/>
- <name name="system_info" arity="1" clause_i="40"/>
- <name name="system_info" arity="1" clause_i="41"/>
+ <name name="system_info" arity="1" clause_i="36"/>
+ <name name="system_info" arity="1" clause_i="37"/>
<name name="system_info" arity="1" clause_i="42"/>
<name name="system_info" arity="1" clause_i="43"/>
<name name="system_info" arity="1" clause_i="44"/>
@@ -7429,16 +7491,28 @@ ok
<name name="system_info" arity="1" clause_i="67"/>
<name name="system_info" arity="1" clause_i="68"/>
<name name="system_info" arity="1" clause_i="69"/>
+ <name name="system_info" arity="1" clause_i="70"/>
+ <name name="system_info" arity="1" clause_i="71"/>
<fsummary>Information about the system.</fsummary>
<desc>
<p>Returns various information about the current system
(emulator) as specified by <c><anno>Item</anno></c>:</p>
<taglist>
- <tag><c>allocated_areas</c>, <c>allocator</c>,
- <c>alloc_util_allocators</c>, <c>allocator_sizes</c></tag>
+ <tag><c>atom_count</c></tag>
+ <item>
+ <marker id="system_info_atom_count"></marker>
+ <p>Returns the number of atoms currently existing at the
+ local node. The value is given as an integer.</p>
+ </item>
+ <tag><c>atom_limit</c></tag>
<item>
- <p>See <seealso marker="#system_info_allocator_tags">
- above</seealso>.</p>
+ <marker id="system_info_atom_limit"></marker>
+ <p>Returns the maximum number of atoms allowed.
+ This limit can be increased at startup by passing
+ command-line flag
+ <seealso marker="erts:erl#+t"><c>+t</c></seealso> to
+ <c>erl(1)</c>.
+ </p>
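+          <p>A minimal sketch relating the atom count to this limit
+          (the actual count is system and load dependent, but it can
+          never exceed the limit):</p>
+          <pre>
+> <input>erlang:system_info(atom_count) =&lt; erlang:system_info(atom_limit).</input>
+true</pre>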
</item>
<tag><c>build_type</c></tag>
<item>
@@ -7524,9 +7598,6 @@ ok
<seealso marker="erts:erl#+SDcpu"><c>+SDcpu</c></seealso> or
<seealso marker="erts:erl#+SDPcpu"><c>+SDPcpu</c></seealso> in
<c>erl(1)</c>.</p>
- <p>Notice that the dirty schedulers functionality is
- experimental. Enable support for dirty schedulers when
- building OTP to try out the functionality.</p>
<p>See also
<seealso marker="#system_flag_dirty_cpu_schedulers_online">
<c>erlang:system_flag(dirty_cpu_schedulers_online,
@@ -7556,9 +7627,6 @@ ok
startup by passing command-line flag
<seealso marker="erts:erl#+SDcpu"><c>+SDcpu</c></seealso> in
<c>erl(1)</c>.</p>
- <p>Notice that the dirty schedulers functionality is
- experimental. Enable support for dirty schedulers when
- building OTP to try out the functionality.</p>
<p>For more information, see
<seealso marker="#system_info_dirty_cpu_schedulers">
<c>erlang:system_info(dirty_cpu_schedulers)</c></seealso>,
@@ -7580,9 +7648,6 @@ ok
<p>This value can be set at startup by passing command-line
argument <seealso marker="erts:erl#+SDio"><c>+SDio</c></seealso>
in <c>erl(1)</c>.</p>
- <p>Notice that the dirty schedulers functionality is
- experimental. Enable support for dirty schedulers when
- building OTP to try out the functionality.</p>
<p>For more information, see
<seealso marker="#system_info_dirty_cpu_schedulers">
<c>erlang:system_info(dirty_cpu_schedulers)</c></seealso>,
diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml
index 8ab35851c1..49dadfb42c 100644
--- a/erts/doc/src/erts_alloc.xml
+++ b/erts/doc/src/erts_alloc.xml
@@ -452,7 +452,7 @@
utilization value &gt; 0 is used, allocator instances
are allowed to abandon multiblock carriers. If <c>de</c> (default
enabled) is passed instead of a <c><![CDATA[<utilization>]]></c>,
- a recomended non-zero utilization value is used. The value
+ a recommended non-zero utilization value is used. The value
chosen depends on the allocator type and can be changed between
ERTS versions. Defaults to <c>de</c>, but this
can be changed in the future.</p>
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index ae1d2b1d93..05142c9338 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -564,7 +564,7 @@
<list>
<item>
<p>
- Fixed a VM crash that occured in a garbage collection of
+ Fixed a VM crash that occurred in a garbage collection of
a process when it had received binaries. This bug was
introduced in ERTS version 8.0 (OTP 19.0).</p>
<p>
@@ -581,7 +581,7 @@
<list>
<item>
<p>
- Fixed a VM crash that occured in garbage collection of a
+ Fixed a VM crash that occurred in garbage collection of a
process when it had received maps over the distribution.
This bug was introduced in ERTS version 8.0 (OTP 19.0).</p>
<p>
@@ -5682,7 +5682,7 @@
dependent, so applications aiming to be portable should
consider using <c>{ipv6_v6only,true}</c> when creating an
<c>inet6</c> listening/destination socket, and if
- neccesary also create an <c>inet</c> socket on the same
+ necessary also create an <c>inet</c> socket on the same
port for IPv4 traffic. See the documentation.</p>
<p>
Own Id: OTP-8928 Aux Id: kunagi-193 [104] </p>
@@ -6063,7 +6063,7 @@
This change of default value will reduce lock contention
on ETS tables using the <c>read_concurrency</c> option at
the expense of memory consumption when the amount of
- schedulers and logical processors are beween 8 and 64.
+ schedulers and logical processors are between 8 and 64.
For more information, see documentation of the <c>+rg</c>
command line argument of <c>erl(1)</c>.</p>
<p>
@@ -7040,7 +7040,7 @@
<p>
For the subsection about process_flag(save_calls, N)
there's an unrelated paragraph about process priorities
- which was copied from the preceeding subsection regarding
+ which was copied from the preceding subsection regarding
process_flag(priority, Level). (Thanks to Filipe David
Manana)</p>
<p>
@@ -8255,7 +8255,7 @@
<item>
<p>
Wx on MacOS X generated complains on stderr about certain
- cocoa functions not beeing called from the "Main thread".
+ cocoa functions not being called from the "Main thread".
This is now corrected.</p>
<p>
Own Id: OTP-9081</p>
@@ -9246,7 +9246,7 @@
</item>
<item>
<p>The <c>empd</c> program could loop and consume 100%
- CPU time if an unexpected error ocurred in
+ CPU time if an unexpected error occurred in
<c>listen()</c> or <c>accept()</c>. Now <c>epmd</c> will
terminate if a non-recoverable error occurs. (Thanks to
Michael Santos.)</p>
diff --git a/erts/doc/src/zlib.xml b/erts/doc/src/zlib.xml
index 138414a880..e1924fffee 100644
--- a/erts/doc/src/zlib.xml
+++ b/erts/doc/src/zlib.xml
@@ -576,6 +576,18 @@ unpack(Z, Compressed, Dict) ->
</func>
<func>
+ <name name="inflateGetDictionary" arity="1"/>
+ <fsummary>Return the decompression dictionary.</fsummary>
+ <desc>
+ <p>Returns the decompression dictionary currently in use
+ by the stream. This function must be called between
+ <seealso marker="#inflateInit/1"><c>inflateInit/1,2</c></seealso>
+ and <seealso marker="#inflateEnd/1"><c>inflateEnd</c></seealso>.</p>
+ <p>Only supported if ERTS was compiled with zlib >= 1.2.8.</p>
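+        <p>A minimal usage sketch, assuming <c>Z</c> is a zlib stream on
+        which a dictionary has been set with <c>inflateSetDictionary/2</c>:</p>
+        <pre>
+Dictionary = zlib:inflateGetDictionary(Z).</pre>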
+ </desc>
+ </func>
+
+ <func>
<name name="open" arity="0"/>
<fsummary>Open a stream and return a stream reference.</fsummary>
<desc>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 8772befe27..18fd7f320b 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -52,7 +52,7 @@ OMIT_OMIT_FP=no
TYPE_LIBS=
DIRTY_SCHEDULER_SUPPORT=@DIRTY_SCHEDULER_SUPPORT@
-NEW_PURGE_STRATEGY=@NEW_PURGE_STRATEGY@
+DIRTY_SCHEDULER_TEST=@DIRTY_SCHEDULER_TEST@
ifeq ($(TYPE),debug)
PURIFY =
@@ -182,9 +182,24 @@ ENABLE_ALLOC_TYPE_VARS += smp nofrag
M4FLAGS += -DERTS_SMP=1
ifeq ($(DIRTY_SCHEDULER_SUPPORT),yes)
THR_DEFS += -DERTS_DIRTY_SCHEDULERS
-endif
+DS_SUPPORT=yes
-else
+ifeq ($(DIRTY_SCHEDULER_TEST),yes)
+DS_TEST=yes
+THR_DEFS += -DERTS_DIRTY_SCHEDULERS_TEST
+else # DIRTY_SCHEDULER_TEST
+DS_TEST=no
+endif # DIRTY_SCHEDULER_TEST
+
+else # DIRTY_SCHEDULER_SUPPORT
+DS_SUPPORT=no
+DS_TEST=no
+endif # DIRTY_SCHEDULER_SUPPORT
+
+else # FLAVOR
+
+DS_SUPPORT=no
+DS_TEST=no
# If flavor isn't one of the above, it *is* plain flavor...
override FLAVOR=plain
@@ -549,8 +564,10 @@ GENERATE += $(TTF_DIR)/OPCODES-GENERATED
# bif and atom table
ATOMS= beam/atom.names
+DIRTY_BIFS = beam/erl_dirty_bif.tab
BIFS = beam/bif.tab
ifdef HIPE_ENABLED
+HIPE=yes
HIPE_ARCH64_TAB=hipe/hipe_bif64.tab
HIPE_x86_TAB=hipe/hipe_x86.tab
HIPE_amd64_TAB=hipe/hipe_amd64.tab $(HIPE_ARCH64_TAB)
@@ -560,20 +577,26 @@ HIPE_ppc64_TAB=hipe/hipe_ppc64.tab $(HIPE_ARCH64_TAB)
HIPE_arm_TAB=hipe/hipe_arm.tab
HIPE_ARCH_TAB=$(HIPE_$(ARCH)_TAB)
BIFS += hipe/hipe_bif0.tab hipe/hipe_bif1.tab hipe/hipe_bif2.tab $(HIPE_ARCH_TAB)
-endif
-
-$(TARGET)/erl_bif_table.c \
-$(TARGET)/erl_bif_table.h \
-$(TARGET)/erl_bif_wrap.c \
-$(TARGET)/erl_bif_list.h \
-$(TARGET)/erl_atom_table.c \
-$(TARGET)/erl_atom_table.h \
-$(TARGET)/erl_pbifs.c \
- : $(TARGET)/TABLES-GENERATED
-$(TARGET)/TABLES-GENERATED: $(ATOMS) $(BIFS) utils/make_tables
- $(gen_verbose)LANG=C $(PERL) utils/make_tables -src $(TARGET) -include $(TARGET)\
- $(ATOMS) $(BIFS) && echo $? >$(TARGET)/TABLES-GENERATED
-GENERATE += $(TARGET)/TABLES-GENERATED
+HIPE_NBIF_FILES=$(TTF_DIR)/hipe_nbif_impl.h $(TTF_DIR)/hipe_nbif_impl.c
+else
+HIPE=no
+HIPE_NBIF_FILES=
+endif
+
+$(TTF_DIR)/erl_bif_table.c \
+$(TTF_DIR)/erl_bif_table.h \
+$(TTF_DIR)/erl_bif_wrap.c \
+$(TTF_DIR)/erl_bif_list.h \
+$(TTF_DIR)/erl_atom_table.c \
+$(TTF_DIR)/erl_atom_table.h \
+$(TTF_DIR)/erl_guard_bifs.c \
+$(TTF_DIR)/erl_dirty_bif_wrap.c \
+$(HIPE_NBIF_FILES) \
+ : $(TTF_DIR)/TABLES-GENERATED
+$(TTF_DIR)/TABLES-GENERATED: $(ATOMS) $(DIRTY_BIFS) $(BIFS) utils/make_tables
+ $(gen_verbose)LANG=C $(PERL) utils/make_tables -src $(TTF_DIR) -include $(TTF_DIR)\
+ -ds $(DS_SUPPORT) -dst $(DS_TEST) -hipe $(HIPE) $(ATOMS) $(DIRTY_BIFS) $(BIFS) && echo $? >$(TTF_DIR)/TABLES-GENERATED
+GENERATE += $(TTF_DIR)/TABLES-GENERATED
$(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types
$(gen_verbose)LANG=C $(PERL) utils/make_alloc_types -src $< -dst $@ $(ENABLE_ALLOC_TYPE_VARS)
@@ -742,7 +765,6 @@ EMU_OBJS = \
$(OBJDIR)/beam_ranges.o
RUN_OBJS = \
- $(OBJDIR)/erl_pbifs.o \
$(OBJDIR)/erl_alloc.o $(OBJDIR)/erl_mtrace.o \
$(OBJDIR)/erl_alloc_util.o $(OBJDIR)/erl_goodfit_alloc.o \
$(OBJDIR)/erl_bestfit_alloc.o $(OBJDIR)/erl_afit_alloc.o \
@@ -752,7 +774,8 @@ RUN_OBJS = \
$(OBJDIR)/erl_bif_info.o $(OBJDIR)/erl_bif_op.o \
$(OBJDIR)/erl_bif_os.o $(OBJDIR)/erl_bif_lists.o \
$(OBJDIR)/erl_bif_trace.o $(OBJDIR)/erl_bif_unique.o \
- $(OBJDIR)/erl_bif_wrap.o \
+ $(OBJDIR)/erl_bif_wrap.o $(OBJDIR)/erl_nfunc_sched.o \
+ $(OBJDIR)/erl_guard_bifs.o $(OBJDIR)/erl_dirty_bif_wrap.o \
$(OBJDIR)/erl_trace.o $(OBJDIR)/copy.o \
$(OBJDIR)/utils.o $(OBJDIR)/bif.o \
$(OBJDIR)/io.o $(OBJDIR)/erl_printf_term.o\
@@ -883,12 +906,15 @@ HIPE_noarch_OBJS=
HIPE_ARCH_OBJS=$(HIPE_$(ARCH)_OBJS)
HIPE_OBJS= \
+ $(OBJDIR)/hipe_nbif_impl.o \
$(OBJDIR)/hipe_bif0.o \
$(OBJDIR)/hipe_bif1.o \
$(OBJDIR)/hipe_bif2.o \
$(OBJDIR)/hipe_debug.o \
$(OBJDIR)/hipe_gc.o \
+ $(OBJDIR)/hipe_load.o \
$(OBJDIR)/hipe_mode_switch.o \
+ $(OBJDIR)/hipe_module.o \
$(OBJDIR)/hipe_native_bif.o \
$(OBJDIR)/hipe_stack.o $(HIPE_ARCH_OBJS)
ifdef HIPE_ENABLED
@@ -916,7 +942,7 @@ $(OBJS): $(TTF_DIR)/GENERATED
########################################
# HiPE section
-M4FLAGS += -DTARGET=$(TARGET) -DOPSYS=$(OPSYS) -DARCH=$(ARCH)
+M4FLAGS += -DTARGET=$(TARGET) -DTTF_DIR=$(TTF_DIR) -DOPSYS=$(OPSYS) -DARCH=$(ARCH)
$(TTF_DIR)/%.S: hipe/%.m4
$(m4_verbose)m4 $(M4FLAGS) $< > $@
@@ -937,7 +963,7 @@ $(BINDIR)/hipe_mkliterals$(TF_MARKER): $(OBJDIR)/hipe_mkliterals.o
$(ld_verbose)$(CC) $(LDFLAGS) -o $@ $< $(TYPE_LIBS)
$(OBJDIR)/hipe_mkliterals.o: $(HIPE_ASM) $(TTF_DIR)/erl_alloc_types.h $(DTRACE_HEADERS) \
- $(TTF_DIR)/OPCODES-GENERATED $(TARGET)/TABLES-GENERATED
+ $(TTF_DIR)/OPCODES-GENERATED $(TTF_DIR)/TABLES-GENERATED
$(TTF_DIR)/hipe_literals.h: $(BINDIR)/hipe_mkliterals$(TF_MARKER)
$(gen_verbose)$(BINDIR)/hipe_mkliterals$(TF_MARKER) -c > $@
@@ -946,7 +972,7 @@ $(OBJDIR)/hipe_x86_glue.o: hipe/hipe_x86_glue.S \
$(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_literals.h \
hipe/hipe_mode_switch.h
$(TTF_DIR)/hipe_x86_bifs.S: hipe/hipe_x86_bifs.m4 hipe/hipe_x86_asm.m4 \
- hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+ hipe/hipe_bif_list.m4 $(TTF_DIR)/erl_bif_list.h hipe/hipe_gbif_list.h
$(OBJDIR)/hipe_x86_bifs.o: $(TTF_DIR)/hipe_x86_bifs.S \
$(TTF_DIR)/hipe_literals.h
@@ -954,7 +980,7 @@ $(OBJDIR)/hipe_amd64_glue.o: hipe/hipe_amd64_glue.S \
$(TTF_DIR)/hipe_amd64_asm.h $(TTF_DIR)/hipe_literals.h \
hipe/hipe_mode_switch.h
$(TTF_DIR)/hipe_amd64_bifs.S: hipe/hipe_amd64_bifs.m4 hipe/hipe_amd64_asm.m4 \
- hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+ hipe/hipe_bif_list.m4 $(TTF_DIR)/erl_bif_list.h hipe/hipe_gbif_list.h
$(OBJDIR)/hipe_amd64_bifs.o: $(TTF_DIR)/hipe_amd64_bifs.S \
$(TTF_DIR)/hipe_literals.h
@@ -962,21 +988,21 @@ $(OBJDIR)/hipe_sparc_glue.o: hipe/hipe_sparc_glue.S \
$(TTF_DIR)/hipe_sparc_asm.h hipe/hipe_mode_switch.h \
$(TTF_DIR)/hipe_literals.h
$(TTF_DIR)/hipe_sparc_bifs.S: hipe/hipe_sparc_bifs.m4 hipe/hipe_sparc_asm.m4 \
- hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+ hipe/hipe_bif_list.m4 $(TTF_DIR)/erl_bif_list.h hipe/hipe_gbif_list.h
$(OBJDIR)/hipe_sparc_bifs.o: $(TTF_DIR)/hipe_sparc_bifs.S \
$(TTF_DIR)/hipe_literals.h
$(OBJDIR)/hipe_ppc_glue.o: hipe/hipe_ppc_glue.S $(TTF_DIR)/hipe_ppc_asm.h \
hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
$(TTF_DIR)/hipe_ppc_bifs.S: hipe/hipe_ppc_bifs.m4 hipe/hipe_ppc_asm.m4 \
- hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+ hipe/hipe_bif_list.m4 $(TTF_DIR)/erl_bif_list.h hipe/hipe_gbif_list.h
$(OBJDIR)/hipe_ppc_bifs.o: $(TTF_DIR)/hipe_ppc_bifs.S \
$(TTF_DIR)/hipe_literals.h
$(OBJDIR)/hipe_arm_glue.o: hipe/hipe_arm_glue.S $(TTF_DIR)/hipe_arm_asm.h \
hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
$(TTF_DIR)/hipe_arm_bifs.S: hipe/hipe_arm_bifs.m4 hipe/hipe_arm_asm.m4 \
- hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+ hipe/hipe_bif_list.m4 $(TTF_DIR)/erl_bif_list.h hipe/hipe_gbif_list.h
$(OBJDIR)/hipe_arm_bifs.o: $(TTF_DIR)/hipe_arm_bifs.S \
$(TTF_DIR)/hipe_literals.h
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 2052afe52b..2055c29190 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -199,7 +199,7 @@ atom_alloc(Atom* tmpl)
static void
atom_free(Atom* obj)
{
- erts_free(ERTS_ALC_T_ATOM, (void*) obj);
+ ASSERT(obj->slot.index == atom_val(am_ErtsSecretAtom));
}
static void latin1_to_utf8(byte* conv_buf, const byte** srcp, int* lenp)
@@ -233,10 +233,10 @@ need_convertion:
}
/*
- * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ * erts_atom_put_index() may fail. Returns negative indexes for errors.
*/
-Eterm
-erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+int
+erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
{
byte utf8_copy[MAX_ATOM_SZ_FROM_LATIN1];
const byte *text = name;
@@ -253,7 +253,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = 0;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
switch (enc) {
@@ -262,7 +262,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
#ifdef DEBUG
for (aix = 0; aix < len; aix++) {
@@ -276,7 +276,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
no_latin1_chars = tlen;
latin1_to_utf8(utf8_copy, &text, &tlen);
@@ -284,7 +284,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_ATOM_ENC_UTF8:
/* First sanity check; need to verify later */
if (tlen > MAX_ATOM_SZ_LIMIT && !trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
break;
}
@@ -295,7 +295,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_read_unlock();
if (aix >= 0) {
/* Already in table no need to verify it */
- return make_atom(aix);
+ return aix;
}
if (enc == ERTS_ATOM_ENC_UTF8) {
@@ -314,13 +314,13 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_UTF8_OK_MAX_CHARS:
/* Truncated... */
if (!trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
ASSERT(no_chars == MAX_ATOM_CHARACTERS);
tlen = err_pos - text;
break;
default:
/* Bad utf8... */
- return THE_NON_VALUE;
+ return ATOM_BAD_ENCODING_ERROR;
}
}
@@ -333,7 +333,20 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_write_lock();
aix = index_put(&erts_atom_table, (void*) &a);
atom_write_unlock();
- return make_atom(aix);
+ return aix;
+}
+
+/*
+ * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ */
+Eterm
+erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+{
+ int aix = erts_atom_put_index(name, len, enc, trunc);
+ if (aix >= 0)
+ return make_atom(aix);
+ else
+ return THE_NON_VALUE;
}
Eterm
@@ -467,6 +480,9 @@ init_atom_table(void)
atom_space -= a.len;
atom_tab(ix)->name = (byte*)erl_atom_names[i];
}
+
+ /* Hide am_ErtsSecretAtom */
+ hash_erase(&erts_atom_table.htable, atom_tab(atom_val(am_ErtsSecretAtom)));
}
void
@@ -483,3 +499,9 @@ dump_atoms(fmtfn_t to, void *to_arg)
}
}
}
+
+Uint
+erts_get_atom_limit(void)
+{
+ return erts_atom_table.limit;
+}
\ No newline at end of file
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
index a82efabb9f..be998a46bd 100644
--- a/erts/emulator/beam/atom.h
+++ b/erts/emulator/beam/atom.h
@@ -29,6 +29,8 @@
#define MAX_ATOM_SZ_LIMIT (4*MAX_ATOM_CHARACTERS) /* theoretical byte limit */
#define ATOM_LIMIT (1024*1024)
#define MIN_ATOM_TABLE_SIZE 8192
+#define ATOM_BAD_ENCODING_ERROR -1
+#define ATOM_MAX_CHARS_ERROR -2
#ifndef ARCH_32
/* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an
@@ -133,11 +135,11 @@ int atom_table_sz(void); /* table size in bytes, excluding stored objects */
Eterm am_atom_put(const char*, int); /* ONLY 7-bit ascii! */
Eterm erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
-int atom_erase(byte*, int);
-int atom_static_put(byte*, int);
+int erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
void init_atom_table(void);
void atom_info(fmtfn_t, void *);
void dump_atoms(fmtfn_t, void *);
+Uint erts_get_atom_limit(void);
int erts_atom_get(const char* name, int len, Eterm* ap, ErtsAtomEncoding enc);
void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used);
#endif
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 2b66bf6f4e..df2866b40e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -59,6 +59,9 @@ atom nocatch
atom undefined_function
atom undefined_lambda
+# Secret internal atom that can never be found by string lookup
+# and should never leak out to be seen by the user.
+atom ErtsSecretAtom='3RT$'
# All other atoms. Try to keep the order alphabetic.
#
@@ -73,6 +76,7 @@ atom ac
atom accessor
atom active
atom active_tasks
+atom alive
atom all
atom all_but_first
atom all_names
@@ -191,15 +195,21 @@ atom current_stacktrace
atom data
atom debug_flags
atom decimals
+atom default
atom delay_trap
atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_bif_exception
+atom dirty_bif_result
+atom dirty_bif_trap
atom dirty_cpu
atom dirty_cpu_schedulers_online
atom dirty_execution
atom dirty_io
+atom dirty_nif_exception
+atom dirty_nif_finalizer
atom disable_trace
atom disabled
atom discard
@@ -236,10 +246,12 @@ atom Eq='=:='
atom Eqeq='=='
atom erl_tracer
atom erlang
+atom erl_signal_server
atom ERROR='ERROR'
atom error_handler
atom error_logger
atom erts_code_purger
+atom erts_debug
atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
@@ -302,6 +314,7 @@ atom global
atom Gt='>'
atom grun
atom group_leader
+atom handle
atom have_dt_utag
atom heap_block_size
atom heap_size
@@ -366,6 +379,8 @@ atom long_schedule
atom low
atom Lt='<'
atom machine
+atom magic_ref
+atom major
atom match
atom match_limit
atom match_limit_recursion
@@ -395,6 +410,7 @@ atom milli_seconds
atom millisecond
atom min_heap_size
atom min_bin_vheap_size
+atom minor
atom minor_version
atom Minus='-'
atom module
@@ -560,14 +576,17 @@ atom running_procs
atom runtime
atom safe
atom save_calls
-atom scheduler
+atom scheduler
atom scheduler_id
+atom scheduler_wall_time
+atom scheduler_wall_time_all
atom schedulers_online
atom scheme
atom scientific
atom scope
atom second
atom seconds
+atom send
atom send_to_non_existing_process
atom sensitive
atom sequential_tracer
@@ -585,6 +604,19 @@ atom set_tcw
atom set_tcw_fake
atom separate
atom shared
+atom sighup
+atom sigterm
+atom sigusr1
+atom sigusr2
+atom sigill
+atom sigchld
+atom sigabrt
+atom sigalrm
+atom sigstop
+atom sigint
+atom sigsegv
+atom sigtstp
+atom sigquit
atom silent
atom size
atom sl_alloc
@@ -660,11 +692,11 @@ atom value
atom values
atom version
atom visible
+atom wait
atom waiting
atom wall_clock
atom warning
atom warning_msg
-atom scheduler_wall_time
atom wordsize
atom write_concurrency
atom xor
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 7f5e9ff691..4ba8c2a669 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -36,6 +36,13 @@
#include "erl_nif.h"
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
+#ifdef HIPE
+# include "hipe_bif0.h"
+# define IF_HIPE(X) (X)
+#else
+# define IF_HIPE(X) (0)
+#endif
#ifdef HIPE
# include "hipe_stack.h"
@@ -65,7 +72,6 @@ erts_smp_atomic_t erts_copy_literal_area__;
#define ERTS_SET_COPY_LITERAL_AREA(LA) \
erts_smp_atomic_set_nob(&erts_copy_literal_area__, \
(erts_aint_t) (LA))
-#ifdef ERTS_NEW_PURGE_STRATEGY
Process *erts_literal_area_collector = NULL;
typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef;
@@ -79,10 +85,9 @@ struct {
ErtsLiteralAreaRef *first;
ErtsLiteralAreaRef *last;
} release_literal_areas;
-#endif
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
+static Eterm check_process_code(Process* rp, Module* modp, int *redsp, int fcalls);
static void delete_code(Module* modp);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -113,11 +118,9 @@ init_purge_state(void)
void
erts_beam_bif_load_init(void)
{
-#ifdef ERTS_NEW_PURGE_STRATEGY
erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
release_literal_areas.first = NULL;
release_literal_areas.last = NULL;
-#endif
erts_smp_atomic_init_nob(&erts_copy_literal_area__,
(erts_aint_t) NULL);
@@ -147,8 +150,19 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
+#if !defined(HIPE)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
Module* modp;
- Eterm res;
+ Eterm res, mod;
+
+ if (!is_internal_magic_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ mod = erts_module_for_prepared_code(erts_magic_ref2bin(BIF_ARG_1));
+
+ if (is_not_atom(mod))
+ BIF_ERROR(BIF_P, BADARG);
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
@@ -158,7 +172,7 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp && modp->curr.num_breakpoints > 0) {
ASSERT(modp->curr.code_hdr != NULL);
@@ -170,9 +184,12 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- if (res == BIF_ARG_1) {
+ if (res == mod) {
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
+ if (!modp)
+ modp = erts_get_module(mod, erts_active_code_ix());
+ hipe_redirect_to_module(modp);
}
else {
erts_abort_staging_code_ix();
@@ -181,6 +198,7 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
+#endif
}
BIF_RETTYPE
@@ -213,8 +231,8 @@ prepare_loading_2(BIF_ALIST_2)
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
erts_refc_dec(&magic->refc, 1);
BIF_RET(res);
}
@@ -223,15 +241,13 @@ BIF_RETTYPE
has_prepared_code_on_load_1(BIF_ALIST_1)
{
Eterm res;
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1)) {
+ if (!is_internal_magic_ref(BIF_ARG_1)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- pb = (ProcBin*) binary_val(BIF_ARG_1);
- res = erts_has_code_on_load(pb->val);
+ res = erts_has_code_on_load(erts_magic_ref2bin(BIF_ARG_1));
if (res == NIL) {
goto error;
}
@@ -245,7 +261,7 @@ struct m {
Uint exception;
};
-static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
#ifdef ERTS_SMP
static void smp_code_ix_commiter(void*);
@@ -317,13 +333,11 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
Eterm* cons = list_val(BIF_ARG_1);
Eterm term = CAR(cons);
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+ if (!is_internal_magic_ref(term)) {
goto badarg;
}
- pb = (ProcBin*) binary_val(term);
- p[i].code = pb->val;
+ p[i].code = erts_magic_ref2bin(term);
p[i].module = erts_module_for_prepared_code(p[i].code);
if (p[i].module == NIL) {
goto badarg;
@@ -381,8 +395,9 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
if (p[i].modp->curr.num_breakpoints > 0 ||
p[i].modp->curr.num_traced_exports > 0 ||
- erts_is_default_trace_enabled()) {
- /* tracing involved, fallback with thread blocking */
+ erts_is_default_trace_enabled() ||
+ IF_HIPE(hipe_need_blocking(p[i].modp))) {
+ /* tracing or hipe need thread blocking */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
@@ -440,32 +455,36 @@ finish_loading_1(BIF_ALIST_1)
}
done:
- return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n, 1);
}
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
- struct m* loaded, int nloaded)
+ struct m* mods, int nmods, int free_mods)
{
#ifdef ERTS_SMP
if (is_blocking || !commit)
#endif
{
if (commit) {
+ int i;
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
- if (loaded) {
- int i;
- for (i=0; i < nloaded; i++) {
- set_default_trace_pattern(loaded[i].module);
+
+ for (i=0; i < nmods; i++) {
+ if (mods[i].modp->curr.code_hdr) {
+ set_default_trace_pattern(mods[i].module);
}
+ #ifdef HIPE
+ hipe_redirect_to_module(mods[i].modp);
+ #endif
}
}
else {
erts_abort_staging_code_ix();
}
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
if (is_blocking) {
erts_smp_thr_progress_unblock();
@@ -478,8 +497,8 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
else {
ASSERT(is_value(res));
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
erts_end_staging_code_ix();
/*
@@ -547,7 +566,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls)
+erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -562,31 +581,23 @@ erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int
if (!modp)
return am_false;
erts_rlock_old_code(code_ix);
- res = (!modp->old.code_hdr ? am_false :
- check_process_code(c_p, modp, flags, redsp, fcalls));
+ res = (!modp->old.code_hdr
+ ? am_false
+ : check_process_code(c_p, modp, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
}
-BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_check_process_code_1(BIF_ALIST_1)
{
int reds = 0;
- Uint flags;
Eterm res;
if (is_not_atom(BIF_ARG_1))
goto badarg;
- if (is_not_small(BIF_ARG_2))
- goto badarg;
-
- flags = unsigned_val(BIF_ARG_2);
- if (flags & ~ERTS_CPC_ALL) {
- goto badarg;
- }
-
- res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -622,7 +633,7 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
if (!rp)
BIF_RET(am_false);
- res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls);
+ res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
if (BIF_P != rp)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
@@ -665,8 +676,9 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else {
if (modp->curr.num_breakpoints > 0 ||
- modp->curr.num_traced_exports > 0) {
- /* we have tracing, retry single threaded */
+ modp->curr.num_traced_exports > 0 ||
+ IF_HIPE(hipe_need_blocking(modp))) {
+ /* tracing or hipe need to go single threaded */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
@@ -680,7 +692,14 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
success = 1;
}
}
- return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
+ {
+ struct m mod;
+ Eterm retval;
+ mod.module = BIF_ARG_1;
+ mod.modp = modp;
+ retval = staging_epilogue(BIF_P, success, res, is_blocking, &mod, 1, 0);
+ return retval;
+ }
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
@@ -805,23 +824,26 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[4] != 0) {
- ep->addressv[code_ix] = (void *) ep->code[4];
- ep->code[4] = 0;
+ if (ep->beam[1] != 0) {
+ ep->addressv[code_ix] = (void *) ep->beam[1];
+ ep->beam[1] = 0;
} else {
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = (BeamInstr) em_call_error_handler;
}
}
modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
+ #ifdef HIPE
+ hipe_redirect_to_module(modp);
+ #endif
} else if (BIF_ARG_2 == am_false) {
int i, num_exps;
@@ -833,13 +855,13 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- ep->code[4] = 0;
+ ep->beam[1] = 0;
}
}
erts_smp_thr_progress_unblock();
@@ -863,9 +885,9 @@ set_default_trace_pattern(Eterm module)
&trace_pattern_flags,
&meta_tracer);
if (trace_pattern_is_on) {
- Eterm mfa[1];
- mfa[0] = module;
- (void) erts_set_trace_pattern(0, mfa, 1,
+ ErtsCodeMFA mfa;
+ mfa.module = module;
+ (void) erts_set_trace_pattern(0, &mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
@@ -873,32 +895,12 @@ set_default_trace_pattern(Eterm module)
}
}
-#ifndef ERTS_NEW_PURGE_STRATEGY
-
-static ERTS_INLINE int
-check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
-{
- struct erl_off_heap_header* oh;
- for (oh = off_heap->first; oh; oh = oh->next) {
- if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
- ErlFunThing* funp = (ErlFunThing*) oh;
- if (ErtsInArea(funp->fe->address, area, area_size))
- return !0;
- }
- }
- return 0;
-}
-
-#endif
-
static Uint hfrag_literal_size(Eterm* start, Eterm* end,
char* lit_start, Uint lit_size);
static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
Eterm *start, Eterm *end,
char *lit_start, Uint lit_size);
-#ifdef ERTS_NEW_PURGE_STRATEGY
-
Eterm
erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed)
{
@@ -911,7 +913,7 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
la = ERTS_COPY_LITERAL_AREA();
if (!la)
- return am_ok;
+ goto return_ok;
oh = la->off_heap;
literals = (char *) &la->start[0];
@@ -975,6 +977,11 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
* this is not completely certain). We go for
* the GC directly instead of scanning everything
* one more time...
+ *
+ * Also note that calling functions expect a
+ * major GC to be performed if gc_allowed is set
+ * to true. If you change this, you need to fix
+ * callers...
*/
goto literal_gc;
}
@@ -1049,6 +1056,13 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
}
}
+return_ok:
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)))
+ c_p->flags &= ~F_DIRTY_CLA;
+#endif
+
return am_ok;
literal_gc:
@@ -1059,24 +1073,28 @@ literal_gc:
if (c_p->flags & F_DISABLE_GC)
return THE_NON_VALUE;
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize,
+ oh, fcalls);
- *redsp += erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
-
- erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh);
-
- *redsp += lit_bsize / 64; /* Need, better value... */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & F_DIRTY_CLA)
+ return THE_NON_VALUE;
+#endif
return am_ok;
}
static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
+check_process_code(Process* rp, Module* modp, int *redsp, int fcalls)
{
BeamInstr* start;
char* mod_start;
Uint mod_size;
Eterm* sp;
+#ifdef HIPE
+ void *nat_start = NULL;
+ Uint nat_size = 0;
+#endif
*redsp += 1;
@@ -1111,84 +1129,19 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- /*
- * Check all continuation pointers stored in stackdump
- * and clear exception stackdump if there is a pointer
- * to the module.
- */
- if (rp->ftrace != NIL) {
- struct StackTrace *s;
- ASSERT(is_list(rp->ftrace));
- s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
- if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
- (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- } else {
- int i;
- for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- break;
- }
- }
- }
- }
-
- return am_false;
-}
-
-#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... */
-
-static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
-{
- BeamInstr* start;
- char* literals;
- Uint lit_bsize;
- char* mod_start;
- Uint mod_size;
- Eterm* sp;
- int done_gc = 0;
- int need_gc = 0;
- ErtsMessage *msgp;
- ErlHeapFragment *hfrag;
-
-#define ERTS_ORDINARY_GC__ (1 << 0)
-#define ERTS_LITERAL_GC__ (1 << 1)
-
- /*
- * Pick up limits for the module.
- */
- start = (BeamInstr*) modp->old.code_hdr;
- mod_start = (char *) start;
- mod_size = modp->old.code_length;
-
- /*
- * Check if current instruction or continuation pointer points into module.
- */
- if (ErtsInArea(rp->i, mod_start, mod_size)
- || ErtsInArea(rp->cp, mod_start, mod_size)) {
- return am_true;
- }
-
- *redsp += 1;
-
- if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
- return am_true;
-
-
+#ifdef HIPE
/*
- * Check all continuation pointers stored on the stack.
+ * Check all continuation pointers stored on the native stack if the module
+ * has native code.
*/
- for (sp = rp->stop; sp < STACK_START(rp); sp++) {
- if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
+ if (modp->old.hipe_code) {
+ nat_start = modp->old.hipe_code->text_segment;
+ nat_size = modp->old.hipe_code->text_segment_size;
+ if (nat_size && nstack_any_cps_in_segment(rp, nat_start, nat_size)) {
return am_true;
}
}
+#endif
/*
* Check all continuation pointers stored in stackdump
@@ -1206,8 +1159,16 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
rp->ftrace = NIL;
} else {
int i;
+ char *area_start = mod_start;
+ Uint area_size = mod_size;
+#ifdef HIPE
+ if (rp->freason & EXF_NATIVE) {
+ area_start = nat_start;
+ area_size = nat_size;
+ }
+#endif
for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
+ if (ErtsInArea(s->trace[i], area_start, area_size)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
@@ -1217,188 +1178,9 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- if (rp->flags & F_DISABLE_GC) {
- /*
- * Cannot proceed. Process has disabled gc in order to
- * safely leave inconsistent data on the heap and/or
- * off heap lists. Need to wait for gc to be enabled
- * again.
- */
- return THE_NON_VALUE;
- }
-
- /*
- * Message queue can contains funs, and may contain
- * literals. If we got references to this module from the message
- * queue.
- *
- * If a literal is in the message queue we maka an explicit copy of
- * and attach it to the heap fragment. Each message needs to be
- * self contained, we cannot save the literal in the old_heap or
- * any other heap than the message it self.
- */
-
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
-
- if (modp->old.code_hdr->literal_area) {
- literals = (char*) modp->old.code_hdr->literal_area->start;
- lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals;
- }
- else {
- literals = NULL;
- lit_bsize = 0;
- }
-
- for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- hfrag = &msgp->hfrag;
- else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
- hfrag = msgp->data.heap_frag;
- else
- continue;
- {
- ErlHeapFragment *hf;
- Uint lit_sz;
- for (hf=hfrag; hf; hf = hf->next) {
- if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
- return am_true;
- lit_sz = hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- }
- if (lit_sz > 0) {
- ErlHeapFragment *bp = new_message_buffer(lit_sz);
- Eterm *hp = bp->mem;
-
- for (hf=hfrag; hf; hf = hf->next) {
- hfrag_literal_copy(&hp, &bp->off_heap,
- &hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- hfrag=hf;
- }
- /* link new hfrag last */
- ASSERT(hfrag->next == NULL);
- hfrag->next = bp;
- bp->next = NULL;
- }
- }
- }
-
- while (1) {
-
- /* Check heap, stack etc... */
- if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size))
- goto try_gc;
- if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- }
- if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
- goto try_literal_gc;
-#ifdef HIPE
- if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
- goto try_literal_gc;
-#endif
- if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
- goto try_literal_gc;
- if (rp->abandoned_heap) {
- if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
- literals, lit_bsize))
- goto try_literal_gc;
- }
- if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
- goto try_literal_gc;
-
- /* Check dictionary */
- if (rp->dictionary) {
- Eterm* start = ERTS_PD_START(rp->dictionary);
- Eterm* end = start + ERTS_PD_SIZE(rp->dictionary);
-
- if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /* Check heap fragments */
- for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- /* Off heap lists should already have been moved into process */
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /*
- * Message buffer fragments (matched messages)
- * - off heap lists should already have been moved into
- * process off heap structure.
- * - Check for literals
- */
- for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
- hfrag = erts_message_to_heap_frag(msgp);
- for (; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
-
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
- }
-
- return am_false;
-
- try_literal_gc:
- need_gc |= ERTS_LITERAL_GC__;
-
- try_gc:
- need_gc |= ERTS_ORDINARY_GC__;
-
- if ((done_gc & need_gc) == need_gc)
- return am_true;
-
- if (!(flags & ERTS_CPC_ALLOW_GC))
- return am_aborted;
-
- need_gc &= ~done_gc;
-
- /*
- * Try to get rid of literals by by garbage collecting.
- * Clear both fvalue and ftrace.
- */
-
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
-
- if (need_gc & ERTS_ORDINARY_GC__) {
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls);
- done_gc |= ERTS_ORDINARY_GC__;
- }
- if (need_gc & ERTS_LITERAL_GC__) {
- struct erl_off_heap_header* oh;
- oh = modp->old.code_hdr->literal_area->off_heap;
- *redsp += lit_bsize / 64; /* Need, better value... */
- erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh);
- done_gc |= ERTS_LITERAL_GC__;
- }
- need_gc = 0;
- }
-
-#undef ERTS_ORDINARY_GC__
-#undef ERTS_LITERAL_GC__
-
+ return am_false;
}
-#endif /* !ERTS_NEW_PURGE_STRATEGY */
-
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
{
@@ -1527,8 +1309,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
}
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
-
#ifdef ERTS_SMP
ErtsThrPrgrLaterOp later_literal_area_switch;
@@ -1559,13 +1339,8 @@ complete_literal_area_switch(void *literal_area)
}
#endif
-#endif /* ERTS_NEW_PURGE_STRATEGY */
-
BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
{
-#ifndef ERTS_NEW_PURGE_STRATEGY
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#else
ErtsLiteralArea *unused_la;
ErtsLiteralAreaRef *la_ref;
@@ -1624,7 +1399,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
BIF_RET(am_true);
#endif
-#endif /* ERTS_NEW_PURGE_STRATEGY */
}
void
@@ -1818,10 +1592,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
code = (BeamInstr*) modp->old.code_hdr;
end = (BeamInstr *)((char *)code + modp->old.code_length);
erts_fun_purge_prepare(code, end);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(!ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(modp->old.code_hdr->literal_area);
-#endif
}
if (BIF_ARG_2 == am_prepare_on_load) {
@@ -1866,10 +1636,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(NULL);
-#endif
#ifndef ERTS_SMP
erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
finalize_purge_operation(BIF_P, 0);
@@ -1938,15 +1704,18 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
/*
* Unload any NIF library
*/
- if (modp->old.nif != NULL) {
+ if (modp->old.nif != NULL
+ || IF_HIPE(hipe_purge_need_blocking(modp))) {
/* ToDo: Do unload nif without blocking */
erts_rwunlock_old_code(code_ix);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
erts_rwlock_old_code(code_ix);
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+ if (modp->old.nif) {
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
}
/*
@@ -1965,7 +1734,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
modp->old.code_length = 0;
modp->old.catches = BEAM_CATCHES_NIL;
erts_remove_from_ranges(code);
-
+#ifdef HIPE
+ hipe_purge_module(modp, is_blocking);
+#endif
ERTS_BIF_PREP_RET(ret, am_true);
}
@@ -1984,14 +1755,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
finalize_purge_operation(BIF_P, ret == am_true);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
-
- ASSERT(ERTS_COPY_LITERAL_AREA() == literals);
- ERTS_SET_COPY_LITERAL_AREA(NULL);
- erts_release_literal_area(literals);
-
-#else /* ERTS_NEW_PURGE_STRATEGY */
-
if (literals) {
ErtsLiteralAreaRef *ref;
ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
@@ -2015,8 +1778,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
BIF_P->common.id);
}
-#endif /* ERTS_NEW_PURGE_STRATEGY */
-
return ret;
}
@@ -2039,34 +1800,34 @@ delete_code(Module* modp)
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep != NULL && (ep->code[0] == module)) {
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep != NULL && (ep->info.mfa.module == module)) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- else if (ep->code[3] ==
+ else if (ep->beam[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- erts_clear_export_break(modp, ep->code+3);
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export trace cleared, code_ix=%d", code_ix);
+ erts_clear_export_break(modp, &ep->info);
}
- else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
+ else ASSERT(ep->beam[0] == (BeamInstr) em_call_error_handler
|| !erts_initialized);
}
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
- ep->code[4] = 0;
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = (BeamInstr) em_call_error_handler;
+ ep->beam[1] = 0;
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export invalidation, code_ix=%d", code_ix);
}
}
ASSERT(modp->curr.num_breakpoints == 0);
ASSERT(modp->curr.num_traced_exports == 0);
modp->old = modp->curr;
- modp->curr.code_hdr = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- modp->curr.nif = NULL;
-
+ erts_module_instance_init(&modp->curr);
}
@@ -2080,9 +1841,11 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->curr.code_hdr && modp->old.code_hdr) {
- return am_not_purged;
- } else if (!modp->old.code_hdr) { /* Make the current version old. */
+ if (modp->curr.code_hdr) {
+ if (modp->old.code_hdr) {
+ return am_not_purged;
+ }
+ /* Make the current version old. */
delete_code(modp);
}
return NIL;
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index a66d9f68d9..b32c74ce7a 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -32,6 +32,7 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_term.h"
+#include "erl_nfunc_sched.h"
/* *************************************************************************
** Macros
@@ -120,27 +121,27 @@ release_bp_sched_ix(Uint32 ix)
/*
** Helpers
*/
-static ErtsTracer do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer);
-static void set_function_break(BeamInstr *pc,
+static void set_function_break(ErtsCodeInfo *ci,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
-static int clear_function_break(BeamInstr *pc, Uint break_flags);
+static int clear_function_break(ErtsCodeInfo *ci, Uint break_flags);
-static BpDataTime* get_time_break(BeamInstr *pc);
-static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static BpDataTime* get_time_break(ErtsCodeInfo *ci);
+static GenericBpData* check_break(ErtsCodeInfo *ci, Uint break_flags);
-static void bp_meta_unref(BpMetaTracer* bmt);
-static void bp_count_unref(BpCount* bcp);
-static void bp_time_unref(BpDataTime* bdt);
-static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
-static void uninstall_breakpoint(BeamInstr* pc);
+static void bp_meta_unref(BpMetaTracer *bmt);
+static void bp_count_unref(BpCount *bcp);
+static void bp_time_unref(BpDataTime *bdt);
+static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
+static void uninstall_breakpoint(ErtsCodeInfo *ci);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -170,7 +171,7 @@ erts_bp_init(void) {
void
-erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
Uint max_funcs = 0;
@@ -195,41 +196,44 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
i = 0;
for (current = 0; current < num_modules; current++) {
BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
- BeamInstr* code;
+ ErtsCodeInfo* ci;
Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
Uint fi;
if (specified > 0) {
- if (mfa[0] != make_atom(module[current]->module)) {
+ if (mfa->module != make_atom(module[current]->module)) {
/* Wrong module name */
continue;
}
}
for (fi = 0; fi < num_functions; fi++) {
- BeamInstr* pc;
- int wi;
- code = code_hdr->functions[fi];
- ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- pc = code+5;
- if (erts_is_native_break(pc)) {
+ ci = code_hdr->functions[fi];
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (erts_is_native_break(ci)) {
continue;
}
- if (is_nil(code[3])) { /* Ignore BIF stub */
+ if (is_nil(ci->mfa.module)) { /* Ignore BIF stub */
continue;
}
- for (wi = 0;
- wi < specified && (Eterm) code[2+wi] == mfa[wi];
- wi++) {
- /* Empty loop body */
- }
- if (wi == specified) {
- /* Store match */
- f->matching[i].pc = pc;
- f->matching[i].mod = module[current];
- i++;
- }
+ switch (specified) {
+ case 3:
+ if (ci->mfa.arity != mfa->arity)
+ continue;
+ case 2:
+ if (ci->mfa.function != mfa->function)
+ continue;
+ case 1:
+ if (ci->mfa.module != mfa->module)
+ continue;
+ case 0:
+ break;
+ }
+ /* Store match */
+ f->matching[i].ci = ci;
+ f->matching[i].mod = module[current];
+ i++;
}
}
f->matched = i;
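Editor's note: the fall-through switch added above (and its twin added to erts_bp_match_export further down) replaces the old word-by-word loop over mfa[] with explicit prefix matching on an {M,F,A} pattern: when "specified" fields are given, all of them must match, checked from arity down to module. A minimal stand-alone sketch of the same logic, using a hypothetical SketchMFA struct rather than the real ErtsCodeMFA:

#include <stdio.h>

/* Hypothetical stand-ins for the ErtsCodeMFA fields; not ERTS types. */
typedef struct { unsigned module, function, arity; } SketchMFA;

static int mfa_prefix_match(const SketchMFA *pat, const SketchMFA *ci, int specified)
{
    switch (specified) {
    case 3: if (ci->arity    != pat->arity)    return 0; /* fall through */
    case 2: if (ci->function != pat->function) return 0; /* fall through */
    case 1: if (ci->module   != pat->module)   return 0; /* fall through */
    case 0: return 1;   /* every specified field matched */
    default: return 0;
    }
}

int main(void)
{
    SketchMFA pat = {1, 2, 0}, fun = {1, 2, 3};
    printf("%d\n", mfa_prefix_match(&pat, &fun, 2)); /* 1: module and function match */
    printf("%d\n", mfa_prefix_match(&pat, &fun, 3)); /* 0: arity differs */
    return 0;
}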
@@ -237,7 +241,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
void
-erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
int i;
@@ -249,27 +253,36 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i, code_ix);
BeamInstr* pc;
- int j;
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j < specified) {
- continue;
- }
- pc = ep->code+3;
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ pc = ep->beam;
if (ep->addressv[code_ix] == pc) {
if ((*pc == (BeamInstr) em_apply_bif ||
*pc == (BeamInstr) em_call_error_handler)) {
continue;
}
ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ } else if (erts_is_native_break(erts_code_to_codeinfo(ep->addressv[code_ix]))) {
continue;
}
- f->matching[ne].pc = pc;
- f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ f->matching[ne].ci = &ep->info;
+ f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);
ne++;
}
@@ -295,7 +308,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < n; i++) {
- consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ consolidate_bp_data(fs[i].mod, fs[i].ci, local);
}
}
@@ -307,14 +320,14 @@ erts_consolidate_bif_bp_data(void)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < BIF_SIZE; i++) {
Export *ep = bif_export[i];
- consolidate_bp_data(0, ep->code+3, 0);
+ consolidate_bp_data(0, &ep->info, 0);
}
}
static void
-consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = (GenericBp *) ci->native;
GenericBpData* src;
GenericBpData* dst;
Uint flags;
@@ -360,9 +373,10 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
}
ASSERT(modp->curr.num_breakpoints >= 0);
ASSERT(modp->curr.num_traced_exports >= 0);
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(*erts_codeinfo_to_code(ci) !=
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
}
- pc[-4] = 0;
+ ci->native = 0;
Free(g);
return;
}
@@ -411,8 +425,9 @@ erts_install_breakpoints(BpFunctions* f)
BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- GenericBp* g = (GenericBp *) pc[-4];
+ ErtsCodeInfo* ci = f->matching[i].ci;
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
+ GenericBp* g = (GenericBp *) ci->native;
if (*pc != br && g) {
Module* modp = f->matching[i].mod;
@@ -444,16 +459,16 @@ erts_uninstall_breakpoints(BpFunctions* f)
Uint n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- uninstall_breakpoint(pc);
+ uninstall_breakpoint(f->matching[i].ci);
}
}
static void
-uninstall_breakpoint(BeamInstr* pc)
+uninstall_breakpoint(ErtsCodeInfo *ci)
{
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = (GenericBp *) ci->native;
if (g->data[erts_active_bp_ix()].flags == 0) {
/*
* The following write is not protected by any lock. We
@@ -480,30 +495,30 @@ erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
}
void
-erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, erts_tracer_nil);
+ set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, ErtsTracer tracer)
+erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
+ set_function_break(ci, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
-erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op count_op)
{
- set_function_break(pc, NULL,
+ set_function_break(ci, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
count_op, erts_tracer_nil);
}
void
-erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+erts_clear_time_trace_bif(ErtsCodeInfo *ci) {
+ clear_function_break(ci, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}
void
@@ -532,14 +547,14 @@ erts_clear_trace_break(BpFunctions* f)
}
void
-erts_clear_call_trace_bif(BeamInstr *pc, int local)
+erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = (GenericBp *) ci->native;
if (g) {
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
if (g->data[erts_staging_bp_ix()].flags & flags) {
- clear_function_break(pc, flags);
+ clear_function_break(ci, flags);
}
}
}
@@ -551,9 +566,9 @@ erts_clear_mtrace_break(BpFunctions* f)
}
void
-erts_clear_mtrace_bif(BeamInstr *pc)
+erts_clear_mtrace_bif(ErtsCodeInfo *ci)
{
- clear_function_break(pc, ERTS_BPF_META_TRACE);
+ clear_function_break(ci, ERTS_BPF_META_TRACE);
}
void
@@ -595,52 +610,48 @@ erts_clear_module_break(Module *modp) {
}
n = (Uint)(UWord) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_native_break(ci))
continue;
- }
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
}
erts_commit_staged_bp();
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_native_break(ci))
continue;
- }
- uninstall_breakpoint(pc);
- consolidate_bp_data(modp, pc, 1);
- ASSERT(pc[-4] == 0);
+ uninstall_breakpoint(ci);
+ consolidate_bp_data(modp, ci, 1);
+ ASSERT(ci->native == 0);
}
return n;
}
void
-erts_clear_export_break(Module* modp, BeamInstr* pc)
+erts_clear_export_break(Module* modp, ErtsCodeInfo *ci)
{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
erts_commit_staged_bp();
- *pc = (BeamInstr) 0;
- consolidate_bp_data(modp, pc, 0);
- ASSERT(pc[-4] == 0);
+ *erts_codeinfo_to_code(ci) = (BeamInstr) 0;
+ consolidate_bp_data(modp, ci, 0);
+ ASSERT(ci->native == 0);
}
BeamInstr
-erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
GenericBp* g;
GenericBpData* bp;
Uint bp_flags;
ErtsBpIndex ix = erts_active_bp_ix();
- g = (GenericBp *) I[-4];
+ ASSERT(info->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+
+ g = (GenericBp *) info->native;
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
@@ -659,9 +670,9 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
@@ -669,7 +680,8 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
- new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
+ new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);
+
if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
&bp->meta_tracer->tracer,
@@ -688,7 +700,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
- erts_trace_time_call(c_p, I, bp->time);
+ erts_trace_time_call(c_p, info, bp->time);
w = (BeamInstr) *c_p->cp;
if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
w == (BeamInstr) BeamOp(op_return_trace) ||
@@ -696,7 +708,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
Eterm* E = c_p->stop;
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - 2 < c_p->htop) {
- (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
}
E = c_p->stop;
@@ -704,7 +716,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
ASSERT(c_p->htop <= E && E <= c_p->hend);
E -= 2;
- E[0] = make_cp(I);
+ E[0] = make_cp(erts_codeinfo_to_code(info));
E[1] = make_cp(c_p->cp); /* original return address */
c_p->cp = beam_return_time_trace;
c_p->stop = E;
@@ -732,9 +744,9 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
ErtsTracer meta_tracer = erts_tracer_nil;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
+ int applying = (I == ep->beam); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
BeamInstr *cp = p->cp;
GenericBp* g;
GenericBpData* bp = NULL;
@@ -742,7 +754,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ g = (GenericBp *) ep->info.native;
if (g) {
bp = &g->data[erts_active_bp_ix()];
bp_flags = bp->flags;
@@ -758,7 +770,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
- flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
@@ -766,7 +778,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
old_tracer = meta_tracer;
- flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
0, &meta_tracer);
if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
@@ -784,8 +796,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
- BeamInstr *pc = (BeamInstr *)ep->code+3;
- erts_trace_time_call(p, pc, bp->time);
+ erts_trace_time_call(p, &ep->info, bp->time);
}
/* Restore original continuation pointer (if changed). */
@@ -795,6 +806,30 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
result = func(p, args, I);
+ if (erts_nif_export_check_save_trace(p, result,
+ applying, ep,
+ cp, flags,
+ flags_meta, I,
+ meta_tracer)) {
+ /*
+ * erts_bif_trace_epilogue() will be called
+ * later when appropriate via the NIF export
+ * scheduling functionality...
+ */
+ return result;
+ }
+
+ return erts_bif_trace_epilogue(p, result, applying, ep, cp,
+ flags, flags_meta, I,
+ meta_tracer);
+}
+
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
@@ -848,11 +883,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
class = exception_tag[GET_EXC_CLASS(reason)];
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
@@ -883,11 +918,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer);
+ erts_trace_return(p, &ep->info.mfa, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER(p));
+ erts_trace_return(p, &ep->info.mfa, result, &ERTS_TRACER(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE &&
IS_TRACED_FL(p, F_TRACE_RETURN_TO)) {
@@ -906,7 +941,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
static ErtsTracer
-do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer)
{
Eterm* cpp;
@@ -947,7 +982,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
ASSERT(is_CP(*cpp));
}
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer);
+ flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (cpp) {
c_p->cp = cp_save;
@@ -963,7 +998,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - need < c_p->htop) {
- (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
E = c_p->stop;
}
@@ -979,12 +1014,12 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E -= 3;
c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
E[1] = copy_object(tracer, c_p);
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
+ E[0] = make_cp(&info->mfa.module);
+ /* We ARE at the beginning of an instruction,
the funcinfo is above i. */
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
beam_exception_trace : beam_return_trace;
@@ -997,7 +1032,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
}
void
-erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
@@ -1027,14 +1062,14 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt = Alloc(sizeof(process_breakpoint_time_t));
(void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
} else {
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
/* add time to previous code */
sitem.time = time - pbt->time;
sitem.pid = c_p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* if null then the breakpoint was removed */
if (pbdt) {
@@ -1071,14 +1106,14 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BP_TIME_ADD(item, &sitem);
}
- pbt->pc = I;
+ pbt->ci = info;
pbt->time = time;
release_bp_sched_ix(six);
}
void
-erts_trace_time_return(Process *p, BeamInstr *pc)
+erts_trace_time_return(Process *p, ErtsCodeInfo *ci)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
@@ -1110,14 +1145,14 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
/* might have been removed due to
* trace_pattern(false)
*/
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
sitem.time = time - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* beware, the trace_pattern might have been removed */
if (pbdt) {
@@ -1136,7 +1171,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
}
- pbt->pc = pc;
+ pbt->ci = ci;
pbt->time = time;
}
@@ -1145,10 +1180,10 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- GenericBpData* bp = check_break(pc, flags);
+ GenericBpData* bp = check_break(ci, flags);
if (bp) {
if (match_spec_ret) {
@@ -1160,10 +1195,10 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);
if (bp) {
if (match_spec_ret) {
@@ -1178,20 +1213,20 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
}
int
-erts_is_native_break(BeamInstr *pc) {
+erts_is_native_break(ErtsCodeInfo *ci) {
#ifdef HIPE
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- return pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
- || pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ return erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
+ || erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
#else
return 0;
#endif
}
int
-erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);
if (bp) {
if (count_ret) {
@@ -1202,13 +1237,13 @@ erts_is_count_break(BeamInstr *pc, Uint *count_ret)
return 0;
}
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *retval) {
Uint i, ix;
bp_time_hash_t hash;
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = get_time_break(pc);
+ BpDataTime *bdt = get_time_break(ci);
if (bdt) {
if (retval) {
@@ -1262,26 +1297,25 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
-BeamInstr *
-erts_find_local_func(Eterm mfa[3]) {
+ErtsCodeInfo *
+erts_find_local_func(ErtsCodeMFA *mfa) {
Module *modp;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr;
+ ErtsCodeInfo* ci;
Uint i,n;
- if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
+ if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
return NULL;
if ((code_hdr = modp->curr.code_hdr) == NULL)
return NULL;
n = (BeamInstr) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- code_ptr = code_hdr->functions[i];
- ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
- is_nil((Eterm) code_ptr[2]));
- if (mfa[1] == ((Eterm) code_ptr[3]) &&
- ((BeamInstr) mfa[2]) == code_ptr[4]) {
- return code_ptr + 5;
+ ci = code_hdr->functions[i];
+ ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == ci->op);
+ ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
+ if (mfa->function == ci->mfa.function &&
+ mfa->arity == ci->mfa.arity) {
+ return ci;
}
}
return NULL;
@@ -1411,7 +1445,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
if (pbdt) {
sitem.time = get_mtime(p) - pbt->time;
sitem.pid = p->common.id;
@@ -1461,14 +1495,14 @@ set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- set_function_break(pc, match_spec, break_flags,
+ set_function_break(f->matching[i].ci,
+ match_spec, break_flags,
count_op, tracer);
}
}
static void
-set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
@@ -1477,7 +1511,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ErtsBpIndex ix = erts_staging_bp_ix();
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- g = (GenericBp *) pc[-4];
+ g = (GenericBp *) ci->native;
if (g == 0) {
int i;
if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
@@ -1485,11 +1519,11 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
return;
}
g = Alloc(sizeof(GenericBp));
- g->orig_instr = *pc;
+ g->orig_instr = *erts_codeinfo_to_code(ci);
for (i = 0; i < ERTS_NUM_BP_IX; i++) {
g->data[i].flags = 0;
}
- pc[-4] = (BeamInstr) g;
+ ci->native = (BeamInstr) g;
}
bp = &g->data[ix];
@@ -1585,13 +1619,12 @@ clear_break(BpFunctions* f, Uint break_flags)
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- clear_function_break(pc, break_flags);
+ clear_function_break(f->matching[i].ci, break_flags);
}
}
static int
-clear_function_break(BeamInstr *pc, Uint break_flags)
+clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
{
GenericBp* g;
GenericBpData* bp;
@@ -1600,7 +1633,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- if ((g = (GenericBp *) pc[-4]) == 0) {
+ if ((g = (GenericBp *) ci->native) == 0) {
return 1;
}
@@ -1686,19 +1719,19 @@ bp_time_unref(BpDataTime* bdt)
}
static BpDataTime*
-get_time_break(BeamInstr *pc)
+get_time_break(ErtsCodeInfo *ci)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
return bp ? bp->time : 0;
}
static GenericBpData*
-check_break(BeamInstr *pc, Uint break_flags)
+check_break(ErtsCodeInfo *ci, Uint break_flags)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = (GenericBp *) ci->native;
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (erts_is_native_break(pc)) {
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (erts_is_native_break(ci)) {
return 0;
}
if (g) {
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 9e4ab3cd20..3dba3cc1b5 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -46,7 +46,7 @@ typedef struct bp_data_time { /* Call time */
typedef struct {
ErtsMonotonicTime time;
- BeamInstr *pc;
+ ErtsCodeInfo *ci;
} process_breakpoint_time_t; /* used within psd */
typedef struct {
@@ -93,7 +93,7 @@ enum erts_break_op{
typedef Uint32 ErtsBpIndex;
typedef struct {
- BeamInstr* pc;
+ ErtsCodeInfo *ci;
Module* mod;
} BpFunction;
@@ -114,8 +114,8 @@ void erts_commit_staged_bp(void);
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
-void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
-void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
+void erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
@@ -126,15 +126,15 @@ void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
-void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
-void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+void erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
-void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
+void erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec,
ErtsTracer tracer);
-void erts_clear_mtrace_bif(BeamInstr *pc);
+void erts_clear_mtrace_bif(ErtsCodeInfo *ci);
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
@@ -144,32 +144,32 @@ void erts_clear_count_break(BpFunctions *f);
void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-void erts_clear_export_break(Module *modp, BeamInstr* pc);
+void erts_clear_export_break(Module *modp, ErtsCodeInfo* ci);
-BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
-BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
+BeamInstr erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *ci, Eterm* reg);
+BeamInstr erts_trace_break(Process *p, ErtsCodeInfo *ci, Eterm *args,
Uint32 *ret_flags, ErtsTracer *tracer);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
-int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local);
+int erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_mtrace_bif(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+int erts_is_native_break(ErtsCodeInfo *ci);
+int erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret);
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
-void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
-void erts_trace_time_return(Process* c_p, BeamInstr* pc);
+void erts_trace_time_call(Process* c_p, ErtsCodeInfo *ci, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, ErtsCodeInfo *ci);
void erts_schedule_time_break(Process *p, Uint out);
void erts_set_time_break(BpFunctions *f, enum erts_break_op);
void erts_clear_time_break(BpFunctions *f);
-int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
-void erts_clear_time_trace_bif(BeamInstr *pc);
+int erts_is_time_trace_bif(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
+void erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op);
+void erts_clear_time_trace_bif(ErtsCodeInfo *ci);
-BeamInstr *erts_find_local_func(Eterm mfa[3]);
+ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 21d336049f..8326d348af 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -39,6 +39,7 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -51,6 +52,7 @@ void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr);
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif);
BIF_RETTYPE
erts_debug_same_2(BIF_ALIST_2)
@@ -115,7 +117,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
Eterm MFA = BIF_ARG_1;
Eterm boolean = BIF_ARG_2;
Eterm* tp;
- Eterm mfa[3];
+ ErtsCodeMFA mfa;
int i;
int specified = 0;
Eterm res;
@@ -131,23 +133,24 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
if (*tp != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+
+ if (is_small(tp[3])) {
+ mfa.arity = signed_val(tp[3]);
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
@@ -157,7 +160,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- erts_bp_match_functions(&f, mfa, specified);
+ erts_bp_match_functions(&f, &mfa, specified);
if (boolean == am_true) {
erts_set_debug_break(&f);
erts_install_breakpoints(&f);
@@ -241,9 +244,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
Eterm* tp;
Eterm bin;
Eterm mfa;
- BeamInstr* funcinfo = NULL; /* Initialized to eliminate warning. */
+ ErtsCodeMFA *cmfa = NULL;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr = NULL; /* Initialized to eliminate warning. */
+ BeamInstr *code_ptr;
BeamInstr instr;
BeamInstr uaddr;
Uint hsz;
@@ -251,7 +254,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
if (term_to_UWord(addr, &uaddr)) {
code_ptr = (BeamInstr *) uaddr;
- if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
+ if ((cmfa = find_function_from_pc(code_ptr)) == NULL) {
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
@@ -282,24 +285,22 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* such as erts_debug:apply/4. Then search for it in the module.
*/
if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
- /* XXX: add "&& ep->address != ep->code+3" condition?
+ /* XXX: add "&& ep->address != ep->code" condition?
* Consider a traced function.
- * Its ep will have ep->address == ep->code+3.
+ * Its ep will have ep->address == ep->code.
* erts_find_function() will return the non-NULL ep.
* Below we'll try to derive a code_ptr from ep->address.
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
- funcinfo = code_ptr+2;
+ cmfa = erts_code_to_codemfa(ep->addressv[code_ix]);
} else if (modp == NULL || (code_hdr = modp->curr.code_hdr) == NULL) {
BIF_RET(am_undef);
} else {
n = code_hdr->num_functions;
for (i = 0; i < n; i++) {
- code_ptr = code_hdr->functions[i];
- if (code_ptr[3] == name && code_ptr[4] == arity) {
- funcinfo = code_ptr+2;
+ cmfa = &code_hdr->functions[i]->mfa;
+ if (cmfa->function == name && cmfa->arity == arity) {
break;
}
}
@@ -307,6 +308,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_undef);
}
}
+ code_ptr = erts_codemfa_to_code(cmfa);
} else {
goto error;
}
@@ -332,9 +334,10 @@ erts_debug_disassemble_1(BIF_ALIST_1)
(void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
hp = HAlloc(p, hsz);
addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
- ASSERT(is_atom(funcinfo[0]) || funcinfo[0] == NIL);
- ASSERT(is_atom(funcinfo[1]) || funcinfo[1] == NIL);
- mfa = TUPLE3(hp, (Eterm) funcinfo[0], (Eterm) funcinfo[1], make_small((Eterm) funcinfo[2]));
+ ASSERT(is_atom(cmfa->module) || is_nil(cmfa->module));
+ ASSERT(is_atom(cmfa->function) || is_nil(cmfa->function));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
hp += 4;
return TUPLE3(hp, addr, bin, mfa);
}
@@ -346,11 +349,12 @@ dbg_bt(Process* p, Eterm* sp)
while (sp < stack) {
if (is_CP(*sp)) {
- BeamInstr* addr = find_function_from_pc(cp_val(*sp));
- if (addr)
+ ErtsCodeMFA* cmfa = find_function_from_pc(cp_val(*sp));
+ if (cmfa)
erts_fprintf(stderr,
HEXF ": %T:%T/%bpu\n",
- addr, (Eterm) addr[0], (Eterm) addr[1], addr[2]);
+ &cmfa->module, cmfa->module,
+ cmfa->function, cmfa->arity);
}
sp++;
}
@@ -359,17 +363,17 @@ dbg_bt(Process* p, Eterm* sp)
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
- BeamInstr* f = find_function_from_pc(addr);
+ ErtsCodeMFA* cmfa = find_function_from_pc(addr);
- if (f == NULL) {
+ if (cmfa == NULL) {
erts_fprintf(stderr, "???\n");
} else {
int arity;
int i;
- addr = f;
- arity = addr[2];
- erts_fprintf(stderr, HEXF ": %T:%T(", addr, (Eterm) addr[0], (Eterm) addr[1]);
+ arity = cmfa->arity;
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr,
+ cmfa->module, cmfa->function);
for (i = 0; i < arity; i++)
erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
erts_fprintf(stderr, ")\n");
@@ -520,27 +524,49 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
break;
case 'I': /* Untagged integer. */
case 't':
- erts_print(to, to_arg, "%d", *ap);
+ switch (op) {
+ case op_i_gc_bif1_jIsId:
+ case op_i_gc_bif2_jIIssd:
+ case op_i_gc_bif3_jIIssd:
+ {
+ const ErtsGcBif* p;
+ BifFunction gcf = (BifFunction) *ap;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->gc_bif == gcf) {
+ print_bif_name(to, to_arg, p->bif);
+ break;
+ }
+ }
+ if (p->bif == 0) {
+ erts_print(to, to_arg, "%d", (Uint)gcf);
+ }
+ break;
+ }
+ default:
+ erts_print(to, to_arg, "%d", *ap);
+ }
ap++;
break;
case 'f': /* Destination label */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
+ ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) {
erts_print(to, to_arg, "f(" HEXF ")", *ap);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
}
ap++;
}
break;
case 'p': /* Pointer (to label) */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
+ ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) {
erts_print(to, to_arg, "p(" HEXF ")", *ap);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
}
ap++;
}
@@ -553,26 +579,16 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
{
Export* ex = (Export *) *ap;
erts_print(to, to_arg,
- "%T:%T/%bpu", (Eterm) ex->code[0], (Eterm) ex->code[1], ex->code[2]);
+ "%T:%T/%bpu", (Eterm) ex->info.mfa.module,
+ (Eterm) ex->info.mfa.function,
+ ex->info.mfa.arity);
ap++;
}
break;
case 'F': /* Function definition */
break;
case 'b':
- for (i = 0; i < BIF_SIZE; i++) {
- BifFunction bif = (BifFunction) *ap;
- if (bif == bif_table[i].f) {
- break;
- }
- }
- if (i == BIF_SIZE) {
- erts_print(to, to_arg, "b(%d)", (Uint) *ap);
- } else {
- Eterm name = bif_table[i].name;
- unsigned arity = bif_table[i].arity;
- erts_print(to, to_arg, "%T/%u", name, arity);
- }
+ print_bif_name(to, to_arg, (BifFunction) *ap);
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
@@ -731,3 +747,374 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
return size;
}
+
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (bif == bif_table[i].f) {
+ break;
+ }
+ }
+ if (i == BIF_SIZE) {
+ erts_print(to, to_arg, "b(%d)", (Uint) bif);
+ } else {
+ Eterm name = bif_table[i].name;
+ unsigned arity = bif_table[i].arity;
+ erts_print(to, to_arg, "%T/%u", name, arity);
+ }
+}
+
+/*
+ * Dirty BIF testing.
+ *
+ * The erts_debug:dirty_cpu/2, erts_debug:dirty_io/2, and
+ * erts_debug:dirty/3 BIFs are used by the dirty_bif_SUITE
+ * test suite.
+ */
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+static int ms_wait(Process *c_p, Eterm etimeout, int busy);
+static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
+#endif
+static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
+
+/*
+ * erts_debug:dirty_cpu/2 is statically determined to execute on
+ * a dirty CPU scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_cpu_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_cpu, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty_io/2 is statically determined to execute on
+ * a dirty I/O scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_io_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_io, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty/3 executes on a normal scheduler.
+ */
+BIF_RETTYPE
+erts_debug_dirty_3(BIF_ALIST_3)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Eterm argv[2];
+ switch (BIF_ARG_1) {
+ case am_normal:
+ return dirty_test(BIF_P, am_normal, BIF_ARG_2, BIF_ARG_3, BIF_I);
+ case am_dirty_cpu:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ case am_dirty_io:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ default:
+ BIF_ERROR(BIF_P, EXC_BADARG);
+ }
+#else
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#endif
+}
+
+
+static BIF_RETTYPE
+dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
+{
+ BIF_RETTYPE ret;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (am_scheduler == arg1) {
+ ErtsSchedulerData *esdp;
+ if (arg2 != am_type)
+ goto badarg;
+ esdp = erts_proc_sched_data(c_p);
+ if (!esdp)
+ ERTS_BIF_PREP_RET(ret, am_error);
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ ERTS_BIF_PREP_RET(ret, am_normal);
+ else if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp))
+ ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
+ else if (ERTS_SCHEDULER_IS_DIRTY_IO(esdp))
+ ERTS_BIF_PREP_RET(ret, am_dirty_io);
+ else
+ ERTS_BIF_PREP_RET(ret, am_error);
+ }
+ else if (am_error == arg1) {
+ switch (arg2) {
+ case am_notsup:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
+ break;
+ case am_undef:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+ break;
+ case am_badarith:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
+ break;
+ case am_noproc:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
+ break;
+ case am_system_limit:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case am_badarg:
+ default:
+ goto badarg;
+ }
+ }
+ else if (am_copy == arg1) {
+ int i;
+ Eterm res;
+
+ for (res = NIL, i = 0; i < 1000; i++) {
+ Eterm *hp, sz;
+ Eterm cpy;
+ /* We do not want this to be optimized,
+	       but rather the opposite... */
+ sz = size_object(arg2);
+ hp = HAlloc(c_p, sz);
+ cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
+ hp = HAlloc(c_p, 2);
+ res = CONS(hp, cpy, res);
+ }
+
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (am_send == arg1) {
+ dirty_send_message(c_p, arg2, am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("wait", arg1)) {
+ if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
+ goto badarg;
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
+ /*
+ * Reschedule operation after decrement of two until we reach
+ * zero. Switch between dirty scheduler types when 'n' is
+	 * evenly divisible by 4. If the initial value isn't evenly
+	 * divisible by 2, throw a badarg exception. (A worked trace
+	 * follows after this function.)
+ */
+ Eterm next_type;
+ Sint n;
+ if (!term_to_Sint(arg2, &n) || n < 0)
+ goto badarg;
+ if (n == 0)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm argv[3];
+ Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
+ if (n % 4 != 0)
+ next_type = type;
+ else {
+ switch (type) {
+ case am_dirty_cpu: next_type = am_dirty_io; break;
+ case am_dirty_io: next_type = am_normal; break;
+ case am_normal: next_type = am_dirty_cpu; break;
+ default: goto badarg;
+ }
+ }
+ switch (next_type) {
+ case am_dirty_io:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ break;
+ case am_dirty_cpu:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ break;
+ case am_normal:
+ argv[0] = am_normal;
+ argv[1] = arg1;
+ argv[2] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_3,
+ ERTS_SCHED_NORMAL,
+ am_erts_debug,
+ am_dirty,
+ 3);
+ break;
+ default:
+ goto badarg;
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
+ ERTS_DECL_AM(ready);
+ ERTS_DECL_AM(done);
+ dirty_send_message(c_p, arg2, AM_ready);
+ ms_wait(c_p, make_small(6000), 0);
+ dirty_send_message(c_p, arg2, AM_done);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
+ Process *real_c_p = erts_proc_shadow2real(c_p);
+ Eterm *hp, *hp2;
+ Uint sz;
+ int i;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;
+
+ if (ERTS_PROC_IS_EXITING(real_c_p))
+ goto badarg;
+ dirty_send_message(c_p, arg2, am_alive);
+
+ /* Wait until dead */
+ while (!ERTS_PROC_IS_EXITING(real_c_p)) {
+ if (dirty_io)
+ ms_wait(c_p, make_small(100), 0);
+ else
+ erts_thr_yield();
+ }
+
+ ms_wait(c_p, make_small(1000), 0);
+
+ /* Should still be able to allocate memory */
+ hp = HAlloc(c_p, 3); /* Likely on heap */
+ sz = 10000;
+ hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
+ *hp2 = make_pos_bignum_header(sz);
+ for (i = 1; i < sz; i++)
+ hp2[i] = (Eterm) 4711;
+ ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ }
+#else
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+#endif
+ return ret;
+}
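Editor's note: the "reschedule" branch above bounces the call between scheduler types while counting n down by two, rotating the type whenever n is divisible by four. An editorial sketch, not part of the patch, that reproduces the resulting chain for a hypothetical starting call erts_debug:dirty_cpu(reschedule, 8):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Mirrors the rotation used in dirty_test(): dirty_cpu -> dirty_io -> normal. */
    const char *type = "dirty_cpu";
    int n = 8;

    while (n > 0) {
        printf("%s(%d)\n", type, n);
        if (n % 4 == 0) {            /* rotate the scheduler type when n is divisible by 4 */
            if      (strcmp(type, "dirty_cpu") == 0) type = "dirty_io";
            else if (strcmp(type, "dirty_io")  == 0) type = "normal";
            else                                     type = "dirty_cpu";
        }
        n -= 2;                      /* each rescheduled call gets n - 2 */
    }
    printf("%s(0) -> ok\n", type);   /* the final call returns ok */
    return 0;
}

The printed chain is dirty_cpu(8), dirty_io(6), dirty_io(4), normal(2), normal(0) -> ok.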
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static int
+dirty_send_message(Process *c_p, Eterm to, Eterm tag)
+{
+ ErtsProcLocks c_p_locks, rp_locks;
+ Process *rp, *real_c_p;
+ Eterm msg, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ ASSERT(is_immed(tag));
+
+ real_c_p = erts_proc_shadow2real(c_p);
+ if (real_c_p != c_p)
+ c_p_locks = 0;
+ else
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+
+ ASSERT(real_c_p->common.id == c_p->common.id);
+
+ rp = erts_pid2proc_opt(real_c_p, c_p_locks,
+ to, 0,
+ ERTS_P2P_FLG_INC_REFC);
+
+ if (!rp)
+ return 0;
+
+ rp_locks = 0;
+ mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
+
+ msg = TUPLE2(hp, tag, c_p->common.id);
+ erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+
+ if (rp == real_c_p)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ erts_proc_dec_refc(rp);
+
+ return 1;
+}
+
+static int
+ms_wait(Process *c_p, Eterm etimeout, int busy)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsMonotonicTime time, timeout_time;
+ Sint64 ms;
+
+ if (!term_to_Sint64(etimeout, &ms))
+ return 0;
+
+ time = erts_get_monotonic_time(esdp);
+
+ if (ms < 0)
+ timeout_time = time;
+ else
+ timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);
+
+ while (time < timeout_time) {
+ if (busy)
+ erts_thr_yield();
+ else {
+ ErtsMonotonicTime timeout = timeout_time - time;
+
+#ifdef __WIN32__
+ Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
+#else
+ {
+ ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
+ struct timeval tv;
+
+ tv.tv_sec = (long) to / (1000*1000);
+ tv.tv_usec = (long) to % (1000*1000);
+
+ select(0, NULL, NULL, NULL, &tv);
+ }
+#endif
+ }
+
+ time = erts_get_monotonic_time(esdp);
+ }
+ return 1;
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
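Editor's note: on the POSIX side, ms_wait() above uses select() with empty descriptor sets purely as a sub-second sleep, and re-reads the monotonic clock each lap so an early wake-up never shortens the total wait. A minimal sketch of that idiom (editorial, not part of the patch; sleep_for_usec is a made-up name):

#include <stdio.h>
#include <sys/select.h>

/* Block the calling thread for roughly usec microseconds; with no file
 * descriptors to watch, select() just waits for the timeout to expire. */
static void sleep_for_usec(long usec)
{
    struct timeval tv;
    tv.tv_sec  = usec / (1000 * 1000);
    tv.tv_usec = usec % (1000 * 1000);
    (void) select(0, NULL, NULL, NULL, &tv);
}

int main(void)
{
    sleep_for_usec(250 * 1000);   /* about 250 ms */
    puts("done");
    return 0;
}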
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 1c83d8f02e..8be0f58227 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -38,6 +38,7 @@
#include "beam_bp.h"
#include "beam_catches.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
@@ -116,16 +117,16 @@ do { \
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
-#define GET_BIF_MODULE(p) ((Eterm) (((Export *) p)->code[0]))
-#define GET_BIF_FUNCTION(p) ((Eterm) (((Export *) p)->code[1]))
-#define GET_BIF_ARITY(p) ((Eterm) (((Export *) p)->code[2]))
-#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+#define GET_BIF_MODULE(p) (p->info.mfa.module)
+#define GET_BIF_FUNCTION(p) (p->info.mfa.function)
+#define GET_BIF_ARITY(p) (p->info.mfa.arity)
+#define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
/*
* We reuse some of fields in the save area in the process structure.
- * This is safe to do, since this space is only activly used when
+ * This is safe to do, since this space is only actively used when
* the process is switched out.
*/
#define REDS_IN(p) ((p)->def_arg_reg[5])
@@ -214,11 +215,12 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
BeamInstr* em_call_nif;
+BeamInstr* em_call_bif_e;
/* NOTE These should be the only variables containing trace instructions.
** Sometimes tests are form the instruction value, and sometimes
-** for the refering variable (one of these), and rouge references
+** for the referring variable (one of these), and rouge references
** will most likely cause chaos.
*/
BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
@@ -633,21 +635,34 @@ void** beam_ops;
y[4] = xt4; \
} while (0)
+#define DispatchReturn \
+do { \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(*I); \
+ } \
+ else { \
+ c_p->current = NULL; \
+ c_p->arity = 1; \
+ goto context_switch3; \
+ } \
+} while (0)
+
#define MoveReturn(Src) \
x(0) = (Src); \
I = c_p->cp; \
ASSERT(VALID_INSTR(*c_p->cp)); \
c_p->cp = 0; \
CHECK_TERM(r(0)); \
- Goto(*I)
+ DispatchReturn
#define DeallocateReturn(Deallocate) \
do { \
int words_to_pop = (Deallocate); \
- SET_I((BeamInstr *) cp_val(*E)); \
+ SET_I((BeamInstr *) cp_val(*E)); \
E = ADD_BYTE_OFFSET(E, words_to_pop); \
CHECK_TERM(r(0)); \
- Goto(*I); \
+ DispatchReturn; \
} while (0)
#define MoveDeallocateReturn(Src, Deallocate) \
@@ -1042,10 +1057,11 @@ void** beam_ops;
* The following functions are called directly by process_main().
* Don't inline them.
*/
-static BifFunction translate_gc_bif(void* gcf) NOINLINE;
+static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE;
+static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
- Eterm* reg, BifFunction bf) NOINLINE;
-static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
+ Eterm* reg, ErtsCodeMFA* bif_mfa) NOINLINE;
+static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
Eterm* reg, Eterm func) NOINLINE;
static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
BeamInstr *I, Uint offs) NOINLINE;
@@ -1073,14 +1089,14 @@ static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
- BifFunction bf, Eterm args);
+ ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
void
init_emulator(void)
{
- process_main();
+ process_main(0, 0);
}
/*
@@ -1108,98 +1124,91 @@ init_emulator(void)
#ifdef USE_VM_CALL_PROBES
-#define DTRACE_LOCAL_CALL(p, m, f, a) \
+#define DTRACE_LOCAL_CALL(p, mfa) \
if (DTRACE_ENABLED(local_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(local_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_GLOBAL_CALL(p, m, f, a) \
+#define DTRACE_GLOBAL_CALL(p, mfa) \
if (DTRACE_ENABLED(global_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(global_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_RETURN(p, m, f, a) \
+#define DTRACE_RETURN(p, mfa) \
if (DTRACE_ENABLED(function_return)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(function_return, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(function_return, process_name, mfa_buf, depth); \
}
-#define DTRACE_BIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(bif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_entry, process_name, mfa); \
+#define DTRACE_BIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(bif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_entry, process_name, mfa_buf); \
}
-#define DTRACE_BIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(bif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_return, process_name, mfa); \
+#define DTRACE_BIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(bif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_return, process_name, mfa_buf); \
}
-#define DTRACE_NIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(nif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_entry, process_name, mfa); \
+#define DTRACE_NIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(nif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_entry, process_name, mfa_buf); \
}
-#define DTRACE_NIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(nif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_return, process_name, mfa); \
+#define DTRACE_NIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(nif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_return, process_name, mfa_buf); \
}
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
do { \
if (DTRACE_ENABLED(global_function_entry)) { \
BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
- DTRACE_GLOBAL_CALL((p), (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); \
+ DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
} \
} while(0)
#define DTRACE_RETURN_FROM_PC(p) \
do { \
- BeamInstr* fp; \
- if (DTRACE_ENABLED(function_return) && (fp = find_function_from_pc((p)->cp))) { \
- DTRACE_RETURN((p), (Eterm)fp[0], (Eterm)fp[1], (Uint)fp[2]); \
+ ErtsCodeMFA* cmfa; \
+ if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
+ DTRACE_RETURN((p), cmfa); \
} \
} while(0)
#else /* USE_VM_PROBES */
-#define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
-#define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
+#define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
+#define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
-#define DTRACE_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_RETURN(p, mfa) do {} while (0)
#define DTRACE_RETURN_FROM_PC(p) do {} while (0)
-#define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
+#define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
#endif /* USE_VM_PROBES */
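All of the (m, f, a) triples and I[-3]..I[-1] indexing above are folded into two structs plus accessors. Pieced together from the fields this diff actually touches (module, function, arity, op, native, mfa), the layout is roughly the following; the real definitions (in code_ix.h) carry additional breakpoint/HiPE members, so treat this as a simplified sketch whose code_to_codeinfo/code_to_codemfa mirror erts_code_to_codeinfo and erts_code_to_codemfa:

#include <stdint.h>

typedef uintptr_t BeamInstr;   /* simplified; the real typedefs are in sys.h */
typedef uintptr_t Eterm;
typedef uintptr_t Uint;

typedef struct {
    Eterm module;              /* atom */
    Eterm function;            /* atom */
    Uint  arity;
} ErtsCodeMFA;

typedef struct {
    BeamInstr   op;            /* OpCode(i_func_info_IaaI) */
    Uint        native;        /* HiPE address / call-count slot (ci->native) */
    ErtsCodeMFA mfa;           /* sits directly in front of the code */
} ErtsCodeInfo;

/* The accessors are plain pointer arithmetic: step back over the header
   from the first instruction of a function. */
static ErtsCodeInfo *code_to_codeinfo(BeamInstr *I)
{
    return (ErtsCodeInfo *) (((char *) I) - sizeof(ErtsCodeInfo));
}

static ErtsCodeMFA *code_to_codemfa(BeamInstr *I)
{
    return &code_to_codeinfo(I)->mfa;
}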
#ifdef DEBUG
@@ -1227,7 +1236,7 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
-void process_main(void)
+void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
Process* c_p = NULL;
@@ -1239,7 +1248,7 @@ void process_main(void)
/* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
* in all other cases x0 is used.
*/
- register Eterm* reg REG_xregs = NULL;
+ register Eterm* reg REG_xregs = x_reg_array;
/*
* Top of heap (next free location); grows upwards.
@@ -1266,7 +1275,7 @@ void process_main(void)
* X registers and floating point registers are located in
* scheduler specific data.
*/
- register FloatDef *freg;
+ register FloatDef *freg = f_reg_array;
/*
* For keeping the negative old value of 'reds' when call saving is active.
@@ -1315,6 +1324,7 @@ void process_main(void)
goto do_schedule1;
do_schedule:
+ ASSERT(c_p->arity < 6);
ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
reds_used = REDS_IN(c_p) - FCALLS;
@@ -1326,8 +1336,8 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
- BeamInstr *inptr = find_function_from_pc(start_time_i);
- BeamInstr *outptr = find_function_from_pc(c_p->i);
+ ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
+ ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
}
}
@@ -1352,8 +1362,6 @@ void process_main(void)
start_time_i = c_p->i;
}
- reg = erts_proc_sched_data(c_p)->x_reg_array;
- freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
@@ -1403,10 +1411,9 @@ void process_main(void)
if (ERTS_PROC_IS_EXITING(c_p)) {
strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa,
NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
@@ -1587,7 +1594,7 @@ void process_main(void)
/* FALL THROUGH */
OpCase(i_call_only_f): {
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1599,7 +1606,7 @@ void process_main(void)
RESTORE_CP(E);
E = ADD_BYTE_OFFSET(E, Arg(1));
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1611,7 +1618,7 @@ void process_main(void)
OpCase(i_call_f): {
SET_CP(c_p, I+2);
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1691,7 +1698,7 @@ void process_main(void)
c_p->cp = 0;
CHECK_TERM(r(0));
HEAP_SPACE_VERIFIED(0);
- Goto(*I);
+ DispatchReturn;
}
/*
@@ -1918,6 +1925,7 @@ void process_main(void)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
SWAPOUT;
c_p->flags &= ~F_DELAY_GC;
+ c_p->arity = 0;
goto do_schedule; /* Will be rescheduled for exit */
}
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
@@ -2160,6 +2168,7 @@ void process_main(void)
* in limbo forever.
*/
SWAPOUT;
+ c_p->arity = 0;
goto do_schedule;
}
#endif
@@ -2578,7 +2587,7 @@ do { \
OpCase(bif1_fbsd):
{
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm tmp_reg[1];
Eterm result;
@@ -2589,7 +2598,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2610,19 +2619,19 @@ do { \
OpCase(bif1_body_bsd):
{
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm tmp_reg[1];
Eterm result;
GetArg1(1, tmp_reg[0]);
- bf = (BifFunction) Arg(0);
+ bf = (ErtsBifFunc) Arg(0);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2635,7 +2644,7 @@ do { \
}
reg[0] = tmp_reg[0];
SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2671,7 +2680,7 @@ do { \
Goto(*I);
}
x(0) = x(live);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2716,7 +2725,7 @@ do { \
live--;
x(0) = x(live);
x(1) = x(live+1);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2762,7 +2771,7 @@ do { \
x(0) = x(live);
x(1) = x(live+1);
x(2) = x(live+2);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2772,17 +2781,17 @@ do { \
OpCase(i_bif2_fbssd):
{
Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm result;
GetArg2(2, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(1);
+ bf = (ErtsBifFunc) Arg(1);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2803,15 +2812,15 @@ do { \
OpCase(i_bif2_body_bssd):
{
Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm result;
GetArg2(1, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(0);
+ bf = (ErtsBifFunc) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2824,7 +2833,7 @@ do { \
reg[0] = tmp_reg[0];
reg[1] = tmp_reg[1];
SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2834,29 +2843,31 @@ do { \
*/
OpCase(call_bif_e):
{
- Eterm (*bf)(Process*, Eterm*, BeamInstr*);
+ ErtsBifFunc bf;
Eterm result;
BeamInstr *next;
ErlHeapFragment *live_hf_end;
+ Export *export = (Export*)Arg(0);
if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
/* If we have run out of reductions, we do a context
switch before calling the bif */
- c_p->arity = ((Export *)Arg(0))->code[2];
- c_p->current = ((Export *)Arg(0))->code;
+ c_p->arity = GET_BIF_ARITY(export);
+ c_p->current = &export->info.mfa;
goto context_switch3;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(Arg(0)), GET_BIF_ADDRESS(Arg(0)));
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(
+ GET_BIF_MODULE(export), GET_BIF_ADDRESS(export));
- bf = GET_BIF_ADDRESS(Arg(0));
+ bf = GET_BIF_ADDRESS(export);
PRE_BIF_SWAPOUT(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS - 1;
if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
+ save_calls(c_p, export);
}
PreFetch(1, next);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -2870,7 +2881,7 @@ do { \
ERTS_HOLE_CHECK(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (ERTS_IS_GC_DESIRED(c_p)) {
- Uint arity = ((Export *)Arg(0))->code[2];
+ Uint arity = GET_BIF_ARITY(export);
result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity);
E = c_p->stop;
}
@@ -2899,7 +2910,7 @@ do { \
* Error handling. SWAPOUT is not needed because it was done above.
*/
ASSERT(c_p->stop == E);
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, &export->info.mfa);
goto post_error_handling;
}
@@ -2985,7 +2996,7 @@ do { \
}
/*
- * An error occured in an arithmetic operation or test that could
+ * An error occurred in an arithmetic operation or test that could
* appear either in a head or in a body.
* In a head, execution should continue at failure address in Arg(0).
* In a body, Arg(0) == 0 and an exception should be raised.
@@ -3203,7 +3214,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3218,7 +3229,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3231,7 +3242,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3246,7 +3257,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3262,7 +3273,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3363,14 +3374,15 @@ do { \
* called from I[-3], I[-2], and I[-1] respectively.
*/
context_switch_fun:
- c_p->arity = I[-1] + 1;
+ /* Add one for the environment of the fun */
+ c_p->arity = erts_code_to_codemfa(I)->arity + 1;
goto context_switch2;
context_switch:
- c_p->arity = I[-1];
+ c_p->arity = erts_code_to_codemfa(I)->arity;
- context_switch2: /* Entry for fun calls. */
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ context_switch2: /* Entry for fun calls. */
+ c_p->current = erts_code_to_codemfa(I);
context_switch3:
@@ -3511,7 +3523,8 @@ do { \
* code[4]: Not used
*/
HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_undefined_function);
+ I = call_error_handler(c_p, erts_code_to_codemfa(I),
+ reg, am_undefined_function);
HEAVY_SWAPIN;
if (I) {
Goto(*I);
@@ -3548,17 +3561,25 @@ do { \
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
* I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
*/
BifFunction vbf;
ErlHeapFragment *live_hf_end;
+ ErtsCodeMFA *codemfa;
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
+ codemfa = erts_code_to_codemfa(I);
+
+ c_p->current = codemfa; /* current and vbf set to please handle_error */
+
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+
HEAVY_SWAPOUT;
+
PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
+ bif_nif_arity = codemfa->arity;
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -3585,19 +3606,19 @@ do { \
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_NIF_RETURN(c_p, codemfa);
goto apply_bif_or_nif_epilogue;
OpCase(apply_bif):
/*
- * At this point, I points to the code[3] in the export entry for
+ * At this point, I points to the code[0] in the export entry for
* the BIF:
*
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&apply_bif
- * code[4]: Function pointer to BIF function
+ * code[-3]: Module
+ * code[-2]: Function
+ * code[-1]: Arity
+ * code[0]: &&apply_bif
+ * code[1]: Function pointer to BIF function
*/
if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
@@ -3606,26 +3627,30 @@ do { \
goto context_switch;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X((Eterm)I[-3], (BifFunction)Arg(0));
+ codemfa = erts_code_to_codemfa(I);
- c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0));
+
+
+ /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->current = codemfa;
c_p->i = I; /* In case we apply check_process_code/2. */
c_p->arity = 0; /* To allow garbage collection on ourselves
* (check_process_code/2).
*/
- DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_BIF_ENTRY(c_p, codemfa);
SWAPOUT;
ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
c_p->fcalls = FCALLS - 1;
vbf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
+ bif_nif_arity = codemfa->arity;
ASSERT(bif_nif_arity <= 4);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
{
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
+ ErtsBifFunc bf = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
@@ -3642,7 +3667,7 @@ do { \
if (ERTS_MSACC_IS_ENABLED_CACHED_X())
ERTS_MSACC_UPDATE_CACHE_X();
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_BIF_RETURN(c_p, codemfa);
apply_bif_or_nif_epilogue:
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
@@ -3669,7 +3694,7 @@ do { \
}
Dispatch();
}
- I = handle_error(c_p, c_p->cp, reg, vbf);
+ I = handle_error(c_p, c_p->cp, reg, c_p->current);
goto post_error_handling;
}
}
@@ -3709,8 +3734,9 @@ do { \
goto find_func_info;
OpCase(i_func_info_IaaI): {
+ ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
c_p->freason = EXC_FUNCTION_CLAUSE;
- c_p->current = I + 2;
+ c_p->current = &ci->mfa;
goto handle_error;
}
@@ -4701,11 +4727,11 @@ do { \
*/
OpCase(return_trace): {
- BeamInstr* code = (BeamInstr *) (UWord) E[0];
+ ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]);
SWAPOUT; /* Needed for shared heap */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
+ erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
c_p->cp = NULL;
@@ -4716,9 +4742,8 @@ do { \
OpCase(i_generic_breakpoint): {
BeamInstr real_I;
- ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
HEAVY_SWAPOUT;
- real_I = erts_generic_breakpoint(c_p, I, reg);
+ real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg);
HEAVY_SWAPIN;
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
@@ -4727,7 +4752,7 @@ do { \
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_return(c_p, pc);
+ erts_trace_time_return(c_p, erts_code_to_codeinfo(pc));
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4912,16 +4937,18 @@ do { \
* I[ 0]: &&lb_hipe_trap_call
* ... remainder of original BEAM code
*/
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.u.ncallee = (void(*)(void)) ci->native;
++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8));
+ HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8));
}
OpCase(hipe_trap_call_closure): {
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.u.ncallee = (void(*)(void)) ci->native;
++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8));
+ HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8));
}
OpCase(hipe_trap_return): {
HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN);
@@ -4990,8 +5017,9 @@ do { \
* I[ 0]: &&lb_hipe_call_count
* ... remainder of original BEAM code
*/
- struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ struct hipe_call_count *hcc = (struct hipe_call_count*)ci->native;
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
ASSERT(hcc != NULL);
ASSERT(VALID_INSTR(hcc->opcode));
++(hcc->count);
@@ -5021,7 +5049,7 @@ do { \
goto do_schedule;
} else {
HEAVY_SWAPIN;
- I = handle_error(c_p, I, reg, hibernate_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa);
goto post_error_handling;
}
}
@@ -5040,7 +5068,7 @@ do { \
} else {
TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0);
r(0) = make_big(HTOP);
-#if defined(ARCH_32) || HALFWORD_HEAP
+#if defined(ARCH_32)
if (ts >= (((Uint64) 1) << 32)) {
*HTOP = make_pos_bignum_header(2);
BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
@@ -5060,7 +5088,7 @@ do { \
OpCase(i_debug_breakpoint): {
HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_breakpoint);
+ I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint);
HEAVY_SWAPIN;
if (I) {
Goto(*I);
@@ -5115,6 +5143,7 @@ do { \
em_call_error_handler = OpCode(call_error_handler);
em_apply_bif = OpCode(apply_bif);
em_call_nif = OpCode(call_nif);
+ em_call_bif_e = OpCode(call_bif_e);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
beam_apply[1] = (BeamInstr) OpCode(normal_exit);
@@ -5133,10 +5162,10 @@ do { \
bif_table[i].name,
bif_table[i].arity);
bif_export[i] = ep;
- ep->code[3] = (BeamInstr) OpCode(apply_bif);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ep->beam[0] = (BeamInstr) OpCode(apply_bif);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
/* XXX: set func info for bifs */
- ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
}
return;
@@ -5216,8 +5245,8 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
goto do_dirty_schedule;
context_switch:
- c_p->arity = I[-1];
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
+ c_p->arity = c_p->current->arity;
{
int reds_used;
@@ -5301,10 +5330,25 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_MSACC_UPDATE_CACHE_X();
- reg = esdp->x_reg_array;
- {
+ /*
+ * Set fcalls even though we ignore it, so we don't
+ * confuse code accessing it...
+ */
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ c_p->fcalls = 0;
+ else
+ c_p->fcalls = CONTEXT_REDS;
+
+ if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
+ erts_execute_dirty_system_task(c_p);
+ goto do_dirty_schedule;
+ }
+ else {
+ ErtsCodeMFA *codemfa;
Eterm* argp;
- int i;
+ int i, exiting;
+
+ reg = esdp->x_reg_array;
argp = c_p->arg_reg;
for (i = c_p->arity - 1; i >= 0; i--) {
@@ -5320,17 +5364,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
I = c_p->i;
- ASSERT(em_call_nif == (BeamInstr *) *I);
-
- /*
- * Set fcalls even though we ignore it, so we don't
- * confuse code accessing it...
- */
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
- c_p->fcalls = 0;
- else
- c_p->fcalls = CONTEXT_REDS;
-
SWAPIN;
#ifdef USE_VM_PROBES
@@ -5342,11 +5375,9 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
if (ERTS_PROC_IS_EXITING(c_p)) {
strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
- NULL, fun_buf);
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
"<unknown/%p>", *I);
@@ -5356,107 +5387,81 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
DTRACE2(process_scheduled, process_buf, fun_buf);
}
#endif
- }
-
- {
-#ifdef DEBUG
- Eterm result;
-#endif
- Eterm arity;
-
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- * I[3]: Function pointer to dirty NIF
- */
- BifFunction vbf;
-
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- arity = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
- ASSERT(!c_p->scheduler_data);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- erts_pre_dirty_nif(esdp, &env, c_p,
- (struct erl_module_nif*)I[2]);
+ codemfa = erts_code_to_codemfa(I);
-#ifdef DEBUG
- result =
-#else
- (void)
-#endif
- (*fp)(&env, arity, reg);
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+ c_p->current = codemfa;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_post_dirty_nif(&env);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if (em_apply_bif == (BeamInstr *) *I) {
+ exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
+ }
+ else {
+ ASSERT(em_call_nif == (BeamInstr *) *I);
+ exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
+ }
- ASSERT(!is_value(result));
- ASSERT(c_p->freason == TRAP);
- ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (env.exiting)
- goto do_dirty_schedule;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- }
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (exiting)
+ goto do_dirty_schedule;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- ERTS_HOLE_CHECK(c_p);
- SWAPIN;
- I = c_p->i;
- goto context_switch;
- }
+ DTRACE_NIF_RETURN(c_p, codemfa);
+ ERTS_HOLE_CHECK(c_p);
+ SWAPIN;
+ I = c_p->i;
+ goto context_switch;
}
#endif /* ERTS_DIRTY_SCHEDULERS */
}
-static BifFunction
-translate_gc_bif(void* gcf)
+static ErtsCodeMFA *
+gcbif2mfa(void* gcf)
{
- if (gcf == erts_gc_length_1) {
- return length_1;
- } else if (gcf == erts_gc_size_1) {
- return size_1;
- } else if (gcf == erts_gc_bit_size_1) {
- return bit_size_1;
- } else if (gcf == erts_gc_byte_size_1) {
- return byte_size_1;
- } else if (gcf == erts_gc_map_size_1) {
- return map_size_1;
- } else if (gcf == erts_gc_abs_1) {
- return abs_1;
- } else if (gcf == erts_gc_float_1) {
- return float_1;
- } else if (gcf == erts_gc_round_1) {
- return round_1;
- } else if (gcf == erts_gc_trunc_1) {
- return round_1;
- } else if (gcf == erts_gc_binary_part_2) {
- return binary_part_2;
- } else if (gcf == erts_gc_binary_part_3) {
- return binary_part_3;
- } else {
- erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ int i;
+ for (i = 0; erts_gc_bifs[i].bif; i++) {
+ if (erts_gc_bifs[i].gc_bif == gcf)
+ return &bif_export[erts_gc_bifs[i].exp_ix]->info.mfa;
}
+ erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ return NULL;
+}
+
+static ErtsCodeMFA *
+ubif2mfa(void* uf)
+{
+ int i;
+ for (i = 0; erts_u_bifs[i].bif; i++) {
+ if (erts_u_bifs[i].bif == uf)
+ return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
+ }
+ erts_exit(ERTS_ERROR_EXIT, "bad u bif");
+ return NULL;
}
/*
@@ -5515,15 +5520,27 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
*/
static BeamInstr*
-handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
+handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
Eterm* hp;
Eterm Value = c_p->fvalue;
Eterm Args = am_true;
- c_p->i = pc; /* In case we call erts_exit(). */
ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
+ if (c_p->freason & EXF_RESTORE_NIF)
+ erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);
+
+#ifdef DEBUG
+ if (bif_mfa) {
+ /* Verify that bif_mfa does not point into our nif export */
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
+ }
+#endif
+
+ c_p->i = pc; /* In case we call erts_exit(). */
+
/*
* Check if we have an arglist for the top level call. If so, this
* is encoded in Value, so we have to dig out the real Value as well
@@ -5546,7 +5563,7 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
* more modular.
*/
if (c_p->freason & EXF_SAVETRACE) {
- save_stacktrace(c_p, pc, reg, bf, Args);
+ save_stacktrace(c_p, pc, reg, bif_mfa, Args);
}
/*
@@ -5616,7 +5633,8 @@ next_catch(Process* c_p, Eterm *reg) {
/* Can not follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
@@ -5644,7 +5662,8 @@ next_catch(Process* c_p, Eterm *reg) {
if (is_catch(*ptr) && active_catches) goto found_catch;
}
if (cp_val(*prev) == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
}
@@ -5819,11 +5838,12 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
*/
static void
-save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
- Eterm args) {
+save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
+ ErtsCodeMFA *bif_mfa, Eterm args) {
struct StackTrace* s;
int sz;
int depth = erts_backtrace_depth; /* max depth (never negative) */
+
if (depth > 0) {
/* There will always be a current function */
depth --;
@@ -5840,33 +5860,29 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
/*
* If the failure was in a BIF other than 'error/1', 'error/2',
- * 'exit/1' or 'throw/1', find the bif-table index and save the
- * argument registers by consing up an arglist.
+ * 'exit/1' or 'throw/1', save the BIF's MFA and save the argument
+ * registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
- && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
- && bf != wrap_exit_1 && bf != wrap_throw_1) {
- int i;
- int a = 0;
- for (i = 0; i < BIF_SIZE; i++) {
- if (bf == bif_table[i].f || bf == bif_table[i].traced) {
- Export *ep = bif_export[i];
- s->current = ep->code;
- a = bif_table[i].arity;
+ if (bif_mfa) {
+ if (bif_mfa->module == am_erlang) {
+ switch (bif_mfa->function) {
+ case am_error:
+ if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
+ goto non_bif_stacktrace;
+ break;
+ case am_exit:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ case am_throw:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ default:
break;
}
}
- if (i >= BIF_SIZE) {
- /*
- * The Bif does not really exist (no BIF entry). It is a
- * TRAP and traps are called through apply_bif, which also
- * sets c_p->current (luckily).
- * OR it is a NIF called by call_nif where current is also set.
- */
- ASSERT(c_p->current);
- s->current = c_p->current;
- a = s->current[2];
- }
+ s->current = bif_mfa;
/* Save first stack entry */
ASSERT(pc);
if (depth > 0) {
@@ -5879,8 +5895,11 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
depth--;
}
s->pc = NULL;
- args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
} else {
+
+ non_bif_stacktrace:
+
s->current = c_p->current;
/*
* For a function_clause error, the arguments are in the beam
@@ -5890,7 +5909,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
(GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
int a;
ASSERT(s->current);
- a = s->current[2];
+ a = s->current->arity;
args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
/* Save first stack entry */
ASSERT(c_p->cp);
@@ -6079,7 +6098,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_lookup_function_info(&fi, s->pc, 1);
} else if (GET_EXC_INDEX(s->freason) ==
GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
- erts_lookup_function_info(&fi, s->current, 1);
+ erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
} else {
erts_set_current_function(&fi, s->current);
}
@@ -6090,9 +6109,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
if (depth <= 0)
- erts_set_current_function(&fi, c_p->u.initial);
+ erts_set_current_function(&fi, &c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6104,10 +6123,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
*/
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.current ? fi.needed + 2 : 0;
+ heap_size = fi.mfa ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -6123,7 +6142,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- if (fi.current) {
+ if (fi.mfa) {
hp = erts_build_mfa_item(&fi, hp, args, &mfa);
res = CONS(hp, mfa, res);
}
@@ -6133,7 +6152,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
}
static BeamInstr*
-call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
+call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
Eterm* hp;
Export* ep;
@@ -6142,13 +6161,14 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
Uint sz;
int i;
+ DBG_TRACE_MFA_P(mfa, "call_error_handler");
/*
* Search for the error_handler module.
*/
ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
erts_active_code_ix());
if (ep == NULL) { /* No error handler */
- p->current = fi;
+ p->current = mfa;
p->freason = EXC_UNDEF;
return 0;
}
@@ -6157,7 +6177,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
* Create a list with all arguments in the x registers.
*/
- arity = fi[2];
+ arity = mfa->arity;
sz = 2 * arity;
if (HeapWordsLeft(p) < sz) {
erts_garbage_collect(p, sz, reg, arity);
@@ -6173,8 +6193,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Set up registers for call to error_handler:<func>/3.
*/
- reg[0] = fi[0];
- reg[1] = fi[1];
+ reg[0] = mfa->module;
+ reg[1] = mfa->function;
reg[2] = args;
return ep->addressv[erts_active_code_ix()];
}
@@ -6234,7 +6254,7 @@ apply_bif_error_adjustment(Process *p, Export *ep,
* and apply_last_IP.
*/
if (I
- && ep->code[3] == (BeamInstr) em_apply_bif
+ && ep->beam[0] == (BeamInstr) em_apply_bif
&& (ep == bif_export[BIF_error_1]
|| ep == bif_export[BIF_error_2]
|| ep == bif_export[BIF_exit_1]
@@ -6251,7 +6271,7 @@ apply_bif_error_adjustment(Process *p, Export *ep,
* stackframe correct. Without the following adjustment,
* 'p->cp' will point into the function that called
* current function when handling the error. We add a
- * dummy stackframe in order to achive this.
+ * dummy stackframe in order to achieve this.
*
* Note that these BIFs unconditionally will cause
* an exception to be raised. That is, our modifications
@@ -6558,11 +6578,11 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_hibernate)) {
+ ErtsCodeMFA cmfa = { module, function, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(c_p, module, function, arity,
- process_name, mfa);
- DTRACE2(process_hibernate, process_name, mfa);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_hibernate, process_name, mfa_buf);
}
#endif
/*
@@ -6595,7 +6615,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifndef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p)) {
/*
- * See comment in the begining of the function...
+ * See comment in the beginning of the function...
*
* This second test is needed since gc might be traced.
*/
@@ -6609,7 +6629,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- c_p->current = bif_export[BIF_hibernate_3]->code;
+ c_p->current = &bif_export[BIF_hibernate_3]->info.mfa;
c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
return 1;
}
@@ -6632,21 +6652,15 @@ call_fun(Process* p, /* Current process. */
if (is_fun_header(hdr)) {
ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
- ErlFunEntry* fe;
- BeamInstr* code_ptr;
+ ErlFunEntry* fe = funp->fe;
+ BeamInstr* code_ptr = fe->address;
Eterm* var_ptr;
- int actual_arity;
- unsigned num_free;
-
- fe = funp->fe;
- num_free = funp->num_free;
- code_ptr = fe->address;
- actual_arity = (int) code_ptr[-1];
+ unsigned num_free = funp->num_free;
+ ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
+ int actual_arity = mfa->arity;
if (actual_arity == arity+num_free) {
- DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
- (Eterm)code_ptr[-2],
- code_ptr[-1]);
+ DTRACE_LOCAL_CALL(p, mfa);
if (num_free == 0) {
return code_ptr;
} else {
@@ -6751,10 +6765,10 @@ call_fun(Process* p, /* Current process. */
int actual_arity;
ep = *((Export **) (export_val(fun) + 1));
- actual_arity = (int) ep->code[2];
+ actual_arity = ep->info.mfa.arity;
if (arity == actual_arity) {
- DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
+ DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
return ep->addressv[erts_active_code_ix()];
} else {
/*
@@ -6842,9 +6856,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
funp->fe = fe;
funp->num_free = num_free;
funp->creator = p->common.id;
-#ifdef HIPE
- funp->native_address = fe->native_address;
-#endif
funp->arity = (int)fe->address[-1] - num_free;
for (i = 0; i < num_free; i++) {
*hp++ = reg[i];
@@ -7375,15 +7386,15 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
return 1;
}
- e.code[0] = Mod;
- e.code[1] = Name;
- e.code[2] = arity;
+ e.info.mfa.module = Mod;
+ e.info.mfa.function = Name;
+ e.info.mfa.arity = arity;
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->addressv[erts_active_code_ix()] == ep->code+3
- && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->beam
+ && (ep->beam[0] == (BeamInstr) em_apply_bif);
}
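The Export changes visible throughout follow one pattern: the old code[0..2] words (Module, Function, Arity) become ep->info.mfa, the old fake_op_func_info_for_hipe[0] becomes ep->info.op, and code[3..4] become the two-word ep->beam[] trampoline. Reconstructed from the accesses in this diff and reusing the ErtsCodeInfo/BeamInstr sketch types above; the real struct in export.h has further bookkeeping, so this is only a sketch:

#define NUM_CODE_IX 3            /* assumed here; really ERTS_NUM_CODE_IX */

typedef struct {
    void *addressv[NUM_CODE_IX]; /* current entry address per code index   */
    ErtsCodeInfo info;           /* info.op + info.mfa (old code[0..2] and
                                    fake_op_func_info_for_hipe[0])         */
    BeamInstr beam[2];           /* beam[0]: apply_bif / breakpoint /
                                    call_error_handler instruction,
                                    beam[1]: BIF pointer or saved address  */
} ExportSketch;

/* erts_is_builtin() above then reduces to checking
   ep->addressv[ix] == ep->beam && ep->beam[0] == em_apply_bif.            */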
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 88b6e89a02..48206a75a8 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -39,11 +39,13 @@
#include "erl_binary.h"
#include "erl_zlib.h"
#include "erl_map.h"
+#include "erl_process_dict.h"
#ifdef HIPE
#include "hipe_bif0.h"
#include "hipe_mode_switch.h"
#include "hipe_arch.h"
+#include "hipe_load.h"
#endif
ErlDrvBinary* erts_gzinflate_buffer(char*, int);
@@ -155,13 +157,15 @@ typedef struct {
#define STR_CHUNK 2
#define IMP_CHUNK 3
#define EXP_CHUNK 4
-#define NUM_MANDATORY 5
+#define MIN_MANDATORY 1
+#define MAX_MANDATORY 5
#define LAMBDA_CHUNK 5
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
#define LINE_CHUNK 9
+#define UTF8_ATOM_CHUNK 10
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -171,9 +175,13 @@ typedef struct {
static Uint chunk_types[] = {
/*
- * Mandatory chunk types -- these MUST be present.
+ * Atom chunk types -- Atom or AtU8 MUST be present.
*/
MakeIffId('A', 't', 'o', 'm'), /* 0 */
+
+ /*
+ * Mandatory chunk types -- these MUST be present.
+ */
MakeIffId('C', 'o', 'd', 'e'), /* 1 */
MakeIffId('S', 't', 'r', 'T'), /* 2 */
MakeIffId('I', 'm', 'p', 'T'), /* 3 */
@@ -187,6 +195,7 @@ static Uint chunk_types[] = {
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
MakeIffId('L', 'i', 'n', 'e'), /* 9 */
+ MakeIffId('A', 't', 'U', '8'), /* 10 */
};
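Each chunk id is the chunk's four ASCII characters packed big-endian into a single word. MakeIffId is defined earlier in beam_load.c along roughly these lines (sketch; the real macro casts through Uint):

#define MAKE_IFF_ID(a, b, c, d)                              \
    (((unsigned)(a) << 24) | ((unsigned)(b) << 16) |         \
     ((unsigned)(c) <<  8) |  (unsigned)(d))

/* MAKE_IFF_ID('A','t','U','8') == 0x41745538, the id of the new UTF-8
   atom chunk the loader accepts before falling back to 'Atom'. */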
/*
@@ -479,15 +488,18 @@ typedef struct LoaderState {
static void free_loader_state(Binary* magic);
static ErlHeapFragment* new_literal_fragment(Uint size);
static void free_literal_fragment(ErlHeapFragment*);
-static void loader_state_dtor(Binary* magic);
+static int loader_state_dtor(Binary* magic);
+#ifdef HIPE
static Eterm stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code, Uint size);
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code);
+#endif
static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
- Uint num_types, Uint num_mandatory);
+ Uint num_types);
static int verify_chunks(LoaderState* stp);
-static int load_atom_table(LoaderState* stp);
+static int load_atom_table(LoaderState* stp, ErtsAtomEncoding enc);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int is_bif(Eterm mod, Eterm func, unsigned arity);
@@ -537,8 +549,6 @@ static Eterm compilation_info_for_module(Process* p, BeamCodeHeader*);
static Eterm md5_of_module(Process* p, BeamCodeHeader*);
static Eterm has_native(BeamCodeHeader*);
static Eterm native_addresses(Process* p, BeamCodeHeader*);
-int patch_funentries(Eterm Patchlist);
-int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
static int must_swap_floats;
@@ -626,7 +636,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
CHKALLOC();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
if (!init_iff_file(stp, code, unloaded_size) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto load_error;
}
@@ -671,9 +681,16 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto load_error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto load_error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto load_error;
+ }
}
/*
@@ -806,21 +823,21 @@ erts_finish_loading(Binary* magic, Process* c_p,
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep == NULL || ep->code[0] != module) {
+ if (ep == NULL || ep->info.mfa.module != module) {
continue;
}
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
- } else if (ep->code[3] ==
+ } else if (ep->beam[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(mod_tab_p->curr.num_traced_exports > 0);
- erts_clear_export_break(mod_tab_p, ep->code+3);
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
- ep->code[4] = 0;
+ erts_clear_export_break(mod_tab_p, &ep->info);
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
+ ep->beam[1] = 0;
}
- ASSERT(ep->code[4] == 0);
+ ASSERT(ep->beam[1] == 0);
}
}
ASSERT(mod_tab_p->curr.num_breakpoints == 0);
@@ -842,9 +859,7 @@ erts_finish_loading(Binary* magic, Process* c_p,
erts_alloc(ERTS_ALC_T_PREPARED_CODE,
sizeof(struct erl_module_instance));
inst_p = mod_tab_p->on_load;
- inst_p->nif = 0;
- inst_p->num_breakpoints = 0;
- inst_p->num_traced_exports = 0;
+ erts_module_instance_init(inst_p);
}
inst_p->code_hdr = stp->hdr;
@@ -943,6 +958,13 @@ erts_module_for_prepared_code(Binary* magic)
LoaderState* stp;
if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+#ifdef HIPE
+ HipeLoaderState *hipe_stp;
+ if ((hipe_stp = hipe_get_loader_state(magic))
+ && hipe_stp->text_segment != 0) {
+ return hipe_stp->module;
+ }
+#endif
return NIL;
}
stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -1004,7 +1026,7 @@ static void free_literal_fragment(ErlHeapFragment* bp)
/*
* This destructor function can safely be called multiple times.
*/
-static void
+static int
loader_state_dtor(Binary* magic)
{
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -1089,12 +1111,15 @@ loader_state_dtor(Binary* magic)
*/
ASSERT(stp->genop_blocks == 0);
+ return 1;
}
+#ifdef HIPE
static Eterm
stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code_hdr, Uint size)
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code)
{
Module* modp;
Eterm retval;
@@ -1117,6 +1142,9 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modp->curr.code_hdr = code_hdr;
modp->curr.code_length = size;
modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "insert_new_code "
+ "first_hipe_ref = %p", hipe_code->first_hipe_ref);
+ modp->curr.hipe_code = hipe_code;
/*
* Update ranges (used for finding a function from a PC value).
@@ -1125,6 +1153,7 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_update_ranges((BeamInstr*)modp->curr.code_hdr, size);
return NIL;
}
+#endif
static int
init_iff_file(LoaderState* stp, byte* code, Uint size)
@@ -1198,7 +1227,7 @@ init_iff_file(LoaderState* stp, byte* code, Uint size)
* Scan the IFF file. The header should have been verified by init_iff_file().
*/
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types)
{
Uint count;
Uint id;
@@ -1277,7 +1306,16 @@ verify_chunks(LoaderState* stp)
MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < NUM_MANDATORY; i++) {
+
+ if (stp->chunks[UTF8_ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[UTF8_ATOM_CHUNK].start, stp->chunks[UTF8_ATOM_CHUNK].size);
+ } else if (stp->chunks[ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[ATOM_CHUNK].start, stp->chunks[ATOM_CHUNK].size);
+ } else {
+ LoadError0(stp, "mandatory chunk of type 'Atom' or 'AtU8' not found\n");
+ }
+
+ for (i = MIN_MANDATORY; i < MAX_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -1338,7 +1376,7 @@ verify_chunks(LoaderState* stp)
}
static int
-load_atom_table(LoaderState* stp)
+load_atom_table(LoaderState* stp, ErtsAtomEncoding enc)
{
unsigned int i;
@@ -1357,7 +1395,7 @@ load_atom_table(LoaderState* stp)
GetByte(stp, n);
GetString(stp, atom, n);
- stp->atom[i] = erts_atom_put(atom, n, ERTS_ATOM_ENC_LATIN1, 1);
+ stp->atom[i] = erts_atom_put(atom, n, enc, 1);
}
/*
@@ -1420,8 +1458,8 @@ load_import_table(LoaderState* stp)
* the BIF function.
*/
if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- stp->import[i].bf = (BifFunction) e->code[4];
+ if (e->beam[0] == (BeamInstr) em_apply_bif) {
+ stp->import[i].bf = (BifFunction) e->beam[1];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
}
@@ -1514,7 +1552,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity)
if (e == NULL) {
return 0;
}
- if (e->code[3] != (BeamInstr) em_apply_bif) {
+ if (e->beam[0] != (BeamInstr) em_apply_bif) {
return 0;
}
if (mod == am_erlang && func == am_apply && arity == 3) {
@@ -1893,7 +1931,7 @@ load_code(LoaderState* stp)
* by both the nif functionality and line instructions.
*/
enum {
- FUNC_INFO_SZ = 5
+ FUNC_INFO_SZ = sizeof(ErtsCodeInfo) / sizeof(Eterm)
};
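Tying FUNC_INFO_SZ to the header struct keeps the loader's skip-over arithmetic in sync with ErtsCodeInfo, but only if the struct is an exact multiple of the code word size. That invariant can be pinned with a compile-time check; a sketch in plain C11, reusing the sketch types from earlier (the emulator would express the same invariant with its own compile-time assert macro):

#include <assert.h>

static_assert(sizeof(ErtsCodeInfo) % sizeof(Eterm) == 0,
              "func_info header must be a whole number of code words");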
code = stp->codev;
@@ -2532,15 +2570,10 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
-#ifdef ERTS_DIRTY_SCHEDULERS
- enum { MIN_FUNC_SZ = 4 };
-#else
- enum { MIN_FUNC_SZ = 3 };
-#endif
- if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
+ if (finfo_ix - last_func_start < BEAM_NIF_MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
- int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
- ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
+ int pad = BEAM_NIF_MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < BEAM_NIF_MIN_FUNC_SZ);
CodeNeed(pad);
sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
FUNC_INFO_SZ*sizeof(BeamInstr));
@@ -2565,8 +2598,11 @@ load_code(LoaderState* stp)
stp->function = code[ci-2];
stp->arity = code[ci-1];
+ /* When this assert is triggered, it is normally a sign that
+ the size of the ops.tab i_func_info instruction is not
+ the same as FUNC_INFO_SZ */
ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
- stp->hdr->functions[function_number] = (BeamInstr*) stp->labels[last_label].patches;
+ stp->hdr->functions[function_number] = (ErtsCodeInfo*) stp->labels[last_label].patches;
offset = function_number;
stp->labels[last_label].patches = offset;
function_number++;
@@ -4031,60 +4067,52 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
op->next = NULL;
return op;
}
+
+static GenOp*
+translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
+{
+ const ErtsGcBif* p;
+ BifFunction bf;
+
+ bf = stp->import[Bif.val].bf;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->bif == bf) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = (BeamInstr) p->gc_bif;
+ return op;
+ }
+ }
+
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
+}
+
/*
- * Rewrite gc_bifs with one parameter (the common case). Utilized
- * in ops.tab to rewrite instructions calling bif's in guards
- * to use a garbage collecting implementation.
+ * Rewrite gc_bifs with one parameter (the common case).
*/
static GenOp*
gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg Src, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == length_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_length_1;
- } else if (bf == size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_size_1;
- } else if (bf == bit_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
- } else if (bf == byte_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
- } else if (bf == map_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1;
- } else if (bf == abs_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
- } else if (bf == float_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_float_1;
- } else if (bf == round_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_round_1;
- } else if (bf == trunc_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif1_5;
op->arity = 5;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4095,35 +4123,18 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_2) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif2_6;
op->arity = 6;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4134,37 +4145,19 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_3) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_ii_gc_bif3_7;
op->arity = 7;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = S3;
op->a[6] = Dst;
- op->next = NULL;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
static GenOp*
@@ -4386,22 +4379,25 @@ gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int
hash_internal_genop_arg(LoaderState* stp, GenOpArg Key, Uint32* hx)
{
+ Eterm key_term;
switch (Key.type) {
case TAG_a:
- *hx = atom_tab(atom_val(Key.val))->slot.bucket.hvalue;
- return 1;
+ key_term = Key.val;
+ break;
case TAG_i:
- *hx = Key.val;
- return 1;
+ key_term = make_small(Key.val);
+ break;
case TAG_n:
- *hx = make_internal_hash(NIL);
- return 1;
+ key_term = NIL;
+ break;
case TAG_q:
- *hx = make_internal_hash(stp->literals[Key.val].term);
- return 1;
+ key_term = stp->literals[Key.val].term;
+ break;
default:
return 0;
}
+ *hx = erts_pd_make_hx(key_term);
+ return 1;
}
@@ -4556,7 +4552,7 @@ freeze_code(LoaderState* stp)
* function table in the beginning of the file.
*/
- code_hdr->functions[stp->num_functions] = (codev + stp->ci - 1);
+ code_hdr->functions[stp->num_functions] = (ErtsCodeInfo*)(codev + stp->ci - 1);
CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
@@ -4816,16 +4812,16 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
ep = erts_export_put(stp->module, stp->export[i].function,
stp->export[i].arity);
- if (!on_load) {
- ep->addressv[erts_staging_code_ix()] = address;
- } else {
+ if (on_load) {
/*
* on_load: Don't make any of the exported functions
* callable yet. Keep any function in the current
* code callable.
*/
- ep->code[4] = (BeamInstr) address;
+ ep->beam[1] = (BeamInstr) address;
}
+ else
+ ep->addressv[erts_staging_code_ix()] = address;
}
/*
@@ -4871,7 +4867,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
fe->address = code_ptr;
#ifdef HIPE
- hipe_set_closure_stub(fe, stp->lambdas[i].num_free);
+ hipe_set_closure_stub(fe);
#endif
}
}
@@ -4882,7 +4878,7 @@ transform_engine(LoaderState* st)
{
Uint op;
int ap; /* Current argument. */
- Uint* restart; /* Where to restart if current match fails. */
+ const Uint* restart; /* Where to restart if current match fails. */
GenOpArg var[TE_MAX_VARS]; /* Buffer for variables. */
GenOpArg* rest_args = NULL;
int num_rest_args = 0;
@@ -4891,7 +4887,7 @@ transform_engine(LoaderState* st)
GenOp* instr;
GenOp* first = st->genop;
GenOp* keep = NULL;
- Uint* pc;
+ const Uint* pc;
static Uint restart_fail[1] = {TOP_fail};
ASSERT(gen_opc[first->op].transform != -1);
@@ -5002,7 +4998,7 @@ transform_engine(LoaderState* st)
if (i >= st->num_imports || st->import[i].bf == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->code[4] != (BeamInstr) st->import[i].bf) {
+ bif_export[bif_number]->beam[1] != (BeamInstr) st->import[i].bf) {
goto restart;
}
}
@@ -5627,18 +5623,16 @@ functions_in_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo* ci = code_hdr->functions[i];
Eterm tuple;
/*
* If the function name is [], this entry is a stub for
* a BIF that should be ignored.
*/
- ASSERT(is_atom(name) || is_nil(name));
- if (is_atom(name)) {
- tuple = TUPLE2(hp, name, make_small(arity));
+ ASSERT(is_atom(ci->mfa.function) || is_nil(ci->mfa.function));
+ if (is_atom(ci->mfa.function)) {
+ tuple = TUPLE2(hp, ci->mfa.function, make_small(ci->mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5726,17 +5720,17 @@ native_addresses(Process* p, BeamCodeHeader* code_hdr)
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo *ci = code_hdr->functions[i];
Eterm tuple;
- ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
- if (func_info[1] != 0) {
- Eterm addr;
- ASSERT(is_atom(name));
- addr = erts_bld_uint(&hp, NULL, func_info[1]);
- tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
+ ASSERT(is_atom(ci->mfa.function)
+ || is_nil(ci->mfa.function)); /* [] if BIF stub */
+ if (ci->native != 0) {
+ Eterm addr;
+ ASSERT(is_atom(ci->mfa.function));
+ addr = erts_bld_uint(&hp, NULL, ci->native);
+ tuple = erts_bld_tuple(&hp, NULL, 3, ci->mfa.function,
+ make_small(ci->mfa.arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
}
@@ -5763,11 +5757,11 @@ exported_from_module(Process* p, /* Process whose heap to use. */
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
- if (ep->code[0] == mod) {
+ if (ep->info.mfa.module == mod) {
Eterm tuple;
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_call_error_handler) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ ep->beam[0] == (BeamInstr) em_call_error_handler) {
/* There is a call to the function, but it does not exist. */
continue;
}
@@ -5777,7 +5771,8 @@ exported_from_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hend = hp + need;
}
- tuple = TUPLE2(hp, ep->code[1], make_small(ep->code[2]));
+ tuple = TUPLE2(hp, ep->info.mfa.function,
+ make_small(ep->info.mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5851,7 +5846,6 @@ md5_of_module(Process* p, /* Process whose heap to use. */
Eterm*
erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
{
- BeamInstr* current = fi->current;
Eterm loc = NIL;
if (fi->loc != LINE_INVALID_LOCATION) {
@@ -5861,7 +5855,7 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
Eterm file_term = NIL;
if (file == 0) {
- Atom* ap = atom_tab(atom_val(fi->current[0]));
+ Atom* ap = atom_tab(atom_val(fi->mfa->module));
file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
} else {
@@ -5880,10 +5874,12 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
}
if (is_list(args) || is_nil(args)) {
- *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ args, loc);
} else {
- Eterm arity = make_small(current[2]);
- *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ Eterm arity = make_small(fi->mfa->arity);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ arity, loc);
}
return hp + 5;
}
@@ -5894,9 +5890,9 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
* the function.
*/
void
-erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa)
{
- fi->current = current;
+ fi->mfa = mfa;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
}
@@ -5905,13 +5901,13 @@ erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
/*
* Returns a pointer to {module, function, arity}, or NULL if not found.
*/
-BeamInstr*
+ErtsCodeMFA*
find_function_from_pc(BeamInstr* pc)
{
FunctionInfo fi;
erts_lookup_function_info(&fi, pc, 0);
- return fi.current;
+ return fi.mfa;
}
/*
@@ -5966,7 +5962,7 @@ code_get_chunk_2(BIF_ALIST_2)
goto error;
}
if (!init_iff_file(stp, start, binary_size(Bin)) ||
- !scan_iff_file(stp, &chunk, 1, 1) ||
+ !scan_iff_file(stp, &chunk, 1) ||
stp->chunks[0].start == NULL) {
res = am_undefined;
goto done;
@@ -6015,7 +6011,7 @@ code_module_md5_1(BIF_ALIST_1)
}
stp->module = THE_NON_VALUE; /* Suppress diagnostiscs */
if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
res = am_undefined;
goto done;
@@ -6028,24 +6024,21 @@ code_module_md5_1(BIF_ALIST_1)
return res;
}
-#define WORDS_PER_FUNCTION 6
+#ifdef HIPE
+#define WORDS_PER_FUNCTION (sizeof(ErtsCodeInfo) / sizeof(UWord) + 1)
static BeamInstr*
-make_stub(BeamInstr* fp, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
+make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
{
- fp[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- fp[1] = native;
- fp[2] = mod;
- fp[3] = func;
- fp[4] = arity;
-#ifdef HIPE
- if (native) {
- fp[5] = BeamOpCode(op_move_return_n);
- hipe_mfa_save_orig_beam_op(mod, func, arity, fp+5);
- }
-#endif
- fp[5] = OpCode;
- return fp + WORDS_PER_FUNCTION;
+ DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info));
+ ASSERT(WORDS_PER_FUNCTION == 6);
+ info->op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ info->native = native;
+ info->mfa.module = mod;
+ info->mfa.function = func;
+ info->mfa.arity = arity;
+ erts_codeinfo_to_code(info)[0] = OpCode;
+ return erts_codeinfo_to_code(info)+1;
}
static byte*
@@ -6104,22 +6097,17 @@ stub_read_export_table(LoaderState* stp)
}
static void
-stub_final_touch(LoaderState* stp, BeamInstr* fp)
+stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci)
{
unsigned int i;
unsigned int n = stp->num_exps;
- Eterm mod = fp[2];
- Eterm function = fp[3];
- int arity = fp[4];
-#ifdef HIPE
Lambda* lp;
-#endif
- if (is_bif(mod, function, arity)) {
- fp[1] = 0;
- fp[2] = 0;
- fp[3] = 0;
- fp[4] = 0;
+ if (is_bif(ci->mfa.module, ci->mfa.function, ci->mfa.arity)) {
+ ci->native = 0;
+ ci->mfa.module = 0;
+ ci->mfa.function = 0;
+ ci->mfa.arity = 0;
return;
}
@@ -6128,9 +6116,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
*/
for (i = 0; i < n; i++) {
- if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(mod, function, arity);
- ep->addressv[erts_staging_code_ix()] = fp+5;
+ if (stp->export[i].function == ci->mfa.function &&
+ stp->export[i].arity == ci->mfa.arity) {
+ Export* ep = erts_export_put(ci->mfa.module,
+ ci->mfa.function,
+ ci->mfa.arity);
+ ep->addressv[erts_staging_code_ix()] = erts_codeinfo_to_code(ci);
+ DBG_TRACE_MFA_P(&ci->mfa,"set beam stub at %p in export at %p (code_ix=%d)",
+ erts_codeinfo_to_code(ci), ep, erts_staging_code_ix());
return;
}
}
@@ -6140,16 +6133,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
* Search the lambda table to find out which.
*/
-#ifdef HIPE
n = stp->num_lambdas;
for (i = 0, lp = stp->lambdas; i < n; i++, lp++) {
ErlFunEntry* fe = stp->lambdas[i].fe;
- if (lp->function == function && lp->arity == arity) {
- fp[5] = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
- fe->address = &(fp[5]);
+ if (lp->function == ci->mfa.function && lp->arity == ci->mfa.arity) {
+ *erts_codeinfo_to_code(ci) = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
+ fe->address = erts_codeinfo_to_code(ci);
}
}
-#endif
return;
}
@@ -6158,10 +6149,9 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
[{Adr, Patchtyppe} | Addresses]
and the address of a fun_entry.
*/
-int
+static int
patch(Eterm Addresses, Uint fe)
{
-#ifdef HIPE
Eterm* listp;
Eterm tuple;
Eterm* tp;
@@ -6197,15 +6187,13 @@ patch(Eterm Addresses, Uint fe)
}
-#endif
return 1;
}
-int
+static int
patch_funentries(Eterm Patchlist)
{
-#ifdef HIPE
while (!is_nil(Patchlist)) {
Eterm Info;
Eterm MFA;
@@ -6294,23 +6282,23 @@ patch_funentries(Eterm Patchlist)
return 0;
}
-#endif
return 1; /* Signal that all went well */
}
-
/*
* Do a dummy load of a module. No threaded code will be loaded.
* Used for loading native code.
* Will also patch all references to fun_entries to point to
* the new fun_entries created.
*/
-
Eterm
-erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
+erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info)
{
Binary* magic;
+ Binary* hipe_magic;
LoaderState* stp;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
BeamInstr Funcs;
BeamInstr Patchlist;
Eterm MD5Bin;
@@ -6333,8 +6321,12 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
magic = erts_alloc_loader_state();
stp = ERTS_MAGIC_BIN_DATA(magic);
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*hipe_code));
- if (is_not_atom(Mod)) {
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
goto error;
}
if (is_not_tuple(Info)) {
@@ -6362,13 +6354,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Scan the Beam binary and read the interesting sections.
*/
- stp->module = Mod;
+ stp->module = hipe_stp->module;
stp->group_leader = p->group_leader;
stp->num_functions = n;
if (!init_iff_file(stp, bytes, size)) {
goto error;
}
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto error;
}
@@ -6376,9 +6368,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (!read_code_header(stp)) {
goto error;
}
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto error;
+ }
}
define_file(stp, "export table", EXP_CHUNK);
if (!stub_read_export_table(stp)) {
@@ -6470,20 +6469,17 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Set the pointer and make the stub. Put a return instruction
* as the body until we know what kind of trap we should put there.
*/
- code_hdr->functions[i] = fp;
-#ifdef HIPE
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
-#else
- op = (Eterm) BeamOpCode(op_move_return_n);
-#endif
- fp = make_stub(fp, Mod, func, arity, (Uint)native_address, op);
+ fp = make_stub((ErtsCodeInfo*)fp, hipe_stp->module, func, arity,
+ (Uint)native_address, op);
}
/*
* Insert the last pointer and the int_code_end instruction.
*/
- code_hdr->functions[i] = fp;
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
*fp++ = (BeamInstr) BeamOp(op_int_code_end);
/*
@@ -6516,11 +6512,20 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
}
/*
+ * Initialise HiPE module
+ */
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ /*
* Insert the module in the module table.
*/
- rval = stub_insert_new_code(p, 0, p->group_leader, Mod,
- code_hdr, code_size);
+ rval = stub_insert_new_code(p, 0, p->group_leader, hipe_stp->module,
+ code_hdr, code_size, hipe_code);
if (rval != NIL) {
goto error;
}
@@ -6531,23 +6536,74 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
fp = code_base;
for (i = 0; i < n; i++) {
- stub_final_touch(stp, fp);
+ stub_final_touch(stp, (ErtsCodeInfo*)fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
+ Eterm mod = hipe_stp->module;
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
- return Mod;
+ hipe_free_loader_state(hipe_stp);
+
+ return mod;
}
error:
+ erts_free(ERTS_ALC_T_HIPE, hipe_code);
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin)
+{
+ Binary* hipe_magic;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
+ Module* modp;
+
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
+ return 0;
+ }
+
+ modp = erts_get_module(hipe_stp->module, erts_active_code_ix());
+ if (!modp)
+ return 0;
+
+ /*
+ * Initialise HiPE module
+ */
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*hipe_code));
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ modp->curr.hipe_code = hipe_code;
+
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
+ return 1;
+}
+
#undef WORDS_PER_FUNCTION
+#endif /* HIPE */
+
static int safe_mul(UWord a, UWord b, UWord* resp)
{
@@ -6561,3 +6617,46 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
}
}
+#ifdef ENABLE_DBG_TRACE_MFA
+
+#define MFA_MAX 10
+Eterm dbg_trace_m[MFA_MAX];
+Eterm dbg_trace_f[MFA_MAX];
+Uint dbg_trace_a[MFA_MAX];
+unsigned int dbg_trace_ix = 0;
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a)
+{
+ unsigned i = dbg_trace_ix++;
+ ASSERT(i < MFA_MAX);
+ dbg_trace_m[i] = am_atom_put(m, strlen(m));
+ dbg_trace_f[i] = am_atom_put(f, strlen(f));
+ dbg_trace_a[i] = a;
+}
+
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a)
+{
+ unsigned int i;
+ for (i = 0; i < dbg_trace_ix; ++i) {
+ if (m == dbg_trace_m[i] &&
+ (!f || (f == dbg_trace_f[i] && a == dbg_trace_a[i]))) {
+
+ return i+1;
+ }
+ }
+ return 0;
+}
+
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...)
+{
+ va_list arglist;
+ va_start(arglist, format);
+ ASSERT(--ix < MFA_MAX);
+ erts_fprintf(stderr, "MFA TRACE %T:%T/%u: ",
+ dbg_trace_m[ix], dbg_trace_f[ix], (int)dbg_trace_a[ix]);
+
+ erts_vfprintf(stderr, format, arglist);
+ va_end(arglist);
+}
+
+#endif /* ENABLE_DBG_TRACE_MFA */
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 9be5e14e40..659b9c303f 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -35,15 +35,7 @@ typedef struct gen_op_entry {
int transform;
} GenOpEntry;
-extern GenOpEntry gen_opc[];
-
-#ifdef NO_JUMP_TABLE
-#define BeamOp(Op) (Op)
-#else
-extern void** beam_ops;
-#define BeamOp(Op) beam_ops[(Op)]
-#endif
-
+extern const GenOpEntry gen_opc[];
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
@@ -115,10 +107,16 @@ typedef struct beam_code_header {
* The actual loaded code (for the first function) start just beyond
* this table.
*/
- BeamInstr* functions[1];
+ ErtsCodeInfo* functions[1];
}BeamCodeHeader;
+#ifdef ERTS_DIRTY_SCHEDULERS
+# define BEAM_NIF_MIN_FUNC_SZ 4
+#else
+# define BEAM_NIF_MIN_FUNC_SZ 3
+#endif
+
void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
int erts_is_module_native(BeamCodeHeader* code);
void erts_beam_bif_load_init(void);
@@ -152,4 +150,34 @@ struct BeamCodeLineTab_ {
#define LOC_FILE(Loc) ((Loc) >> 24)
#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
+/*
+ * MFA event debug "tracing" usage:
+ *
+ * #define ENABLE_DBG_TRACE_MFA
+ * call dbg_set_traced_mfa("mymod","myfunc",arity)
+ * for the function(s) to trace, in some init function.
+ *
+ * Run and get stderr printouts when interesting things happen to your MFA.
+ */
+#ifdef ENABLE_DBG_TRACE_MFA
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a);
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a);
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...);
+#define DBG_TRACE_MFA(M,F,A,FMT, ...) do {\
+ unsigned ix;\
+ if ((ix=dbg_is_traced_mfa(M,F,A))) \
+ dbg_vtrace_mfa(ix, FMT"\n", ##__VA_ARGS__);\
+ }while(0)
+
+#define DBG_TRACE_MFA_P(MFA, FMT, ...) \
+ DBG_TRACE_MFA((MFA)->module, (MFA)->function, (MFA)->arity, FMT, ##__VA_ARGS__)
+
+#else
+# define dbg_set_traced_mfa(M,F,A)
+# define DBG_TRACE_MFA(M,F,A,FMT, ...)
+# define DBG_TRACE_MFA_P(MFA,FMT, ...)
+#endif /* ENABLE_DBG_TRACE_MFA */
+
#endif /* _BEAM_LOAD_H */
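For reference, a minimal sketch (not part of the patch) of how the MFA debug-trace hooks above are meant to be used; the traced module/function and the local variables mod, func, arity, address and ci are placeholders:

    /* In some init function, with ENABLE_DBG_TRACE_MFA defined in
     * beam_load.h, request tracing of a single MFA: */
    dbg_set_traced_mfa("lists", "reverse", 2);

    /* Anywhere in loader/export code, emit a printout that only fires
     * for traced MFAs: */
    DBG_TRACE_MFA(mod, func, arity, "exporting at %p", address);

    /* Or, when an ErtsCodeMFA pointer is at hand: */
    DBG_TRACE_MFA_P(&ci->mfa, "made stub at %p", erts_codeinfo_to_code(ci));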
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 55342a38c6..9b0335e83d 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -221,13 +221,13 @@ erts_ranges_sz(void)
void
erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
- BeamInstr** low;
- BeamInstr** high;
- BeamInstr** mid;
+ ErtsCodeInfo** low;
+ ErtsCodeInfo** high;
+ ErtsCodeInfo** mid;
Range* rp;
BeamCodeHeader* hdr;
- fi->current = NULL;
+ fi->mfa = NULL;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
rp = find_range(pc);
@@ -240,12 +240,12 @@ erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
high = low + hdr->num_functions;
while (low < high) {
mid = low + (high-low) / 2;
- if (pc < mid[0]) {
+ if (pc < (BeamInstr*)(mid[0])) {
high = mid;
- } else if (pc < mid[1]) {
- fi->current = mid[0]+2;
+ } else if (pc < (BeamInstr*)(mid[1])) {
+ fi->mfa = &mid[0]->mfa;
if (full_info) {
- BeamInstr** fp = hdr->functions;
+ ErtsCodeInfo** fp = hdr->functions;
int idx = mid - fp;
lookup_loc(fi, pc, hdr, idx);
}
@@ -316,7 +316,7 @@ lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
file = LOC_FILE(fi->loc);
if (file == 0) {
/* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ Atom* mod_atom = atom_tab(atom_val(fi->mfa->module));
fi->needed += 2*(mod_atom->len+4);
} else {
Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index d9048065c8..614bedab12 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -199,7 +199,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
goto res_no_proc;
case ERTS_PORT_OP_SCHEDULED:
if (refp) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
default:
@@ -361,7 +361,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
c_p->common.id,
(mon->name != NIL
? mon->name
- : mon->pid),
+ : mon->u.pid),
ref,
0);
res = (code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true);
@@ -467,7 +467,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target)
}
/* Can return atom true, false, yield, internal_error, badarg or
- * THE_NON_VALUE if error occured or trap has been set up
+ * THE_NON_VALUE if error occurred or trap has been set up
*/
static
BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
@@ -498,7 +498,7 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
res = am_true;
break;
case MON_ORIGIN:
- to = mon->pid;
+ to = mon->u.pid;
*multip = am_false;
if (is_atom(to)) {
/* Monitoring a name at node to */
@@ -597,7 +597,7 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
case THE_NON_VALUE:
- /* If other error occured or trap has been set up - pass through */
+ /* If other error occurred or trap has been set up - pass through */
BIF_RET(THE_NON_VALUE);
case am_false:
if (info)
@@ -782,7 +782,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name)
case ERTS_PORT_OP_DONE:
return ret;
case ERTS_PORT_OP_SCHEDULED: { /* Scheduled a signal */
- ASSERT(is_internal_ref(ret));
+ ASSERT(is_internal_ordinary_ref(ret));
BIF_TRAP3(await_port_send_result_trap, self,
ret, am_true, ret);
/* bif_trap returns */
@@ -1180,7 +1180,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
}
@@ -1586,7 +1586,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
ERTS_BIF_CHK_EXITED(BIF_P);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
@@ -2221,7 +2221,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
/* Fall through */
case ERTS_PORT_OP_SCHEDULED:
if (is_not_nil(*refp)) {
- ASSERT(is_internal_ref(*refp));
+ ASSERT(is_internal_ordinary_ref(*refp));
ret_val = SEND_AWAIT_RESULT;
}
break;
@@ -2409,7 +2409,7 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, am_ok);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval, await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
break;
case SEND_BADARG:
@@ -2446,7 +2446,7 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
{
- Binary* bin = ((ProcBin*) binary_val(BIF_ARG_1))->val;
+ Binary* bin = erts_magic_ref2bin(BIF_ARG_1);
ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
int result;
@@ -2526,7 +2526,7 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, msg);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval,
await_port_send_result_trap, p, ref, msg, msg);
break;
@@ -3022,8 +3022,8 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
{
Eterm res;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
- Sint i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
@@ -3033,7 +3033,7 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
}
BIF_ERROR(BIF_P, BADARG);
}
- res = erts_atom_put((byte *) buf, i, ERTS_ATOM_ENC_LATIN1, 1);
+ res = erts_atom_put(buf, i, ERTS_ATOM_ENC_UTF8, 1);
ASSERT(is_atom(res));
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(res);
@@ -3043,17 +3043,17 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
{
- Sint i;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
- if ((i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS)) < 0) {
+ if (i < 0) {
error:
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
} else {
Eterm a;
- if (erts_atom_get(buf, i, &a, ERTS_ATOM_ENC_LATIN1)) {
+ if (erts_atom_get((char *) buf, i, &a, ERTS_ATOM_ENC_UTF8)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(a);
} else {
@@ -3865,11 +3865,21 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
+/*
+ * Pass the atom 'minor' to request a relaxed generational GC run. This is
+ * only a recommendation; the VM may still choose a major run.
+ * Pass the atom 'major' for the default behaviour: a major (fullsweep) GC run.
+ */
+BIF_RETTYPE
+erts_internal_garbage_collect_1(BIF_ALIST_1)
{
- FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ switch (BIF_ARG_1) {
+ case am_minor: break;
+ case am_major: FLAGS(BIF_P) |= F_NEED_FULLSWEEP; break;
+ default: BIF_ERROR(BIF_P, BADARG);
+ }
erts_garbage_collect(BIF_P, 0, NULL, 0);
- BIF_RET(am_true);
+ return am_true;
}
/**********************************************************************/
@@ -4099,6 +4109,7 @@ BIF_RETTYPE ref_to_list_1(BIF_ALIST_1)
{
if (is_not_ref(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
+ erts_magic_ref_save_bin(BIF_ARG_1);
BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
}
@@ -4307,7 +4318,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
if (new_member == BIF_P
|| !(erts_smp_atomic32_read_nob(&new_member->state)
- & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
new_member->group_leader = STORE_NC_IN_PROC(new_member,
BIF_ARG_1);
}
@@ -4591,7 +4602,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0;
erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time,
new);
- Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new);
+ Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0);
ASSERT(is_value(ref));
BIF_TRAP2(await_sched_wall_time_mod_trap,
BIF_P,
@@ -4713,25 +4724,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
/**********************************************************************/
-BIF_RETTYPE hash_2(BIF_ALIST_2)
-{
- Uint32 hash;
- Sint range;
-
- if (is_not_small(BIF_ARG_2)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
- BIF_ERROR(BIF_P, BADARG);
- }
-#if defined(ARCH_64)
- if (range > ((1L << 27) - 1))
- BIF_ERROR(BIF_P, BADARG);
-#endif
- hash = make_broken_hash(BIF_ARG_1);
- BIF_RET(make_small(1 + (hash % range))); /* [1..range] */
-}
-
BIF_RETTYPE phash_2(BIF_ALIST_2)
{
Uint32 hash;
@@ -4941,18 +4933,18 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Export bif_return_trap_export;
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(BIF_ALIST_0))
+ Eterm (*bif)(BIF_ALIST))
{
int i;
sys_memset((void *) ep, 0, sizeof(Export));
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->addressv[i] = &ep->code[3];
+ ep->addressv[i] = ep->beam;
}
- ep->code[0] = m;
- ep->code[1] = f;
- ep->code[2] = a;
- ep->code[3] = (BeamInstr) em_apply_bif;
- ep->code[4] = (BeamInstr) bif;
+ ep->info.mfa.module = m;
+ ep->info.mfa.function = f;
+ ep->info.mfa.arity = a;
+ ep->beam[0] = (BeamInstr) em_apply_bif;
+ ep->beam[1] = (BeamInstr) bif;
}
void erts_init_bif(void)
@@ -5002,6 +4994,314 @@ void erts_init_bif(void)
erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
}
+/*
+ * Scheduling of BIFs via NifExport...
+ */
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
+
+#define ERTS_SCHED_BIF_TRAP_MARKER ((void *) (UWord) 1)
+
+static ERTS_INLINE void
+schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ ErtsBifFunc dfunc, void *ifunc,
+ Eterm module, Eterm function,
+ int argc, Eterm *argv)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ (void) erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ mfa, pc, (BeamInstr) em_apply_bif,
+ dfunc, ifunc,
+ module, function,
+ argc, argv);
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+ erts_nif_export_restore(BIF_P, nep, BIF_ARG_1);
+ BIF_RET(BIF_ARG_1);
+}
+
+static BIF_RETTYPE dirty_bif_trap(BIF_ALIST)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+
+ /*
+ * Arity and argument registers already set
+     * Arity and argument registers are already set
+     * correctly by the call to dirty_bif_trap()...
+
+ ASSERT(BIF_P->arity == nep->exp.info.mfa.arity);
+
+ erts_nif_export_restore(BIF_P, nep, THE_NON_VALUE);
+
+ BIF_P->i = (BeamInstr *) nep->func;
+ BIF_P->freason = TRAP;
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2)
+{
+ Eterm freason;
+
+ ASSERT(is_small(BIF_ARG_1));
+
+ freason = signed_val(BIF_ARG_1);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ freason |= EXF_RESTORE_NIF;
+
+ BIF_P->fvalue = BIF_ARG_2;
+
+ BIF_ERROR(BIF_P, freason);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+extern BeamInstr* em_call_bif_e;
+static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc bif,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc)
+{
+ Process *c_p, *dirty_shadow_proc;
+ ErtsCodeMFA *mfa;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ dirty_shadow_proc = proc;
+ c_p = proc->next;
+ ASSERT(c_p->common.id == dirty_shadow_proc->common.id);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else
+#endif
+ {
+ dirty_shadow_proc = NULL;
+ c_p = proc;
+ }
+
+ if (!ERTS_PROC_IS_EXITING(c_p)) {
+ Export *exp;
+ BifFunction dbif, ibif;
+ BeamInstr *pc;
+
+ /*
+ * dbif - direct bif
+ * ibif - indirect bif
+ */
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_aint32_t set, mask;
+ mask = (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC);
+ switch (sched_type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ set = ERTS_PSFLG_DIRTY_CPU_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ set = ERTS_PSFLG_DIRTY_IO_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_NORMAL:
+ default:
+ set = 0;
+ dbif = call_bif;
+ ibif = bif;
+ break;
+ }
+
+ (void) erts_smp_atomic32_read_bset_nob(&c_p->state, mask, set);
+#else
+ dbif = call_bif;
+ ibif = bif;
+#endif
+
+ if (i == NULL) {
+ ERTS_INTERNAL_ERROR("Missing instruction pointer");
+ }
+#ifdef HIPE
+ else if (proc->flags & F_HIPE_MODE) {
+ /* Pointer to bif export in i */
+ exp = (Export *) i;
+ pc = c_p->cp;
+ mfa = &exp->info.mfa;
+ }
+#endif
+ else if (em_call_bif_e == (BeamInstr *) *i) {
+ /* Pointer to bif export in i+1 */
+ exp = (Export *) i[1];
+ pc = i;
+ mfa = &exp->info.mfa;
+ }
+ else if (em_apply_bif == (BeamInstr *) *i) {
+ /* Pointer to bif in i+1, and mfa in i-3 */
+ pc = c_p->cp;
+ mfa = erts_code_to_codemfa(i);
+ }
+ else {
+ ERTS_INTERNAL_ERROR("erts_schedule_bif() called "
+ "from unexpected instruction");
+ }
+ ASSERT(bif);
+
+ if (argc < 0) { /* reschedule original call */
+ mod = mfa->module;
+ func = mfa->function;
+ argc = (int) mfa->arity;
+ }
+
+ schedule(c_p, dirty_shadow_proc, mfa, pc, dbif, ibif,
+ mod, func, argc, argv);
+ }
+
+ if (dirty_shadow_proc)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE
+call_bif(Process *c_p, Eterm *reg, BeamInstr *I)
+{
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsBifFunc bif = (ErtsBifFunc) nep->func;
+ BIF_RETTYPE ret;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ ASSERT(bif);
+
+ ret = (*bif)(c_p, reg, I);
+
+ if (is_value(ret))
+ erts_nif_export_restore(c_p, nep, ret);
+ else if (c_p->freason != TRAP)
+ c_p->freason |= EXF_RESTORE_NIF; /* restore in handle_error() */
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* BIF did an ordinary trap... */
+ erts_nif_export_restore(c_p, nep, ret);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+
+ return ret;
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+int
+erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ BIF_RETTYPE result;
+ int exiting;
+ Process *dirty_shadow_proc;
+ ErtsBifFunc bf;
+ NifExport *nep;
+#ifdef DEBUG
+ Eterm *c_p_htop;
+ erts_aint32_t state;
+
+ ASSERT(!c_p->scheduler_data);
+ state = erts_smp_atomic32_read_nob(&c_p->state);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+#endif
+
+ nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ bf = (ErtsBifFunc) I[1];
+
+ erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ dirty_shadow_proc->freason = c_p->freason;
+ dirty_shadow_proc->fvalue = c_p->fvalue;
+ dirty_shadow_proc->ftrace = c_p->ftrace;
+ dirty_shadow_proc->cp = c_p->cp;
+ dirty_shadow_proc->i = c_p->i;
+
+#ifdef DEBUG
+ c_p_htop = c_p->htop;
+#endif
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*bf)(dirty_shadow_proc, reg, I);
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p_htop == c_p->htop);
+ ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(dirty_shadow_proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (is_value(result))
+ schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_result,
+ NULL, am_erts_internal, am_dirty_bif_result, 1, &result);
+ else if (dirty_shadow_proc->freason != TRAP) {
+ Eterm argv[2];
+ ASSERT(dirty_shadow_proc->freason <= MAX_SMALL);
+ argv[0] = make_small(dirty_shadow_proc->freason);
+ argv[1] = dirty_shadow_proc->fvalue;
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_exception, NULL, am_erts_internal,
+ am_dirty_bif_exception, 2, argv);
+ }
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* Dirty BIF did an ordinary trap... */
+ ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state)
+ & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)));
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_trap, (void *) dirty_shadow_proc->i,
+ am_erts_internal, am_dirty_bif_trap,
+ dirty_shadow_proc->arity, reg);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+ c_p->freason = dirty_shadow_proc->freason;
+ c_p->fvalue = dirty_shadow_proc->fvalue;
+ c_p->ftrace = dirty_shadow_proc->ftrace;
+ c_p->cp = dirty_shadow_proc->cp;
+ c_p->i = dirty_shadow_proc->i;
+ c_p->arity = dirty_shadow_proc->arity;
+ }
+
+ erts_flush_dirty_shadow_proc(dirty_shadow_proc);
+
+ return exiting;
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
@@ -5256,12 +5556,10 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
}
}
-#else
+#else
if (BIF_ARG_1 != am_true) {
BIF_ERROR(BIF_P,BADARG);
}
#endif
BIF_RET(am_true);
}
-
-
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 2203182a0d..01cca90a7a 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -29,17 +29,35 @@ extern Export *erts_convert_time_unit_trap;
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_4 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST Process* A__p, Eterm* BIF__ARGS, BeamInstr *A__I
+#define BIF_CALL_ARGS A__p, BIF__ARGS, A__I
+
+#define BIF_ALIST_0 BIF_ALIST
+#define BIF_ALIST_1 BIF_ALIST
+#define BIF_ALIST_2 BIF_ALIST
+#define BIF_ALIST_3 BIF_ALIST
+#define BIF_ALIST_4 BIF_ALIST
#define BIF_ARG_1 (BIF__ARGS[0])
#define BIF_ARG_2 (BIF__ARGS[1])
#define BIF_ARG_3 (BIF__ARGS[2])
#define BIF_ARG_4 (BIF__ARGS[3])
+#define BIF_I A__I
+
+/* NBIF_* is for bif calls from native code... */
+
+#define NBIF_ALIST Process* A__p, Eterm* BIF__ARGS
+#define NBIF_CALL_ARGS A__p, BIF__ARGS
+
+#define NBIF_ALIST_0 NBIF_ALIST
+#define NBIF_ALIST_1 NBIF_ALIST
+#define NBIF_ALIST_2 NBIF_ALIST
+#define NBIF_ALIST_3 NBIF_ALIST
+#define NBIF_ALIST_4 NBIF_ALIST
+
+typedef BIF_RETTYPE (*ErtsBifFunc)(BIF_ALIST);
+
#define ERTS_IS_PROC_OUT_OF_REDS(p) \
((p)->fcalls > 0 \
? 0 \
@@ -159,7 +177,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
return THE_NON_VALUE; \
} while (0)
@@ -167,7 +185,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
return THE_NON_VALUE; \
} while (0)
@@ -176,7 +194,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
return THE_NON_VALUE; \
@@ -186,7 +204,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -202,7 +220,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -210,7 +228,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -219,7 +237,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
(Ret) = THE_NON_VALUE; \
@@ -229,7 +247,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -480,6 +498,43 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+#endif
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc);
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type)
+{
+ return erts_schedule_bif(proc, argv, i, dbf, sched_type,
+ THE_NON_VALUE, THE_NON_VALUE, -1);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#ifdef ERL_WANT_HIPE_BIF_WRAPPER__
#ifndef HIPE
@@ -510,16 +565,16 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args); \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args) \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST); \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST) \
{ \
BIF_RETTYPE res; \
- hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
- res = BIF_NAME ## _ ## ARITY (c_p, args); \
- if (is_value(res) || c_p->freason != TRAP) { \
- hipe_unreserve_beam_trap_frame(c_p); \
+ hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, ARITY); \
+ res = nbif_impl_ ## BIF_NAME ## _ ## ARITY (NBIF_CALL_ARGS); \
+ if (is_value(res) || BIF_P->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(BIF_P); \
} \
return res; \
}
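To illustrate the widened calling convention above, here is a minimal sketch (not from the patch) of a BIF that reschedules itself; my_example_bif_1, enough_reductions_left() and do_the_work() are hypothetical:

    /* BIF_ALIST_1 now expands to (Process* A__p, Eterm* BIF__ARGS,
     * BeamInstr *A__I), so a BIF also sees the calling instruction
     * pointer as BIF_I, which erts_schedule_bif()/erts_reschedule_bif()
     * need in order to reschedule the call. */
    BIF_RETTYPE my_example_bif_1(BIF_ALIST_1)
    {
        if (!enough_reductions_left(BIF_P)) {
            /* Reschedule this same call on a dirty CPU scheduler; the
             * process traps and the result is delivered later. */
            return erts_reschedule_bif(BIF_P, BIF__ARGS, BIF_I,
                                       my_example_bif_1,
                                       ERTS_SCHED_DIRTY_CPU);
        }
        BIF_RET(do_the_work(BIF_P, BIF_ARG_1));
    }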
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 80db4eb6ff..ce2cffa498 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -23,22 +23,24 @@
#
# Lines starting with '#' are ignored.
#
-# <bif-decl> ::= "bif" <bif> <C-name>* | "ubif" <bif> <C-name>*
+# <bif-decl> ::= "bif" <bif> <C-name>* |
+# "ubif" <bif> <C-name>* |
+# "gcbif" <bif> <C-name>*
# <bif> ::= <module> ":" <name> "/" <arity>
#
-# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper,
-# or rather; the trace entry point in the export entry is the same
-# as the normal entry point, and no trace wrapper is generated.
+# ubif: Use for operators and for guard BIFs that never build anything
+# on the heap (such as tuple_size/1).
#
-# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
+# gcbif: Use for guard BIFs that may build on the heap (such as abs/1).
+#
+# bif: Use for all other BIFs.
#
# Add new BIFs to the end of the file.
#
-# Note: Guards BIFs require special support in the compiler (to be able to actually
-# call them from within a guard).
+# Note: Guard BIFs usually require special support in the compiler.
#
-ubif erlang:abs/1
+gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
bif erlang:adler32_combine/3
@@ -62,11 +64,11 @@ bif erlang:exit/1
bif erlang:exit/2
bif erlang:external_size/1
bif erlang:external_size/2
-ubif erlang:float/1
+gcbif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
-bif erlang:garbage_collect/0
+bif erts_internal:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
@@ -79,7 +81,7 @@ bif erlang:phash2/2
ubif erlang:hd/1
bif erlang:integer_to_list/1
bif erlang:is_alive/0
-ubif erlang:length/1
+gcbif erlang:length/1
bif erlang:link/1
bif erlang:list_to_atom/1
bif erlang:list_to_binary/1
@@ -126,10 +128,10 @@ bif erlang:processes/0
bif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
-ubif erlang:round/1
+gcbif erlang:round/1
ubif erlang:self/0
bif erlang:setelement/3
-ubif erlang:size/1
+gcbif erlang:size/1
bif erlang:spawn/3
bif erlang:spawn_link/3
bif erlang:split_binary/2
@@ -139,7 +141,7 @@ bif erlang:term_to_binary/2
bif erlang:throw/1
bif erlang:time/0
ubif erlang:tl/1
-ubif erlang:trunc/1
+gcbif erlang:trunc/1
bif erlang:tuple_to_list/1
bif erlang:universaltime/0
bif erlang:universaltime_to_localtime/1
@@ -162,7 +164,7 @@ bif erts_internal:port_connect/2
bif erts_internal:request_system_task/3
bif erts_internal:request_system_task/4
-bif erts_internal:check_process_code/2
+bif erts_internal:check_process_code/1
bif erts_internal:map_to_tuple_keys/1
bif erts_internal:term_type/1
@@ -417,6 +419,9 @@ bif erts_debug:set_internal_state/2
bif erts_debug:display/1
bif erts_debug:dist_ext_to_term/2
bif erts_debug:instructions/0
+bif erts_debug:dirty_cpu/2
+bif erts_debug:dirty_io/2
+bif erts_debug:dirty/3
#
# Monitor testing bif's...
@@ -464,8 +469,8 @@ bif erlang:list_to_existing_atom/1
#
ubif erlang:is_bitstring/1
ubif erlang:tuple_size/1
-ubif erlang:byte_size/1
-ubif erlang:bit_size/1
+gcbif erlang:byte_size/1
+gcbif erlang:bit_size/1
bif erlang:list_to_bitstring/1
bif erlang:bitstring_to_list/1
@@ -517,8 +522,8 @@ bif erlang:binary_to_term/2
#
# The searching/splitting/substituting thingies
#
-ubif erlang:binary_part/2
-ubif erlang:binary_part/3
+gcbif erlang:binary_part/2
+gcbif erlang:binary_part/3
bif binary:compile_pattern/1
bif binary:match/2
@@ -610,7 +615,7 @@ bif os:unsetenv/1
bif re:inspect/2
ubif erlang:is_map/1
-ubif erlang:map_size/1
+gcbif erlang:map_size/1
bif maps:to_list/1
bif maps:find/2
bif maps:get/2
@@ -657,7 +662,13 @@ bif erlang:has_prepared_code_on_load/1
bif maps:take/2
#
-# Obsolete
+# New in 20.0
#
-bif erlang:hash/2
+gcbif erlang:floor/1
+gcbif erlang:ceil/1
+bif math:floor/1
+bif math:ceil/1
+bif math:fmod/2
+bif os:set_signal/2
+bif erts_internal:maps_to_list/2
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 071a356260..4dd8316dad 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -47,13 +47,8 @@ void
erts_init_binary(void)
{
/* Verify Binary alignment... */
- if ((((UWord) &((Binary *) 0)->orig_bytes[0]) % ((UWord) 8)) != 0) {
- /* I assume that any compiler should be able to optimize this
- away. If not, this test is not very expensive... */
- erts_exit(ERTS_ABORT_EXIT,
- "Internal error: Address of orig_bytes[0] of a Binary"
- " is *not* 8-byte aligned\n");
- }
+ ERTS_CT_ASSERT((offsetof(Binary,orig_bytes) % 8) == 0);
+ ERTS_CT_ASSERT((offsetof(ErtsMagicBinary,u.aligned.data) % 8) == 0);
erts_init_trap_export(&binary_to_list_continue_export,
am_erts_internal, am_binary_to_list_continue, 1,
@@ -107,7 +102,7 @@ new_binary(Process *p, byte *buf, Uint len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -144,7 +139,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -355,9 +350,10 @@ typedef struct {
Uint bitoffs;
} ErtsB2LState;
-static void b2l_state_destructor(Binary *mbp)
+static int b2l_state_destructor(Binary *mbp)
{
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+ return 1;
}
static BIF_RETTYPE
@@ -445,18 +441,17 @@ binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes,
sp->size = size;
sp->bitoffs = bitoffs;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
}
}
static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
-
ASSERT(BIF_P->flags & F_DISABLE_GC);
return binary_to_list_chunk(BIF_P,
@@ -729,12 +724,13 @@ list_to_binary_engine(ErtsL2BState *sp)
}
}
-static void
+static int
l2b_state_destructor(Binary *mbp)
{
ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+ return 1;
}
static ERTS_INLINE Eterm
@@ -792,8 +788,8 @@ list_to_binary_chunk(Eterm mb_eterm,
ERTS_L2B_STATE_MOVE(new_sp, sp);
sp = new_sp;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb_eterm = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ASSERT(is_value(mb_eterm));
@@ -839,9 +835,9 @@ list_to_binary_chunk(Eterm mb_eterm,
static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
ASSERT(BIF_P->flags & F_DISABLE_GC);
return list_to_binary_chunk(BIF_ARG_1,
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 4c7c910419..4dad5736c5 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -178,19 +178,28 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
prefix = "";
}
- if (mon->type == MON_ORIGIN) {
- if (is_atom(mon->pid)) { /* dist by name */
- ASSERT(is_node_name_atom(mon->pid));
+ switch (mon->type) {
+ case MON_ORIGIN:
+ if (is_atom(mon->u.pid)) { /* dist by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
- mon->pid, mon->ref);
+ mon->u.pid, mon->ref);
} else if (is_atom(mon->name)){ /* local by name */
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
erts_this_dist_entry->sysname, mon->ref);
} else { /* local and distributed by pid */
- erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->pid, mon->ref);
+ erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->u.pid, mon->ref);
}
- } else { /* MON_TARGET */
- erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->pid, mon->ref);
+ break;
+ case MON_TARGET:
+ erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->u.pid, mon->ref);
+ break;
+ case MON_NIF_TARGET: {
+ ErtsResource* rsrc = mon->u.resource;
+ erts_print(to, to_arg, "%s{from,{%T,%T},%T}", prefix, rsrc->type->module,
+ rsrc->type->name, mon->ref);
+ break;
+ }
}
}
@@ -230,9 +239,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
* Display the initial function name
*/
erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
- p->u.initial[INITIAL_MOD],
- p->u.initial[INITIAL_FUN],
- p->u.initial[INITIAL_ARI]);
+ p->u.initial.module,
+ p->u.initial.function,
+ p->u.initial.arity);
if (p->current != NULL) {
if (running) {
@@ -241,9 +250,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Current call: ");
}
erts_print(to, to_arg, "%T:%T/%bpu\n",
- p->current[0],
- p->current[1],
- p->current[2]);
+ p->current->module,
+ p->current->function,
+ p->current->arity);
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
@@ -290,9 +299,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "timeout");
else
erts_print(to, to_arg, "%T:%T/%bpu\n",
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- scb->ct[j]->code[2]);
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ scb->ct[j]->info.mfa.arity);
}
erts_print(to, to_arg, "\n");
}
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index 584a605771..1b451bf921 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -56,12 +56,49 @@
# endif
# include "sys.h"
#endif
+
+#include "beam_opcodes.h"
+
struct process;
#define ERTS_NUM_CODE_IX 3
typedef unsigned ErtsCodeIndex;
+typedef struct ErtsCodeMFA_ {
+ Eterm module;
+ Eterm function;
+ Uint arity;
+} ErtsCodeMFA;
+
+/*
+ * The ErtsCodeInfo structure is used both in the Export entry
+ * and in the code as the function header.
+ */
+
+/* If you change the size of this, you also have to update the code
+ in ops.tab to reflect the new func_info size */
+typedef struct ErtsCodeInfo_ {
+ BeamInstr op; /* OpCode(i_func_info) */
+ BeamInstr native; /* Used by hipe and trace to store extra data */
+ ErtsCodeMFA mfa;
+} ErtsCodeInfo;
+
+/* Get the code associated with an ErtsCodeInfo ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci);
+
+/* Get the ErtsCodeInfo from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I);
+
+/* Get the code associated with an ErtsCodeMFA ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa);
+
+/* Get the ErtsCodeMFA from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I);
/* Called once at emulator initialization.
*/
@@ -121,10 +158,47 @@ void erts_abort_staging_code_ix(void);
int erts_has_code_write_permission(void);
#endif
-
+/* module/function/arity can be NIL/NIL/-1 when the MFA is pointing to some
+ invalid code, for instance unloaded_fun. */
+#define ASSERT_MFA(MFA) \
+ ASSERT((is_atom((MFA)->module) || is_nil((MFA)->module)) && \
+ (is_atom((MFA)->function) || is_nil((MFA)->function)) && \
+ (((MFA)->arity >= 0 && (MFA)->arity < 1024) || (MFA)->arity == -1))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci)
+{
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return (BeamInstr*)(ci + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
+{
+ ErtsCodeInfo *ci = ((ErtsCodeInfo *)(((char *)(I)) - sizeof(ErtsCodeInfo)));
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return ci;
+}
+
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa)
+{
+ ASSERT_MFA(mfa);
+ return (BeamInstr*)(mfa + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
+{
+ ErtsCodeMFA *mfa = ((ErtsCodeMFA *)(((char *)(I)) - sizeof(ErtsCodeMFA)));
+ ASSERT_MFA(mfa);
+ return mfa;
+}
+
extern erts_smp_atomic32_t the_active_code_index;
extern erts_smp_atomic32_t the_staging_code_index;
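A small sketch (not part of the patch) of how the new accessors replace the old raw func_info word indexing; code_ptr is an assumed pointer to the first instruction of a loaded function:

    /* Previously the function header was addressed as raw words:
     *   fp[2] = module, fp[3] = function, fp[4] = arity.
     * With ErtsCodeInfo the same data is reached through a typed
     * struct, and header/code pointers are converted with the
     * inline helpers: */
    ErtsCodeInfo *ci = erts_code_to_codeinfo(code_ptr);  /* code -> header */
    Eterm module   = ci->mfa.module;
    Eterm function = ci->mfa.function;
    Uint  arity    = ci->mfa.arity;

    /* ...and back: the first executable instruction of the function. */
    BeamInstr *first_instr = erts_codeinfo_to_code(ci);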
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 78db141cfc..e567eabc82 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -40,7 +40,8 @@ static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
/*
* Copy object "obj" to process p.
*/
-Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
+Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
+{
if (!is_immed(obj)) {
Uint size = size_object(obj);
Eterm* hp = HAllocX(to, size, extra);
@@ -70,33 +71,46 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
* Return the "flat" size of the object.
*/
-Uint size_object(Eterm obj)
+#define in_literal_purge_area(PTR) \
+ (lit_purge_ptr && ( \
+ (lit_purge_ptr <= (PTR) && \
+ (PTR) < (lit_purge_ptr + lit_purge_sz))))
+
+Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
Uint sum = 0;
Eterm* ptr;
int arity;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
-
DECLARE_ESTACK(s);
-
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));
for (;;) {
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
- sum += 2;
ptr = list_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ sum += 2;
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
{
- Eterm hdr = *boxed_val(obj);
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
ASSERT(is_header(hdr));
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -279,10 +293,6 @@ do { \
#define COUNT_OFF_HEAP (0)
-#define IN_LITERAL_PURGE_AREA(info, ptr) \
- ((info)->range_ptr && ( \
- (info)->range_ptr <= (ptr) && \
- (ptr) < ((info)->range_ptr + (info)->range_sz)))
/*
* Return the real size of an object and find sharing information
* This currently returns the same as erts_debug:size/1.
@@ -599,7 +609,7 @@ cleanup:
/*
* Copy a structure to a heap.
*/
-Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz)
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
{
char* hstart;
Uint hsize;
@@ -616,6 +626,8 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
Eterm hdr;
Eterm *hend;
int i;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm org_obj = obj;
Uint org_sz = sz;
@@ -651,7 +663,6 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy:
while (hp != htop) {
obj = *hp;
-
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1:
hp++;
@@ -667,6 +678,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_list:
tailp = argp;
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
for (;;) {
tp = tailp;
elem = CAR(objp);
@@ -674,18 +689,23 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
hbot -= 2;
CAR(hbot) = elem;
tailp = &CDR(hbot);
- }
- else {
+ } else {
CAR(htop) = elem;
tailp = &CDR(htop);
htop += 2;
}
*tp = make_list(tailp - 1);
obj = CDR(objp);
+
if (!is_list(obj)) {
break;
}
objp = list_val(obj);
+
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
}
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
@@ -695,7 +715,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
-
+
case TAG_PRIMARY_BOXED:
if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
hp++;
@@ -705,6 +725,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_boxed:
objp = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *argp = obj;
+ break;
+ }
hdr = *objp;
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -765,7 +789,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
extra_bytes = 1;
} else {
extra_bytes = 0;
- }
+ }
real_size = size+extra_bytes;
objp = binary_val(real_bin);
if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
@@ -780,7 +804,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
} else {
ProcBin* from = (ProcBin *) objp;
ProcBin* to;
-
+
ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
if (from->flags) {
erts_emasculate_writable_binary(from);
@@ -834,20 +858,24 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing *etp = (ExternalThing *) htop;
-
+ ExternalThing *etp = (ExternalThing *) objp;
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ L_off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) htop;
i = thing_arityval(hdr) + 1;
+ *argp = make_boxed(htop);
tp = htop;
while (i--) {
*htop++ = *objp++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)etp;
- erts_smp_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
- *argp = make_external(tp);
}
break;
case MAP_SUBTAG:
@@ -875,6 +903,13 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case BIN_MATCHSTATE_SUBTAG:
erts_exit(ERTS_ABORT_EXIT,
"copy_struct: matchstate term not allowed");
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(objp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
+ erts_refc_inc(&mreft->mb->refc, 2);
+ goto L_off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
i = thing_arityval(hdr)+1;
hbot -= i;
@@ -900,6 +935,12 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
*bsz = hend - hbot;
} else {
#ifdef DEBUG
+ if (!eq(org_obj, res)) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " not equal to copy %T\n",
+ org_obj, res);
+ }
if (htop != hbot)
erts_exit(ERTS_ABORT_EXIT,
"Internal error in copy_struct() when copying %T:"
@@ -1036,6 +1077,8 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Uint e;
unsigned sz;
Eterm* ptr;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1081,7 +1124,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1132,7 +1175,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1298,6 +1341,8 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
Eterm* resp;
Eterm *hbot, *hend;
unsigned remaining;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1347,11 +1392,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1415,11 +1460,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1615,20 +1660,33 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
}
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG: {
- ExternalThing *etp = (ExternalThing *) hp;
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing *etp = (ExternalThing *) ptr;
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) hp;
sz = thing_arityval(hdr);
- *resp = make_external(hp);
+ *resp = make_boxed(hp);
*hp++ = hdr;
ptr++;
while (sz-- > 0) {
*hp++ = *ptr++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*) etp;
- erts_smp_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
goto cleanup_next;
}
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(ptr)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
+ erts_refc_inc(&mreft->mb->refc, 2);
+ goto off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
sz = thing_arityval(hdr);
*resp = make_boxed(hp);
@@ -1825,6 +1883,15 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
off_heap->first = ohh;
}
break;
+ case REF_SUBTAG: {
+ ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
+ if (is_magic_ref_thing(rtp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
+ erts_refc_inc(&mreft->mb->refc, 2);
+ goto off_heap_common;
+ }
+ /* Fall through... */
+ }
default:
{
int tari = header_arity(val);
@@ -1923,8 +1990,11 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
if (is_header(val)) {
struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ move_boxed(&ptr, val, &hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hdr))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -1937,7 +2007,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, hp, &dummy_ref);
+ move_cons(&ptr, val, &hp, &dummy_ref);
ptr += 2;
}
}
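
The copy routines above now gate literal copying on a per-call erts_literal_area_t (litopt) instead of the removed IN_LITERAL_PURGE_AREA macro. A minimal sketch of what the in_literal_purge_area() test used above presumably expands to, assuming it mirrors the removed macro and relies on the lit_purge_ptr/lit_purge_sz locals set up from litopt:

/* Sketch (assumption): the range test behind in_literal_purge_area(ptr),
 * reconstructed from the removed IN_LITERAL_PURGE_AREA macro; lit_purge_ptr
 * and lit_purge_sz are the locals derived from the litopt argument above. */
#define in_literal_purge_area(PTR)                \
    (lit_purge_ptr && (                           \
        (lit_purge_ptr <= (PTR)) &&               \
        ((PTR) < (lit_purge_ptr + lit_purge_sz))))

Only literals that fall inside this range get copied; literals outside it are passed through verbatim, which is what the new early-out branches in copy_struct_x() and the shared-copy routines do.
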
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 52b2174609..d1c2da9074 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -25,6 +25,7 @@
/* define this to get a lot of debug output */
/* #define ERTS_DIST_MSG_DBG */
+/* #define ERTS_RAW_DIST_MSG_DBG */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -258,12 +259,12 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (!rp)
goto done;
if (mon->type == MON_ORIGIN) {
- /* local pid is beeing monitored */
+ /* local pid is being monitored */
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); nope, can happen during process exit */
if (rmon != NULL) {
@@ -277,10 +278,11 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
if (rmon != NULL) {
+ ASSERT(rmon->type == MON_ORIGIN);
ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
watched = (is_atom(rmon->name)
? TUPLE2(lhp, rmon->name, dep->sysname)
- : rmon->pid);
+ : rmon->u.pid);
#ifdef ERTS_SMP
rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
@@ -712,7 +714,7 @@ static void clear_dist_entry(DistEntry *dep)
}
}
-void erts_dsend_context_dtor(Binary* ctx_bin)
+int erts_dsend_context_dtor(Binary* ctx_bin)
{
ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
switch (ctx->dss.phase) {
@@ -729,6 +731,8 @@ void erts_dsend_context_dtor(Binary* ctx_bin)
}
if (ctx->dep_to_deref)
erts_deref_dist_entry(ctx->dep_to_deref);
+
+ return 1;
}
Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
@@ -740,7 +744,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
erts_dsend_context_dtor);
struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
- Eterm* hp = HAlloc(p, PROC_BIN_SIZE);
+ Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
@@ -749,7 +753,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
dst->ctx.dss.acmp = &dst->acm;
}
- return erts_mk_magic_binary_term(&hp, &MSO(p), ctx_bin);
+ return erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
}
@@ -794,7 +798,7 @@ erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
}
-/* A local process that's beeing monitored by a remote one exits. We send:
+/* A local process that's being monitored by a remote one exits. We send:
{DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason},
which is rather sad as only the ref is needed, no pid's... */
int
@@ -1388,7 +1392,7 @@ int erts_net_message(Port *prt,
if (mon == NULL) {
break;
}
- watched = mon->pid;
+ watched = mon->u.pid;
erts_destroy_monitor(mon);
rp = erts_pid2proc_opt(NULL, 0,
watched, ERTS_PROC_LOCK_LINK,
@@ -1546,7 +1550,7 @@ int erts_net_message(Port *prt,
if (mon == NULL) {
break;
}
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
erts_destroy_monitor(mon);
if (rp == NULL) {
@@ -1563,7 +1567,7 @@ int erts_net_message(Port *prt,
watched = (is_not_nil(mon->name)
? TUPLE2(&lhp[0], mon->name, sysname)
- : mon->pid);
+ : mon->u.pid);
erts_queue_monitor_message(rp, &rp_locks,
ref, am_process, watched, reason);
@@ -2412,21 +2416,21 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
void *arg = ((struct print_to_data *) vptdp)->arg;
Process *rp;
ErtsMonitor *rmon;
- rp = erts_proc_lookup(mon->pid);
+ rp = erts_proc_lookup(mon->u.pid);
if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
- erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
+ erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->u.pid);
} else if (mon->type == MON_ORIGIN) {
/* Local pid is being monitored */
erts_print(to, arg, "Remotely monitored by: %T %T\n",
- mon->pid, rmon->pid);
+ mon->u.pid, rmon->u.pid);
} else {
- erts_print(to, arg, "Remote monitoring: %T ", mon->pid);
- if (is_not_atom(rmon->pid))
- erts_print(to, arg, "%T\n", rmon->pid);
+ erts_print(to, arg, "Remote monitoring: %T ", mon->u.pid);
+ if (is_not_atom(rmon->u.pid))
+ erts_print(to, arg, "%T\n", rmon->u.pid);
else
erts_print(to, arg, "{%T, %T}\n",
rmon->name,
- rmon->pid); /* which in this case is the
+ rmon->u.pid); /* which in this case is the
remote system name... */
}
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index e82b416286..8f6be1061a 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -115,8 +115,8 @@ extern int erts_is_alive;
* erts_dsig_prepare() prepares a send of a distributed signal.
* One of the values defined below are returned. If the returned
* value is another than ERTS_DSIG_PREP_CONNECTED, the
- * distributed signal cannot be sent before apropriate actions
- * have been taken. Apropriate actions would typically be setting
+ * distributed signal cannot be sent before appropriate actions
+ * have been taken. Appropriate actions would typically be setting
* up the connection.
*/
@@ -375,7 +375,7 @@ extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
-extern void erts_dsend_context_dtor(Binary*);
+extern int erts_dsend_context_dtor(Binary*);
extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
extern int erts_dist_command(Port *prt, int reds);
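
Magic binary destructors change from void to int in this patch (erts_dsend_context_dtor above, and the cleanup_* callbacks in erl_bif_binary.c and erl_bif_re.c further down); every converted destructor simply returns 1. A hypothetical sketch of a destructor under the new signature, assuming (this diff does not spell it out) that a nonzero return keeps the old behaviour of letting the binary be freed after cleanup:

/* Hypothetical destructor; MyState and my_state_cleanup are illustrative,
 * not part of this patch. Only the signature change is taken from the diff. */
typedef struct { void *owned; } MyState;

static void my_state_cleanup(MyState *st)
{
    /* release whatever st->owned refers to */
}

static int my_state_dtor(Binary *bp)
{
    MyState *st = (MyState *) ERTS_MAGIC_BIN_DATA(bp);
    my_state_cleanup(st);
    return 1;   /* every destructor converted in this diff returns 1 */
}
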
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
index 6f70d5961e..f6fc28b801 100644
--- a/erts/emulator/beam/dtrace-wrapper.h
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -74,7 +74,7 @@
#if defined(_SDT_PROBE) && !defined(STAP_PROBE11)
/* SLF: This is Ubuntu 11-style SystemTap hackery */
-/* work arround for missing STAP macro */
+/* workaround for missing STAP macro */
#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
_SDT_PROBE(provider, name, 11, \
(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 214fb1f2af..a374593c5d 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -44,9 +44,11 @@
#include "erl_hl_timer.h"
#include "erl_cpu_topology.h"
#include "erl_thr_queue.h"
+#include "erl_nfunc_sched.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
+#include "erl_bif_unique.h"
#define GET_ERL_GF_ALLOC_IMPL
#include "erl_goodfit_alloc.h"
@@ -151,8 +153,7 @@ typedef struct {
int internal;
Uint req_sched;
Process *proc;
- Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ ErtsIRefStorage iref;
int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
} ErtsAllocInfoReq;
@@ -373,10 +374,16 @@ set_default_exec_alloc_opts(struct au_init *ip)
ip->init.util.rmbcmt = 0;
ip->init.util.acul = 0;
+# ifdef ERTS_HAVE_EXEC_MMAPPER
ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
ip->init.util.mseg_mmapper = &erts_exec_mmapper;
+# else
+ ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
+# endif
}
#endif /* ERTS_ALC_A_EXEC */
@@ -657,6 +664,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= sizeof(ErtsDrvEventDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
= sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
+ = sizeof(ErtsNifSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
= sizeof(ErtsMessageRef);
#ifdef ERTS_SMP
@@ -673,6 +682,12 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
+ = sizeof(NifExportTrace);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
+ = sizeof(ErtsNSchedMagicRefTableEntry);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
+ = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
#ifdef HARD_DEBUG
hdbg_init();
@@ -1571,7 +1586,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
break;
case 'X':
if (has_prefix("scs", argv[i]+3)) {
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
init->mseg.exec_mmap.scs =
#endif
get_mb_value(argv[i]+6, argv, &i);
@@ -2431,6 +2446,10 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi,
ERTS_ALC_T_ABIF_TIMER);
#endif
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_NIF_EXP_TRACE);
}
if (want.atom || want.atom_used) {
@@ -2852,7 +2871,7 @@ erts_allocator_info(fmtfn_t to, void *arg)
erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
#endif
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n");
erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis);
#endif
@@ -3010,7 +3029,7 @@ erts_allocator_options(void *proc)
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
terms[length++] = ERTS_MAKE_AM("literal_mmap");
#endif
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
terms[length++] = ERTS_MAKE_AM("exec_mmap");
#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
@@ -3102,7 +3121,7 @@ reply_alloc_info(void *vair)
# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
struct erts_mmap_info_struct mmap_info_literal;
# endif
-# ifdef ERTS_ALC_A_EXEC
+# ifdef ERTS_HAVE_EXEC_MMAPPER
struct erts_mmap_info_struct mmap_info_exec;
# endif
#endif
@@ -3126,9 +3145,10 @@ reply_alloc_info(void *vair)
while (1) {
if (hpp)
- ref_copy = STORE_NC(hpp, ohp, air->ref);
+ ref_copy = erts_iref_storage_make_ref(&air->iref,
+ hpp, ohp, 0);
else
- *szp += REF_THING_SIZE;
+ *szp += erts_iref_storage_heap_size(&air->iref);
ai_list = NIL;
for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
@@ -3232,7 +3252,7 @@ reply_alloc_info(void *vair)
erts_bld_atom(hpp,szp,"literal_mmap"),
ainfo);
# endif
-# ifdef ERTS_ALC_A_EXEC
+# ifdef ERTS_HAVE_EXEC_MMAPPER
ai_list = erts_bld_cons(hpp, szp,
ainfo, ai_list);
ainfo = (air->only_sz ? NIL :
@@ -3357,8 +3377,10 @@ reply_alloc_info(void *vair)
erts_smp_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0) {
+ erts_iref_storage_clean(&air->iref);
aireq_free(air);
+ }
}
int
@@ -3371,7 +3393,6 @@ erts_request_alloc_info(struct process *c_p,
ErtsAllocInfoReq *air = aireq_alloc();
Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
Eterm alist;
- Eterm *hp;
int airix = 0, ai;
air->req_sched = erts_get_scheduler_id();
@@ -3385,8 +3406,7 @@ erts_request_alloc_info(struct process *c_p,
if (is_not_internal_ref(ref))
return 0;
- hp = &air->ref_heap[0];
- air->ref = STORE_NC(&hp, NULL, ref);
+ erts_iref_storage_save(&air->iref, ref);
if (is_not_list(allocs))
return 0;
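
The allocation-info request no longer keeps a copy of the caller's reference on a private ref_heap; it is stashed in an ErtsIRefStorage and rebuilt per reply. A sketch of the round trip, restricted to the calls visible in this patch (the meaning of the final 0 argument to erts_iref_storage_make_ref() is an assumption, not documented here):

/* Sketch of the ErtsIRefStorage round trip used by the alloc-info request. */
static void iref_example(Eterm ref, Eterm **hpp, ErlOffHeap *ohp)
{
    ErtsIRefStorage iref;
    Uint need;
    Eterm ref_copy;

    erts_iref_storage_save(&iref, ref);          /* stash the internal ref */
    need = erts_iref_storage_heap_size(&iref);   /* heap words a copy will need */
    (void) need;                                 /* caller reserves these words */
    ref_copy = erts_iref_storage_make_ref(&iref, hpp, ohp, 0);
    (void) ref_copy;
    /* the stored copy is eventually released with erts_iref_storage_clean(&iref),
     * as done when the request is torn down in reply_alloc_info() above */
}
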
@@ -4129,12 +4149,20 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *dptr;
Uint size;
+ int free_pattern = n;
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
- sys_memset((void *) dptr, n, size + FENCE_SZ);
+#ifdef ERTS_ALC_A_EXEC
+# if defined(__i386__) || defined(__x86_64__)
+ if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
+ free_pattern = 0x0f; /* Illegal instruction */
+ }
+# endif
+#endif
+ sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
(*real_af->free)(n, real_af->extra, dptr);
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6e8710eb8a..3d5de72ee7 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -65,7 +65,7 @@
# --- Allocator declarations -------------------------------------------------
#
-# If, and only if, the same thread performes *all* allocations,
+# If, and only if, the same thread performs *all* allocations,
# reallocations and deallocations of all memory types that are handled
# by a specific allocator (<ALLOCATOR> in type declaration), set
# <MULTI_THREAD> for this specific allocator to false; otherwise, set
@@ -280,6 +280,11 @@ type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
type DIRTY_START STANDARD PROCESSES dirty_start
type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived
+type MREF_NSCHED_ENT FIXED_SIZE SYSTEM nsched_magic_ref_entry
+type MREF_ENT STANDARD SYSTEM magic_ref_entry
+type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets
+type MREF_TAB LONG_LIVED SYSTEM magic_ref_table
+type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
@@ -376,7 +381,8 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
-type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
+type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
+type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
@@ -396,6 +402,7 @@ type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
+type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
type POLLSET LONG_LIVED SYSTEM pollset
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 230ca6ccbb..a6bac06478 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -907,7 +907,9 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
#elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
-/* Used by literal allocator that has its own mmapper (super carrier) */
+/* For allocators that have their own mmapper (super carrier),
+ * like literal_alloc and exec_alloc on amd64
+ */
void*
erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
@@ -948,6 +950,50 @@ erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
}
#endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
+#if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+
+/*
+ * For exec_alloc on non-amd64 that just needs memory with PROT_EXEC
+ */
+void*
+erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
+
+ if (res) {
+ int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void*
+erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
+{
+ void *res;
+
+ if (seg && old_size) {
+ int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
+ if (res) {
+ int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void
+erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
+{
+ int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ erts_alcu_mseg_dealloc(allctr, seg, size, flags);
+}
+#endif /* ERTS_ALC_A_EXEC && !ERTS_HAVE_EXEC_MMAPPER */
+
#endif /* HAVE_ERTS_MSEG */
void*
@@ -1361,6 +1407,7 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
res = fix->list;
if (res) {
@@ -1372,8 +1419,6 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
fix_cpool_check_shrink(allctr, type, fix, NULL);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (size >= allctr->sbc_threshold) {
Block_t *blk;
blk = create_carrier(allctr, size, CFLG_SBC);
@@ -1493,6 +1538,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used++;
@@ -1515,8 +1561,6 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (fix->u.nocpool.limit < fix->u.nocpool.used)
fix->u.nocpool.limit = fix->u.nocpool.used;
if (fix->u.nocpool.max_used < fix->u.nocpool.used)
@@ -5142,7 +5186,7 @@ erts_alcu_sz_info(Allctr_t *allctr,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -5228,7 +5272,7 @@ erts_alcu_info(Allctr_t *allctr,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -6039,7 +6083,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(FreeBlkFtr_t));
-#if ERTS_SMP
+#ifdef ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
sz += (init->fix ?
@@ -6090,18 +6134,18 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
#ifdef USE_THREADS
if (init->ts) {
allctr->thread_safe = 1;
-
+
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x_opt(&allctr->mutex,
"alcu_allocator",
make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC,1);
+ ERTS_LCNT_LT_ALLOC);
#else
erts_mtx_init_x(&allctr->mutex,
"alcu_allocator",
- make_small(allctr->alloc_no),1);
+ make_small(allctr->alloc_no));
#endif /*ERTS_ENABLE_LOCK_COUNT*/
-
+
#ifdef DEBUG
allctr->debug.saved_tid = 0;
#endif
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 81180382af..f570703f25 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -210,6 +210,12 @@ void* erts_alcu_mmapper_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
void* erts_alcu_mmapper_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
void erts_alcu_mmapper_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
# endif
+
+# if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+void* erts_alcu_exec_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_exec_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_exec_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+# endif
#endif /* HAVE_ERTS_MSEG */
void* erts_alcu_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 6e10980b6b..62a752d854 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -239,13 +239,13 @@ static void dump_ac_node(ACNode *node, int indent, int ch);
/*
* Callback for the magic binary
*/
-static void cleanup_my_data_ac(Binary *bp)
+static int cleanup_my_data_ac(Binary *bp)
{
- return;
+ return 1;
}
-static void cleanup_my_data_bm(Binary *bp)
+static int cleanup_my_data_bm(Binary *bp)
{
- return;
+ return 1;
}
/*
@@ -380,7 +380,7 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
qbuff[qt++] = child;
/* Search for correct failure function, follow the parent's
failure function until you find a similar transition
- funtion to this child's */
+ function to this child's */
r = parent->h;
while (r != NULL && r->g[i] == NULL) {
r = r->h;
@@ -1010,8 +1010,8 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1)
if (do_binary_match_compile(BIF_ARG_1,&tag,&bin)) {
BIF_ERROR(BIF_P,BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE+3);
- ret = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE+3);
+ ret = erts_mk_magic_ref(&hp, &MSO(BIF_P), bin);
ret = TUPLE2(hp, tag, ret);
BIF_RET(ret);
}
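
Each trap state that used to be exposed as a magic-binary ProcBin term is now wrapped in a magic reference: the heap need drops from PROC_BIN_SIZE to ERTS_MAGIC_REF_THING_SIZE, erts_mk_magic_binary_term() becomes erts_mk_magic_ref(), and the Binary is later recovered with erts_magic_ref2bin() instead of reaching through a ProcBin. A condensed sketch of the pattern, using only calls that appear in this diff:

/* Sketch of the wrap/unwrap pattern used throughout this patch. */
static Eterm wrap_magic(Process *p, Binary *bin)
{
    Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    return erts_mk_magic_ref(&hp, &MSO(p), bin);
}

static Binary *unwrap_magic(Eterm magic_ref)
{
    ASSERT(is_internal_magic_ref(magic_ref));
    return erts_magic_ref2bin(magic_ref);
}
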
@@ -1426,11 +1426,11 @@ binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
goto badarg;
}
if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ !is_internal_magic_ref(tp[2])) {
goto badarg;
}
bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
+ bin = erts_magic_ref2bin(tp[2]);
if (bfs.type == am_bm &&
ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
goto badarg;
@@ -1448,8 +1448,8 @@ binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
bfs.global_result = &do_match_global_result;
runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
+ Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1512,11 +1512,11 @@ binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
goto badarg;
}
if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ !is_internal_magic_ref(tp[2])) {
goto badarg;
}
bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
+ bin = erts_magic_ref2bin(tp[2]);
if (bfs.type == am_bm &&
ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
goto badarg;
@@ -1534,8 +1534,8 @@ binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
bfs.global_result = &do_split_global_result;
runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
+ Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1767,7 +1767,8 @@ static BIF_RETTYPE binary_find_trap(BIF_ALIST_3)
{
int runres;
Eterm result;
- Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ Binary *bin = erts_magic_ref2bin(BIF_ARG_3);
+
runres = do_binary_find(BIF_P, BIF_ARG_1, NULL, bin, BIF_ARG_2, &result);
if (runres == DO_BIN_MATCH_OK) {
BIF_RET(result);
@@ -2066,7 +2067,7 @@ static int do_search_backward(CommonData *cd, Uint *posp, Uint *redsp)
}
}
-static void cleanup_common_data(Binary *bp)
+static int cleanup_common_data(Binary *bp)
{
int i;
CommonData *cd;
@@ -2083,7 +2084,7 @@ static void cleanup_common_data(Binary *bp)
break;
}
}
- return;
+ return 1;
}
static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
@@ -2186,8 +2187,8 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
cd[i].type = CL_TYPE_HEAP;
}
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP3(trapper, p, bin_term, epos,list);
}
@@ -2214,8 +2215,7 @@ static BIF_RETTYPE do_longest_common_trap(Process *p, Eterm bin_term, Eterm curr
#else
term_to_Uint(current_pos, &pos);
#endif
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin_term));
- bin = ((ProcBin *) binary_val(bin_term))->val;
+ bin = erts_magic_ref2bin(bin_term);
cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bin);
if (direction == DIRECTION_PREFIX) {
trapper = &binary_longest_prefix_trap_export;
@@ -2563,7 +2563,7 @@ typedef struct {
#define BINARY_COPY_LOOP_FACTOR 100
-static void cleanup_copy_bin_state(Binary *bp)
+static int cleanup_copy_bin_state(Binary *bp)
{
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(bp);
if (cbs->result != NULL) {
@@ -2583,6 +2583,7 @@ static void cleanup_copy_bin_state(Binary *bp)
break;
}
cbs->source_type = BC_TYPE_EMPTY;
+ return 1;
}
/*
@@ -2680,8 +2681,8 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
cbs->source_size = size;
cbs->result_pos = pos;
cbs->times_left = n-i;
- hp = HAlloc(p,PROC_BIN_SIZE);
- trap_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ trap_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP2(&binary_copy_trap_export, p, bin, trap_term);
} else {
@@ -2716,7 +2717,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
Uint reds = get_reds(BIF_P, BINARY_COPY_LOOP_FACTOR);
byte *t;
Uint pos;
- Binary *mb = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ Binary *mb = erts_magic_ref2bin(BIF_ARG_2);
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(mb);
Uint opos;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index bace51bbbb..2ce872d2bc 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1099,7 +1099,7 @@ void erts_ddll_increment_port_count(DE_Handle *dh)
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
-#if DEBUG
+#ifdef DEBUG
ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
#else
erts_smp_atomic32_dec_nob(&dh->port_count);
@@ -1476,8 +1476,10 @@ static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
static Eterm copy_ref(Eterm ref, Eterm *hp)
{
- RefThing *ptr = ref_thing_ptr(ref);
- memcpy(hp, ptr, sizeof(RefThing));
+ ErtsORefThing *ptr;
+ ASSERT(is_internal_ordinary_ref(ref));
+ ptr = ordinary_ref_thing_ptr(ref);
+ memcpy(hp, ptr, sizeof(ErtsORefThing));
return (make_internal_ref(hp));
}
@@ -1720,10 +1722,10 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
Eterm e;
mp = erts_alloc_message_heap(proc, &rp_locks,
(6 /* tuple */ + 3 /* Error tuple */ +
- REF_THING_SIZE + need),
+ ERTS_REF_THING_SIZE + need),
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
e = build_load_error_hp(hp, errcode);
hp += need;
mess = TUPLE2(hp,tag,e);
@@ -1731,10 +1733,10 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
mess = TUPLE5(hp,type,r,am_driver,driver_name,mess);
} else {
mp = erts_alloc_message_heap(proc, &rp_locks,
- 6 /* tuple */ + REF_THING_SIZE,
+ 6 /* tuple */ + ERTS_REF_THING_SIZE,
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
erts_queue_message(proc, rp_locks, mp, mess, am_system);
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index 74cf7cf11a..cdac4c1d11 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -141,6 +141,39 @@ BIF_RETTYPE trunc_1(BIF_ALIST_1)
BIF_RET(res);
}
+BIF_RETTYPE floor_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ GET_DOUBLE(BIF_ARG_1, f);
+ res = double_to_integer(BIF_P, floor(f.fd));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE ceil_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ res = double_to_integer(BIF_P, ceil(f.fd));
+ BIF_RET(res);
+}
+
BIF_RETTYPE round_1(BIF_ALIST_1)
{
Eterm res;
@@ -620,6 +653,38 @@ Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
reg, live);
}
+Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, floor(f.fd), reg, live);
+}
+
+Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, ceil(f.fd), reg, live);
+}
+
static Eterm
gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
{
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 95a56a3de9..06a73ffea5 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -94,6 +94,9 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
#endif
+#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
+ " [dirty-schedulers-TEST]"
+#endif
" [async-threads:%d]"
#endif
#ifdef HIPE
@@ -184,6 +187,32 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
return res;
}
+static Eterm
+bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+{
+ struct erl_off_heap_header* ohh;
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (is_ref_thing_header((*((Eterm *) ohh)))) {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
+ Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->refc, 1);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ }
+ return res;
+}
+
/*
make_monitor_list:
@@ -199,8 +228,10 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
- *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
+ *psz += NC_HEAP_SIZE(mon->ref);
+ *psz += (mon->type == MON_NIF_TARGET ?
+ erts_resource_ref_size(mon->u.resource) :
+ (is_immed(mon->u.pid) ? 0 : NC_HEAP_SIZE(mon->u.pid)));
*psz += 8; /* CONS + 5-tuple */
}
@@ -215,12 +246,11 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
MonListContext *pmlc = vpmlc;
Eterm tup;
- Eterm r = (IS_CONST(mon->ref)
- ? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
- Eterm p = (IS_CONST(mon->pid)
- ? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
+ Eterm r = STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref);
+ Eterm p = (mon->type == MON_NIF_TARGET ?
+ erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->u.resource)
+ : (is_immed(mon->u.pid) ? mon->u.pid
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->u.pid)));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -259,7 +289,7 @@ make_monitor_list(Process *p, ErtsMonitor *root)
static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
+ *psz += is_immed(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
/* Node links use this pointer as ref counter... */
erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
@@ -279,7 +309,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
LnkListContext *pllc = vpllc;
Eterm tup;
Eterm old_res, targets = NIL;
- Eterm p = (IS_CONST(lnk->pid)
+ Eterm p = (is_immed(lnk->pid)
? lnk->pid
: STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
@@ -363,8 +393,12 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
typedef struct {
/* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
* {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
- * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name */
- Eterm entity;
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
+ * {Entity,Node} = {monitor.resource,MON_NIF_TARGET} */
+ union {
+ Eterm term;
+ ErtsResource* resource;
+ } entity;
Eterm node;
/* pid is actual target being monitored, no matter pid/port or name */
Eterm pid;
@@ -410,7 +444,7 @@ static void collect_one_link(ErtsLink *lnk, void *vmicp)
if (!(lnk->type == LINK_PID)) {
return;
}
- micp->mi[micp->mi_i].entity = lnk->pid;
+ micp->mi[micp->mi_i].entity.term = lnk->pid;
micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
micp->mi_i++;
}
@@ -423,20 +457,20 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
return;
}
EXTEND_MONITOR_INFOS(micp);
- if (is_atom(mon->pid)) { /* external by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
+ if (is_atom(mon->u.pid)) { /* external by name */
+ micp->mi[micp->mi_i].entity.term = mon->name;
+ micp->mi[micp->mi_i].node = mon->u.pid;
micp->sz += 3; /* need one 2-tuple */
- } else if (is_external_pid(mon->pid)) { /* external by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
+ } else if (is_external_pid(mon->u.pid)) { /* external by pid */
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
} else if (!is_nil(mon->name)) { /* internal by name */
- micp->mi[micp->mi_i].entity = mon->name;
+ micp->mi[micp->mi_i].entity.term = mon->name;
micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
micp->sz += 3; /* need one 2-tuple */
} else { /* internal by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
micp->mi[micp->mi_i].node = NIL;
/* no additional heap space needed */
}
@@ -444,7 +478,7 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
/* have always pid at hand, to assist with figuring out if its a port or
* a process, when we monitored by name and process_info is requested.
* See: erl_bif_info.c:process_info_aux section for am_monitors */
- micp->mi[micp->mi_i].pid = mon->pid;
+ micp->mi[micp->mi_i].pid = mon->u.pid;
micp->mi_i++;
micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
@@ -454,15 +488,24 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_TARGET) {
- return;
+ if (mon->type != MON_TARGET && mon->type != MON_NIF_TARGET) {
+ return;
}
EXTEND_MONITOR_INFOS(micp);
- micp->mi[micp->mi_i].node = NIL;
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
+
+ if (mon->type == MON_NIF_TARGET) {
+ micp->mi[micp->mi_i].entity.resource = mon->u.resource;
+ micp->mi[micp->mi_i].node = make_small(MON_NIF_TARGET);
+ micp->sz += erts_resource_ref_size(mon->u.resource);
+ }
+ else {
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
+ }
+ micp->sz += 2; /* cons */
micp->mi_i++;
}
@@ -610,7 +653,8 @@ static Eterm pi_args[] = {
am_current_location,
am_current_stacktrace,
am_message_queue_data,
- am_garbage_collection_info
+ am_garbage_collection_info,
+ am_magic_ref
};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -661,6 +705,7 @@ pi_arg2ix(Eterm arg)
case am_current_stacktrace: return 31;
case am_message_queue_data: return 32;
case am_garbage_collection_info: return 33;
+ case am_magic_ref: return 34;
default: return -1;
}
}
@@ -1114,9 +1159,9 @@ process_info_aux(Process *BIF_P,
case am_initial_call:
hp = HAlloc(BIF_P, 3+4);
res = TUPLE3(hp,
- rp->u.initial[INITIAL_MOD],
- rp->u.initial[INITIAL_FUN],
- make_small(rp->u.initial[INITIAL_ARI]));
+ rp->u.initial.module,
+ rp->u.initial.function,
+ make_small(rp->u.initial.arity));
hp += 4;
break;
@@ -1191,7 +1236,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1209,7 +1254,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- if (is_atom(mic.mi[i].entity)) {
+ if (is_atom(mic.mi[i].entity.term)) {
/* Monitor by name.
* Build {process|port, {Name, Node}} and cons it.
*/
@@ -1221,7 +1266,7 @@ process_info_aux(Process *BIF_P,
|| is_port(mic.mi[i].pid)
|| is_atom(mic.mi[i].pid));
- t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
t2 = TUPLE2(hp, m_type, t1);
hp += 3;
@@ -1231,7 +1276,7 @@ process_info_aux(Process *BIF_P,
else {
/* Monitor by pid. Build {process|port, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
ASSERT(is_pid(mic.mi[i].pid)
@@ -1258,7 +1303,12 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ if (mic.mi[i].node == make_small(MON_NIF_TARGET)) {
+ item = erts_bld_resource_ref(&hp, &MSO(BIF_P), mic.mi[i].entity.resource);
+ }
+ else {
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+ }
res = CONS(hp, item, res);
hp += 2;
}
@@ -1563,9 +1613,9 @@ process_info_aux(Process *BIF_P,
term = am_timeout;
else {
term = TUPLE3(hp,
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- make_small(scb->ct[j]->code[2]));
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ make_small(scb->ct[j]->info.mfa.arity));
hp += 4;
}
list = CONS(hp, term, list);
@@ -1596,6 +1646,14 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3);
break;
+ case am_magic_ref: {
+ Uint sz = 3;
+ (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
+ hp = HAlloc(BIF_P, sz);
+ res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
+ break;
+ }
+
default:
return THE_NON_VALUE; /* will produce badarg */
@@ -1614,10 +1672,10 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
if (rp->current == NULL) {
erts_lookup_function_info(&fi, rp->i, full_info);
- rp->current = fi.current;
+ rp->current = fi.mfa;
} else if (full_info) {
erts_lookup_function_info(&fi, rp->i, full_info);
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
/* Use the current function without location info */
erts_set_current_function(&fi, rp->current);
}
@@ -1633,9 +1691,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* instead if it can be looked up.
*/
erts_lookup_function_info(&fi2, rp->cp, full_info);
- if (fi2.current) {
+ if (fi2.mfa) {
fi = fi2;
- rp->current = fi2.current;
+ rp->current = fi2.mfa;
}
}
@@ -1650,8 +1708,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
hp = erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
+ res = TUPLE3(hp, rp->current->module,
+ rp->current->function,
+ make_small(rp->current->arity));
hp += 4;
}
*hpp = hp;
@@ -1692,7 +1751,7 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
heap_size = 3;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -2391,7 +2450,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ERTS_ATOM_ENC_LATIN1,
1),
erts_bld_uint(hpp, hszp,
- opc[i].count)),
+ erts_instr_count[i])),
res);
}
@@ -2676,20 +2735,30 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(active));
#endif
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
+#else
+ dirty_cpu = 0;
+#endif
BIF_RET(make_small(dirty_cpu));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
Uint dirty_cpu_onln;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
+#else
+ dirty_cpu_onln = 0;
+#endif
BIF_RET(make_small(dirty_cpu_onln));
} else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
Uint dirty_io;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
- BIF_RET(make_small(dirty_io));
+#else
+ dirty_io = 0;
#endif
+ BIF_RET(make_small(dirty_io));
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2859,6 +2928,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_get_atom_limit()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
+ BIF_RET(make_small(atom_table_size()));
+ }
else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
if (erts_has_time_correction()
&& erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
@@ -2883,27 +2958,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(AM_tag);
#endif
}
- else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) {
- Eterm terms[3];
- Sint length = 1;
- Uint sz = 0;
- Eterm *hp, res;
- DECL_AM(direct_references);
-
- terms[0] = AM_direct_references;
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- {
- DECL_AM(indirect_references);
- terms[1] = AM_indirect_references;
- terms[2] = am_copy_literals;
- length = 3;
- }
-#endif
- erts_bld_list(NULL, &sz, length, terms);
- hp = HAlloc(BIF_P, sz);
- res = erts_bld_list(&hp, NULL, length, terms);
- BIF_RET(res);
- }
BIF_ERROR(BIF_P, BADARG);
}
@@ -2951,7 +3005,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -2982,7 +3036,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
Eterm t;
Eterm m_type;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
m_type = is_port(item) ? am_port : am_process;
t = TUPLE2(*hpp, m_type, item);
*hpp += 3;
@@ -3011,7 +3065,8 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ ASSERT(mic.mi[i].node == NIL);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -3248,7 +3303,7 @@ fun_info_2(BIF_ALIST_2)
break;
case am_module:
hp = HAlloc(p, 3);
- val = exp->code[0];
+ val = exp->info.mfa.module;
break;
case am_new_index:
hp = HAlloc(p, 3);
@@ -3276,11 +3331,11 @@ fun_info_2(BIF_ALIST_2)
break;
case am_arity:
hp = HAlloc(p, 3);
- val = make_small(exp->code[2]);
+ val = make_small(exp->info.mfa.arity);
break;
case am_name:
hp = HAlloc(p, 3);
- val = exp->code[1];
+ val = exp->info.mfa.function;
break;
default:
goto error;
@@ -3306,7 +3361,9 @@ fun_info_mfa_1(BIF_ALIST_1)
} else if (is_export(fun)) {
Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
hp = HAlloc(p, 4);
- BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ BIF_RET(TUPLE3(hp,exp->info.mfa.module,
+ exp->info.mfa.function,
+ make_small(exp->info.mfa.arity)));
}
BIF_ERROR(p, BADARG);
}
@@ -3384,7 +3441,12 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm* hp;
if (BIF_ARG_1 == am_scheduler_wall_time) {
- res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
@@ -3525,6 +3587,11 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
static erts_smp_atomic_t available_internal_state;
+static int empty_magic_ref_destructor(Binary *bin)
+{
+ return 1;
+}
+
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
/*
@@ -3563,8 +3630,14 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
- BIF_RET(make_small((Uint) words));
+ Eterm* hp = HAlloc(BIF_P, 3);
+ BIF_RET(TUPLE2(hp, make_small((Uint) words),
+ erts_ets_hash_sizeof_ext_segtab()));
}
+ else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) {
+ /* Used by ets_SUITE (stdlib) */
+ BIF_RET(erts_ets_get_meta_state(BIF_P));
+ }
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
/* Used by driver_SUITE (emulator) */
Uint sz, *szp;
@@ -3884,6 +3957,34 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
return make_atom(ix);
}
+ else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
+ Binary *bin;
+ UWord bin_addr, refc;
+ Eterm bin_addr_term, refc_term, test_type;
+ Uint sz;
+ Eterm *hp;
+ if (!is_internal_magic_ref(tp[2])) {
+ if (is_internal_ordinary_ref(tp[2])) {
+ ErtsORefThing *rtp;
+ rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
+ if (erts_is_ref_numbers_magic(rtp->num))
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+ }
+ bin = erts_magic_ref2bin(tp[2]);
+ refc = erts_refc_read(&bin->refc, 1);
+ bin_addr = (UWord) bin;
+ sz = 4;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ erts_bld_uword(NULL, &sz, refc);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ refc_term = erts_bld_uword(&hp, NULL, refc);
+ test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
+ ? am_true : am_false);
+ BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
+ }
break;
}
@@ -3972,7 +4073,6 @@ static void broken_halt_test(Eterm bif_arg_2)
erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
-
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -4298,6 +4398,25 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
BIF_RET(am_ok);
}
+ else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) {
+ /* Used by ets_SUITE (stdlib) */
+ BIF_RET(erts_ets_restore_meta_state(BIF_P, BIF_ARG_2));
+ }
+ else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
+ Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
+ UWord bin_addr = (UWord) bin;
+ Eterm bin_addr_term, magic_ref, res;
+ Eterm *hp;
+ Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
+ res = TUPLE2(hp, magic_ref, bin_addr_term);
+ BIF_RET(res);
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index aecb8bf0c1..a594ec1493 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -260,7 +260,7 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
} else if (is_export(arg1)) {
Export* exp = (Export *) (export_val(arg1)[1]);
- if (exp->code[2] == (Uint) arity) {
+ if (exp->info.mfa.arity == (Uint) arity) {
BIF_RET(am_true);
}
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 46777d3aa5..edc3c82b23 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -203,3 +203,17 @@ BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
}
BIF_RET(am_true);
}
+
+BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) {
+ if (is_atom(BIF_ARG_1) && ((BIF_ARG_2 == am_ignore) ||
+ (BIF_ARG_2 == am_default) ||
+ (BIF_ARG_2 == am_handle))) {
+ if (!erts_set_signal(BIF_ARG_1, BIF_ARG_2))
+ goto error;
+
+ BIF_RET(am_ok);
+ }
+
+error:
+ BIF_ERROR(BIF_P, BADARG);
+}
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 90e78a9b0b..09d2c5376b 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -214,7 +214,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
/* Fall through... */
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_RET(res, ref);
break;
case ERTS_PORT_OP_DONE:
@@ -260,7 +260,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -310,7 +310,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -356,7 +356,7 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
case ERTS_PORT_OP_DONE:
BIF_RET(am_true);
@@ -389,7 +389,7 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
break;
case ERTS_PORT_OP_DONE:
@@ -428,7 +428,7 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -467,7 +467,7 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
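The assertions above are tightened from is_internal_ref() to is_internal_ordinary_ref() because port operations always hand back plain references, never the magic refs this patch introduces. A hedged sketch of the distinction (ref is an assumed internal reference term):

    Eterm *hp = internal_ref_val(ref);
    if (is_ordinary_ref_thing(hp)) {
        /* plain reference: an ErtsORefThing holding three 32-bit ref numbers */
    } else {
        /* magic reference: an ErtsMRefThing holding an ErtsMagicBinary pointer */
    }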
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index ff7746ce1d..a66b05c6ff 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -600,10 +600,11 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
/*
@@ -1319,17 +1320,17 @@ handle_iolist:
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
cleanup_restart_context_bin);
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
- Eterm magic_bin;
+ Eterm magic_ref;
Eterm *hp;
memcpy(restartp,&restart,sizeof(RestartContext));
BUMP_ALL_REDS(p);
- hp = HAlloc(p, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
p,
arg1,
arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
- magic_bin);
+ magic_ref);
}
res = build_exec_return(p, rc, &restart, arg1);
@@ -1366,9 +1367,7 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
Uint loop_limit_tmp;
Eterm res;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_3));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_3);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
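re:run previously trapped with a magic binary term holding the RestartContext; it now traps with a magic ref, which needs only ERTS_MAGIC_REF_THING_SIZE heap words instead of PROC_BIN_SIZE and is recovered with erts_magic_ref2bin(). A hedged sketch of the general round trip, with MyState and my_state_destructor as assumed placeholder names (note the destructor must use the new int-returning signature):

    Binary *mbp = erts_create_magic_binary(sizeof(MyState), my_state_destructor);
    MyState *st = ERTS_MAGIC_BIN_DATA(mbp);      /* fill in the trap state */
    Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    Eterm ref = erts_mk_magic_ref(&hp, &MSO(p), mbp);
    /* ... BIF_TRAP with 'ref' as an argument; later, in the trap BIF: */
    Binary *again = erts_magic_ref2bin(ref);
    MyState *back = ERTS_MAGIC_BIN_DATA(again);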
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 0627526d7e..f471390501 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -125,7 +125,6 @@ erts_internal_trace_pattern_3(BIF_ALIST_3)
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
- DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
int matches = -1;
int specified = 0;
@@ -308,30 +307,30 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
matches = 0;
} else if (is_tuple(MFA)) {
+ ErtsCodeMFA mfa;
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+ if (specified == 3) {
+ mfa.arity = signed_val(tp[3]);
}
- matches = erts_set_trace_pattern(p, mfa, specified,
+ matches = erts_set_trace_pattern(p, &mfa, specified,
match_prog_set, match_prog_set,
on, flags, meta_tracer, 0);
} else if (is_atom(MFA)) {
@@ -343,7 +342,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
error:
MatchSetUnref(match_prog_set);
- UnUseTmpHeap(3,p);
ERTS_TRACER_CLEAR(&meta_tracer);
@@ -975,13 +973,14 @@ static int function_is_traced(Process *p,
Export e;
Export* ep;
BeamInstr* pc;
+ ErtsCodeInfo *ci;
/* First look for an export entry */
- e.code[0] = mfa[0];
- e.code[1] = mfa[1];
- e.code[2] = mfa[2];
+ e.info.mfa.module = mfa[0];
+ e.info.mfa.function = mfa[1];
+ e.info.mfa.arity = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- pc = ep->code+3;
+ pc = ep->beam;
if (ep->addressv[erts_active_code_ix()] == pc &&
*pc != (BeamInstr) em_call_error_handler) {
@@ -990,17 +989,17 @@ static int function_is_traced(Process *p,
ASSERT(*pc == (BeamInstr) em_apply_bif ||
*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- if (erts_is_trace_break(pc, ms, 0)) {
+ if (erts_is_trace_break(&ep->info, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (erts_is_trace_break(pc, ms, 1)) {
+ if (erts_is_trace_break(&ep->info, ms, 1)) {
r |= FUNC_TRACE_LOCAL_TRACE;
}
- if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ if (erts_is_mtrace_break(&ep->info, ms_meta, tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
- if (erts_is_time_break(p, pc, call_time)) {
+ if (erts_is_time_break(p, &ep->info, call_time)) {
r |= FUNC_TRACE_TIME_TRACE;
}
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1008,15 +1007,15 @@ static int function_is_traced(Process *p,
}
/* OK, now look for breakpoint tracing */
- if ((pc = erts_find_local_func(mfa)) != NULL) {
+ if ((ci = erts_find_local_func(&e.info.mfa)) != NULL) {
int r =
- (erts_is_trace_break(pc, ms, 1)
+ (erts_is_trace_break(ci, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(ci, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(pc, count)
+ | (erts_is_count_break(ci, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, pc, call_time)
+ | (erts_is_time_break(p, ci, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1357,7 +1356,7 @@ trace_info_event(Process* p, Eterm event, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
ErtsTracer meta_tracer, int is_blocking)
@@ -1377,22 +1376,23 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
n = finish_bp.e.matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ ErtsCodeInfo *ci = fp[i].ci;
+ BeamInstr* pc = erts_codeinfo_to_code(ci);
+ Export* ep = ErtsContainerStruct(ci, Export, info);
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
if (ep->addressv[code_ix] != pc) {
fp[i].mod->curr.num_traced_exports++;
#ifdef DEBUG
- pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
#endif
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
- pc[1] = (BeamInstr) ep->addressv[code_ix];
+ ep->beam[0] = (BeamInstr) BeamOp(op_jump_f);
+ ep->beam[1] = (BeamInstr) ep->addressv[code_ix];
}
- erts_set_call_trace_bif(pc, match_prog_set, 0);
+ erts_set_call_trace_bif(ci, match_prog_set, 0);
if (ep->addressv[code_ix] != pc) {
- pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ ep->beam[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
}
} else if (!on && flags.breakpoint) {
/* Turn off breakpoint tracing -- nothing to do here. */
@@ -1401,9 +1401,9 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
* Turn off global tracing, either explicitly or implicitly
* before turning on breakpoint tracing.
*/
- erts_clear_call_trace_bif(pc, 0);
- if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ erts_clear_call_trace_bif(ci, 0);
+ if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ ep->beam[0] = (BeamInstr) BeamOp(op_jump_f);
}
}
}
@@ -1413,68 +1413,76 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
*/
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- int j;
-
+
if (!ExportIsBuiltIn(ep)) {
continue;
}
-
+
if (bif_table[i].f == bif_table[i].traced) {
/* Trace wrapper same as regular function - untraceable */
continue;
}
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
- if (! flags.breakpoint) { /* Export entry call trace */
- if (on) {
- erts_clear_call_trace_bif(pc, 1);
- erts_clear_mtrace_bif(pc);
- erts_set_call_trace_bif(pc, match_prog_set, 0);
- } else { /* off */
- erts_clear_call_trace_bif(pc, 0);
- }
- matches++;
- } else { /* Breakpoint call trace */
- int m = 0;
-
- if (on) {
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 0);
- erts_set_call_trace_bif(pc, match_prog_set, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer);
- m = 1;
- }
- if (flags.call_time) {
- erts_set_time_trace_bif(pc, on);
- /* I don't want to remove any other tracers */
- m = 1;
- }
- } else { /* off */
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_clear_mtrace_bif(pc);
- m = 1;
- }
- if (flags.call_time) {
- erts_clear_time_trace_bif(pc);
- m = 1;
- }
- }
- matches += m;
- }
- }
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ if (! flags.breakpoint) { /* Export entry call trace */
+ if (on) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ erts_clear_mtrace_bif(&ep->info);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
+ } else { /* off */
+ erts_clear_call_trace_bif(&ep->info, 0);
+ }
+ matches++;
+ } else { /* Breakpoint call trace */
+ int m = 0;
+
+ if (on) {
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 0);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
+ meta_tracer);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_set_time_trace_bif(&ep->info, on);
+ /* I don't want to remove any other tracers */
+ m = 1;
+ }
+ } else { /* off */
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_clear_mtrace_bif(&ep->info);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(&ep->info);
+ m = 1;
+ }
+ }
+ matches += m;
+ }
}
/*
@@ -1676,10 +1684,9 @@ install_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- ep->addressv[code_ix] = pc;
+ ep->addressv[code_ix] = ep->beam;
}
}
@@ -1692,14 +1699,13 @@ uninstall_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] != pc) {
+ if (ep->addressv[code_ix] != ep->beam) {
continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
+ ASSERT(ep->beam[0] == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
}
}
@@ -1712,15 +1718,14 @@ clean_export_entries(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] == pc) {
+ if (ep->addressv[code_ix] == ep->beam) {
continue;
}
- if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
- ep->code[3] = (BeamInstr) 0;
- ep->code[4] = (BeamInstr) 0;
+ if (ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->beam[0] = (BeamInstr) 0;
+ ep->beam[1] = (BeamInstr) 0;
}
}
}
@@ -1732,11 +1737,11 @@ setup_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ GenericBp* g = (GenericBp *) ep->info.native;
if (g) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].traced;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].traced;
}
}
}
@@ -1750,12 +1755,11 @@ reset_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- BeamInstr* pc = ep->code+3;
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = (GenericBp *) ep->info.native;
if (g && g->data[active].flags == 0) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
}
}
}
@@ -2359,7 +2363,7 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Eterm target;
erts_smp_atomic32_t refc;
} ErtsTraceDeliveredAll;
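In trace_pattern() above, the temporary heap array for the MFA is gone: the code counts how many leading elements of the {M,F,A} tuple are not '_' into 'specified', and the switch in erts_set_trace_pattern() deliberately falls through so only those elements are compared against each export entry. A worked example (illustrative only):

    /* {lists, '_', '_'} -> specified = 1: only mfa->module is compared
     * {lists, seq, '_'} -> specified = 2: module and function are compared
     * {lists, seq, 2}   -> specified = 3: module, function and arity;
     *                      mfa.arity is only filled in for this case,
     *                      which is why its assignment is guarded above. */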
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 7c70217d8d..3fd4c87094 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -22,12 +22,15 @@
# include "config.h"
#endif
+#define ERL_BIF_UNIQUE_C__
#include "sys.h"
#include "erl_vm.h"
#include "erl_alloc.h"
#include "export.h"
#include "bif.h"
#include "erl_bif_unique.h"
+#include "hash.h"
+#include "erl_binary.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Reference *
@@ -58,9 +61,20 @@ static union {
static Uint32 max_thr_id;
#endif
+static void init_magic_ref_tables(void);
+
+static Uint64 ref_init_value;
+
static void
init_reference(void)
{
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ ref_init_value = 0;
+ ref_init_value |= (Uint64) tv.tv_sec;
+ ref_init_value |= ((Uint64) tv.tv_usec) << 32;
+ ref_init_value *= (Uint64) 268438039;
+ ref_init_value += (Uint64) tv.tv_usec;
#ifdef DEBUG
max_thr_id = (Uint32) erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -68,11 +82,13 @@ init_reference(void)
max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
#endif
#endif
- erts_atomic64_init_nob(&global_reference.count, 0);
+ erts_atomic64_init_nob(&global_reference.count,
+ (erts_aint64_t) ref_init_value);
+ init_magic_ref_tables();
}
static ERTS_INLINE void
-global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -82,7 +98,7 @@ global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
static ERTS_INLINE void
-make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
if (esdp)
@@ -92,15 +108,23 @@ make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
void
-erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+}
+
+void
+erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
make_ref_in_array(ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
}
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -110,11 +134,11 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
Eterm erts_make_ref(Process *c_p)
{
Eterm* hp;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -122,6 +146,272 @@ Eterm erts_make_ref(Process *c_p)
return make_internal_ref(hp);
}
+/*
+ * Magic reference tables
+ */
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+ Uint32 thr_id;
+} ErtsMagicRefTableEntry;
+
+typedef struct {
+ erts_rwmtx_t rwmtx;
+ Hash hash;
+ char name[32];
+} ErtsMagicRefTable;
+
+typedef struct {
+ union {
+ ErtsMagicRefTable table;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMagicRefTable))];
+ } u;
+} ErtsAlignedMagicRefTable;
+
+ErtsAlignedMagicRefTable *magic_ref_table;
+
+ErtsMagicBinary *
+erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicBinary *mb;
+ ErtsMagicRefTable *tblp;
+
+ ASSERT(erts_is_ref_numbers_magic(refn));
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+ if (!tep)
+ mb = NULL;
+ else {
+ erts_aint_t refc;
+ mb = tep->mb;
+ refc = erts_refc_inc_unless(&mb->refc, 0, 0);
+ if (refc == 0)
+ mb = NULL;
+ }
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ return mb;
+}
+
+void
+erts_magic_ref_save_bin__(Eterm ref)
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMRefThing *mrtp;
+ ErtsMagicRefTable *tblp;
+ Uint32 *refn;
+
+ ASSERT(is_internal_magic_ref(ref));
+
+ mrtp = (ErtsMRefThing *) internal_ref_val(ref);
+ refn = mrtp->mb->refn;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (!tep) {
+ ErtsMagicRefTableEntry *used_tep;
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ if (tblp != &magic_ref_table[0].u.table) {
+ tep = erts_alloc(ERTS_ALC_T_MREF_NSCHED_ENT,
+ sizeof(ErtsNSchedMagicRefTableEntry));
+ }
+ else {
+ tep = erts_alloc(ERTS_ALC_T_MREF_ENT,
+ sizeof(ErtsMagicRefTableEntry));
+ tep->thr_id = tmpl.thr_id;
+ }
+
+ tep->value = tmpl.value;
+ tep->mb = mrtp->mb;
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ used_tep = hash_put(&tblp->hash, tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (used_tep != tep) {
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+ }
+}
+
+void
+erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicRefTable *tblp;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (tep) {
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ tep = hash_remove(&tblp->hash, &tmpl);
+ ASSERT(tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+}
+
+static int nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsNSchedMagicRefTableEntry *e1 = ve1;
+ ErtsNSchedMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value;
+}
+
+static int non_nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsMagicRefTableEntry *e1 = ve1;
+ ErtsMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value || e1->thr_id != e2->thr_id;
+}
+
+static HashValue nsched_mreft_hash(void *ve)
+{
+ ErtsNSchedMagicRefTableEntry *e = ve;
+ return (HashValue) e->value;
+}
+
+static HashValue non_nsched_mreft_hash(void *ve)
+{
+ ErtsMagicRefTableEntry *e = ve;
+ HashValue h;
+ h = (HashValue) e->thr_id;
+ h *= 268440163;
+ h += (HashValue) e->value;
+ return h;
+}
+
+static void *mreft_alloc(void *ve)
+{
+ /*
+ * We allocate the element before
+ * hash_put() and pass it as
+ * template which we get as
+ * input...
+ */
+ return ve;
+}
+
+static void mreft_free(void *ve)
+{
+ /*
+ * We free the element ourselves
+ * after hash_remove()...
+ */
+}
+
+static void *mreft_meta_alloc(int i, size_t size)
+{
+ return erts_alloc(ERTS_ALC_T_MREF_TAB_BKTS, size);
+}
+
+static void mreft_meta_free(int i, void *ptr)
+{
+ erts_free(ERTS_ALC_T_MREF_TAB_BKTS, ptr);
+}
+
+static void
+init_magic_ref_tables(void)
+{
+ HashFunctions hash_funcs;
+ int i;
+ ErtsMagicRefTable *tblp;
+
+ magic_ref_table = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MREF_TAB,
+ (sizeof(ErtsAlignedMagicRefTable)
+ * (erts_no_schedulers + 1)));
+
+ hash_funcs.hash = non_nsched_mreft_hash;
+ hash_funcs.cmp = non_nsched_mreft_cmp;
+
+ hash_funcs.alloc = mreft_alloc;
+ hash_funcs.free = mreft_free;
+ hash_funcs.meta_alloc = mreft_meta_alloc;
+ hash_funcs.meta_free = mreft_meta_free;
+ hash_funcs.meta_print = erts_print;
+
+ tblp = &magic_ref_table[0].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_0");
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+
+ hash_funcs.hash = nsched_mreft_hash;
+ hash_funcs.cmp = nsched_mreft_cmp;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsMagicRefTable *tblp = &magic_ref_table[i].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_%d", i);
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+ }
+}
+
+void erts_ref_bin_free(ErtsMagicBinary *mb)
+{
+ erts_bin_free((Binary *) mb);
+}
+
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Unique Integer *
\* */
@@ -498,7 +788,7 @@ void
erts_sched_bif_unique_init(ErtsSchedulerData *esdp)
{
esdp->unique = (Uint64) 0;
- esdp->ref = (Uint64) 0;
+ esdp->ref = (Uint64) ref_init_value;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -513,7 +803,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
- hp = HAlloc(BIF_P, REF_THING_SIZE);
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
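The reference counter is now seeded from the current time (ref_init_value), and every per-scheduler counter starts from that same value; the three 32-bit ref numbers encode the scheduler thread id, a magic-marker bit and the 64-bit counter value, as the masks in erl_bif_unique.h below spell out. A hedged sketch of reading the fields back (ref is filled here by erts_make_ref_in_array(), so the magic bit is clear):

    Uint32 ref[ERTS_REF_NUMBERS];
    erts_make_ref_in_array(ref);
    int    is_magic = !!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__);  /* 0 here */
    Uint32 thr_id   = ref[1] & ERTS_REF1_THR_ID_MASK__;
    Uint64 value    = erts_get_ref_numbers_value(ref);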
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index c6481864d0..27c2a15a5e 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -21,16 +21,25 @@
#ifndef ERTS_BIF_UNIQUE_H__
#define ERTS_BIF_UNIQUE_H__
+#include "erl_term.h"
#include "erl_process.h"
#include "big.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
void erts_bif_unique_init(void);
void erts_sched_bif_unique_init(ErtsSchedulerData *esdp);
/* reference */
Eterm erts_make_ref(Process *);
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
-void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS]);
+void erts_magic_ref_save_bin__(Eterm ref);
+ErtsMagicBinary *erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS]);
+
/* strict monotonic counter */
@@ -67,19 +76,35 @@ Eterm erts_debug_make_unique_integer(Process *c_p,
Eterm etval1);
-ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS],
+ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS],
Uint32 thr_id, Uint64 value);
-ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE int erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE void erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE Eterm erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE]);
+ Eterm buffer[ERTS_REF_THING_SIZE]);
+ERTS_GLB_INLINE Eterm erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp);
+ERTS_GLB_INLINE Binary *erts_magic_ref2bin(Eterm mref);
+ERTS_GLB_INLINE void erts_magic_ref_save_bin(Eterm ref);
+ERTS_GLB_INLINE ErtsMagicBinary *erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS]);
+
+#define ERTS_REF1_MAGIC_MARKER_BIT_NO__ \
+ (_REF_NUM_SIZE-1)
+#define ERTS_REF1_MAGIC_MARKER_BIT__ \
+ (((Uint32) 1) << ERTS_REF1_MAGIC_MARKER_BIT_NO__)
+#define ERTS_REF1_THR_ID_MASK__ \
+ (ERTS_REF1_MAGIC_MARKER_BIT__-1)
+#define ERTS_REF1_NUM_MASK__ \
+ (~(ERTS_REF1_THR_ID_MASK__|ERTS_REF1_MAGIC_MARKER_BIT__))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 value)
+erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS], Uint32 thr_id, Uint64 value)
{
/*
* We cannot use thread id in the first 18-bit word since
@@ -87,30 +112,39 @@ erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 val
* we did, we would get really poor hash values. Instead
* we have to shuffle the bits a bit.
*/
- ASSERT(thr_id == (thr_id & ((Uint32) 0x3ffff)));
- ref[0] = (Uint32) (value & ((Uint64) 0x3ffff));
- ref[1] = (((Uint32) (value & ((Uint64) 0xfffc0000)))
- | (thr_id & ((Uint32) 0x3ffff)));
+ ASSERT(thr_id == (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
+ ref[0] = (Uint32) (value & ((Uint64) REF_MASK));
+ ref[1] = (((Uint32) (value & ((Uint64) ERTS_REF1_NUM_MASK__)))
+ | (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
ref[2] = (Uint32) ((value >> 32) & ((Uint64) 0xffffffff));
}
ERTS_GLB_INLINE Uint32
-erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS])
{
- return ref[1] & ((Uint32) 0x3ffff);
+ return ref[1] & ((Uint32) ERTS_REF1_THR_ID_MASK__);
+}
+
+ERTS_GLB_INLINE int
+erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ return !!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__);
}
ERTS_GLB_INLINE Uint64
-erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS])
{
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ | REF_MASK) == 0xffffffff);
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ & REF_MASK) == 0);
+
return (((((Uint64) ref[2]) & ((Uint64) 0xffffffff)) << 32)
- | (((Uint64) ref[1]) & ((Uint64) 0xfffc0000))
- | (((Uint64) ref[0]) & ((Uint64) 0x3ffff)));
+ | (((Uint64) ref[1]) & ((Uint64) ERTS_REF1_NUM_MASK__))
+ | (((Uint64) ref[0]) & ((Uint64) REF_MASK)));
}
ERTS_GLB_INLINE void
erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS])
+ Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -119,18 +153,288 @@ erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
erts_set_ref_numbers(ref, (Uint32) esdp->thr_id, value);
}
+ERTS_GLB_INLINE void
+erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS])
+{
+ erts_sched_make_ref_in_array(esdp, ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
ERTS_GLB_INLINE Eterm
erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE])
+ Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
erts_sched_make_ref_in_array(esdp, ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
return make_internal_ref(hp);
}
+ERTS_GLB_INLINE Eterm
+erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *bp)
+{
+ Eterm *hp = *hpp;
+ ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ write_magic_ref_thing(hp, ohp, (ErtsMagicBinary *) bp);
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ erts_refc_inc(&bp->refc, 1);
+ OH_OVERHEAD(ohp, bp->orig_size / sizeof(Eterm));
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_magic_ref2bin(Eterm mref)
+{
+ ErtsMRefThing *mrtp;
+ ASSERT(is_internal_magic_ref(mref));
+ mrtp = (ErtsMRefThing *) internal_ref_val(mref);
+ return (Binary *) mrtp->mb;
+}
+
+/*
+ * Save the magic binary of a ref when the
+ * ref is exposed to the outside world...
+ */
+ERTS_GLB_INLINE void
+erts_magic_ref_save_bin(Eterm ref)
+{
+ if (is_internal_magic_ref(ref))
+ erts_magic_ref_save_bin__(ref);
+}
+
+/*
+ * Look up the magic binary of a magic ref
+ * when the ref comes from the outside world...
+ */
+ERTS_GLB_INLINE ErtsMagicBinary *
+erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ if (!erts_is_ref_numbers_magic(ref))
+ return NULL;
+ return erts_magic_ref_lookup_bin__(ref);
+}
+
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * Storage of internal refs in misc structures...
+ */
+
+#include "erl_message.h"
+
+#if ERTS_REF_NUMBERS != 3
+# error fix this...
+#endif
+
+ERTS_GLB_INLINE int erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS])
+{
+ if (num1[2] != num2[2])
+ return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ if (num1[1] != num2[1])
+ return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ if (num1[0] != num2[0])
+ return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return 0;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* Iref storage for all internal references... */
+typedef struct {
+ Uint32 is_magic;
+ union {
+ ErtsMagicBinary *mb;
+ Uint32 num[ERTS_REF_NUMBERS];
+ } u;
+} ErtsIRefStorage;
+
+void erts_ref_bin_free(ErtsMagicBinary *mb);
+
+ERTS_GLB_INLINE void erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref);
+ERTS_GLB_INLINE void erts_iref_storage_clean(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Uint erts_iref_storage_heap_size(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Eterm erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage);
+ERTS_GLB_INLINE int erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref)
+{
+ Eterm *hp;
+
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ref(ref));
+
+ hp = boxed_val(ref);
+
+ if (is_ordinary_ref_thing(hp)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) hp;
+ iref->is_magic = 0;
+ iref->u.num[0] = rtp->num[0];
+ iref->u.num[1] = rtp->num[1];
+ iref->u.num[2] = rtp->num[2];
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) hp;
+ ASSERT(is_magic_ref_thing(hp));
+ iref->is_magic = 1;
+ iref->u.mb = mrtp->mb;
+ erts_refc_inc(&mrtp->mb->refc, 1);
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_iref_storage_clean(ErtsIRefStorage *iref)
+{
+ if (iref->is_magic && erts_refc_dectest(&iref->u.mb->refc, 0) == 0)
+ erts_ref_bin_free(iref->u.mb);
+#ifdef DEBUG
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+}
+
+ERTS_GLB_INLINE Uint
+erts_iref_storage_heap_size(ErtsIRefStorage *iref)
+{
+ return iref->is_magic ? ERTS_MAGIC_REF_THING_SIZE : ERTS_REF_THING_SIZE;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage)
+{
+ Eterm *hp = *hpp;
+ if (!iref->is_magic) {
+ write_ref_thing(hp, iref->u.num[0], iref->u.num[1],
+ iref->u.num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ }
+ else {
+ write_magic_ref_thing(hp, ohp, iref->u.mb);
+ OH_OVERHEAD(ohp, iref->u.mb->orig_size / sizeof(Eterm));
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ /*
+ * If we clean storage, the term inherits the
+ * refc increment of the cleaned storage...
+ */
+ if (!clean_storage)
+ erts_refc_inc(&iref->u.mb->refc, 1);
+ }
+
+#ifdef DEBUG
+ if (clean_storage)
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2)
+{
+ Uint32 *num1 = iref1->is_magic ? iref1->u.mb->refn : iref1->u.num;
+ Uint32 *num2 = iref2->is_magic ? iref2->u.mb->refn : iref2->u.num;
+ return erts_internal_ref_number_cmp(num1, num2);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+/* OIref storage for ordinary internal references only... */
+typedef struct {
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsOIRefStorage;
+
+ERTS_GLB_INLINE void erts_oiref_storage_save(ErtsOIRefStorage *oiref,
+ Eterm ref);
+ERTS_GLB_INLINE Eterm erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref,
+ Eterm **hpp);
+ERTS_GLB_INLINE int erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_oiref_storage_save(ErtsOIRefStorage *oiref, Eterm ref)
+{
+ ErtsORefThing *rtp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ rtp = (ErtsORefThing *) internal_ref_val(ref);
+
+ oiref->num[0] = rtp->num[0];
+ oiref->num[1] = rtp->num[1];
+ oiref->num[2] = rtp->num[2];
+}
+
+ERTS_GLB_INLINE Eterm
+erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ write_ref_thing(hp, oiref->num[0], oiref->num[1], oiref->num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2)
+{
+ return erts_internal_ref_number_cmp(oiref1->num, oiref2->num);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
#endif /* ERTS_BIF_UNIQUE_H__ */
+
+#if (defined(ERTS_ALLOC_C__) || defined(ERL_BIF_UNIQUE_C__)) \
+ && !defined(ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__)
+#define ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__
+
+#include "hash.h"
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+} ErtsNSchedMagicRefTableEntry;
+
+#endif
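ErtsIRefStorage lets other subsystems keep any internal ref off-heap, ordinary or magic, and rebuild a heap term later. A minimal usage sketch, assuming a process c_p and a ref term already in hand:

    ErtsIRefStorage stor;
    erts_iref_storage_save(&stor, ref);   /* copies the numbers or bumps the magic bin refc */
    Uint need = erts_iref_storage_heap_size(&stor);
    Eterm *hp = HAlloc(c_p, need);
    /* clean_storage = 1: the rebuilt term inherits the storage's refc increment */
    Eterm copy = erts_iref_storage_make_ref(&stor, &hp, &MSO(c_p), 1);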
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index fdc5efea98..6ff71ec6d1 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -18,34 +18,151 @@
* %CopyrightEnd%
*/
-#ifndef __ERL_BINARY_H
-#define __ERL_BINARY_H
+#ifndef ERL_BINARY_H__TYPES__
+#define ERL_BINARY_H__TYPES__
-#include "erl_threads.h"
-#include "bif.h"
+/*
+** Just like the driver binary but with initial flags
+** Note that the two structures Binary and ErlDrvBinary HAVE to
+** be equal except for extra fields in the beginning of the struct.
+** ErlDrvBinary is defined in erl_driver.h.
+** When driver_alloc_binary is called, a Binary is allocated, but
+** the pointer returned is to the address of the first element that
+** also occurs in the ErlDrvBinary struct (driver.*binary takes care of this).
+** The driver need never know about additions to the internal Binary of the
+** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
+** and Binary, the macros below can convert one type to the other, as they both
+** in reality are equal.
+*/
+
+#ifdef ARCH_32
+ /* *DO NOT USE* only for alignment. */
+#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
+#else
+#define ERTS_BINARY_STRUCT_ALIGNMENT
+#endif
+
+/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
+#define ERTS_INTERNAL_BINARY_FIELDS \
+ UWord flags; \
+ erts_refc_t refc; \
+ ERTS_BINARY_STRUCT_ALIGNMENT
+
+typedef struct binary {
+ ERTS_INTERNAL_BINARY_FIELDS
+ SWord orig_size;
+ char orig_bytes[1]; /* to be continued */
+} Binary;
+
+#define ERTS_SIZEOF_Binary(Sz) \
+ (offsetof(Binary,orig_bytes) + (Sz))
+
+#if ERTS_REF_NUMBERS != 3
+#error "Update ErtsMagicBinary"
+#endif
+
+typedef struct magic_binary ErtsMagicBinary;
+struct magic_binary {
+ ERTS_INTERNAL_BINARY_FIELDS
+ SWord orig_size;
+ int (*destructor)(Binary *);
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsAlcType_t alloc_type;
+ union {
+ struct {
+ ERTS_BINARY_STRUCT_ALIGNMENT
+ char data[1];
+ } aligned;
+ struct {
+ char data[1];
+ } unaligned;
+ } u;
+};
+
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - \
+ offsetof(ErtsMagicBinary,u.unaligned.data))
+
+typedef union {
+ Binary binary;
+ ErtsMagicBinary magic_binary;
+ struct {
+ ERTS_INTERNAL_BINARY_FIELDS
+ ErlDrvBinary binary;
+ } driver;
+} ErtsBinary;
/*
- * Maximum number of bytes to place in a heap binary.
+ * 'Binary' alignment:
+ * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
+ * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
+ * 32-bit architectures and 8 bytes on 64-bit architectures.
*/
-#define ERL_ONHEAP_BIN_LIMIT 64
+#define ERTS_MAGIC_BIN_REFN(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.refn
+#define ERTS_MAGIC_BIN_ATYPE(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.alloc_type
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.destructor
+#define ERTS_MAGIC_BIN_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
+#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_DATA_OFFSET)
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.aligned.data)))
+
+/* On 32-bit arch these macro variants will save memory
+ by not forcing 8-byte alignment for the magic payload.
+*/
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
+#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
+
+
+#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
+#define ErlDrvBinary2Binary(D) ((Binary *) \
+ (((char *) (D)) \
+ - offsetof(ErtsBinary, driver.binary)))
+
+/* A "magic" binary flag */
+#define BIN_FLAG_MAGIC 1
+#define BIN_FLAG_USR1 2 /* Reserved for use by different modules to mark */
+#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
+#define BIN_FLAG_DRV 8
+
+#endif /* ERL_BINARY_H__TYPES__ */
+
+#if !defined(ERL_BINARY_H__) && !defined(ERTS_BINARY_TYPES_ONLY__)
+#define ERL_BINARY_H__
+
+#include "erl_threads.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "erl_bits.h"
/*
- * This structure represents a SUB_BINARY.
- *
- * Note: The last field (orig) is not counted in arityval in the header.
- * This simplifies garbage collection.
+ * Maximum number of bytes to place in a heap binary.
*/
-typedef struct erl_sub_bin {
- Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
- Uint size; /* Binary size in bytes. */
- Uint offs; /* Offset into original binary. */
- byte bitsize;
- byte bitoffs;
- byte is_writable; /* The underlying binary is writable */
- Eterm orig; /* Original binary (REFC or HEAP binary). */
-} ErlSubBin;
+#define ERL_ONHEAP_BIN_LIMIT 64
#define ERL_SUB_BIN_SIZE (sizeof(ErlSubBin)/sizeof(Eterm))
#define HEADER_SUB_BIN _make_header(ERL_SUB_BIN_SIZE-2,_TAG_HEADER_SUB_BIN)
@@ -165,6 +282,17 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
+typedef union {
+ /*
+ * These two are almost always of
+ * the same size, but when fallback
+ * atomics are used they might
+ * differ in size.
+ */
+ erts_smp_atomic_t smp_atomic_word;
+ erts_atomic_t atomic_word;
+} ErtsMagicIndirectionWord;
+
#if defined(__i386__) || !defined(__GNUC__)
/*
* Doubles aren't required to be 8-byte aligned on intel x86.
@@ -189,10 +317,14 @@ ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
- void (*destructor)(Binary *),
+ int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned);
ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
- void (*destructor)(Binary *));
+ int (*destructor)(Binary *));
+ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *));
+ERTS_GLB_INLINE erts_smp_atomic_t *erts_smp_binary_to_magic_indirection(Binary *bp);
+ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -320,39 +452,79 @@ erts_bin_realloc(Binary *bp, Uint size)
ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
- if (bp->flags & BIN_FLAG_MAGIC)
- ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp);
- if (bp->flags & BIN_FLAG_DRV)
+ if (bp->flags & BIN_FLAG_MAGIC) {
+ if (!ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp)) {
+ /* Destructor took control of the deallocation */
+ return;
+ }
+ erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
+ erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+ }
+ else if (bp->flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
else
erts_free(ERTS_ALC_T_BINARY, (void *) bp);
}
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary_x(Uint size, void (*destructor)(Binary *),
+erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned)
{
Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size)
: ERTS_MAGIC_BIN_SIZE(size);
- Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ Binary* bptr = erts_alloc_fnf(alloc_type, bsize);
ASSERT(bsize > size);
if (!bptr)
- erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
+ erts_alloc_n_enomem(ERTS_ALC_T2N(alloc_type), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
bptr->flags = BIN_FLAG_MAGIC;
bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
: ERTS_MAGIC_BIN_ORIG_SIZE(size);
erts_refc_init(&bptr->refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
+ ERTS_MAGIC_BIN_ATYPE(bptr) = alloc_type;
+ erts_make_magic_ref_in_array(ERTS_MAGIC_BIN_REFN(bptr));
return bptr;
}
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+erts_create_magic_binary(Uint size, int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(size, destructor,
+ ERTS_ALC_T_BINARY, 0);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_create_magic_indirection(int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(sizeof(ErtsMagicIndirectionWord),
+ destructor,
+ ERTS_ALC_T_MINDIRECTION,
+ 1); /* Not 64-bit aligned,
+ but word aligned */
+}
+
+ERTS_GLB_INLINE erts_smp_atomic_t *
+erts_smp_binary_to_magic_indirection(Binary *bp)
+{
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->smp_atomic_word;
+}
+
+ERTS_GLB_INLINE erts_atomic_t *
+erts_binary_to_magic_indirection(Binary *bp)
{
- return erts_create_magic_binary_x(size, destructor, 0);
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->atomic_word;
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* !__ERL_BINARY_H */
+#endif /* !ERL_BINARY_H__ */
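Two contract changes in erts_bin_free() are worth calling out: magic binary destructors now return int, and a zero return means the destructor has taken over deallocation, so erts_bin_free() neither removes the magic-ref table entry nor frees the Binary. A hedged sketch of a destructor under the new signature (my_destroy, MyData and cleanup are assumed names):

    static int my_destroy(Binary *bp)
    {
        MyData *d = ERTS_MAGIC_BIN_DATA(bp);
        cleanup(d);        /* release whatever the payload owns */
        return 1;          /* nonzero: let erts_bin_free() free bp itself */
    }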
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 6bf52fb303..885e955332 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -110,9 +110,6 @@ erts_init_bits(void)
{
ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0);
ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0);
- ERTS_CT_ASSERT(ERTS_MAGIC_BIN_BYTES_TO_ALIGN ==
- (offsetof(ErtsMagicBinary,u.aligned.data)
- - offsetof(ErtsMagicBinary,u.unaligned.data)));
ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes)
== offsetof(Binary,orig_bytes));
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 4bd5b24157..1b4546722f 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -22,6 +22,23 @@
#define __ERL_BITS_H__
/*
+ * This structure represents a SUB_BINARY.
+ *
+ * Note: The last field (orig) is not counted in arityval in the header.
+ * This simplifies garbage collection.
+ */
+
+typedef struct erl_sub_bin {
+ Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Uint offs; /* Offset into original binary. */
+ byte bitsize;
+ byte bitoffs;
+ byte is_writable; /* The underlying binary is writable */
+ Eterm orig; /* Original binary (REFC or HEAP binary). */
+} ErlSubBin;
+
+/*
* This structure represents a binary to be matched.
*/
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 3cc2b21a20..9f077dd407 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2779,7 +2779,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
BIF_RETTYPE ets_is_compiled_ms_1(BIF_ALIST_1)
{
- if (erts_db_is_compiled_ms(BIF_ARG_1)) {
+ if (erts_db_get_match_prog_binary(BIF_ARG_1)) {
BIF_RET(am_true);
} else {
BIF_RET(am_false);
@@ -2794,9 +2794,9 @@ BIF_RETTYPE ets_match_spec_compile_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
- BIF_RET(erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mp));
+ BIF_RET(erts_db_make_match_prog_ref(BIF_P, mp, &hp));
}
BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
@@ -2805,24 +2805,18 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
int i = 0;
Eterm *hp;
Eterm lst;
- ProcBin *bp;
Binary *mp;
Eterm res;
Uint32 dummy;
- if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) || !is_binary(BIF_ARG_2)) {
+ if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- bp = (ProcBin*) binary_val(BIF_ARG_2);
- if (thing_subtag(bp->thing_word) != REFC_BINARY_SUBTAG) {
+ mp = erts_db_get_match_prog_binary(BIF_ARG_2);
+ if (!mp)
goto error;
- }
- mp = bp->val;
- if (!IsMatchProgBinary(mp)) {
- goto error;
- }
if (BIF_ARG_1 == NIL) {
BIF_RET(BIF_ARG_3);
@@ -3978,6 +3972,36 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+/*
+ * For testing only
+ * Retrieve meta table size state
+ */
+Eterm erts_ets_get_meta_state(Process* p)
+{
+ Eterm* hp = HAlloc(p, 3);
+ return TUPLE2(hp,
+ erts_ets_hash_get_memstate(p, &meta_pid_to_tab->hash),
+ erts_ets_hash_get_memstate(p, &meta_pid_to_fixed_tab->hash));
+}
+/*
+ * For testing only
+ * Restore a previously retrieved meta table size state.
+ * We do this to suppress failed memory checks
+ * caused by the hysteresis of meta tables grow/shrink limits.
+ */
+Eterm erts_ets_restore_meta_state(Process* p, Eterm meta_state)
+{
+ Eterm* tv;
+ Eterm* hp;
+ if (!is_tuple_arity(meta_state, 2))
+ return am_badarg;
+
+ tv = tuple_val(meta_state);
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp,
+ erts_ets_hash_restore_memstate(&meta_pid_to_tab->hash, tv[1]),
+ erts_ets_hash_restore_memstate(&meta_pid_to_fixed_tab->hash, tv[2]));
+}
#ifdef HARDDEBUG /* Here comes some debug functions */
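Both test helpers above allocate exactly 3 heap words before TUPLE2(): a 2-tuple costs one arity header plus its two elements. A minimal sketch with placeholder atoms:

    Eterm *hp = HAlloc(p, 3);                 /* header + 2 elements */
    Eterm t   = TUPLE2(hp, am_true, am_false);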
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index b0508f2e74..852440ff31 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -90,6 +90,8 @@ extern Export ets_select_continue_exp;
extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
+Eterm erts_ets_get_meta_state(Process* p);
+Eterm erts_ets_restore_meta_state(Process* p, Eterm target_state);
Uint erts_db_get_max_tabs(void);
@@ -267,7 +269,5 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#undef ERTS_DB_ALC_MEM_UPDATE_
-
#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 390369fdb9..bb29d56aa7 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -84,25 +84,28 @@
#include "erl_db_hash.h"
-#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */
-# define IF_DEBUG(x) x
-# define MY_ASSERT(x) ASSERT(x)
-#else
-# define IF_DEBUG(x)
-# define MY_ASSERT(x)
-#endif
-
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
-#define CHAIN_LEN 6 /* Medium bucket chain len */
+#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
+#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
-/* Number of slots per segment */
-#define SEGSZ_EXP 8
-#define SEGSZ (1 << SEGSZ_EXP)
-#define SEGSZ_MASK (SEGSZ-1)
+/*
+** We want the first mandatory segment to be small (to reduce the minimal footprint)
+** and the extra segments to be larger (to reduce the number of alloc/free calls).
+*/
-#define NSEG_1 2 /* Size of first segment table (must be at least 2) */
+/* Number of slots in first segment */
+#define FIRST_SEGSZ_EXP 8
+#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)
+#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1)
+
+/* Number of slots per extra segment */
+#define EXT_SEGSZ_EXP 11
+#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)
+#define EXT_SEGSZ_MASK (EXT_SEGSZ-1)
+
+#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*))
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
@@ -123,7 +126,9 @@
#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
-#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
+#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
+
+#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK]
/*
* When deleting a table, the number of records to delete.
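With the constants defined in this hunk (FIRST_SEGSZ = 256, EXT_SEGSZ = 2048), the new slot-to-segment mapping works out as follows (illustrative arithmetic only):

    /* SLOT_IX_TO_SEG_IX(i) = (i + 2048 - 256) >> 11
     *   slots    0..255  -> segment 0 (the small first segment)
     *   slots  256..2303 -> segment 1
     *   slots 2304..4351 -> segment 2, and so on.
     * The bucket inside a segment is i & EXT_SEGSZ_MASK; this wraps for
     * slots >= 2048 but stays a bijection because segment 0 only holds
     * the first 256 slots. */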
@@ -189,6 +194,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
@@ -318,27 +324,23 @@ struct mp_info {
/* A table segment */
struct segment {
- HashDbTerm* buckets[SEGSZ];
-#ifdef MYDEBUG
- int is_ext_segment;
-#endif
+ HashDbTerm* buckets[1];
};
+#define SIZEOF_SEGMENT(N) \
+ (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N))
-/* A segment that also contains a segment table */
-struct ext_segment {
- struct segment s; /* The segment itself. Must be first */
-
+/* An extended segment table */
+struct ext_segtab {
+#ifdef ERTS_SMP
+ ErtsThrPrgrLaterOp lop;
+#endif
struct segment** prev_segtab; /* Used when table is shrinking */
- int nsegs; /* Size of segtab */
+ int prev_nsegs; /* Size of prev_segtab */
+ int nsegs; /* Size of this segtab */
struct segment* segtab[1]; /* The segment table */
};
-#define SIZEOF_EXTSEG(NSEGS) \
- (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-
-#if defined(DEBUG) || defined(VALGRIND)
-# define EXTSEG(SEGTAB_PTR) \
- ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
-#endif
+#define SIZEOF_EXT_SEGTAB(NSEGS) \
+ (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS))
static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
@@ -348,49 +350,22 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
else
erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
-#ifdef VALGRIND
- tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
-#endif
}
-/* How the table segments relate to each other:
-
- ext_segment: ext_segment: "plain" segment
- #=================# #================# #=============#
- | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] |
- | bucket[1] | | | | [257] | | | [513] |
- : : | | : : | : :
- | bucket[255] | | | | [511] | | | [767] |
- |-----------------| | | |----------------| | #=============#
- | prev_segtab=NULL| | | +--<---prev_segtab | |
- | nsegs = 2 | | | | | nsegs = 256 | |
-+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ |
-| | segtab[1] -->-----------+---|--<---segtab[1] | | |
-| #=================# | | segtab[2] -->-----|--+ ext_segment:
-| | : : | #================#
-+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]|
- #================# | | |
- | : :
- | |----------------|
- +----<---prev_segtab |
- : :
-*/
-
/*
** Forward decl's (static functions)
*/
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab);
-static int alloc_seg(DbTableHash *tb);
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
+static void alloc_seg(DbTableHash *tb);
static int free_seg(DbTableHash *tb, int free_records);
static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
-static void shrink(DbTableHash* tb, int nactive);
-static void grow(DbTableHash* tb, int nactive);
+static void shrink(DbTableHash* tb, int nitems);
+static void grow(DbTableHash* tb, int nitems);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
Uint sz, DbTableHash*);
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
@@ -463,9 +438,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
int nactive = NACTIVE(tb);
- if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ int nitems = NITEMS(tb);
+ if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)
&& !IS_FIXED(tb)) {
- shrink(tb, nactive);
+ shrink(tb, nitems);
}
}
@@ -662,16 +638,20 @@ int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
- erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
+ erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
+ erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
- SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
+ SET_SEGTAB(tb, tb->first_segtab);
tb->nsegs = NSEG_1;
- tb->nslots = SEGSZ;
+ tb->nslots = FIRST_SEGSZ;
+ tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(FIRST_SEGSZ));
+ sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
#ifdef ERTS_SMP
+ erts_smp_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
int i;
@@ -686,7 +666,7 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
"db_hash_slot", make_small(i));
}
- /* This important property is needed to guarantee that the buckets
+ /* This important property is needed to guarantee the two buckets
 * involved in a grow/shrink operation is protected by the same lock:
*/
ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
@@ -862,8 +842,8 @@ Lnew:
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
}
CHECK_TABLES();
@@ -1307,15 +1287,13 @@ static int db_select_continue_hash(Process *p,
if (arityval(*tptr) != 6)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!is_small(tptr[2]) || !is_small(tptr[3]) || !is_binary(tptr[4]) ||
+ if (!is_small(tptr[2]) || !is_small(tptr[3]) ||
!(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
if ((chunk_size = signed_val(tptr[3])) < 0)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
match_list = tptr[5];
@@ -1514,6 +1492,7 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl,
match_list = CONS(hp, match_res, match_list);
++got;
}
+ --num_left;
}
current = current->next;
}
@@ -1531,7 +1510,6 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl,
}
}
else { /* Key is variable */
- --num_left;
if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
slot_ix = -1;
@@ -1574,8 +1552,8 @@ done:
been in 'user space' */
}
if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- hp = HAlloc(p,3+7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
+ hp = HAlloc(p,3+7+ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp);
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
@@ -1600,8 +1578,8 @@ trap:
BUMP_ALL_REDS(p);
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- hp = HAlloc(p,7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
+ hp = HAlloc(p,7+ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp);
continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
make_small(chunk_size),
mpb, match_list,
@@ -1713,15 +1691,15 @@ done:
trap:
BUMP_ALL_REDS(p);
if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + 5);
egot = make_small(got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + ERTS_MAGIC_REF_THING_SIZE + 5);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
mpb,
egot);
@@ -1858,15 +1836,15 @@ done:
trap:
BUMP_ALL_REDS(p);
if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + 5);
egot = make_small(got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + ERTS_MAGIC_REF_THING_SIZE + 5);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
mpb,
egot);
@@ -1907,7 +1885,7 @@ static int db_select_delete_continue_hash(Process *p,
tptr = tuple_val(continuation);
slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[3]);
if (is_big(tptr[4])) {
got = big_to_uint32(tptr[4]);
} else {
@@ -2019,7 +1997,7 @@ static int db_select_count_continue_hash(Process *p,
tptr = tuple_val(continuation);
slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[3]);
if (is_big(tptr[4])) {
got = big_to_uint32(tptr[4]);
} else {
@@ -2250,12 +2228,12 @@ static int db_free_table_continue_hash(DbTable *tbl)
done /= 2;
while(tb->nslots != 0) {
- free_seg(tb, 1);
+ done += 1 + EXT_SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
+ if (done >= DELETE_RECORD_LIMIT) {
return 0; /* Not done */
}
}
@@ -2414,90 +2392,81 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab)
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
{
- int nsegs;
- struct ext_segment* eseg;
+ struct segment** old_segtab = SEGTAB(tb);
+ int nsegs = 0;
+ struct ext_segtab* est;
+ ASSERT(seg_ix >= NSEG_1);
switch (seg_ix) {
- case 0: nsegs = NSEG_1; break;
- case 1: nsegs = NSEG_2; break;
- default: nsegs = seg_ix + NSEG_INC; break;
- }
- eseg = (struct ext_segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- SIZEOF_EXTSEG(nsegs));
- ASSERT(eseg != NULL);
- sys_memset(&eseg->s, 0, sizeof(struct segment));
- IF_DEBUG(eseg->s.is_ext_segment = 1);
- eseg->prev_segtab = old_segtab;
- eseg->nsegs = nsegs;
- if (old_segtab) {
- ASSERT(nsegs > tb->nsegs);
- sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
- }
+ case NSEG_1: nsegs = NSEG_2; break;
+ default: nsegs = seg_ix + NSEG_INC; break;
+ }
+ ASSERT(nsegs > tb->nsegs);
+ est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_EXT_SEGTAB(nsegs));
+ est->nsegs = nsegs;
+ est->prev_segtab = old_segtab;
+ est->prev_nsegs = tb->nsegs;
+ sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
#ifdef DEBUG
- sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
+ sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
#endif
- eseg->segtab[seg_ix] = &eseg->s;
- return eseg;
+ return est;
}
/* Extend table with one new segment
*/
-static int alloc_seg(DbTableHash *tb)
+static void alloc_seg(DbTableHash *tb)
{
- int seg_ix = tb->nslots >> SEGSZ_EXP;
-
- if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */
- struct segment** segtab = SEGTAB(tb);
- struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab);
- if (seg == NULL) return 0;
- segtab[seg_ix] = &seg->s;
- /* We don't use the new segtab until next call (see "shrink race") */
- }
- else { /* Just a new plain segment */
- struct segment** segtab;
- if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */
- struct ext_segment* eseg;
- eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
- MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- SET_SEGTAB(tb, eseg->segtab);
- tb->nsegs = eseg->nsegs;
- }
- ASSERT(seg_ix < tb->nsegs);
- segtab = SEGTAB(tb);
- ASSERT(segtab[seg_ix] == NULL);
- segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- sizeof(struct segment));
- if (segtab[seg_ix] == NULL) return 0;
- sys_memset(segtab[seg_ix], 0, sizeof(struct segment));
- }
- tb->nslots += SEGSZ;
- return 1;
+ int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
+ struct segment** segtab;
+
+ ASSERT(seg_ix > 0);
+ if (seg_ix == tb->nsegs) { /* New segtab needed */
+ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix);
+ SET_SEGTAB(tb, est->segtab);
+ tb->nsegs = est->nsegs;
+ }
+ ASSERT(seg_ix < tb->nsegs);
+ segtab = SEGTAB(tb);
+ segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(EXT_SEGSZ));
+ sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+ tb->nslots += EXT_SEGSZ;
}
+#ifdef ERTS_SMP
+static void dealloc_ext_segtab(void* lop_data)
+{
+ struct ext_segtab* est = (struct ext_segtab*) lop_data;
+
+ erts_free(ERTS_ALC_T_DB_SEG, est);
+}
+#endif
+
/* Shrink table by freeing the top segment
** free_records: 1=free any records in segment, 0=assume segment is empty
*/
static int free_seg(DbTableHash *tb, int free_records)
{
- const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1;
+ const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
struct segment** const segtab = SEGTAB(tb);
- struct ext_segment* const top = (struct ext_segment*) segtab[seg_ix];
- int bytes;
+ struct segment* const segp = segtab[seg_ix];
+ Uint seg_sz;
int nrecords = 0;
- ASSERT(top != NULL);
+ ASSERT(segp != NULL);
#ifndef DEBUG
if (free_records)
#endif
{
- int i;
- for (i=0; i<SEGSZ; ++i) {
- HashDbTerm* p = top->s.buckets[i];
+ int i = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ while (i--) {
+ HashDbTerm* p = segp->buckets[i];
while(p != 0) {
HashDbTerm* nxt = p->next;
ASSERT(free_records); /* segment not empty as assumed? */
@@ -2507,55 +2476,46 @@ static int free_seg(DbTableHash *tb, int free_records)
}
}
}
-
- /* The "shrink race":
- * We must avoid deallocating an extended segment while its segtab may
- * still be used by other threads.
- * The trick is to stop use a segtab one call earlier. That is, stop use
- * a segtab when the segment above it is deallocated. When the segtab is
- * later deallocated, it has not been used for a very long time.
- * It is even theoretically safe as we have by then rehashed the entire
- * segment, seizing *all* locks, so there cannot exist any retarded threads
- * still hanging in BUCKET macro with an old segtab pointer.
- * For this to work, we must of course allocate a new segtab one call
- * earlier in alloc_seg() as well. And this is also the reason why
- * the minimum size of the first segtab is 2 and not 1 (NSEG_1).
- */
- if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */
- MY_ASSERT(top->s.is_ext_segment);
- ASSERT(segtab != top->segtab || seg_ix==0);
- bytes = SIZEOF_EXTSEG(top->nsegs);
- }
- else { /* Dealloc plain segment */
- struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1];
- MY_ASSERT(!top->s.is_ext_segment);
-
- if (segtab == newtop->segtab) { /* New top segment is extended */
- MY_ASSERT(newtop->s.is_ext_segment);
- if (newtop->prev_segtab != NULL) {
- /* Time to use a smaller segtab */
- SET_SEGTAB(tb, newtop->prev_segtab);
- tb->nsegs = seg_ix;
- ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
- }
- else {
- ASSERT(NSEG_1 > 2 && seg_ix==1);
- }
- }
- bytes = sizeof(struct segment);
+ if (seg_ix >= NSEG_1) {
+ struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab);
+
+ if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */
+ ASSERT(est->prev_segtab != NULL);
+ SET_SEGTAB(tb, est->prev_segtab);
+ tb->nsegs = est->prev_nsegs;
+
+#ifdef ERTS_SMP
+ if (!tb->common.is_thread_safe) {
+ /*
+ * Table is doing a graceful shrink operation and we must avoid
+ * deallocating this segtab while it may still be read by other
+ * threads. Schedule deallocation with thread progress to make
+ * sure no lingering threads are still hanging in BUCKET macro
+ * with an old segtab pointer.
+ */
+ Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs);
+ ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0);
+ erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab,
+ est,
+ &est->lop,
+ sz);
+ }
+ else
+#endif
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
}
+ seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz));
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
- (void*)top, bytes);
#ifdef DEBUG
- if (seg_ix > 0) {
- segtab[seg_ix] = NULL;
- } else {
- SET_SEGTAB(tb, NULL);
- }
+ if (seg_ix < tb->nsegs)
+ SEGTAB(tb)[seg_ix] = NULL;
#endif
- tb->nslots -= SEGSZ;
+ tb->nslots -= seg_sz;
ASSERT(tb->nslots >= 0);
return nrecords;
}
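
Note: the hunk above changes the layout so that segment 0 holds FIRST_SEGSZ buckets while every later segment holds EXT_SEGSZ buckets. The macro definitions are not part of this diff; the sketch below, using assumed example values (both powers of two), only illustrates how a slot count could map onto a segment index in the way SLOT_IX_TO_SEG_IX is used in alloc_seg() and free_seg().

/* Illustration only: assumed example sizes, not the real definitions. */
#define FIRST_SEGSZ_X   256                 /* assumed size of segment 0      */
#define EXT_SEGSZ_X     2048                /* assumed size of later segments */
#define EXT_SEGSZ_EXP_X 11                  /* log2(EXT_SEGSZ_X)              */

/* Map a slot count to the index of the segment the next slot falls in,
 * compensating for the smaller first segment. */
static int slot_ix_to_seg_ix_example(int nslots)
{
    return (nslots + (EXT_SEGSZ_X - FIRST_SEGSZ_X)) >> EXT_SEGSZ_EXP_X;
}
/* slot_ix_to_seg_ix_example(FIRST_SEGSZ_X) == 1: the first alloc_seg() call
 * allocates segment 1, and nslots then grows by EXT_SEGSZ_X per segment.   */
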
@@ -2604,104 +2564,111 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
- else {
- if (erts_smp_atomic_read_nob(&tb->is_resizing))
- return 0;
- erts_smp_atomic_set_nob(&tb->is_resizing, 1);
- return 1;
- }
+ return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+#endif
+ return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
- else
- erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+ erts_atomic_set_relb(&tb->is_resizing, 0);
+#endif
}
-/* Grow table with one new bucket.
+/* Grow table with one or more new buckets.
** Allocate new segment if needed.
*/
-static void grow(DbTableHash* tb, int nactive)
+static void grow(DbTableHash* tb, int nitems)
{
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
erts_smp_rwmtx_t* lck;
- int from_ix;
+ int nactive;
+ int from_ix, to_ix;
int szm;
+ int loop_limit = 5;
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) != nactive) {
- goto abort; /* already done (race) */
- }
-
- /* Ensure that the slot nactive exists */
- if (nactive == tb->nslots) {
- /* Time to get a new segment */
- ASSERT((nactive & SEGSZ_MASK) == 0);
- if (!alloc_seg(tb)) goto abort;
- }
- ASSERT(nactive < tb->nslots);
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (nitems <= GROW_LIMIT(nactive)) {
+ goto abort; /* already done (race) */
+ }
- szm = erts_smp_atomic_read_nob(&tb->szm);
- if (nactive <= szm) {
- from_ix = nactive & (szm >> 1);
- } else {
- ASSERT(nactive == szm+1);
- from_ix = 0;
- szm = (szm<<1) | 1;
- }
+ /* Ensure that the slot nactive exists */
+ if (nactive == tb->nslots) {
+ /* Time to get a new segment */
+ ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0);
+ alloc_seg(tb);
+ }
+ ASSERT(nactive < tb->nslots);
- lck = WLOCK_HASH(tb, from_ix);
- /* Now a final double check (with the from_ix lock held)
- * that we did not get raced by a table fixer.
- */
- if (IS_FIXED(tb)) {
- WUNLOCK_HASH(lck);
- goto abort;
- }
- erts_smp_atomic_inc_nob(&tb->nactive);
- if (from_ix == 0) {
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->szm, szm);
- else
- erts_smp_atomic_set_nob(&tb->szm, szm);
- }
- done_resizing(tb);
+ szm = erts_smp_atomic_read_nob(&tb->szm);
+ if (nactive <= szm) {
+ from_ix = nactive & (szm >> 1);
+ } else {
+ ASSERT(nactive == szm+1);
+ from_ix = 0;
+ szm = (szm<<1) | 1;
+ }
+ to_ix = nactive;
+
+ lck = WLOCK_HASH(tb, from_ix);
+ ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
+ /* Now a final double check (with the from_ix lock held)
+ * that we did not get raced by a table fixer.
+ */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+ erts_smp_atomic_set_nob(&tb->nactive, ++nactive);
+ if (from_ix == 0) {
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_relb(&tb->szm, szm);
+ else
+ erts_smp_atomic_set_nob(&tb->szm, szm);
+ }
+ done_resizing(tb);
+
+ /* Finally, let's split the bucket. We try to do it in a smart way
+ to keep link order and avoid unnecessary updates of next-pointers */
+ pnext = &BUCKET(tb, from_ix);
+ p = *pnext;
+ to_pnext = &BUCKET(tb, to_ix);
+ while (p != NULL) {
+ if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ *pnext = p->next;
+ free_term(tb, p);
+ p = *pnext;
+ }
+ else {
+ int ix = p->hvalue & szm;
+ if (ix != from_ix) {
+ ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
+ *to_pnext = p;
+ /* Swap "from" and "to": */
+ from_ix = ix;
+ to_pnext = pnext;
+ }
+ pnext = &p->next;
+ p = *pnext;
+ }
+ }
+ *to_pnext = NULL;
+ WUNLOCK_HASH(lck);
- /* Finally, let's split the bucket. We try to do it in a smart way
- to keep link order and avoid unnecessary updates of next-pointers */
- pnext = &BUCKET(tb, from_ix);
- p = *pnext;
- to_pnext = &BUCKET(tb, nactive);
- while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
- *pnext = p->next;
- free_term(tb, p);
- p = *pnext;
- }
- else {
- int ix = p->hvalue & szm;
- if (ix != from_ix) {
- ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
- *to_pnext = p;
- /* Swap "from" and "to": */
- from_ix = ix;
- to_pnext = pnext;
- }
- pnext = &p->next;
- p = *pnext;
- }
- }
- *to_pnext = NULL;
+ }while (--loop_limit && nitems > GROW_LIMIT(nactive));
- WUNLOCK_HASH(lck);
return;
abort:
@@ -2712,60 +2679,78 @@ abort:
/* Shrink table by joining top bucket.
** Remove top segment if it gets empty.
*/
-static void shrink(DbTableHash* tb, int nactive)
-{
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) == nactive) {
- erts_smp_rwmtx_t* lck;
- int src_ix = nactive - 1;
- int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
- int dst_ix = src_ix & low_szm;
-
- ASSERT(dst_ix < src_ix);
- ASSERT(nactive > SEGSZ);
- lck = WLOCK_HASH(tb, dst_ix);
- /* Double check for racing table fixers */
- if (!IS_FIXED(tb)) {
- HashDbTerm** src_bp = &BUCKET(tb, src_ix);
- HashDbTerm** dst_bp = &BUCKET(tb, dst_ix);
- HashDbTerm** bp = src_bp;
-
- /* Q: Why join lists by appending "dst" at the end of "src"?
- A: Must step through "src" anyway to purge pseudo deleted. */
- while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
- HashDbTerm* deleted = *bp;
- *bp = deleted->next;
- free_term(tb, deleted);
- } else {
- bp = &(*bp)->next;
- }
- }
- *bp = *dst_bp;
- *dst_bp = *src_bp;
- *src_bp = NULL;
-
- erts_smp_atomic_set_nob(&tb->nactive, src_ix);
- if (dst_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, low_szm);
- }
- WUNLOCK_HASH(lck);
-
- if (tb->nslots - src_ix >= SEGSZ) {
- free_seg(tb, 0);
- }
- }
- else {
- WUNLOCK_HASH(lck);
- }
+static void shrink(DbTableHash* tb, int nitems)
+{
+ HashDbTerm** src_bp;
+ HashDbTerm** dst_bp;
+ HashDbTerm** bp;
+ erts_smp_rwmtx_t* lck;
+ int src_ix, dst_ix, low_szm;
+ int nactive;
+ int loop_limit = 5;
- }
- /*else already done */
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) {
+ goto abort; /* already done (race) */
+ }
+ src_ix = nactive - 1;
+ low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
+ dst_ix = src_ix & low_szm;
+
+ ASSERT(dst_ix < src_ix);
+ ASSERT(nactive > FIRST_SEGSZ);
+ lck = WLOCK_HASH(tb, dst_ix);
+ ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
+ /* Double check for racing table fixers */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+
+ src_bp = &BUCKET(tb, src_ix);
+ dst_bp = &BUCKET(tb, dst_ix);
+ bp = src_bp;
+
+ /*
+ * We join lists by appending "dst" at the end of "src"
+ * as we must step through "src" anyway to purge pseudo deleted.
+ */
+ while(*bp != NULL) {
+ if ((*bp)->hvalue == INVALID_HASH) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = *dst_bp;
+ *dst_bp = *src_bp;
+ *src_bp = NULL;
+
+ nactive = src_ix;
+ erts_smp_atomic_set_nob(&tb->nactive, nactive);
+ if (dst_ix == 0) {
+ erts_smp_atomic_set_relb(&tb->szm, low_szm);
+ }
+ WUNLOCK_HASH(lck);
+
+ if (tb->nslots - src_ix >= EXT_SEGSZ) {
+ free_seg(tb, 0);
+ }
+ done_resizing(tb);
+
+ } while (--loop_limit
+ && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive));
+ return;
+
+abort:
done_resizing(tb);
}
-
/* Search a list of tuples for a matching key */
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
@@ -2931,8 +2916,8 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
WUNLOCK_HASH(lck);
nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
} else {
WUNLOCK_HASH(lck);
@@ -3016,6 +3001,51 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
stats->kept_items = kept_items;
}
+
+/* For testing only */
+Eterm erts_ets_hash_sizeof_ext_segtab(void)
+{
+ return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
+}
+/* For testing only */
+Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb)
+{
+ Eterm seg_cnt;
+ while (!begin_resizing(tb))
+        /*spin*/;
+
+ seg_cnt = make_small(SLOT_IX_TO_SEG_IX(tb->nslots));
+ done_resizing(tb);
+ return seg_cnt;
+}
+/* For testing only */
+Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate)
+{
+ int seg_cnt, target;
+
+ if (!is_small(memstate))
+ return make_small(__LINE__);
+
+ target = signed_val(memstate);
+ if (target < 1)
+ return make_small(__LINE__);
+ while (1) {
+ while (!begin_resizing(tb))
+ /*spin*/;
+ seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots);
+ done_resizing(tb);
+
+ if (target == seg_cnt)
+ return am_ok;
+ if (IS_FIXED(tb))
+ return make_small(__LINE__);
+ if (target < seg_cnt)
+ shrink(tb, 0);
+ else
+ grow(tb, INT_MAX);
+ }
+}
+
#ifdef HARDDEBUG
void db_check_table_hash(DbTable *tbl)
@@ -3024,7 +3054,7 @@ void db_check_table_hash(DbTable *tbl)
HashDbTerm* list;
int j;
- for (j = 0; j < tb->nactive; j++) {
+ for (j = 0; j < NACTIVE(tb); j++) {
if ((list = BUCKET(tb,j)) != 0) {
while (list != 0) {
if (!is_tuple(make_tuple(list->dbterm.tpl))) {
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e654363cd5..6d25c73549 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -50,23 +50,23 @@ typedef struct db_table_hash_fine_locks {
typedef struct db_table_hash {
DbTableCommon common;
- erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
+ /* SMP: szm and nactive are write-protected by is_resizing or table write lock */
erts_smp_atomic_t szm; /* current size mask. */
-
+ erts_smp_atomic_t nactive; /* Number of "active" slots */
+
+ erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
+ struct segment* first_segtab[1];
+
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
int nslots; /* Total number of slots */
int nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
- erts_smp_atomic_t nactive; /* Number of "active" slots */
- erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
#ifdef ERTS_SMP
+ erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
#endif
-#ifdef VALGRIND
- struct ext_segment* top_ptr_to_segment_with_active_segtab;
-#endif
} DbTableHash;
@@ -109,5 +109,8 @@ typedef struct {
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
+Eterm erts_ets_hash_sizeof_ext_segtab(void);
+Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb);
+Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate);
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index dd9403e132..c4ecd2ba37 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -935,17 +935,15 @@ static int db_select_continue_tree(Process *p,
if (arityval(*tptr) != 8)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!is_small(tptr[4]) || !is_binary(tptr[5]) ||
+ if (!is_small(tptr[4]) ||
!(is_list(tptr[6]) || tptr[6] == NIL) || !is_small(tptr[7]) ||
!is_small(tptr[8]))
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[5])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[5]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[5]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
chunk_size = signed_val(tptr[4]);
@@ -1145,11 +1143,11 @@ static int db_select_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb=db_make_mp_binary(p,mpi.mp,&hp);
+ mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
@@ -1208,10 +1206,8 @@ static int db_select_count_continue_tree(Process *p,
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
sc.p = p;
@@ -1338,18 +1334,18 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
if (IS_USMALL(0, sc.got)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = make_small(sc.got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = uint_to_big(sc.got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
@@ -1470,11 +1466,11 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
@@ -1495,12 +1491,12 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
tb->common.id,
@@ -1558,7 +1554,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.erase_lastterm = 0; /* Before first RET_TO_BIF */
sc.lastterm = NULL;
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
sc.tb = tb;
if (is_big(tptr[5])) {
@@ -1682,16 +1678,16 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = make_small(sc.accum);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = uint_to_big(sc.accum, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index f4db7ca3dd..6f30b1d3dd 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1702,17 +1702,18 @@ error: /* Here is were we land when compilation failed. */
/*
** Free a match program (in a binary)
*/
-void erts_db_match_prog_destructor(Binary *bprog)
+int erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
if (bprog == NULL)
- return;
+ return 1;
prog = Binary2MatchProg(bprog);
if (prog->term_save != NULL) {
free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
+ return 1;
}
void
@@ -1769,7 +1770,7 @@ Eterm db_prog_match(Process *c_p,
Eterm t;
Eterm *esp;
MatchVariable* variables;
- BeamInstr *cp;
+ ErtsCodeMFA *cp;
const UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
@@ -2408,9 +2409,9 @@ restart:
ehp = HAllocX(build_proc, 4, HEAP_XTRA);
*esp++ = make_tuple(ehp);
ehp[0] = make_arityval(3);
- ehp[1] = cp[0];
- ehp[2] = cp[1];
- ehp[3] = make_small((Uint) cp[2]);
+ ehp[1] = cp->module;
+ ehp[2] = cp->function;
+ ehp[3] = make_small((Uint) cp->arity);
}
break;
case matchSilent:
@@ -2545,14 +2546,6 @@ success:
}
-/*
- * Convert a match program to a "magic" binary to return up to erlang
- */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp)
-{
- return erts_mk_magic_binary_term(hpp, &MSO(p), mp);
-}
-
DMCErrInfo *db_new_dmc_err_info(void)
{
DMCErrInfo *ret = erts_alloc(ERTS_ALC_T_DB_DMC_ERR_INFO,
@@ -3111,6 +3104,11 @@ void db_cleanup_offheap_comp(DbTerm* obj)
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ if (erts_refc_dectest(&u.mref->mb->refc, 0) == 0)
+ erts_bin_free((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
ASSERT(u.pb != &tmp);
@@ -3266,13 +3264,6 @@ int db_has_variable(Eterm node) {
return 0;
}
-int erts_db_is_compiled_ms(Eterm term)
-{
- return (is_binary(term)
- && (thing_subtag(*binary_val(term)) == REFC_BINARY_SUBTAG)
- && IsMatchProgBinary((((ProcBin *) binary_val(term))->val)));
-}
-
/*
** Local (static) utilities.
*/
@@ -5289,24 +5280,35 @@ void db_match_dis(Binary *bp)
case matchEqRef:
++t;
{
- RefThing *rt = (RefThing *) t;
+ Uint32 *num;
int ri;
- n = thing_arityval(rt->header);
- erts_printf("EqRef\t(%d) {", (int) n);
+
+ if (is_ordinary_ref_thing(t)) {
+ ErtsORefThing *rt = (ErtsORefThing *) t;
+ num = rt->num;
+ t += TermWords(ERTS_REF_THING_SIZE);
+ }
+ else {
+ ErtsMRefThing *mrt = (ErtsMRefThing *) t;
+ ASSERT(is_magic_ref_thing(t));
+ num = mrt->mb->refn;
+ t += TermWords(ERTS_MAGIC_REF_THING_SIZE);
+ }
+
+ erts_printf("EqRef\t(%d) {", (int) ERTS_REF_NUMBERS);
first = 1;
- for (ri = 0; ri < n; ++ri) {
+ for (ri = 0; ri < ERTS_REF_NUMBERS; ++ri) {
if (first)
first = 0;
else
erts_printf(", ");
#if defined(ARCH_64)
- erts_printf("0x%016bex", rt->data.ui[ri]);
+ erts_printf("0x%016bex", num[ri]);
#else
- erts_printf("0x%08bex", rt->data.ui[ri]);
+ erts_printf("0x%08bex", num[ri]);
#endif
}
}
- t += TermWords(REF_THING_SIZE);
erts_printf("}\n");
break;
case matchEqBig:
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 3fb063797e..471fefe3cb 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -23,6 +23,7 @@
#include "global.h"
#include "erl_message.h"
+#include "erl_bif_unique.h"
/*#define HARDDEBUG 1*/
@@ -355,7 +356,7 @@ Eterm db_add_counter(Eterm** hpp, Wterm counter, Eterm incr);
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags);
Binary *db_match_set_compile(Process *p, Eterm matchexpr,
Uint flags);
-void erts_db_match_prog_destructor(Binary *);
+int erts_db_match_prog_destructor(Binary *);
typedef struct match_prog {
ErlHeapFragment *term_save; /* Only if needed, a list of message
@@ -456,10 +457,44 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei);
void db_free_dmc_err_info(DMCErrInfo *ei);
/* Completely free's an error info structure, including all recorded
errors */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp);
-/* Convert a match program to a erlang "magic" binary to be returned to userspace,
- increments the reference counter. */
-int erts_db_is_compiled_ms(Eterm term);
+
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary(Eterm term);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary_unchecked(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * Convert a match program to a "magic" ref to return up to erlang
+ */
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp)
+{
+ return erts_mk_magic_ref(hpp, &MSO(p), mp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary_unchecked(Eterm term)
+{
+ Binary *bp = erts_magic_ref2bin(term);
+ ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT((ERTS_MAGIC_BIN_DESTRUCTOR(bp) == erts_db_match_prog_destructor));
+ return bp;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary(Eterm term)
+{
+ Binary *bp;
+ if (!is_internal_magic_ref(term))
+ return NULL;
+ bp = erts_magic_ref2bin(term);
+ ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bp) != erts_db_match_prog_destructor)
+ return NULL;
+ return bp;
+}
+
+#endif
/*
** Convenience when compiling into Binary structures
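
Note: the continuation handling in erl_db_hash.c and erl_db_tree.c above follows one pattern when it builds and later unpacks a compiled match program. The helper below is only a condensed illustration of that round trip; the process p and compiled Binary *mp are assumed to come from the caller, and the function itself is not part of the patch.

/* Illustration only: wrap a compiled match program in a magic ref and show
 * that the accessor introduced above recovers the same Binary. */
static Eterm make_and_check_match_prog_ref(Process *p, Binary *mp)
{
    Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    Eterm mpb = erts_db_make_match_prog_ref(p, mp, &hp);

    /* When the continuation comes back from Erlang code, the ref is
     * validated again; a non-match-program term would yield NULL. */
    ASSERT(erts_db_get_match_prog_binary(mpb) == mp);
    return mpb;
}
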
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 3526bb684d..517dad9cbb 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -130,7 +130,7 @@ pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
Uint32 *ref_num;
erts_print(to, to_arg, "#Ref<%lu", ref_channel_no(obj));
ref_num = ref_numbers(obj);
- for (i = ref_no_of_numbers(obj)-1; i >= 0; i--)
+ for (i = ref_no_numbers(obj)-1; i >= 0; i--)
erts_print(to, to_arg, ",%lu", ref_num[i]);
erts_print(to, to_arg, ">");
break;
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
new file mode 100644
index 0000000000..69421dcfcc
--- /dev/null
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -0,0 +1,82 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2016. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Static declaration of BIFs that should execute on dirty schedulers.
+#
+# <dirty-bif-decl> ::= <type> <bif>
+# <bif> ::= <module> ":" <name> "/" <arity>
+# <type> ::= dirty-cpu | dirty-io | dirty-cpu-test | dirty-io-test
+#
+# When dirty scheduler support is available, a BIF declared with the
+# 'dirty-cpu' type will unconditionally execute on a dirty CPU scheduler,
+# and a BIF declared with the type 'dirty-io' will unconditionally execute
+# on a dirty IO scheduler. When dirty scheduler support is not available
+# all BIFs will of course execute on normal schedulers.
+#
+# When the emulator has been configured with the debug option
+# '--enable-dirty-schedulers-test', BIFs with the types 'dirty-cpu-test',
+# and 'dirty-io-test' will unconditionally execute on dirty schedulers.
+# When this debug option has not been enabled, these BIFs will be executed
+# on normal schedulers.
+#
+# BIFs marked as 'ubif' in ./bif.tab will be ignored, i.e., will always
+# execute on normal schedulers.
+#
+
+# --- Dirty BIFs ---
+
+dirty-cpu erts_debug:dirty_cpu/2
+dirty-io erts_debug:dirty_io/2
+
+# --- TEST of Dirty BIF functionality ---
+# Functions below will execute on dirty schedulers when the emulator has
+# been configured for testing dirty schedulers. This is used for test
+# and debug purposes only. We really do *not* want to execute these
+# on dirty schedulers on a real system.
+
+dirty-cpu-test erlang:'++'/2
+dirty-cpu-test erlang:append/2
+dirty-cpu-test erlang:'--'/2
+dirty-cpu-test erlang:subtract/2
+dirty-cpu-test erlang:iolist_size/1
+dirty-cpu-test erlang:make_tuple/2
+dirty-cpu-test erlang:make_tuple/3
+dirty-cpu-test erlang:append_element/2
+dirty-cpu-test erlang:insert_element/3
+dirty-cpu-test erlang:delete_element/2
+dirty-cpu-test erlang:atom_to_list/1
+dirty-cpu-test erlang:list_to_atom/1
+dirty-cpu-test erlang:list_to_existing_atom/1
+dirty-cpu-test erlang:integer_to_list/1
+dirty-cpu-test erlang:string_to_integer/1
+dirty-cpu-test erlang:list_to_integer/1
+dirty-cpu-test erlang:list_to_integer/2
+dirty-cpu-test erlang:float_to_list/1
+dirty-cpu-test erlang:float_to_list/2
+dirty-cpu-test erlang:float_to_binary/1
+dirty-cpu-test erlang:float_to_binary/2
+dirty-cpu-test erlang:string_to_float/1
+dirty-cpu-test erlang:list_to_float/1
+dirty-cpu-test erlang:binary_to_float/1
+dirty-cpu-test erlang:tuple_to_list/1
+dirty-cpu-test erlang:list_to_tuple/1
+dirty-cpu-test erlang:display/1
+dirty-cpu-test erlang:display_string/1
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 97a69140c3..b386b68cff 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -76,11 +76,10 @@ typedef struct {
# endif
#endif
-/* Values for mode arg to driver_select() */
-#define ERL_DRV_READ (1 << 0)
-#define ERL_DRV_WRITE (1 << 1)
-#define ERL_DRV_USE (1 << 2)
-#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (1 << 3))
+#define ERL_DRV_READ ((int)ERL_NIF_SELECT_READ)
+#define ERL_DRV_WRITE ((int)ERL_NIF_SELECT_WRITE)
+#define ERL_DRV_USE ((int)ERL_NIF_SELECT_STOP)
+#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (ERL_DRV_USE << 1))
/* Old deprecated */
#define DO_READ ERL_DRV_READ
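
Note: the driver mode flags are now aliases for the enif_select flags added in erl_drv_nif.h further down, so existing drivers keep seeing the historical numeric values. A compile-time check along these lines (not part of the patch) would make that explicit.

/* Illustration only: the aliased definitions keep the old literal values. */
typedef int erl_drv_select_flag_check[
    (ERL_DRV_READ  == (1 << 0) &&
     ERL_DRV_WRITE == (1 << 1) &&
     ERL_DRV_USE   == (1 << 2) &&
     ERL_DRV_USE_NO_CALLBACK == (ERL_DRV_USE | (1 << 3))) ? 1 : -1];
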
@@ -176,13 +175,6 @@ struct erl_drv_event_data {
#endif
typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-/*
- * A driver monitor
- */
-typedef struct {
- unsigned char data[sizeof(void *)*4];
-} ErlDrvMonitor;
-
typedef struct {
unsigned long megasecs;
unsigned long secs;
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 6ec5fbb895..2d21dac87a 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -49,6 +49,21 @@ typedef enum {
ERL_DIRTY_JOB_IO_BOUND = 2
} ErlDirtyJobFlags;
+/* Values for enif_select AND mode arg for driver_select() */
+enum ErlNifSelectFlags {
+ ERL_NIF_SELECT_READ = (1 << 0),
+ ERL_NIF_SELECT_WRITE = (1 << 1),
+ ERL_NIF_SELECT_STOP = (1 << 2)
+};
+
+/*
+ * A driver monitor
+ */
+typedef struct {
+ unsigned char data[sizeof(void *)*4];
+} ErlDrvMonitor;
+
+
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
# undef SIZEOF_CHAR
@@ -69,10 +84,6 @@ typedef enum {
# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
# undef SIZEOF_LONG_LONG
#endif
-#ifdef HALFWORD_HEAP_EMULATOR
-# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
-# undef HALFWORD_HEAP_EMULATOR
-#endif
#include "erl_int_sizes_config.h"
#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
# error SIZEOF_CHAR mismatch
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 92edce5176..0e6aadf568 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -54,6 +54,9 @@ fatal_error(int err, char *func)
struct ErlDrvMutex_ {
ethr_mutex mtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
char *name;
};
@@ -64,6 +67,9 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
char *name;
};
@@ -161,14 +167,17 @@ erl_drv_mutex_create(char *name)
opt.posix_compliant = 1;
if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
- dmtx = NULL;
+ return NULL;
}
- else if (!name)
- dmtx->name = no_name;
- else {
+ if (name) {
dmtx->name = ((char *) dmtx) + sizeof(ErlDrvMutex);
sys_strcpy(dmtx->name, name);
+ } else {
+ dmtx->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&dmtx->lcnt, dmtx->name, ERTS_LCNT_LT_MUTEX);
+#endif
}
return dmtx;
#else
@@ -180,7 +189,11 @@ void
erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&dmtx->lcnt);
+#endif
+ res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_mutex_destroy()");
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
@@ -202,9 +215,14 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
+ int res;
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_trylock()");
- return ethr_mutex_trylock(&dmtx->mtx);
+ res = ethr_mutex_trylock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&dmtx->lcnt, res);
+#endif
+ return res;
#else
return 0;
#endif
@@ -216,8 +234,14 @@ erl_drv_mutex_lock(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_lock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+#endif
ethr_mutex_lock(&dmtx->mtx);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
}
void
@@ -226,6 +250,9 @@ erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -306,10 +333,18 @@ erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
if (!dcnd || !dmtx) {
fatal_error(EINVAL, "erl_drv_cond_wait()");
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
while (1) {
int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
- if (res == 0)
+ if (res == 0) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
break;
+ }
}
#endif
}
@@ -324,14 +359,17 @@ erl_drv_rwlock_create(char *name)
if (drwlck) {
if (ethr_rwmutex_init(&drwlck->rwmtx) != 0) {
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
- drwlck = NULL;
+ return NULL;
}
- else if (!name)
- drwlck->name = no_name;
- else {
+ if (name) {
drwlck->name = ((char *) drwlck) + sizeof(ErlDrvRWLock);
sys_strcpy(drwlck->name, name);
+ } else {
+ drwlck->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&drwlck->lcnt, drwlck->name, ERTS_LCNT_LT_RWMUTEX);
+#endif
}
return drwlck;
#else
@@ -343,7 +381,11 @@ void
erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&drwlck->lcnt);
+#endif
+ res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_rwlock_destroy()");
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
@@ -364,9 +406,14 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
- return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+ res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ);
+#endif
+ return res;
#else
return 0;
#endif
@@ -378,7 +425,13 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
+#endif
#endif
}
@@ -388,6 +441,9 @@ erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+#endif
ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -396,9 +452,14 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
- return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+ res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+#endif
+ return res;
#else
return 0;
#endif
@@ -410,7 +471,13 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
+#endif
#endif
}
@@ -420,6 +487,9 @@ erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index fd4d5b1c5c..cedc0517e1 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -31,6 +31,9 @@
static Hash erts_fun_table;
#include "erl_smp.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
static erts_smp_rwmtx_t erts_fun_table_lock;
@@ -49,8 +52,8 @@ static void fun_free(ErlFunEntry* obj);
* to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
* as an illegal arity when attempting to call a fun.
*/
-static BeamInstr unloaded_fun_code[3] = {NIL, -1, 0};
-static BeamInstr* unloaded_fun = unloaded_fun_code + 2;
+static BeamInstr unloaded_fun_code[4] = {NIL, NIL, -1, 0};
+static BeamInstr* unloaded_fun = unloaded_fun_code + 3;
void
erts_init_fun_table(void)
@@ -219,6 +222,10 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end)
fe->pend_purge_address = addr;
ERTS_SMP_WRITE_MEMORY_BARRIER;
fe->address = unloaded_fun;
+#ifdef HIPE
+ fe->pend_purge_native_address = fe->native_address;
+ hipe_set_closure_stub(fe);
+#endif
erts_purge_state_add_fun(fe);
}
b = b->next;
@@ -234,8 +241,12 @@ erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
- if (fe->address == unloaded_fun)
+ if (fe->address == unloaded_fun) {
fe->address = fe->pend_purge_address;
+#ifdef HIPE
+ fe->native_address = fe->pend_purge_native_address;
+#endif
+ }
}
}
@@ -244,8 +255,12 @@ erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no)
{
Uint ix;
- for (ix = 0; ix < no; ix++)
+ for (ix = 0; ix < no; ix++) {
funs[ix]->pend_purge_address = NULL;
+#ifdef HIPE
+ funs[ix]->pend_purge_native_address = NULL;
+#endif
+ }
}
void
@@ -256,6 +271,9 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
fe->pend_purge_address = NULL;
+#ifdef HIPE
+ fe->pend_purge_native_address = NULL;
+#endif
if (erts_smp_refc_dectest(&fe->refc, 0) == 0)
erts_erase_fun_entry(fe);
}
@@ -324,6 +342,7 @@ fun_alloc(ErlFunEntry* template)
obj->pend_purge_address = NULL;
#ifdef HIPE
obj->native_address = NULL;
+ obj->pend_purge_native_address = NULL;
#endif
return obj;
}
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 0fd0d62343..e3073eb874 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -45,6 +45,9 @@ typedef struct erl_fun_entry {
erts_smp_refc_t refc; /* Reference count: One for code + one for each
fun object in each process. */
BeamInstr *pend_purge_address; /* address stored during a pending purge */
+#ifdef HIPE
+ UWord* pend_purge_native_address;
+#endif
} ErlFunEntry;
/*
@@ -57,9 +60,6 @@ typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
ErlFunEntry* fe; /* Pointer to fun entry. */
struct erl_off_heap_header* next;
-#ifdef HIPE
- UWord* native_address; /* Native code for the fun. */
-#endif
Uint arity; /* The arity of the fun. */
Uint num_free; /* Number of free variables (in env). */
/* -- The following may be compound Erlang terms ---------------------- */
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 9b7744068e..50805d9cd9 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -35,13 +35,14 @@
#include "error.h"
#include "big.h"
#include "erl_gc.h"
-#if HIPE
+#ifdef HIPE
#include "hipe_stack.h"
#include "hipe_mode_switch.h"
#endif
#include "dtrace-wrapper.h"
#include "erl_bif_unique.h"
#include "dist.h"
+#include "erl_nfunc_sched.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -120,11 +121,14 @@ static Eterm *full_sweep_heaps(Process *p,
char *oh, Uint oh_size,
Eterm *objv, int nobj);
static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls);
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage);
static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static void do_minor(Process *p, ErlHeapFragment *live_hf_end,
char *mature, Uint mature_size,
Uint new_sz, Eterm* objv, int nobj);
@@ -174,7 +178,7 @@ Uint erts_test_long_gc_sleep; /* Only used for testing... */
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
@@ -410,20 +414,20 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
if (is_non_value(result)) {
if (p->freason == TRAP) {
- #if HIPE
+#ifdef HIPE
if (regs == NULL) {
regs = erts_proc_sched_data(p)->x_reg_array;
}
- #endif
- cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls);
+#endif
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls, 0);
} else {
- cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls, 0);
}
} else {
Eterm val[1];
val[0] = result;
- cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls, 0);
result = val[0];
}
BUMP_REDS(p, cost);
@@ -601,6 +605,32 @@ young_gen_usage(Process *p)
} \
} while (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE void
+check_for_possibly_long_gc(Process *p, Uint ygen_usage)
+{
+ int major;
+ Uint sz;
+
+ major = (p->flags & F_NEED_FULLSWEEP) || GEN_GCS(p) >= MAX_GEN_GCS(p);
+
+ sz = ygen_usage;
+ sz += p->hend - p->stop;
+ if (p->flags & F_ON_HEAP_MSGQ)
+ sz += p->msg.len;
+ if (major)
+ sz += p->old_htop - p->old_heap;
+
+ if (sz >= ERTS_POTENTIALLY_LONG_GC_HSIZE) {
+ ASSERT(!(p->flags & (F_DISABLE_GC|F_DELAY_GC)));
+ p->flags |= major ? F_DIRTY_MAJOR_GC : F_DIRTY_MINOR_GC;
+ erts_schedule_dirty_sys_execution(p);
+ }
+}
+
+#endif
+
/*
* Garbage collect a process.
*
@@ -611,13 +641,15 @@ young_gen_usage(Process *p)
*/
static int
garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls)
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage)
{
Uint reclaimed_now = 0;
+ Uint ygen_usage;
Eterm gc_trace_end_tag;
int reds;
ErtsMonotonicTime start_time;
- ErtsSchedulerData *esdp;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
@@ -627,13 +659,26 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
- ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
- >= erts_proc_sched_data(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds);
state = erts_smp_atomic32_read_nob(&p->state);
- if (p->flags & (F_DISABLE_GC|F_DELAY_GC) || state & ERTS_PSFLG_EXITING)
+ if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ delay_gc_before_start:
+#endif
return delay_garbage_collection(p, live_hf_end, need, fcalls);
+ }
+
+ ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto delay_gc_before_start;
+ }
+#endif
if (p->abandoned_heap)
live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -642,8 +687,6 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
- esdp = erts_get_scheduler_data();
-
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
start_time = erts_get_monotonic_time(esdp);
@@ -670,14 +713,25 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
}
DTRACE2(gc_minor_start, pidbuf, need);
- reds = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = minor_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
if (reds == -1) {
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & F_DIRTY_MAJOR_GC)
+ goto delay_gc_after_start;
+ }
+#endif
goto do_major_collection;
}
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~F_DIRTY_MINOR_GC;
gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
@@ -686,7 +740,10 @@ do_major_collection:
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
DTRACE2(gc_major_start, pidbuf, need);
- reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = major_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
gc_trace_end_tag = am_gc_major_end;
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
@@ -716,6 +773,9 @@ do_major_collection:
am_kill, NIL, NULL, 0);
erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ delay_gc_after_start:
+#endif
/* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
we have to remove it after the signal is sent */
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
@@ -799,7 +859,7 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
@@ -810,7 +870,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
@@ -836,6 +896,20 @@ erts_garbage_collect_hibernate(Process* p)
if (p->flags & F_DISABLE_GC)
ERTS_INTERNAL_ERROR("GC disabled");
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
+ else {
+ Uint flags = p->flags;
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, (p->htop - p->heap) + p->mbuf_sz);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ p->flags = flags|F_DIRTY_GC_HIBERNATE;
+ return;
+ }
+ p->flags = flags;
+ }
+#endif
/*
* Preliminaries.
*/
@@ -847,7 +921,6 @@ erts_garbage_collect_hibernate(Process* p)
* Do it.
*/
-
heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
@@ -988,10 +1061,11 @@ static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
#endif /* HIPE */
-void
+int
erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint byte_lit_size,
- struct erl_off_heap_header* oh)
+ struct erl_off_heap_header* oh,
+ int fcalls)
{
Uint lit_size = byte_lit_size / sizeof(Eterm);
Uint old_heap_size;
@@ -1003,20 +1077,49 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint area_size;
Eterm* old_htop;
Uint n;
+ Uint ygen_usage = 0;
struct erl_off_heap_header** prev = NULL;
+ Sint64 reds;
+
+ if (p->flags & (F_DISABLE_GC|F_DELAY_GC))
+ ERTS_INTERNAL_ERROR("GC disabled");
+
+ /*
+ * First an ordinary major collection...
+ */
+
+ p->flags |= F_NEED_FULLSWEEP;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~F_DIRTY_CLA;
+ else {
+ ygen_usage = young_gen_usage(p);
+ check_for_possibly_long_gc(p,
+ (byte_lit_size/sizeof(Uint)
+ + 2*ygen_usage));
+ if (p->flags & F_DIRTY_MAJOR_GC) {
+ p->flags |= F_DIRTY_CLA;
+ return 10;
+ }
+ }
+#endif
+
+ reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
+ p->arg_reg, p->arity, fcalls,
+ ygen_usage);
+
+ ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
- if (p->flags & F_DISABLE_GC)
- return;
/*
* Set GC state.
*/
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
/*
- * We assume that the caller has already done a major collection
- * (which has discarded the old heap), so that we don't have to cope
- * with pointer to literals on the old heap. We will now allocate
- * an old heap to contain the literals.
+     * We have just done a major collection (which has discarded the old
+     * heap), so we do not have to cope with pointers to literals on the
+     * old heap. We will now allocate an old heap to contain the literals.
*/
ASSERT(p->old_heap == 0); /* Must NOT have an old heap yet. */
@@ -1070,7 +1173,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1081,7 +1184,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ move_cons(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1159,15 +1262,21 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* Restore status.
*/
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0);
+ if (reds > INT_MAX)
+ return INT_MAX;
+ return (int) reds;
}
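erts_garbage_collect_literals() now reports its cost in reductions; the Sint64 sum is saturated at INT_MAX before being returned through the int-typed interface. A tiny standalone sketch of that saturation, using a hypothetical helper name:

    /* Sketch of the clamp above: a 64-bit reduction count is saturated
     * at INT_MAX before being returned as an int. */
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int saturate_to_int(int64_t v)
    {
        return v > INT_MAX ? INT_MAX : (int)v;
    }

    int main(void)
    {
        printf("%d %d\n", saturate_to_int(1000), saturate_to_int((int64_t)1 << 40));
        return 0;
    }
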
static int
minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
Uint mature_size = p->high_water - mature;
- Uint size_before = young_gen_usage(p);
+ Uint size_before = ygen_usage;
/*
* Check if we have gone past the max heap size limit
@@ -1370,9 +1479,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1385,9 +1494,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ move_cons(&ptr,val,&old_htop,g_ptr++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1429,9 +1538,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,n_hp++);
+ move_boxed(&ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1443,9 +1552,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,n_hp++);
+ move_cons(&ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1465,10 +1574,10 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
*origptr = val;
mb->base = binary_bytes(val);
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,origptr);
+ move_boxed(&ptr,val,&old_htop,origptr);
mb->base = binary_bytes(mb->orig);
} else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(mb->orig);
}
}
@@ -1538,7 +1647,8 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
static int
major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Uint size_before, size_after, stack_size;
Eterm* n_heap;
@@ -1556,7 +1666,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
* to receive all live data.
*/
- size_before = young_gen_usage(p);
+ size_before = ygen_usage;
size_before += p->old_htop - p->old_heap;
stack_size = p->hend - p->stop;
@@ -1682,7 +1792,7 @@ full_sweep_heaps(Process *p,
Eterm* ptr;
Eterm val;
Eterm gval = *g_ptr;
-
+
switch (primary_tag(gval)) {
case TAG_PRIMARY_BOXED: {
@@ -1692,7 +1802,7 @@ full_sweep_heaps(Process *p,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1705,7 +1815,7 @@ full_sweep_heaps(Process *p,
if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1994,7 +2104,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -2006,7 +2116,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -2019,7 +2129,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (header_is_bin_matchstate(gval)) {
ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
ErlBinMatchBuffer *mb = &(ms->mb);
- Eterm* origptr;
+ Eterm* origptr;
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
@@ -2027,7 +2137,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ERTS_IS_IN_SWEEP_AREA(*origptr, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2090,7 +2200,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
ASSERT(is_boxed(val));
*heap_ptr++ = val;
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,heap_ptr++);
+ move_boxed(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2102,7 +2212,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,htop,heap_ptr++);
+ move_cons(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2123,7 +2233,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,origptr);
+ move_boxed(&ptr,val,&htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2156,11 +2266,11 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, n_htop, &dummy_ref);
+ move_boxed(&ptr, val, &n_htop, &dummy_ref);
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, n_htop, &dummy_ref);
+ move_cons(&ptr, val, &n_htop, &dummy_ref);
ptr += 2;
}
}
@@ -2226,32 +2336,27 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hp++ = val;
break;
case TAG_PRIMARY_LIST:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,list_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_BOXED:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,boxed_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_HEADER:
*hp++ = val;
switch (val & _HEADER_SUBTAG_MASK) {
case ARITYVAL_SUBTAG:
break;
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(fhp - 1))
+ goto the_default;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2261,6 +2366,7 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
+ the_default:
cpy_sz = header_arity(val);
cpy_words:
@@ -2436,17 +2542,10 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
}
/*
- * If a NIF has saved arguments, they need to be added
+ * If a NIF or BIF has saved arguments, they need to be added
*/
- if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
- Eterm* argv;
- int argc;
- if (erts_setup_nif_gc(p, &argv, &argc)) {
- roots[n].v = argv;
- roots[n].sz = argc;
- n++;
- }
- }
+ if (erts_setup_nif_export_rootset(p, &roots[n].v, &roots[n].sz))
+ n++;
ASSERT(n <= rootset->size);
@@ -2701,6 +2800,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
*prevppp = &pbp->next;
}
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+/*
+ * ERTS_MAGIC_REF_THING_HEADER only defined when there
+ * is a size difference between magic and ordinary references...
+ */
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_MAGIC_REF_THING_HEADER
+#else
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_REF_THING_HEADER
+#endif
+
static void
sweep_off_heap(Process *p, int fullsweep)
@@ -2725,7 +2834,7 @@ sweep_off_heap(Process *p, int fullsweep)
prev = &MSO(p).first;
ptr = MSO(p).first;
- /* Firts part of the list will reside on the (old) new-heap.
+ /* First part of the list will reside on the (old) new-heap.
* Keep if moved, otherwise deref.
*/
while (ptr) {
@@ -2733,7 +2842,8 @@ sweep_off_heap(Process *p, int fullsweep)
ASSERT(!ErtsInArea(ptr, oheap, oheap_sz));
*prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN: {
int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
if (to_new_heap) {
@@ -2742,13 +2852,28 @@ sweep_off_heap(Process *p, int fullsweep)
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
}
link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
- }
- else {
+ break;
+ }
+ case ERTS_USED_MAGIC_REF_THING_HEADER__: {
+ Uint size;
+ int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
+ ASSERT(is_magic_ref_thing(ptr));
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ size = (Uint) ((ErtsMRefThing *) ptr)->mb->orig_size;
+ if (to_new_heap)
+ bin_vheap += size / sizeof(Eterm);
+ else
+ BIN_OLD_VHEAP(p) += size / sizeof(Eterm); /* for binary gc (words)*/
+ /* fall through... */
+ }
+ default:
prev = &ptr->next;
ptr = ptr->next;
}
}
- else if (!ErtsInArea(ptr, oheap, oheap_sz)) {
+ else if (ErtsInArea(ptr, oheap, oheap_sz))
+ break; /* and let old-heap loop continue */
+ else {
/* garbage */
switch (thing_subtag(ptr->thing_word)) {
case REFC_BINARY_SUBTAG:
@@ -2767,13 +2892,21 @@ sweep_off_heap(Process *p, int fullsweep)
}
break;
}
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(ptr));
+ bptr = ((ErtsMRefThing *) ptr)->mb;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0)
+ erts_bin_free((Binary *) bptr);
+ break;
+ }
default:
ASSERT(is_external_header(ptr->thing_word));
erts_deref_node_entry(((ExternalThing*)ptr)->node);
}
*prev = ptr = ptr->next;
}
- else break; /* and let old-heap loop continue */
}
/* The rest of the list resides on old-heap, and we just did a
@@ -2782,15 +2915,24 @@ sweep_off_heap(Process *p, int fullsweep)
while (ptr) {
ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN:
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
link_live_proc_bin(&shrink, &prev, &ptr, 0);
- }
- else {
- ASSERT(is_fun_header(ptr->thing_word) ||
- is_external_header(ptr->thing_word));
- prev = &ptr->next;
- ptr = ptr->next;
+ break;
+ case ERTS_USED_MAGIC_REF_THING_HEADER__:
+ ASSERT(is_magic_ref_thing(ptr));
+ BIN_OLD_VHEAP(p) +=
+ (((Uint) ((ErtsMRefThing *) ptr)->mb->orig_size)
+ / sizeof(Eterm)); /* for binary gc (words)*/
+ /* fall through... */
+ default:
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word)
+ || is_magic_ref_thing(ptr));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ break;
}
}
@@ -2883,6 +3025,9 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
}
tari = thing_arityval(val);
switch (thing_subtag(val)) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hp))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2998,6 +3143,8 @@ static void ERTS_INLINE
offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj)
{
+ Eterm *v;
+ Uint sz;
if (p->dictionary) {
offset_heap(ERTS_PD_START(p->dictionary),
ERTS_PD_SIZE(p->dictionary),
@@ -3018,12 +3165,8 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_heap_ptr(objv, nobj, offs, area, area_size);
}
offset_off_heap(p, offs, area, area_size);
- if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
- Eterm* argv;
- int argc;
- if (erts_setup_nif_gc(p, &argv, &argc))
- offset_heap_ptr(argv, argc, offs, area, area_size);
- }
+ if (erts_setup_nif_export_rootset(p, &v, &sz))
+ offset_heap_ptr(v, sz, offs, area, area_size);
}
static void
@@ -3082,7 +3225,7 @@ reply_gc_info(void *vgcirp)
if (hpp)
ref_copy = STORE_NC(hpp, ohp, gcirp->ref);
else
- *szp += REF_THING_SIZE;
+ *szp += ERTS_REF_THING_SIZE;
msg = erts_bld_tuple(hpp, szp, 3,
make_small(esdp->no),
@@ -3113,6 +3256,39 @@ reply_gc_info(void *vgcirp)
gcireq_free(vgcirp);
}
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig) {
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ ErlHeapBin *hb = (ErlHeapBin *)htop;
+ Eterm *real_bin;
+ byte *bs;
+
+ real_bin = binary_val(follow_moved(sb->orig, (Eterm)0));
+
+ if (*real_bin == HEADER_PROC_BIN) {
+ bs = ((ProcBin *) real_bin)->bytes + sb->offs;
+ } else {
+ bs = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + sb->offs;
+ }
+
+ hb->thing_word = header_heap_bin(sb->size);
+ hb->size = sb->size;
+ sys_memcpy((byte *)hb->data, bs, sb->size);
+
+ gval = make_boxed(htop);
+ *orig = gval;
+ *ptr = gval;
+
+ ptr += ERL_SUB_BIN_SIZE;
+ htop += heap_bin_size(sb->size);
+
+ *hpp = htop;
+ *pp = ptr;
+}
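erts_sub_binary_to_heap_binary() backs the sub-binary handling added to move_boxed() in erl_gc.h below: a small, byte-aligned, non-writable sub-binary is rewritten during the copy into a heap binary that holds its own bytes, so the moved term no longer keeps a reference into the (possibly much larger) original binary. A standalone sketch of that idea with simplified, hypothetical types:

    /* Standalone sketch (hypothetical types): when a referenced slice is
     * small enough, copy its bytes into an inline buffer so the reference
     * to the large backing binary can be dropped. */
    #include <stdio.h>
    #include <string.h>

    #define SMALL_LIMIT 24                      /* e.g. 3 words of payload on 64-bit */

    struct heap_bin { size_t size; unsigned char data[SMALL_LIMIT]; };

    static int try_inline(struct heap_bin *hb,
                          const unsigned char *big, size_t offs, size_t size)
    {
        if (size > SMALL_LIMIT)
            return 0;                           /* keep pointing into the big buffer */
        hb->size = size;
        memcpy(hb->data, big + offs, size);     /* detach from the backing buffer */
        return 1;
    }

    int main(void)
    {
        unsigned char big[1024] = "....hello";
        struct heap_bin hb;
        if (try_inline(&hb, big, 4, 5))
            printf("inlined %zu bytes: %.5s\n", hb.size, hb.data);
        return 0;
    }
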
+
+
Eterm
erts_gc_info_request(Process *c_p)
{
@@ -3346,8 +3522,8 @@ erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags)
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-static int
-within2(Eterm *ptr, Process *p, Eterm *real_htop)
+int
+erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm *real_htop)
{
ErlHeapFragment* bp;
ErtsMessage* mp;
@@ -3393,12 +3569,6 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
return 0;
}
-int
-within(Eterm *ptr, Process *p)
-{
- return within2(ptr, p, NULL);
-}
-
#endif
#ifdef ERTS_OFFHEAP_DEBUG
@@ -3439,6 +3609,10 @@ erts_check_off_heap2(Process *p, Eterm *htop)
case EXTERNAL_REF_SUBTAG:
refc = erts_smp_refc_read(&u.ext->node->refc, 1);
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ refc = erts_refc_read(&u.mref->mb->refc, 1);
+ break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
}
@@ -3453,7 +3627,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
}
}
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 2521379664..414aff1e06 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -25,48 +25,73 @@
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
+#define ERTS_POTENTIALLY_LONG_GC_HSIZE (128*1024) /* Words */
+
#include "erl_map.h"
+#include "erl_fun.h"
+#include "erl_bits.h"
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig);
-#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- \
- HTOP[0] = CAR; /* copy car */ \
- HTOP[1] = PTR[1]; /* copy cdr */ \
- gval = make_list(HTOP); /* new location */ \
- *ORIG = gval; /* redirect original reference */ \
- PTR[0] = THE_NON_VALUE; /* store forwarding indicator */ \
- PTR[1] = gval; /* store forwarding address */ \
- HTOP += 2; /* update tospace htop */ \
-} while(0)
-
-#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- Sint nelts; \
- \
- ASSERT(is_header(HDR)); \
- nelts = header_arity(HDR); \
- switch ((HDR) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case MAP_SUBTAG: \
- if (is_flatmap_header(HDR)) nelts+=flatmap_get_size(PTR) + 1; \
- else nelts += hashmap_bitcount(MAP_HEADER_VAL(HDR)); \
- break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
- } \
- gval = make_boxed(HTOP); \
- *ORIG = gval; \
- *HTOP++ = HDR; \
- *PTR++ = gval; \
- while (nelts--) *HTOP++ = *PTR++; \
-} while(0)
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig)
+{
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
+
+ htop[0] = car; /* copy car */
+ htop[1] = ptr[1]; /* copy cdr */
+ gval = make_list(htop); /* new location */
+ *orig = gval; /* redirect original reference */
+ ptr[0] = THE_NON_VALUE; /* store forwarding indicator */
+ ptr[1] = gval; /* store forwarding address */
+ *hpp += 2; /* update tospace htop */
+}
+#endif
-#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-int within(Eterm *ptr, Process *p);
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig)
+{
+ Eterm gval;
+ Sint nelts;
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+
+ ASSERT(is_header(hdr));
+ nelts = header_arity(hdr);
+ switch ((hdr) & _HEADER_SUBTAG_MASK) {
+ case SUB_BINARY_SUBTAG:
+ {
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ /* convert sub-binary to heap-binary if applicable */
+ if (sb->bitsize == 0 && sb->bitoffs == 0 &&
+ sb->is_writable == 0 && sb->size <= sizeof(Eterm) * 3) {
+ erts_sub_binary_to_heap_binary(pp, hpp, orig);
+ return;
+ }
+ }
+ nelts++;
+ break;
+ case MAP_SUBTAG:
+ if (is_flatmap_header(hdr)) nelts+=flatmap_get_size(ptr) + 1;
+ else nelts += hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(ptr))->num_free+1; break;
+ }
+ gval = make_boxed(htop);
+ *orig = gval;
+ *htop++ = hdr;
+ *ptr++ = gval;
+ while (nelts--) *htop++ = *ptr++;
+
+ *hpp = htop;
+ *pp = ptr;
+}
#endif
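move_cons() and move_boxed() are the former MOVE_CONS/MOVE_BOXED macros turned into inline functions; both implement the usual copying-GC forwarding idiom: the term is copied to the to-space and the from-space cell is overwritten with a marker plus the new address, so any later reference to the old cell is redirected to the single copy instead of being copied again. A minimal standalone sketch of that idiom for a two-word cons cell, using simplified hypothetical types (the real move_boxed additionally handles headers, maps, funs and the sub-binary conversion above):

    /* Minimal forwarding-pointer sketch (hypothetical types). */
    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t Term;
    #define NON_VALUE ((Term)0)              /* stand-in for THE_NON_VALUE */
    #define IS_MOVED(t) ((t) == NON_VALUE)

    static void move_cons_sketch(Term *ptr, Term **htop, Term *orig)
    {
        Term *to = *htop;
        to[0] = ptr[0];                      /* copy car */
        to[1] = ptr[1];                      /* copy cdr */
        *orig  = (Term)to;                   /* redirect original reference */
        ptr[0] = NON_VALUE;                  /* forwarding indicator */
        ptr[1] = (Term)to;                   /* forwarding address */
        *htop += 2;                          /* bump to-space top */
    }

    int main(void)
    {
        Term from[2] = {11, 22}, to_space[2], *htop = to_space, root = (Term)from;
        move_cons_sketch(from, &htop, &root);
        assert(IS_MOVED(from[0]) && root == (Term)to_space && to_space[0] == 11);
        return 0;
    }
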
#define ErtsInYoungGen(TPtr, Ptr, OldHeap, OldHeapSz) \
@@ -141,9 +166,10 @@ void erts_garbage_collect_hibernate(struct process* p);
Eterm erts_gc_after_bif_call_lhf(struct process* p, ErlHeapFragment *live_hf_end,
Eterm result, Eterm* regs, Uint arity);
Eterm erts_gc_after_bif_call(struct process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(struct process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh);
+int erts_garbage_collect_literals(struct process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh,
+ int fcalls);
Uint erts_next_heap_size(Uint, Uint);
Eterm erts_heap_sizes(struct process* p);
@@ -154,5 +180,8 @@ void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
void erts_deallocate_young_generation(Process *c_p);
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+int erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm* real_htop);
+#endif
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 647fa26811..26be8c7edf 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1771,7 +1771,7 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
esdp = erts_proc_sched_data(c_p);
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
ASSERT(erts_get_ref_numbers_thr_id(
@@ -1939,7 +1939,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
Eterm *hp_end;
#endif
- hsz = REF_THING_SIZE;
+ hsz = ERTS_REF_THING_SIZE;
if (async) {
refn = trefn; /* timer ref */
hsz += 4; /* 3-tuple */
@@ -1973,7 +1973,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
refn[1],
refn[2]);
ref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
msg = (async
? TUPLE3(hp, (cancel
@@ -2087,7 +2087,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
ErlOffHeap *ohp;
- hsz = 4 + REF_THING_SIZE;
+ hsz = 4 + ERTS_REF_THING_SIZE;
if (time_left > (Sint64) MAX_SMALL)
hsz += ERTS_SINT64_HEAP_SIZE(time_left);
@@ -2103,7 +2103,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
trefn[1],
trefn[2]);
tref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
if (time_left < 0)
res = am_false;
@@ -2189,7 +2189,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
Eterm *hp, rref;
Uint32 *rrefn;
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
rref = erts_sched_make_ref_in_buffer(esdp, hp);
rrefn = internal_ref_numbers(rref);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index f65a06c85f..b2c307e826 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -111,10 +111,12 @@ const int etp_lock_check = 1;
#else
const int etp_lock_check = 0;
#endif
-#ifdef WORDS_BIGENDIAN
-const int etp_big_endian = 1;
+const int etp_endianness = ERTS_ENDIANNESS;
+const Eterm etp_ref_header = ERTS_REF_THING_HEADER;
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+const Eterm etp_magic_ref_header = ERTS_MAGIC_REF_THING_HEADER;
#else
-const int etp_big_endian = 0;
+const Eterm etp_magic_ref_header = ERTS_REF_THING_HEADER;
#endif
const Eterm etp_the_non_value = THE_NON_VALUE;
#ifdef ERTS_HOLE_MARKER
@@ -772,6 +774,8 @@ early_init(int *argc, char **argv) /*
H_MAX_SIZE = H_DEFAULT_MAX_SIZE;
H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG;
+ erts_term_init();
+
erts_initialized = 0;
erts_use_sender_punish = 1;
@@ -1143,6 +1147,8 @@ early_init(int *argc, char **argv) /*
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
+#else
+ erts_no_schedulers = 1;
#endif
#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
@@ -2265,7 +2271,6 @@ erl_start(int argc, char **argv)
ASSERT(erts_code_purger && erts_code_purger->common.id == pid);
erts_proc_inc_refc(erts_code_purger);
-#ifdef ERTS_NEW_PURGE_STRATEGY
pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector");
erts_literal_area_collector
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
@@ -2273,7 +2278,6 @@ erl_start(int argc, char **argv)
ASSERT(erts_literal_area_collector
&& erts_literal_area_collector->common.id == pid);
erts_proc_inc_refc(erts_literal_area_collector);
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker");
@@ -2303,7 +2307,7 @@ erl_start(int argc, char **argv)
#endif
set_main_stack_size();
erts_sched_init_time_sup(esdp);
- process_main();
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 5d4823c077..6ff9aea5ab 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -89,6 +89,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "hipe_mfait_lock", NULL },
#endif
{ "nodes_monitors", NULL },
+#ifdef ERTS_SMP
+ { "resource_monitors", "address" },
+#endif
{ "driver_list", NULL },
{ "proc_link", "pid" },
{ "proc_msgq", "pid" },
@@ -113,10 +116,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "export_tab", NULL },
{ "fun_tab", NULL },
{ "environ", NULL },
-#ifdef ERTS_NEW_PURGE_STRATEGY
{ "release_literal_areas", NULL },
#endif
-#endif
{ "efile_drv", "address" },
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
@@ -158,6 +159,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "tracer_mtx", NULL },
{ "port_table", NULL },
#endif
+ { "magic_ref_table", "address" },
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 481e92b2cd..6354fc8663 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -55,11 +55,11 @@ static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
/* local functions */
static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
+ ethr_mutex_lock(&lcnt_data_lock);
}
static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
+ ethr_mutex_unlock(&lcnt_data_lock);
}
const int log2_tab64[64] = {
@@ -159,7 +159,7 @@ static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
lcnt_thread_data[eltd->id] = eltd;
return eltd;
-}
+}
static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
@@ -254,9 +254,9 @@ void erts_lcnt_init() {
/* init lock */
if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
- /* init tsd */
+ /* init tsd */
lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
+ ethr_tsd_key_create(&lcnt_thr_data_key, "lcnt_data");
lcnt_lock();
@@ -352,11 +352,11 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
/* interface to erl_threads.h */
/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
+void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
erts_lcnt_init_lock_x(lock, name, flag, NIL);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
+void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
if (name == NULL) { ERTS_LCNT_CLEAR_FLAG(lock); return; }
lcnt_lock();
@@ -382,7 +382,10 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
lcnt_unlock();
}
-/* init empty, instead of zero struct */
+
+/* init empty, instead of zero struct
+ * used by the process lock probes
+ */
void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
lock->next = NULL;
lock->prev = NULL;
@@ -444,7 +447,7 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
}
/* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ /* we cannot acquire r_lock if w_lock is taken */
if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
@@ -561,7 +564,7 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
#ifdef DEBUG
{
- erts_aint_t w_state;
+ erts_aint_t w_state;
erts_aint_t flowstate;
/* flowstate */
@@ -647,7 +650,7 @@ Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
return prev;
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
+Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
Uint16 prev;
prev = (erts_lcnt_rt_options & opt);
erts_lcnt_rt_options &= ~opt;
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 3e8dcefe69..71cd73ee27 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -129,7 +129,7 @@ typedef struct {
typedef struct erts_lcnt_lock_stats_s {
/* "tries" and "colls" needs to be atomic since
- * trylock busy does not aquire a lock and there
+ * trylock busy does not acquire a lock and there
* is no post action to rectify the situation
*/
@@ -148,13 +148,13 @@ typedef struct erts_lcnt_lock_stats_s {
typedef struct erts_lcnt_lock_s {
char *name; /* lock name */
Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+ Eterm id; /* id if possible */
#ifdef DEBUG
ethr_atomic_t flowstate;
#endif
- /* lock states */
+ /* lock states */
ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 979a0040b0..a80f5d6a16 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -91,7 +91,7 @@ static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_
static Export hashmap_merge_trap_export;
static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1);
static Uint hashmap_subtree_size(Eterm node);
-static Eterm hashmap_to_list(Process *p, Eterm map);
+static Eterm hashmap_to_list(Process *p, Eterm map, Sint n);
static Eterm hashmap_keys(Process *p, Eterm map);
static Eterm hashmap_values(Process *p, Eterm map);
static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value);
@@ -161,13 +161,58 @@ BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
BIF_RET(res);
} else if (is_hashmap(BIF_ARG_1)) {
- return hashmap_to_list(BIF_P, BIF_ARG_1);
+ return hashmap_to_list(BIF_P, BIF_ARG_1, -1);
}
BIF_P->fvalue = BIF_ARG_1;
BIF_ERROR(BIF_P, BADMAP);
}
+/* erts_internal:maps_to_list/2
+ *
+ * This function should be removed once iterators are in place.
+ * Never document it.
+ * Never encourage its usage.
+ *
+ * A negative value in ARG 2 means the entire map.
+ */
+
+BIF_RETTYPE erts_internal_maps_to_list_2(BIF_ALIST_2) {
+ Sint m;
+ if (term_to_Sint(BIF_ARG_2, &m)) {
+ if (is_flatmap(BIF_ARG_1)) {
+ Uint n;
+ Eterm* hp;
+ Eterm *ks,*vs, res, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(BIF_P, (2 + 3) * n);
+ res = NIL;
+
+ while(n--) {
+ tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+
+ BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ return hashmap_to_list(BIF_P, BIF_ARG_1, m);
+ }
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
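The helper takes the requested number of entries in ARG 2 and clamps it against the map size, with a negative value meaning the whole map. A small standalone sketch of that limit convention, using a hypothetical clamp_limit() helper:

    /* Sketch (hypothetical helper) of the limit convention used above:
     * a negative limit means "all elements", otherwise take at most n. */
    #include <stdio.h>

    static long clamp_limit(long limit, long available)
    {
        if (limit < 0 || limit > available)
            return available;
        return limit;
    }

    int main(void)
    {
        printf("%ld %ld %ld\n",
               clamp_limit(-1, 10),   /* 10: entire map */
               clamp_limit(3, 10),    /* 3              */
               clamp_limit(42, 10));  /* 10             */
        return 0;
    }
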
+
+
/* maps:find/2
* return value if key *matches* a key in the map
*/
@@ -1188,16 +1233,17 @@ typedef struct HashmapMergeContext_ {
#endif
} HashmapMergeContext;
-static void hashmap_merge_ctx_destructor(Binary* ctx_bin)
+static int hashmap_merge_ctx_destructor(Binary* ctx_bin)
{
HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
PSTACK_DESTROY_SAVED(&ctx->pstack);
+ return 1;
}
BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) {
- Binary* ctx_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary* ctx_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
@@ -1453,9 +1499,9 @@ trap: /* Yield */
hashmap_merge_ctx_destructor);
ctx = ERTS_MAGIC_BIN_DATA(ctx_b);
sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
ASSERT(ctx->trap_bin == THE_NON_VALUE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), ctx_b);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), ctx_b);
erts_set_gc_state(p, 0);
}
@@ -1916,15 +1962,22 @@ BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
BIF_ERROR(BIF_P, BADMAP);
}
-static Eterm hashmap_to_list(Process *p, Eterm node) {
+static Eterm hashmap_to_list(Process *p, Eterm node, Sint m) {
DECLARE_WSTACK(stack);
Eterm *hp, *kv;
- Eterm res = NIL;
+ Eterm tup, res = NIL;
+ Uint n = hashmap_size(node);
- hp = HAlloc(p, hashmap_size(node) * (2 + 3));
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(p, n * (2 + 3));
hashmap_iterator_init(&stack, node, 0);
- while ((kv=hashmap_iterator_next(&stack)) != NULL) {
- Eterm tup = TUPLE2(hp, CAR(kv), CDR(kv));
+ while (n--) {
+ kv = hashmap_iterator_next(&stack);
+ ASSERT(kv != NULL);
+ tup = TUPLE2(hp, CAR(kv), CDR(kv));
hp += 3;
res = CONS(hp, tup, res);
hp += 2;
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index fc0aaed18a..1f270eb55f 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -247,6 +247,17 @@ BIF_RETTYPE math_pow_2(BIF_ALIST_2)
return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2);
}
+BIF_RETTYPE math_ceil_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, ceil, BIF_ARG_1);
+}
+BIF_RETTYPE math_floor_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, floor, BIF_ARG_1);
+}
-
+BIF_RETTYPE math_fmod_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, fmod, BIF_ARG_1, BIF_ARG_2);
+}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 019e079e67..f181c1e3cb 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -176,6 +176,11 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ if (erts_refc_dectest(&u.mref->mb->refc, 0) == 0)
+ erts_bin_free((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
erts_deref_node_entry(u.ext->node);
@@ -285,9 +290,11 @@ erts_queue_dist_message(Process *rcvr,
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (rcvr_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ ErtsProcLocks unlocks =
+ rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
+ if (unlocks) {
+ erts_smp_proc_unlock(rcvr, unlocks);
+ need_locks |= unlocks;
}
erts_smp_proc_lock(rcvr, need_locks);
}
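The rewritten branch generalizes the old STATUS-only special case: before blocking on the message-queue lock, every held process lock that is ordered after it (ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ)) is released and then reacquired together with it, which keeps acquisition in the defined lock order and avoids deadlock. A standalone pthread sketch of the same trylock/back-off pattern, with hypothetical lock names and a fixed msgq-before-status ordering:

    /* Sketch: msgq_lock is ordered before status_lock; a thread already
     * holding status_lock must not block on msgq_lock directly. */
    #include <pthread.h>

    static pthread_mutex_t msgq_lock   = PTHREAD_MUTEX_INITIALIZER; /* earlier in order */
    static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER; /* later in order   */

    /* Caller holds status_lock and also needs msgq_lock. */
    static void lock_msgq_holding_status(void)
    {
        if (pthread_mutex_trylock(&msgq_lock) != 0) {
            pthread_mutex_unlock(&status_lock);   /* back off the later lock       */
            pthread_mutex_lock(&msgq_lock);       /* block on the earlier one      */
            pthread_mutex_lock(&status_lock);     /* retake the later one in order */
        }
    }

    int main(void)
    {
        pthread_mutex_lock(&status_lock);
        lock_msgq_holding_status();
        pthread_mutex_unlock(&msgq_lock);
        pthread_mutex_unlock(&status_lock);
        return 0;
    }
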
@@ -406,7 +413,7 @@ queue_messages(Process* receiver,
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
goto exiting;
- need_locks = receiver_locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
if (need_locks) {
erts_smp_proc_unlock(receiver, need_locks);
}
@@ -696,6 +703,9 @@ erts_send_message(Process* sender,
erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
erts_shcopy_t info;
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
#ifdef USE_VM_PROBES
@@ -725,7 +735,7 @@ erts_send_message(Process* sender,
*/
if (have_seqtrace(stoken)) {
seq_trace_update_send(sender);
- seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
}
@@ -741,7 +751,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -760,7 +770,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
if (is_immed(stoken))
token = stoken;
@@ -796,7 +806,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -810,7 +820,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
}
#ifdef USE_VM_PROBES
@@ -998,8 +1008,8 @@ erts_move_messages_off_heap(Process *c_p)
hp = hfrag->mem;
if (is_not_immed(ERL_MESSAGE_TERM(mp)))
ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
- msg_sz, &hp,
- &hfrag->off_heap);
+ msg_sz, &hp,
+ &hfrag->off_heap);
if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
token_sz, &hp,
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index c207dea10f..6dee1d5ef3 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -24,7 +24,7 @@
* key in the monitor case and the pid of the linked process as key in the
* link case. In lookups, the order of the references is somewhat special. Local
* references are strictly smaller than remote references and are sorted
- * by inlined comparision functionality. Remote references are handled by the
+ * by inlined comparison functionality. Remote references are handled by the
* usual cmp function.
* Each Monitor is tagged with different tags depending on which end of the
* monitor it is.
@@ -45,6 +45,7 @@
#include "bif.h"
#include "big.h"
#include "erl_monitors.h"
+#include "erl_bif_unique.h"
#define STACK_NEED 50
#define MAX_MONITORS 0xFFFFFFFFUL
@@ -79,7 +80,24 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
b2 = boxed_val(ref2);
if (is_ref_thing_header(*b1)) {
if (is_ref_thing_header(*b2)) {
- return memcmp(b1+1,b2+1,ERTS_REF_WORDS*sizeof(Uint));
+ Uint32 *num1, *num2;
+ if (is_ordinary_ref_thing(b1)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b1;
+ num1 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b1;
+ num1 = mrtp->mb->refn;
+ }
+ if (is_ordinary_ref_thing(b2)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b2;
+ num2 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b2;
+ num2 = mrtp->mb->refn;
+ }
+ return erts_internal_ref_number_cmp(num1, num2);
}
return -1;
}
@@ -91,13 +109,14 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
#define CP_LINK_VAL(To, Hp, From) \
do { \
- if (IS_CONST(From)) \
+ if (is_immed(From)) \
(To) = (From); \
else { \
Uint i__; \
Uint len__; \
ASSERT((Hp)); \
- ASSERT(is_internal_ref((From)) || is_external((From))); \
+ ASSERT(is_internal_ordinary_ref((From)) \
+ || is_external((From))); \
(To) = make_boxed((Hp)); \
len__ = thing_arityval(*boxed_val((From))) + 1; \
for(i__ = 0; i__ < len__; i__++) \
@@ -109,15 +128,15 @@ do { \
} \
} while (0)
-static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
+static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm name)
{
Uint mon_size = ERTS_MONITOR_SIZE;
ErtsMonitor *n;
Eterm *hp;
mon_size += NC_HEAP_SIZE(ref);
- if (!IS_CONST(pid)) {
- mon_size += NC_HEAP_SIZE(pid);
+ if (type != MON_NIF_TARGET && is_not_immed(entity)) {
+ mon_size += NC_HEAP_SIZE(entity);
}
if (mon_size <= ERTS_MONITOR_SH_SIZE) {
@@ -135,8 +154,11 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
n->type = (Uint16) type;
n->balance = 0; /* Always the same initial value */
n->name = name; /* atom() or [] */
- CP_LINK_VAL(n->ref, hp, ref); /*XXX Unneccesary check, never immediate*/
- CP_LINK_VAL(n->pid, hp, pid);
+ CP_LINK_VAL(n->ref, hp, ref); /*XXX Unnecessary check, never immediate*/
+ if (type == MON_NIF_TARGET)
+ n->u.resource = (ErtsResource*)entity;
+ else
+ CP_LINK_VAL(n->u.pid, hp, (Eterm)entity);
return n;
}
@@ -147,7 +169,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
ErtsLink *n;
Eterm *hp;
- if (!IS_CONST(pid)) {
+ if (is_not_immed(pid)) {
lnk_size += NC_HEAP_SIZE(pid);
}
@@ -206,16 +228,16 @@ void erts_destroy_monitor(ErtsMonitor *mon)
Uint mon_size = ERTS_MONITOR_SIZE;
ErlNode *node;
- ASSERT(!IS_CONST(mon->ref));
+ ASSERT(is_not_immed(mon->ref));
mon_size += NC_HEAP_SIZE(mon->ref);
if (is_external(mon->ref)) {
node = external_thing_ptr(mon->ref)->node;
erts_deref_node_entry(node);
}
- if (!IS_CONST(mon->pid)) {
- mon_size += NC_HEAP_SIZE(mon->pid);
- if (is_external(mon->pid)) {
- node = external_thing_ptr(mon->pid)->node;
+ if (mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid)) {
+ mon_size += NC_HEAP_SIZE(mon->u.pid);
+ if (is_external(mon->u.pid)) {
+ node = external_thing_ptr(mon->u.pid)->node;
erts_deref_node_entry(node);
}
}
@@ -234,7 +256,7 @@ void erts_destroy_link(ErtsLink *lnk)
ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);
- if (!IS_CONST(lnk->pid)) {
+ if (is_not_immed(lnk->pid)) {
lnk_size += NC_HEAP_SIZE(lnk->pid);
if (is_external(lnk->pid)) {
node = external_thing_ptr(lnk->pid)->node;
@@ -329,7 +351,7 @@ static void insertion_rotation(int dstack[], int dpos,
}
}
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name)
{
void *tstack[STACK_NEED];
@@ -339,12 +361,14 @@ void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
int state = 0;
ErtsMonitor **this = root;
Sint c;
+
+ ASSERT(is_internal_ordinary_ref(ref) || is_external_ref(ref));
dstack[0] = DIR_END;
for (;;) {
if (!*this) { /* Found our place */
state = 1;
- *this = create_monitor(type,ref,pid,name);
+ *this = create_monitor(type,ref,entity,name);
break;
} else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
/* go left */
@@ -914,8 +938,12 @@ static void erts_dump_monitors(ErtsMonitor *root, int indent)
if (root == NULL)
return;
erts_dump_monitors(root->right,indent+2);
- erts_printf("%*s[%b16d:%b16u:%T:%T:%T]\n", indent, "", root->balance,
- root->type, root->ref, root->pid, root->name);
+ erts_printf("%*s[%b16d:%b16u:%T:%T", indent, "", root->balance,
+ root->type, root->ref, root->name);
+ if (root->type == MON_NIF_TARGET)
+ erts_printf(":%p]\n", root->u.resource);
+ else
+ erts_printf(":%T]\n", root->u.pid);
erts_dump_monitors(root->left,indent+2);
}
@@ -1030,7 +1058,7 @@ void erts_one_link_size(ErtsLink *lnk, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_LINK_SIZE*sizeof(Uint);
- if(!IS_CONST(lnk->pid))
+ if(is_not_immed(lnk->pid))
*pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
erts_doforall_links(ERTS_LINK_ROOT(lnk),&erts_one_link_size,vpu);
@@ -1040,8 +1068,8 @@ void erts_one_mon_size(ErtsMonitor *mon, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_MONITOR_SIZE*sizeof(Uint);
- if(!IS_CONST(mon->pid))
- *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
- if(!IS_CONST(mon->ref))
+ if(mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid))
+ *pu += NC_HEAP_SIZE(mon->u.pid)*sizeof(Uint);
+ if(is_not_immed(mon->ref))
*pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
}
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index 9e2beedea3..f659829e6c 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -82,8 +82,9 @@
/* Type tags for monitors */
#define MON_ORIGIN 1
-#define MON_TARGET 3
-#define MON_TIME_OFFSET 7
+#define MON_TARGET 2
+#define MON_NIF_TARGET 3
+#define MON_TIME_OFFSET 4
/* Type tags for links */
#define LINK_PID 1 /* ...Or port */
@@ -91,7 +92,7 @@
/* Size of a monitor without heap, in words (fixalloc) */
#define ERTS_MONITOR_SIZE ((sizeof(ErtsMonitor) - sizeof(Uint))/sizeof(Uint))
-#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + REF_THING_SIZE)
+#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + ERTS_REF_THING_SIZE)
#define ERTS_LINK_SIZE ((sizeof(ErtsLink) - sizeof(Uint))/sizeof(Uint))
#define ERTS_LINK_SH_SIZE ERTS_LINK_SIZE /* Size of fix-alloced links */
@@ -105,11 +106,15 @@ typedef struct erts_monitor_or_link {
typedef struct erts_monitor {
struct erts_monitor *left, *right;
Sint16 balance;
- Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_TIME_OFFSET */
+ Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_NIF_TARGET | MON_TIME_OFFSET */
Eterm ref;
- Eterm pid; /* In case of distributed named monitor, this is the
- nodename atom in MON_ORIGIN process, otherwise a pid or
- , in case of a MON_TARGET, a port */
+ union {
+ Eterm pid; /* In case of distributed named monitor, this is the
+ * nodename atom in MON_ORIGIN process, otherwise a pid or,
+ * in case of a MON_TARGET, a port
+ */
+ struct ErtsResource_* resource; /* MON_NIF_TARGET */
+ }u;
Eterm name; /* When monitoring a named process: atom() else [] */
Uint heap[1]; /* Larger in reality */
} ErtsMonitor;
@@ -144,7 +149,7 @@ Uint erts_tot_link_lh_size(void);
/* Prototypes */
void erts_destroy_monitor(ErtsMonitor *mon);
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name);
ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref);
ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref);
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 7ddf49937f..1c3160efaf 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -66,9 +66,9 @@ static Uint msacc_unmanaged_count = 0;
#endif
#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
-#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
#else
-#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + ERTS_REF_THING_SIZE)
#endif
/* we have to split initiation as atoms are not inited in early init */
@@ -137,8 +137,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
void erts_msacc_set_bif_state(ErtsMsAcc *__erts_msacc_cache, Eterm mod, void *fn) {
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
- if (fn == &FuncAddr) { \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
+ if (fn == &BifFuncAddr) { \
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATIC_STATE_COUNT + Num); \
} else
#include "erl_bif_list.h"
@@ -212,7 +212,7 @@ typedef struct {
int action;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsMSAccReq;
@@ -245,7 +245,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
- hp = erts_produce_heap(&factory, REF_THING_SIZE + 3 /* tuple */, 0);
+ hp = erts_produce_heap(&factory, ERTS_REF_THING_SIZE + 3 /* tuple */, 0);
ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
msg = erts_msacc_gather_stats(msacc, &factory);
msg = TUPLE2(hp, ref_copy, msg);
@@ -255,7 +255,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
erts_factory_close(&factory);
} else {
ErlOffHeap *ohp = NULL;
- msgp = erts_alloc_message_heap(rp, &rp_locks, REF_THING_SIZE, &hp, &ohp);
+ msgp = erts_alloc_message_heap(rp, &rp_locks, ERTS_REF_THING_SIZE, &hp, &ohp);
msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
}
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index 4c8e1c8e22..d64ef8c8b9 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -22,7 +22,7 @@
#define ERL_MSACC_H__
/* Can be enabled/disabled via configure */
-#if ERTS_ENABLE_MSACC == 2
+#if defined(ERTS_ENABLE_MSACC) && ERTS_ENABLE_MSACC == 2
#define ERTS_MSACC_EXTENDED_STATES 1
#endif
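The added defined() test matters because, inside a #if expression, an identifier that is not defined is replaced by 0, so the old form quietly selected the disabled branch and triggered warnings under -Wundef when the configure option was off; checking defined(ERTS_ENABLE_MSACC) first states the intent. A small self-contained illustration with a hypothetical FEATURE_LEVEL macro:

    /* With FEATURE_LEVEL undefined, "#if FEATURE_LEVEL == 2" would still
     * compile (undefined identifiers count as 0) but warn under -Wundef;
     * the defined() guard makes the intent explicit and stays silent. */
    #include <stdio.h>

    #if defined(FEATURE_LEVEL) && FEATURE_LEVEL == 2
    # define FEATURE_MSG "extended feature enabled"
    #else
    # define FEATURE_MSG "extended feature disabled"
    #endif

    int main(void) { puts(FEATURE_MSG); return 0; }
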
@@ -66,7 +66,7 @@
#define ERTS_MSACC_STATE_COUNT 7
-#if ERTS_MSACC_STATE_STRINGS && ERTS_ENABLE_MSACC
+#if defined(ERTS_MSACC_STATE_STRINGS) && defined(ERTS_ENABLE_MSACC)
static char *erts_msacc_states[] = {
"aux",
"check_io",
@@ -104,7 +104,7 @@ static char *erts_msacc_states[] = {
#define ERTS_MSACC_STATE_COUNT ERTS_MSACC_STATIC_STATE_COUNT
#endif
-#if ERTS_MSACC_STATE_STRINGS
+#ifdef ERTS_MSACC_STATE_STRINGS
static char *erts_msacc_states[] = {
"alloc",
"aux",
@@ -122,7 +122,7 @@ static char *erts_msacc_states[] = {
"sleep",
"timers"
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
,"bif_" #Mod "_" #Func "_" #Arity
#include "erl_bif_list.h"
#undef BIF_LIST
@@ -157,7 +157,7 @@ struct erl_msacc_t_ {
};
-#if ERTS_ENABLE_MSACC
+#ifdef ERTS_ENABLE_MSACC
#ifdef USE_THREADS
extern erts_tsd_key_t erts_msacc_key;
diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
new file mode 100644
index 0000000000..1bebc1eda4
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -0,0 +1,180 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+
+#include "global.h"
+#include "erl_process.h"
+#include "bif.h"
+#include "erl_nfunc_sched.h"
+#include "erl_trace.h"
+
+NifExport *
+erts_new_proc_nif_export(Process *c_p, int argc)
+{
+ size_t size;
+ int i;
+ NifExport *nep, *old_nep;
+
+ size = sizeof(NifExport) + (argc-1)*sizeof(Eterm);
+ nep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, size);
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++)
+ nep->exp.addressv[i] = &nep->exp.beam[0];
+
+ nep->argc = -1; /* unused marker */
+ nep->argv_size = argc;
+ nep->trace = NULL;
+ old_nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(c_p, nep);
+ if (old_nep) {
+ ASSERT(!nep->trace);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, old_nep);
+ }
+ return nep;
+}
+
+void
+erts_destroy_nif_export(Process *p)
+{
+ NifExport *nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nep) {
+ if (nep->m)
+ erts_nif_export_cleanup_nif_mod(nep);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, nep);
+ }
+}
+
+void
+erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ NifExportTrace *netp;
+ ASSERT(nep && nep->argc >= 0);
+ ASSERT(!nep->trace);
+ netp = erts_alloc(ERTS_ALC_T_NIF_EXP_TRACE,
+ sizeof(NifExportTrace));
+ netp->applying = applying;
+ netp->ep = ep;
+ netp->cp = cp;
+ netp->flags = flags;
+ netp->flags_meta = flags_meta;
+ netp->I = I;
+ netp->meta_tracer = NIL;
+ erts_tracer_update(&netp->meta_tracer, meta_tracer);
+ nep->trace = netp;
+}
+
+void
+erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep)
+{
+ NifExportTrace *netp = nep->trace;
+ nep->trace = NULL;
+ erts_bif_trace_epilogue(c_p, result, netp->applying, netp->ep,
+ netp->cp, netp->flags, netp->flags_meta,
+ netp->I, netp->meta_tracer);
+ erts_tracer_update(&netp->meta_tracer, NIL);
+ erts_free(ERTS_ALC_T_NIF_EXP_TRACE, netp);
+}
+
+NifExport *
+erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv)
+{
+ Process *used_proc;
+ ErtsSchedulerData *esdp;
+ Eterm* reg;
+ NifExport* nep;
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ if (dirty_shadow_proc) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = dirty_shadow_proc;
+ }
+ else {
+ esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = c_p;
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+
+ reg = esdp->x_reg_array;
+
+ if (mfa)
+ nep = erts_get_proc_nif_export(c_p, (int) mfa->arity);
+ else {
+ /* If no mfa, this is not the first schedule... */
+ nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(nep && nep->argc >= 0);
+ }
+
+ if (nep->argc < 0) {
+ /*
+ * First schedule; save things that might
+ * need to be restored...
+ */
+ for (i = 0; i < (int) mfa->arity; i++)
+ nep->argv[i] = reg[i];
+ nep->pc = pc;
+ nep->cp = c_p->cp;
+ nep->mfa = mfa;
+ nep->current = c_p->current;
+ ASSERT(argc >= 0);
+ nep->argc = (int) mfa->arity;
+ nep->m = NULL;
+
+ ASSERT(!erts_check_nif_export_in_area(c_p,
+ (char *) nep,
+ (sizeof(NifExport)
+ + (sizeof(Eterm)
+ *(nep->argc-1)))));
+ }
+ /* Copy new arguments into register array if necessary... */
+ if (reg != argv) {
+ for (i = 0; i < argc; i++)
+ reg[i] = argv[i];
+ }
+ ASSERT(is_atom(mod) && is_atom(func));
+ nep->exp.info.mfa.module = mod;
+ nep->exp.info.mfa.function = func;
+ nep->exp.info.mfa.arity = (Uint) argc;
+ nep->exp.beam[0] = (BeamInstr) instr; /* call_nif || apply_bif */
+ nep->exp.beam[1] = (BeamInstr) dfunc;
+ nep->func = ifunc;
+ used_proc->arity = argc;
+ used_proc->freason = TRAP;
+ used_proc->i = (BeamInstr*) nep->exp.addressv[0];
+ return nep;
+}
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
new file mode 100644
index 0000000000..55a3a6dbf6
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -0,0 +1,332 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NFUNC_SCHED_H__
+#define ERL_NFUNC_SCHED_H__
+
+#include "erl_process.h"
+#include "bif.h"
+#include "error.h"
+
+typedef struct {
+ int applying;
+ Export* ep;
+ BeamInstr *cp;
+ Uint32 flags;
+ Uint32 flags_meta;
+ BeamInstr* I;
+ ErtsTracer meta_tracer;
+} NifExportTrace;
+
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. A number of values are stored for error handling purposes
+ * only.
+ *
+ * 'argc' is >= 0 when NifExport is in use, and < 0 when not.
+ */
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m; /* NIF module, or NULL if BIF */
+ void *func; /* Indirect NIF or BIF to execute (may be unused) */
+ ErtsCodeMFA *current;/* Current as set when originally called */
+ NifExportTrace *trace;
+ /* --- The following is only used on error --- */
+ BeamInstr *pc; /* Program counter */
+ BeamInstr *cp; /* Continuation pointer */
+ ErtsCodeMFA *mfa; /* MFA of original call */
+ int argc; /* Number of arguments in original call */
+ int argv_size; /* Allocated size of argv */
+ Eterm argv[1]; /* Saved arguments from the original call */
+} NifExport;
+
+NifExport *erts_new_proc_nif_export(Process *c_p, int argc);
+void erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+void erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep);
+void erts_destroy_nif_export(Process *p);
+NifExport *erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv);
+void erts_nif_export_cleanup_nif_mod(NifExport *ep); /* erl_nif.c */
+ERTS_GLB_INLINE NifExport *erts_get_proc_nif_export(Process *c_p, int extra);
+ERTS_GLB_INLINE int erts_setup_nif_export_rootset(Process* proc, Eterm** objv,
+ Uint* nobj);
+ERTS_GLB_INLINE int erts_check_nif_export_in_area(Process *p,
+ char *start, Uint size);
+ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep,
+ Eterm result);
+ERTS_GLB_INLINE void erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa);
+ERTS_GLB_INLINE int erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+ERTS_GLB_INLINE Process *erts_proc_shadow2real(Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE NifExport *
+erts_get_proc_nif_export(Process *c_p, int argc)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (!nep || (nep->argc < 0 && nep->argv_size < argc))
+ return erts_new_proc_nif_export(c_p, argc);
+ return nep;
+}
+
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. Any exception term saved in the NifExport is also made
+ * part of the GC rootset here; it always resides in rootset[0].
+ */
+ERTS_GLB_INLINE int
+erts_setup_nif_export_rootset(Process* proc, Eterm** objv, Uint* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+
+ if (!ep || ep->argc <= 0)
+ return 0;
+
+ *objv = ep->argv;
+ *nobj = ep->argc;
+ return 1;
+}
+
+/*
+ * Check if nif export points into code area...
+ */
+ERTS_GLB_INLINE int
+erts_check_nif_export_in_area(Process *p, char *start, Uint size)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
+ if (!nep || nep->argc < 0)
+ return 0;
+ if (ErtsInArea(nep->pc, start, size))
+ return 1;
+ if (ErtsInArea(nep->cp, start, size))
+ return 1;
+ if (ErtsInArea(nep->mfa, start, size))
+ return 1;
+ if (ErtsInArea(nep->current, start, size))
+ return 1;
+ return 0;
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
+{
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ERTS_SMP_LC_ASSERT(!(c_p->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ c_p->current = ep->current;
+ ep->argc = -1; /* Unused nif-export marker... */
+ if (ep->trace)
+ erts_nif_export_restore_trace(c_p, result, ep);
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ int ix;
+
+ ASSERT(nep);
+ *pc = nep->pc;
+ c_p->cp = nep->cp;
+ *nif_mfa = nep->mfa;
+ for (ix = 0; ix < nep->argc; ix++)
+ reg[ix] = nep->argv[ix];
+ erts_nif_export_restore(c_p, nep, THE_NON_VALUE);
+}
+
+ERTS_GLB_INLINE int
+erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ if (is_non_value(result) && c_p->freason == TRAP) {
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (nep && nep->argc >= 0) {
+ erts_nif_export_save_trace(c_p, nep, applying, ep,
+ cp, flags, flags_meta,
+ I, meta_tracer);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ERTS_GLB_INLINE Process *
+erts_proc_shadow2real(Process *c_p)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ Process *real_c_p = c_p->next;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(real_c_p->common.id == c_p->common.id);
+ return real_c_p;
+ }
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+#endif
+ return c_p;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_NFUNC_SCHED_H__ */
+
+#if defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__)
+#define ERTS_NFUNC_SCHED_INTERNALS__
+
+#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I) \
+ (ASSERT(BeamOp(op_apply_bif) == (BeamInstr *) (*(I)) \
+ || BeamOp(op_call_nif) == (BeamInstr *) (*(I))), \
+ ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))
+
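+/*
+ * The macro above is a container_of-style back-mapping: 'I' is assumed to
+ * point at the beam[0] member of the Export embedded in a NifExport, so
+ * subtracting that member's offset recovers the enclosing struct.
+ * Equivalent sketch without the sanity ASSERT:
+ *
+ *     NifExport *nep =
+ *         (NifExport *) ((char *) I - offsetof(NifExport, exp.beam[0]));
+ */
+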
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+#include "erl_message.h"
+#include <stddef.h>
+
+ERTS_GLB_INLINE void erts_flush_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE void erts_cache_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE Process *erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp,
+ Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_flush_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p->stop == sproc->stop);
+ ASSERT(c_p->hend == sproc->hend);
+ ASSERT(c_p->heap == sproc->heap);
+ ASSERT(c_p->abandoned_heap == sproc->abandoned_heap);
+ ASSERT(c_p->heap_sz == sproc->heap_sz);
+ ASSERT(c_p->high_water == sproc->high_water);
+ ASSERT(c_p->old_heap == sproc->old_heap);
+ ASSERT(c_p->old_htop == sproc->old_htop);
+ ASSERT(c_p->old_hend == sproc->old_hend);
+
+ ASSERT(c_p->htop <= sproc->htop && sproc->htop <= c_p->stop);
+
+ c_p->htop = sproc->htop;
+
+ if (!c_p->mbuf)
+ c_p->mbuf = sproc->mbuf;
+ else if (sproc->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = sproc->mbuf; bp->next; bp = bp->next)
+ ASSERT(!bp->off_heap.first);
+ bp->next = c_p->mbuf;
+ c_p->mbuf = sproc->mbuf;
+ }
+
+ c_p->mbuf_sz += sproc->mbuf_sz;
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = sproc->off_heap.first;
+ else if (sproc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = sproc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = sproc->off_heap.first;
+ }
+
+ c_p->off_heap.overhead += sproc->off_heap.overhead;
+}
+
+ERTS_GLB_INLINE void
+erts_cache_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+ ASSERT(c_p);
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ sproc->htop = c_p->htop;
+ sproc->stop = c_p->stop;
+ sproc->hend = c_p->hend;
+ sproc->heap = c_p->heap;
+ sproc->abandoned_heap = c_p->abandoned_heap;
+ sproc->heap_sz = c_p->heap_sz;
+ sproc->high_water = c_p->high_water;
+ sproc->old_hend = c_p->old_hend;
+ sproc->old_htop = c_p->old_htop;
+ sproc->old_heap = c_p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+}
+
+ERTS_GLB_INLINE Process *
+erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
+{
+ Process *sproc;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = c_p;
+ sproc->common.id = c_p->common.id;
+
+ erts_cache_dirty_shadow_proc(sproc);
+
+ return sproc;
+}
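+
+/*
+ * Lifecycle sketch, as used by erts_call_dirty_nif() in erl_nif.c:
+ *
+ *     Process *shadow = erts_make_dirty_shadow_proc(esdp, c_p);
+ *     ... run the dirty NIF with 'shadow' as the process in the NIF env ...
+ *     erts_flush_dirty_shadow_proc(shadow);  // merge htop/mbufs/off-heap back
+ *
+ * (the flush happens via full_flush_env() when the dirty call returns).
+ */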
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */
+
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4fa15a8dd8..e6da4c1a76 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -55,6 +55,9 @@
#include "dtrace-wrapper.h"
#include "erl_process.h"
#include "erl_bif_unique.h"
+#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -71,14 +74,19 @@
struct erl_module_nif {
void* priv_data;
void* handle; /* "dlopen" */
- struct enif_entry_t* entry;
+ struct enif_entry_t entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
Module* mod; /* Can be NULL if orphan with dtor-resources left */
+
+ ErlNifFunc _funcs_copy_[1]; /* only used for old libs */
};
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
#ifdef DEBUG
# define READONLY_CHECK
+# define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
#endif
#ifdef READONLY_CHECK
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
@@ -87,6 +95,14 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
+static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
+# include "erl_gc.h"
+#else
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
+#endif
+
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* off_heap);
#endif
@@ -194,6 +210,9 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(p->common.id != ERTS_INVALID_PID);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env->dbg_disable_assert_in_env = 0;
+#endif
#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
@@ -217,38 +236,6 @@ static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
- ErlNifEnv* env, Process* p,
- struct erl_module_nif* mod_nif)
-{
- Process *sproc;
-#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
-
- ASSERT(!p->scheduler_data);
- ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
- && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
- ASSERT(esdp);
-#endif
-
- erts_pre_nif(env, p, mod_nif, NULL);
-
- sproc = esdp->dirty_shadow_process;
- ASSERT(sproc);
- ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
- == (ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_PROXY));
-
- sproc->next = p;
- sproc->common.id = p->common.id;
- env->proc = sproc;
- full_cache_env(env);
-}
-#endif
-
/* Temporary object header, auto-deallocated when NIF returns
* or when independent environment is cleared.
*/
@@ -276,115 +263,154 @@ void erts_post_nif(ErlNifEnv* env)
env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_post_dirty_nif(ErlNifEnv* env)
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save 'current' and registers if this is the first
+ * time the call is scheduled.
+ */
+
+static ERTS_INLINE ERL_NIF_TERM
+schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
- Process *c_p;
- ASSERT(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(env->proc->next);
- erts_unblock_fpe(env->fpe_was_unmasked);
- full_flush_env(env);
- free_tmp_objs(env);
- c_p = env->proc->next;
- env->exiting = ERTS_PROC_IS_EXITING(c_p);
- ERTS_VBUMP_ALL_REDS(c_p);
+ NifExport *ep;
+ Process *c_p, *dirty_shadow_proc;
+
+ execution_state(env, &c_p, NULL);
+ if (c_p == env->proc)
+ dirty_shadow_proc = NULL;
+ else
+ dirty_shadow_proc = env->proc;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ c_p->current,
+ c_p->cp,
+ (BeamInstr) em_call_nif,
+ direct_fp, indirect_fp,
+ mod, func_name,
+ argc, (const Eterm *) argv);
+ if (!ep->m) {
+ /* First time this call is scheduled... */
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep->m = env->mod_nif;
+ }
+ return (ERL_NIF_TERM) THE_NON_VALUE;
}
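+
+/*
+ * Usage sketch (hypothetical names): a native call that wants to yield and
+ * continue in another C function on a normal scheduler would end with
+ *
+ *     return schedule(env, execute_nif, my_nif_cont,
+ *                     mod, func_name_atom, argc, argv);
+ *
+ * where 'my_nif_cont' is assumed to be the indirect function later fetched
+ * from ep->func; this mirrors the flags == 0 case of enif_schedule_nif()
+ * further down.
+ */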
-#endif
-static void full_flush_env(ErlNifEnv* env)
-{
#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *c_p = env->proc->next;
-
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- if (!env->heap_frag) {
- ASSERT(env->hp_end == HEAP_LIMIT(c_p));
- ASSERT(env->hp >= HEAP_TOP(c_p));
- ASSERT(env->hp <= HEAP_LIMIT(c_p));
- HEAP_TOP(c_p) = env->hp;
+
+static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+int
+erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ int exiting;
+ ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
+ NativeFunPtr dirty_nif = (NativeFunPtr) I[1];
+ ErlNifEnv env;
+ ERL_NIF_TERM result;
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state);
+
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ ASSERT(!c_p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+ nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
+ erts_pre_nif(&env, c_p, nep->m, NULL);
+
+ env.proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ env.proc->freason = EXC_NULL;
+ env.proc->fvalue = NIL;
+ env.proc->ftrace = NIL;
+ env.proc->i = c_p->i;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+
+ erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(env.proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (env.exception_thrown) {
+ schedule_exception:
+ schedule(&env, dirty_nif_exception, NULL,
+ am_erts_internal, am_dirty_nif_exception,
+ 1, &env.proc->fvalue);
+ }
+ else if (is_value(result)) {
+ schedule(&env, dirty_nif_finalizer, NULL,
+ am_erts_internal, am_dirty_nif_finalizer,
+ 1, &result);
+ }
+ else if (env.proc->freason != TRAP) { /* user returned garbage... */
+ ERTS_DECL_AM(badreturn);
+ (void) enif_raise_exception(&env, AM_badreturn);
+ goto schedule_exception;
}
else {
- Uint usz;
- ASSERT(env->hp_end != HEAP_LIMIT(c_p));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
-
- HEAP_TOP(c_p) = HEAP_TOP(env->proc);
- usz = env->hp - env->heap_frag->mem;
- env->proc->mbuf_sz += usz - env->heap_frag->used_size;
- env->heap_frag->used_size = usz;
-
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
-
- if (c_p->mbuf) {
- ErlHeapFragment *bp;
- for (bp = env->proc->mbuf; bp->next; bp = bp->next)
- ;
- bp->next = c_p->mbuf;
- }
+ /* Rescheduled by dirty NIF call... */
+ ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ }
+ c_p->i = env.proc->i;
+ c_p->arity = env.proc->arity;
+ }
- c_p->mbuf = env->proc->mbuf;
- c_p->mbuf_sz += env->proc->mbuf_sz;
+#ifdef DEBUG
+ if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ nep->func = NULL;
+#endif
- }
+ erts_unblock_fpe(env.fpe_was_unmasked);
+ full_flush_env(&env);
+ free_tmp_objs(&env);
- if (!c_p->off_heap.first)
- c_p->off_heap.first = env->proc->off_heap.first;
- else if (env->proc->off_heap.first) {
- struct erl_off_heap_header *ohhp;
- for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
- ;
- ohhp->next = c_p->off_heap.first;
- c_p->off_heap.first = env->proc->off_heap.first;
- }
- c_p->off_heap.overhead += env->proc->off_heap.overhead;
+ return exiting;
+}
- return;
- }
#endif
+static void full_flush_env(ErlNifEnv* env)
+{
flush_env(env);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ /* Dirty nif call using shadow process struct */
+ erts_flush_dirty_shadow_proc(env->proc);
+#endif
}
static void full_cache_env(ErlNifEnv* env)
{
#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *sproc = env->proc;
- Process *c_p = sproc->next;
- ASSERT(c_p);
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- sproc->htop = c_p->htop;
- sproc->stop = c_p->stop;
- sproc->hend = c_p->hend;
- sproc->heap = c_p->heap;
- sproc->abandoned_heap = c_p->abandoned_heap;
- sproc->heap_sz = c_p->heap_sz;
- sproc->high_water = c_p->high_water;
- sproc->old_hend = c_p->old_hend;
- sproc->old_htop = c_p->old_htop;
- sproc->old_heap = c_p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
-
- env->hp_end = HEAP_LIMIT(c_p);
- env->hp = HEAP_TOP(c_p);
- env->heap_frag = NULL;
- return;
- }
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ erts_cache_dirty_shadow_proc(env->proc);
#endif
-
cache_env(env);
}
@@ -472,11 +498,15 @@ setup_nif_env(struct enif_msg_environment_t* msg_env,
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
msg_env->phony_proc.common.id = ERTS_INVALID_PID;
+ msg_env->env.tracee = tracee;
+
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
- msg_env->env.tracee = tracee;
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env->env.dbg_disable_assert_in_env = 0;
+#endif
}
ErlNifEnv* enif_alloc_env(void)
@@ -687,9 +717,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
MBUF(&menv->phony_proc) = NULL;
}
} else {
- Uint sz = size_object(msg);
+ erts_literal_area_t litarea;
ErlOffHeap *ohp;
Eterm *hp;
+ Uint sz;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
+ sz = size_object_litopt(msg, &litarea);
if (c_p && !env->tracee) {
full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -709,7 +742,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ohp = &bp->off_heap;
}
}
- msg = copy_struct(msg, sz, &hp, ohp);
+ msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
ERL_MESSAGE_TERM(mp) = msg;
@@ -1298,22 +1331,15 @@ Eterm enif_make_badarg(ErlNifEnv* env)
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
- Process *c_p;
-
- execution_state(env, &c_p, NULL);
-
env->exception_thrown = 1;
- c_p->fvalue = reason;
- BIF_ERROR(c_p, EXC_ERROR);
+ env->proc->fvalue = reason;
+ BIF_ERROR(env->proc, EXC_ERROR);
}
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
- if (env->exception_thrown && reason != NULL) {
- Process *c_p;
- execution_state(env, &c_p, NULL);
- *reason = c_p->fvalue;
- }
+ if (env->exception_thrown && reason != NULL)
+ *reason = env->proc->fvalue;
return env->exception_thrown;
}
@@ -1581,6 +1607,9 @@ int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
va_list ap;
@@ -1588,7 +1617,9 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
*hp++ = make_arityval(cnt);
va_start(ap,cnt);
while (cnt--) {
- *hp++ = va_arg(ap,Eterm);
+ Eterm elem = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, elem, ++nr, "tuple");
+ *hp++ = elem;
}
va_end(ap);
return ret;
@@ -1596,12 +1627,16 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
const Eterm* src = arr;
*hp++ = make_arityval(cnt);
while (cnt--) {
+ ASSERT_IN_ENV(env, *src, ++nr, "tuple");
*hp++ = *src++;
}
return ret;
@@ -1612,6 +1647,8 @@ ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
Eterm* hp = alloc_heap(env,2);
Eterm ret = make_list(hp);
+ ASSERT_IN_ENV(env, car, 0, "head of list cell");
+ ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
CAR(hp) = car;
CDR(hp) = cdr;
return ret;
@@ -1623,6 +1660,9 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
return NIL;
}
else {
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
@@ -1630,8 +1670,10 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
va_start(ap,cnt);
while (cnt--) {
+ Eterm term = va_arg(ap,Eterm);
*last = make_list(hp);
- *hp = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1643,14 +1685,19 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
const Eterm* src = arr;
while (cnt--) {
+ Eterm term = *src++;
*last = make_list(hp);
- *hp = *src++;
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1674,7 +1721,7 @@ ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
{
- Eterm* hp = alloc_heap(env, REF_THING_SIZE);
+ Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
return erts_make_ref_in_buffer(hp);
}
@@ -1683,13 +1730,9 @@ void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
driver_system_info(sip, si_size);
}
-int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
- Eterm *listptr, ret = NIL, *hp;
-
- if (is_nil(term)) {
- *list = term;
- return 1;
- }
+int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
+{
+ Eterm *listptr, ret, *hp;
ret = NIL;
@@ -1899,38 +1942,9 @@ int enif_snprintf(char *buffer, size_t size, const char* format, ...)
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
-
-struct enif_resource_type_t
-{
- struct enif_resource_type_t* next; /* list of all resource types */
- struct enif_resource_type_t* prev;
- struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
- ErlNifResourceDtor* dtor; /* user destructor function */
- erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
- +1 for active erl_module_nif */
- Eterm module;
- Eterm name;
-};
-
/* dummy node in circular list */
struct enif_resource_type_t resource_type_list;
-typedef struct enif_resource_t
-{
- struct enif_resource_type_t* type;
-#ifdef DEBUG
- erts_refc_t nif_refc;
-# ifdef ARCH_32
- byte align__[4];
-# endif
-#endif
-
- char data[1];
-}ErlNifResource;
-
-#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
-#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-
static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
@@ -1955,10 +1969,10 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(lib->handle != NULL);
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
- if (lib->entry != NULL && lib->entry->unload != NULL) {
+ if (lib->entry.unload != NULL) {
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, lib, NULL);
- lib->entry->unload(&msg_env.env, lib->priv_data);
+ lib->entry.unload(&msg_env.env, lib->priv_data);
post_nif_noproc(&msg_env);
}
if (!erts_is_static_nif(lib->handle))
@@ -1992,24 +2006,23 @@ struct opened_resource_type
ErlNifResourceFlags op;
ErlNifResourceType* type;
- ErlNifResourceDtor* new_dtor;
+ ErlNifResourceTypeInit new_callbacks;
};
static struct opened_resource_type* opened_rt_list = NULL;
-ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env,
- const char* module_str,
- const char* name_str,
- ErlNifResourceDtor* dtor,
- ErlNifResourceFlags flags,
- ErlNifResourceFlags* tried)
+static
+ErlNifResourceType* open_resource_type(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried,
+ size_t sizeof_init)
{
ErlNifResourceType* type = NULL;
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(module_str == NULL); /* for now... */
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -2043,7 +2056,9 @@ enif_open_resource_type(ErlNifEnv* env,
sizeof(struct opened_resource_type));
ort->op = op;
ort->type = type;
- ort->new_dtor = dtor;
+ sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
+ ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit));
+ sys_memcpy(&ort->new_callbacks, init, sizeof_init);
ort->next = opened_rt_list;
opened_rt_list = ort;
}
@@ -2053,6 +2068,31 @@ enif_open_resource_type(ErlNifEnv* env,
return type;
}
+ErlNifResourceType*
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ ErlNifResourceTypeInit init = {dtor, NULL};
+ ASSERT(module_str == NULL); /* for now... */
+ return open_resource_type(env, name_str, &init, flags, tried,
+ sizeof(init));
+}
+
+ErlNifResourceType*
+enif_open_resource_type_x(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ return open_resource_type(env, name_str, init, flags, tried,
+ env->mod_nif->entry.sizeof_ErlNifResourceTypeInit);
+}
+
static void commit_opened_resource_types(struct erl_module_nif* lib)
{
while (opened_rt_list) {
@@ -2071,7 +2111,9 @@ static void commit_opened_resource_types(struct erl_module_nif* lib)
}
type->owner = lib;
- type->dtor = ort->new_dtor;
+ type->dtor = ort->new_callbacks.dtor;
+ type->stop = ort->new_callbacks.stop;
+ type->down = ort->new_callbacks.down;
if (type->dtor != NULL) {
erts_refc_inc(&lib->rt_dtor_cnt, 1);
@@ -2097,12 +2139,145 @@ static void rollback_opened_resource_types(void)
}
}
+struct destroy_monitor_ctx
+{
+ ErtsResource* resource;
+ int exiting_procs;
+ int scheduler;
+};
-static void nif_resource_dtor(Binary* bin)
+static void destroy_one_monitor(ErtsMonitor* mon, void* context)
{
- ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ struct destroy_monitor_ctx* ctx = (struct destroy_monitor_ctx*) context;
+ Process* rp;
+ ErtsMonitor *rmon = NULL;
+ int is_exiting;
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+ ASSERT(is_internal_ref(mon->ref));
+
+ if (ctx->scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ }
+ else {
+#ifdef ERTS_SMP
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+#else
+ ASSERT(!"nif monitor destruction in non-scheduler thread");
+ rp = NULL;
+#endif
+ }
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ if (rp) {
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+#ifdef ERTS_SMP
+ if (ctx->scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ }
+ if (is_exiting) {
+ ctx->resource->monitors->pending_failed_fire++;
+ }
+
+ /* ToDo: Delay destruction after monitor_locks */
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == ctx->resource);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+}
+
+static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
+{
+ struct destroy_monitor_ctx ctx;
+
+ execution_state(NULL, NULL, &ctx.scheduler);
+
+ ctx.resource = resource;
+ erts_sweep_monitors(monitors, &destroy_one_monitor, &ctx);
+}
+
+
+#ifdef ERTS_SMP
+# define NIF_RESOURCE_DTOR &nif_resource_dtor
+#else
+# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
+
+/*
+ * NO-SMP: Always run resource destructor on scheduler thread
+ * as we may have to remove process monitors.
+ */
+static int nif_resource_dtor(Binary*);
+
+static void nosmp_nif_resource_dtor_scheduled(void* vbin)
+{
+ erts_bin_free((Binary*)vbin);
+}
+
+static int nosmp_nif_resource_dtor_prologue(Binary* bin)
+{
+ if (is_scheduler()) {
+ return nif_resource_dtor(bin);
+ }
+ else {
+ erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin);
+ return 0; /* do not free */
+ }
+}
+
+#endif /* !ERTS_SMP */
+
+static int nif_resource_dtor(Binary* bin)
+{
+ ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+
+ if (resource->monitors) {
+ ErtsResourceMonitors* rm = resource->monitors;
+
+ ASSERT(type->down);
+ erts_smp_mtx_lock(&rm->lock);
+ ASSERT(erts_refc_read(&bin->refc, 0) == 0);
+ if (rm->root) {
+ ASSERT(!rm->is_dying);
+ destroy_all_monitors(rm->root, resource);
+ rm->root = NULL;
+ }
+ if (rm->pending_failed_fire) {
+ /*
+ * Resource death struggle prolonged to serve exiting process(es).
+ * Destructor will be called again when last exiting process
+ * tries to fire its MON_NIF_TARGET monitor (and fails).
+ *
+ * This resource is doomed. It has no "real" references and
+             * should not get called upon to do anything except the
+ * final destructor call.
+ *
+ * We keep refc at 0 and use a separate counter for exiting
+ * processes to avoid resource getting revived by "dec_term".
+ */
+ ASSERT(!rm->is_dying);
+ rm->is_dying = 1;
+ erts_smp_mtx_unlock(&rm->lock);
+ return 0;
+ }
+ erts_smp_mtx_unlock(&rm->lock);
+ erts_smp_mtx_destroy(&rm->lock);
+ }
if (type->dtor != NULL) {
struct enif_msg_environment_t msg_env;
@@ -2117,14 +2292,92 @@ static void nif_resource_dtor(Binary* bin)
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
+ return 1;
+}
+
+void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
+ int is_direct_call)
+{
+ struct enif_msg_environment_t msg_env;
+ ASSERT(resource->type->stop);
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->stop(&msg_env.env, resource->data, e, is_direct_call);
+ post_nif_noproc(&msg_env);
}
-void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
+void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
{
- Binary* bin = erts_create_magic_binary_x(SIZEOF_ErlNifResource(size),
- &nif_resource_dtor,
- 1); /* unaligned */
- ErlNifResource* resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ ErtsMonitor* rmon;
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ struct enif_msg_environment_t msg_env;
+ ErlNifPid nif_pid;
+ ErlNifMonitor nif_monitor;
+ ErtsResourceMonitors* rmp = resource->monitors;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_smp_mtx_lock(&rmp->lock);
+ rmon = erts_remove_monitor(&rmp->root, ref);
+ if (!rmon) {
+ int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying;
+ ASSERT(rmp->pending_failed_fire >= 0);
+ erts_smp_mtx_unlock(&rmp->lock);
+
+ if (free_me) {
+ ASSERT(erts_refc_read(&bin->binary.refc, 0) == 0);
+ erts_bin_free(&bin->binary);
+ }
+ return;
+ }
+ ASSERT(!rmp->is_dying);
+ if (erts_refc_inc_unless(&bin->binary.refc, 0, 0) == 0) {
+ /*
+ * Racing resource destruction.
+ * To avoid a more complex refc-dance with destructing thread
+ * we avoid calling 'down' and just silently remove the monitor.
+         * This can happen even for non-SMP builds, as destructor calls may be scheduled.
+ */
+ erts_smp_mtx_unlock(&rmp->lock);
+ }
+ else {
+ erts_smp_mtx_unlock(&rmp->lock);
+
+ ASSERT(rmon->u.pid == pid);
+ erts_ref_to_driver_monitor(ref, &nif_monitor);
+ nif_pid.pid = pid;
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
+ post_nif_noproc(&msg_env);
+
+ if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
+ erts_bin_free(&bin->binary);
+ }
+ }
+ erts_destroy_monitor(rmon);
+}
+
+void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
+{
+ size_t magic_sz = offsetof(ErtsResource,data);
+ Binary* bin;
+ ErtsResource* resource;
+ size_t monitors_offs;
+
+ if (type->down) {
+        /* Place ErtsResourceMonitors after the user data, properly aligned */
+ monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
+ & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
+ magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
+ }
+ else {
+ ERTS_UNDEF(monitors_offs, 0);
+ magic_sz += data_sz;
+ }
+ bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
+ ERTS_ALC_T_BINARY,
+ 1); /* unaligned */
+ resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
@@ -2133,15 +2386,27 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
erts_refc_init(&resource->nif_refc, 1);
#endif
erts_refc_inc(&resource->type->refc, 2);
+ if (type->down) {
+ resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
+ erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors");
+ resource->monitors->root = NULL;
+ resource->monitors->pending_failed_fire = 0;
+ resource->monitors->is_dying = 0;
+ resource->monitors->user_data_sz = data_sz;
+ }
+ else {
+ resource->monitors = NULL;
+ }
return resource->data;
}
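+
+/*
+ * Resulting layout of the magic binary when the type has a 'down' callback
+ * (sketch; exact padding depends on ERTS_ALLOC_ALIGN_BYTES):
+ *
+ *     [ Binary | ErtsResource { type, monitors, ..., data[0..data_sz-1],
+ *                               padding, ErtsResourceMonitors } ]
+ *
+ * so resource->monitors points just past the aligned user data, and
+ * enif_sizeof_resource() below reads user_data_sz instead of deriving the
+ * size from the binary as in the monitor-less case.
+ */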
void enif_release_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
@@ -2152,50 +2417,81 @@ void enif_release_resource(void* obj)
void enif_keep_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
erts_refc_inc(&bin->binary.refc, 2);
}
+Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
+{
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(hpp, oh, &bin->binary);
+}
+
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
+ Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
}
ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
const void* data, size_t size)
{
- Eterm bin = enif_make_resource(env, obj);
- ProcBin* pb = (ProcBin*) binary_val(bin);
- pb->bytes = (byte*) data;
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ErlOffHeap *ohp = &MSO(env->proc);
+ Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
+ ProcBin* pb = (ProcBin *) hp;
+
+ pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- return bin;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
+ pb->val = &bin->binary;
+ pb->bytes = (byte*) data;
+ pb->flags = 0;
+
+ OH_OVERHEAD(ohp, size / sizeof(Eterm));
+ erts_refc_inc(&bin->binary.refc, 1);
+
+ return make_binary(hp);
}
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
- ProcBin* pb;
Binary* mbin;
- ErlNifResource* resource;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- return 0;
+ ErtsResource* resource;
+ if (is_internal_magic_ref(term))
+ mbin = erts_magic_ref2bin(term);
+ else {
+ Eterm *hp;
+ if (!is_binary(term))
+ return 0;
+ hp = binary_val(term);
+ if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
+ return 0;
+ /*
+ if (((ProcBin *) hp)->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }
+ */
+ mbin = ((ProcBin *) hp)->val;
+ if (!(mbin->flags & BIN_FLAG_MAGIC))
+ return 0;
}
- pb = (ProcBin*) binary_val(term);
- /*if (pb->size != 0) {
- return 0; / * Or should we allow "resource binaries" as handles? * /
- }*/
- mbin = pb->val;
- resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
- if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
+ resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
|| resource->type != type) {
return 0;
}
@@ -2205,9 +2501,14 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
size_t enif_sizeof_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
- return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ if (resource->monitors) {
+ return resource->monitors->user_data_sz;
+ }
+ else {
+ Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
+ return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
+ }
}
@@ -2264,188 +2565,28 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(proc) == 0;
}
-/*
- * NIF exports need a few more items than the Export struct provides,
- * including the erl_module_nif* and a NIF function pointer, so the
- * NifExport below adds those. The Export member must be first in the
- * struct. The saved_current, exception_thrown, saved_argc, rootset_extra, and
- * rootset members are used to track the MFA, any pending exception, and
- * arguments of the top NIF in case a chain of one or more
- * enif_schedule_nif() calls results in an exception, since in that case
- * the original MFA and registers have to be restored before returning to
- * Erlang to ensure stacktrace information associated with the exception is
- * correct.
- */
-typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-
-typedef struct {
- Export exp;
- struct erl_module_nif* m;
- NativeFunPtr fp;
- BeamInstr *saved_current;
- int exception_thrown;
- int saved_argc;
- int rootset_extra;
- Eterm rootset[1];
-} NifExport;
-
-/*
- * If a process has saved arguments, they need to be part of the GC
- * rootset. The function below is called from setup_rootset() in
- * erl_gc.c. This function is declared in erl_process.h. Any exception term
- * saved in the NifExport is also made part of the GC rootset here; it
- * always resides in rootset[0].
- */
-int
-erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
-{
- NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- int gc = ep && (ep->saved_argc > 0 || ep->rootset[0] != NIL);
-
- if (gc) {
- *objv = ep->rootset;
- *nobj = 1 + ep->saved_argc;
- }
- return gc;
-}
-
-int
-erts_check_nif_export_in_area(Process *p, char *start, Uint size)
-{
- NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
- if (!nep || !nep->saved_current)
- return 0;
- if (ErtsInArea(nep->saved_current, start, size))
- return 1;
- return 0;
-}
-
-/*
- * Allocate a NifExport and set it in proc specific data
- */
-static NifExport*
-allocate_nif_sched_data(Process* proc, int argc)
-{
- NifExport* ep;
- size_t total;
- int i;
-
- total = sizeof(NifExport) + argc*sizeof(Eterm);
- ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
- sys_memset((void*) ep, 0, total);
- ep->rootset_extra = argc;
- ep->rootset[0] = NIL;
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->exp.addressv[i] = &ep->exp.code[3];
- }
- ep->exp.code[3] = (BeamInstr) em_call_nif;
- (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ep);
- return ep;
-}
-
static ERTS_INLINE void
-destroy_nif_export(NifExport *nif_export)
+nif_export_cleanup_nif_mod(NifExport *ep)
{
- erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
+ if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL)
+ close_lib(ep->m);
+ ep->m = NULL;
}
void
-erts_destroy_nif_export(void *nif_export)
+erts_nif_export_cleanup_nif_mod(NifExport *ep)
{
- destroy_nif_export((NifExport *) nif_export);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Initialize a NifExport struct. Create it if needed and store it in the
- * proc. The direct_fp function is what will be invoked by op_call_nif, and
- * the indirect_fp function, if not NULL, is what the direct_fp function
- * will call. If the allocated NifExport isn't enough to hold all of argv,
- * allocate a larger one. Save MFA and registers only if the need_save
- * parameter is true.
- */
-static ERL_NIF_TERM
-init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
- int need_save, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE void
+nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
{
- Process* proc;
- Eterm* reg;
- NifExport* ep;
- int i, scheduler;
- int orig_argc;
-
- execution_state(env, &proc, &scheduler);
-
- ASSERT(scheduler);
-
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- reg = erts_proc_sched_data(proc)->x_reg_array;
-
- ASSERT(!need_save || proc->current);
- orig_argc = need_save ? (int) proc->current[2] : 0;
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- if (!ep)
- ep = allocate_nif_sched_data(proc, orig_argc);
- else if (need_save && ep->rootset_extra < orig_argc) {
- NifExport* new_ep = allocate_nif_sched_data(proc, orig_argc);
- destroy_nif_export(ep);
- ep = new_ep;
- }
- if (env->exception_thrown) {
- ep->exception_thrown = 1;
- ep->rootset[0] = proc->fvalue;
- } else {
- ep->exception_thrown = 0;
- ep->rootset[0] = NIL;
- }
- if (scheduler > 0)
- ERTS_VBUMP_ALL_REDS(proc);
- if (need_save) {
- ep->saved_current = proc->current;
- ep->saved_argc = orig_argc;
- for (i = 0; i < orig_argc; i++)
- ep->rootset[i+1] = reg[i];
- }
- for (i = 0; i < argc; i++)
- reg[i] = (Eterm) argv[i];
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[0] = (BeamInstr) proc->current[0];
- ep->exp.code[1] = (BeamInstr) proc->current[1];
- ep->exp.code[2] = argc;
- ep->exp.code[4] = (BeamInstr) direct_fp;
- ep->m = env->mod_nif;
- ep->fp = indirect_fp;
- proc->freason = TRAP;
- proc->arity = argc;
- return THE_NON_VALUE;
+ erts_nif_export_restore(c_p, ep, res);
+ ASSERT(ep->m);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Restore saved MFA and registers. Registers are restored only when the
- * exception flag is true.
- */
-static void
-restore_nif_mfa(Process* proc, NifExport* ep, int exception)
-{
- int i;
-
- ERTS_SMP_LC_ASSERT(!(proc->static_flags
- & ERTS_STC_FLG_SHADOW_PROC));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- ASSERT(ep->saved_current != &ep->exp.code[0]);
- proc->current = ep->saved_current;
- ep->saved_current = NULL;
- if (exception) {
- Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
- for (i = 0; i < ep->saved_argc; i++)
- reg[i] = ep->rootset[i+1];
- }
- ep->saved_argc = 0;
-}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -2454,7 +2595,7 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception)
* switch the process off a dirty scheduler thread and back onto a regular
* scheduler thread, and then return the result from the dirty NIF. It also
* restores the original NIF MFA when necessary based on the value of
- * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL
* means restore, NULL means do not restore.
*/
static ERL_NIF_TERM
@@ -2469,9 +2610,7 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(!ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 0);
+ nif_export_restore(proc, ep, argv[0]);
return argv[0];
}
@@ -2481,146 +2620,100 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
+ ERL_NIF_TERM ret;
Process* proc;
NifExport* ep;
+ Eterm exception;
execution_state(env, &proc, NULL);
+ ASSERT(argc == 1);
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 1);
- return enif_raise_exception(env, ep->rootset[0]);
+ exception = argv[0]; /* argv overwritten by restore below... */
+ nif_export_cleanup_nif_mod(ep);
+ ret = enif_raise_exception(env, exception);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ proc->freason |= EXF_RESTORE_NIF;
+ return ret;
}
/*
- * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
- * then check the result and schedule the appropriate finalizer function
- * where needed. Also restore the original NIF MFA when appropriate.
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute.
+ * The dirty scheduler thread type (CPU or I/O) is indicated by the 'flags'
+ * parameter.
*/
-static ERL_NIF_TERM
-execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
+ Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NativeFunPtr fp;
- NifExport* ep;
- ERL_NIF_TERM result;
-
- execution_state(env, &proc, NULL);
-
- fp = (NativeFunPtr) proc->current[6];
-
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
-
- /*
- * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep && fp);
- ep->fp = NULL;
- erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC));
+ ASSERT(is_atom(func_name));
+ ASSERT(fp);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
- result = (*fp)(env, argc, argv);
+ execution_state(env, &proc, NULL);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
+ ? ERTS_PSFLG_DIRTY_CPU_PROC
+ : ERTS_PSFLG_DIRTY_IO_PROC));
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- /*
- * If no more NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF calling
- * context. Reuse fp essentially as a boolean for this, passing it to
- * init_nif_sched_data below. Both dirty_nif_exception and
- * dirty_nif_finalizer then check ep->fp to decide whether or not to
- * restore the original calling context.
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- if (ep->fp)
- fp = NULL;
- if (is_non_value(result) || env->exception_thrown) {
- if (proc->freason != TRAP) {
- return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
- } else {
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, 1);
- return THE_NON_VALUE;
- }
- }
- else
- return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+ return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv);
}
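+
+/*
+ * For reference, a NIF ends up on a dirty scheduler either statically, via
+ * the flags field of its ErlNifFunc entry (hypothetical example)
+ *
+ *     {"my_nif", 1, my_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
+ *
+ * which is then handled by static_schedule_dirty_*_nif() below, or
+ * dynamically by passing the same flag to enif_schedule_nif().
+ */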
-/*
- * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
- * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
- * type (CPU or I/O) is indicated in flags parameter.
- */
static ERTS_INLINE ERL_NIF_TERM
-schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
+ int argc, const ERL_NIF_TERM argv[])
{
- ERL_NIF_TERM result;
- erts_aint32_t act, dirty_flag;
- Process* proc;
+ Process *proc;
+ NifExport *ep;
+ Eterm mod, func;
NativeFunPtr fp;
- NifExport* ep;
- int need_save, scheduler;
-
- execution_state(env, &proc, &scheduler);
- if (scheduler <= 0) {
- ASSERT(scheduler < 0);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
- }
- fp = (NativeFunPtr) proc->current[6];
+ execution_state(env, &proc, NULL);
- ASSERT(fp);
+ /*
+ * Called in order to schedule statically determined
+ * dirty NIF calls...
+ *
+ * Note that 'current' does not point into a NifExport
+     * structure; it points into a structure with a similar
+     * layout that is located in the loaded code.
+ */
- ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ mod = proc->current->module;
+ func = proc->current->function;
+ fp = (NativeFunPtr) ep->func;
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+ ASSERT(is_atom(mod) && is_atom(func));
+ ASSERT(fp);
- act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
- if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
- else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
- /* clear other flag... */
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
- }
+ (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ dirty_psflg);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
- result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
- if (scheduler <= 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- return result;
+ return schedule(env, fp, NULL, mod, func, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
#endif /* ERTS_DIRTY_SCHEDULERS */
@@ -2639,22 +2732,42 @@ execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ERL_NIF_TERM result;
execution_state(env, &proc, NULL);
- fp = (NativeFunPtr) proc->current[6];
- ASSERT(!env->exception_thrown);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
ASSERT(ep);
- ep->fp = NULL;
+ ASSERT(!env->exception_thrown);
+
+ fp = (NativeFunPtr) ep->func;
+
+#ifdef DEBUG
+ ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
result = (*fp)(env, argc, argv);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- /*
- * If no NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF MFA.
- */
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, env->exception_thrown);
+
+ ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc));
+
+ if (is_value(result) || proc->freason != TRAP) {
+ /* Done (not rescheduled)... */
+ ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ if (!env->exception_thrown)
+ nif_export_restore(proc, ep, result);
+ else {
+ nif_export_cleanup_nif_mod(ep);
+ /*
+ * Restore orig info for error and clear nif
+ * export in handle_error()
+ */
+ proc->freason |= EXF_RESTORE_NIF;
+ }
+ }
+
+#ifdef DEBUG
+ if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ ep->func = NULL;
+#endif
+
return result;
}
@@ -2664,9 +2777,8 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NifExport* ep;
ERL_NIF_TERM fun_name_atom, result;
- int need_save, scheduler;
+ int scheduler;
if (argc > MAX_ARG)
return enif_make_badarg(env);
@@ -2681,35 +2793,19 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
}
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
-
- if (flags) {
+ if (flags == 0)
+ result = schedule(env, execute_nif, fp, proc->current->module,
+ fun_name_atom, argc, argv);
+ else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
#ifdef ERTS_DIRTY_SCHEDULERS
- NativeFunPtr sched_fun;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
- sched_fun = schedule_dirty_io_nif;
- else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- sched_fun = schedule_dirty_cpu_nif;
- else {
- result = enif_make_badarg(env);
- goto done;
- }
- result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
+ result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
#else
- result = enif_make_badarg(env);
+ result = enif_raise_exception(env, am_notsup);
#endif
- goto done;
}
else
- result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- ep->exp.code[1] = (BeamInstr) fun_name_atom;
+ result = enif_make_badarg(env);
-done:
if (scheduler < 0)
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
@@ -2781,6 +2877,10 @@ int enif_make_map_put(ErlNifEnv* env,
if (!is_map(map_in)) {
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
*map_out = erts_maps_put(env->proc, key, value, map_in);
cache_env(env);
@@ -2815,6 +2915,10 @@ int enif_make_map_update(ErlNifEnv* env,
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
res = erts_maps_update(env->proc, key, value, map_in, map_out);
cache_env(env);
@@ -3016,22 +3120,172 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
return 0;
}
+int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
+ ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+ Process *rp;
+ Eterm tmp[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int retval;
+
+ ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
+ == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+ ASSERT(!rsrc->monitors == !rsrc->type->down);
+
+
+ if (!rsrc->monitors) {
+ ASSERT(!rsrc->type->down);
+ return -1;
+ }
+ ASSERT(rsrc->type->down);
+
+ execution_state(env, NULL, &scheduler);
+
+#ifdef ERTS_SMP
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup_raw(target_pid->pid);
+ else
+ rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
+#else
+ if (scheduler <= 0) {
+ erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from "
+ "non-scheduler thread on non-SMP VM");
+ }
+ rp = erts_proc_lookup(target_pid->pid);
+#endif
+
+ if (!rp)
+ return 1;
+
+ ref = erts_make_ref_in_buffer(tmp);
+
+ erts_smp_mtx_lock(&rsrc->monitors->lock);
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) {
+ retval = 1;
+ }
+ else {
+ erts_add_monitor(&rsrc->monitors->root, MON_ORIGIN, ref, rp->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL);
+ retval = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+
+#ifdef ERTS_SMP
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ if (monitor)
+ erts_ref_to_driver_monitor(ref,monitor);
+
+ return retval;
+}
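+
+/*
+ * Usage sketch (hypothetical NIF code; 'obj' is a resource whose type was
+ * opened with a 'down' callback):
+ *
+ *     ErlNifPid pid;
+ *     ErlNifMonitor mon;
+ *     enif_self(env, &pid);
+ *     if (enif_monitor_process(env, obj, &pid, &mon) != 0) {
+ *         // < 0: type has no down callback, > 0: process no longer alive
+ *         return enif_make_badarg(env);
+ *     }
+ */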
+
+int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
+ Process *rp;
+ ErtsMonitor *mon;
+ ErtsMonitor *rmon = NULL;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int is_exiting;
+
+ ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+
+ execution_state(env, NULL, &scheduler);
+
+ ref = erts_driver_monitor_to_ref(ref_heap, monitor);
+
+ erts_smp_mtx_lock(&rsrc->monitors->lock);
+ mon = erts_remove_monitor(&rsrc->monitors->root, ref);
+
+ if (mon == NULL) {
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ return 1;
+ }
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+
+#ifdef ERTS_SMP
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ else
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+#else
+ if (scheduler <= 0) {
+ erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from "
+ "non-scheduler thread on non-SMP VM");
+ }
+ rp = erts_proc_lookup(mon->u.pid);
+#endif
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ else {
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+#ifdef ERTS_SMP
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ }
+ if (is_exiting) {
+ rsrc->monitors->pending_failed_fire++;
+ }
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == rsrc);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+
+ return 0;
+}
+
+int enif_compare_monitors(const ErlNifMonitor *monitor1,
+ const ErlNifMonitor *monitor2)
+{
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
+}
+
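
A usage sketch for the three monitor functions above (hypothetical resource and callback names; not part of the patch). Following the code above, enif_monitor_process() returns 0 on success, a negative value when the resource type has no down callback, and a positive value when the target process no longer exists; enif_demonitor_process() returns 0 when the monitor was found and removed.

#include <erl_nif.h>

typedef struct { ErlNifMonitor mon; int monitoring; } MyRes;

/* Assumed to be opened with enif_open_resource_type_x() and a 'down'
 * callback during load() -- see the erl_nif.h hunk further down. */
static ErlNifResourceType* my_res_type;

static void my_down(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon)
{
    MyRes* r = (MyRes*) obj;
    if (enif_compare_monitors(mon, &r->mon) == 0)
        r->monitoring = 0;              /* our monitor fired: the owner died */
}

static ERL_NIF_TERM watch_self(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    MyRes* r = enif_alloc_resource(my_res_type, sizeof(MyRes));
    ErlNifPid self;
    ERL_NIF_TERM term;

    enif_self(env, &self);
    r->monitoring = (enif_monitor_process(env, r, &self, &r->mon) == 0);
    /* enif_demonitor_process(env, r, &r->mon) would cancel it again. */
    term = enif_make_resource(env, r);
    enif_release_resource(r);           /* the returned term keeps 'r' alive */
    return term;
}
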
/***************************************************************************
** load_nif/2 **
***************************************************************************/
-static BeamInstr** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
+static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
{
int n = (int) mod_code->num_functions;
int j;
for (j = 0; j < n; ++j) {
- BeamInstr* code_ptr = (BeamInstr*) mod_code->functions[j];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (f_atom == ((Eterm) code_ptr[3])
- && arity == ((unsigned) code_ptr[4])) {
-
- return (BeamInstr**) &mod_code->functions[j];
+ ErtsCodeInfo* ci = mod_code->functions[j];
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (f_atom == ci->mfa.function
+ && arity == ci->mfa.arity) {
+ return mod_code->functions+j;
}
}
return NULL;
@@ -3123,39 +3377,62 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+#define AT_LEAST_VERSION(E,MAJ,MIN) \
+ (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
+
/*
- * The function below is for looping through ErlNifFunc arrays, helping
- * provide backwards compatibility across the version 2.7 change that added
- * the "flags" field to ErlNifFunc.
+ * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
*/
-static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+static struct erl_module_nif* create_lib(const ErlNifEntry* src)
{
- ASSERT(incrp);
- if (!*incrp) {
- if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- *incrp = sizeof(ErlNifFunc);
- else {
- /*
- * ErlNifFuncV1 below is what ErlNifFunc was before the
- * addition of the flags field for 2.7, and is needed to handle
- * backward compatibility.
- */
- typedef struct {
- const char* name;
- unsigned arity;
- ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
- }ErlNifFuncV1;
- *incrp = sizeof(ErlNifFuncV1);
- }
+ struct erl_module_nif* lib;
+ ErlNifEntry* dst;
+ Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
+
+ if (!AT_LEAST_VERSION(src, 2, 7))
+ bytes += src->num_of_funcs * sizeof(ErlNifFunc);
+
+ lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
+ dst = &lib->entry;
+
+ sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
+
+ if (AT_LEAST_VERSION(src, 2, 1)) {
+ dst->vm_variant = src->vm_variant;
+ } else {
+ dst->vm_variant = "beam.vanilla";
}
- return (ErlNifFunc*) ((char*)func + *incrp);
-}
+ if (AT_LEAST_VERSION(src, 2, 7)) {
+ dst->options = src->options;
+ } else {
+ /*
+ * Make a modern copy of the ErlNifFunc array
+ */
+ struct ErlNifFunc_V1 {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
+ int i;
+ for (i = 0; i < src->num_of_funcs; ++i) {
+ sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
+ lib->_funcs_copy_[i].flags = 0;
+ }
+ dst->funcs = lib->_funcs_copy_;
+ dst->options = 0;
+ }
+ if (AT_LEAST_VERSION(src, 2, 12)) {
+ dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
+ } else {
+ dst->sizeof_ErlNifResourceTypeInit = 0;
+ }
+ return lib;
+}
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
- static const char reload[] = "reload";
static const char upgrade[] = "upgrade";
char* lib_name = NULL;
void* handle = NULL;
@@ -3167,15 +3444,20 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
Eterm mod_atom;
const Atom* mod_atomp;
Eterm f_atom;
- BeamInstr* caller;
+ ErtsCodeMFA* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
struct erl_module_nif* lib = NULL;
- int reload_warning = 0;
struct erl_module_instance* this_mi;
struct erl_module_instance* prev_mi;
+ if (BIF_P->flags & F_HIPE_MODE) {
+ ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
+ "modules not supported");
+ BIF_RET(ret);
+ }
+
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
/* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
@@ -3201,12 +3483,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/* Find calling module */
ASSERT(BIF_P->current != NULL);
- ASSERT(BIF_P->current[0] == am_erlang
- && BIF_P->current[1] == am_load_nif
- && BIF_P->current[2] == 2);
+ ASSERT(BIF_P->current->module == am_erlang
+ && BIF_P->current->function == am_load_nif
+ && BIF_P->current->arity == 2);
caller = find_function_from_pc(BIF_P->cp);
ASSERT(caller != NULL);
- mod_atom = caller[0];
+ mod_atom = caller->module;
ASSERT(is_atom(mod_atom));
module_p = erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(module_p != NULL);
@@ -3232,8 +3514,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
this_mi = module_p->on_load;
}
- if (init_func == NULL &&
- (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ if (this_mi->nif != NULL) {
+ ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
+ " (reload disallowed since OTP 20).");
+ }
+ else if (init_func == NULL &&
+ (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
@@ -3261,7 +3547,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
- else if (entry->minor >= 1
+ else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
"this vm variant (%s).",
@@ -3272,20 +3558,25 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" match calling module '%T'", entry->name, mod_atom);
}
else {
- /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
- int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
- BeamInstr** code_pp;
+ lib = create_lib(entry);
+ entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
+
+ lib->handle = handle;
+ erts_refc_init(&lib->rt_cnt, 0);
+ erts_refc_init(&lib->rt_dtor_cnt, 0);
+ ASSERT(opened_rt_list == NULL);
+ lib->mod = module_p;
+
+ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
+ ErtsCodeInfo** ci_pp;
+ ErlNifFunc* f = &entry->funcs[i];
+
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
- || (code_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
+ || (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
- else if (maybe_dirty_nifs && f->flags) {
+ else if (f->flags) {
/*
* If the flags field is non-zero and this emulator was
* built with dirty scheduler support, check that the flags
@@ -3302,11 +3593,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
mod_atom, f->name, f->arity);
#endif
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (code_pp[1] - code_pp[0] < (5+4))
-#else
- else if (code_pp[1] - code_pp[0] < (5+3))
-#endif
+ else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
+ < BEAM_NIF_MIN_FUNC_SZ)
{
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
" in module (%T:%s/%u too small)",
@@ -3314,7 +3602,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
mod_atom, f->name, f->arity);*/
- f = next_func(entry, &incr, f);
}
}
@@ -3322,132 +3609,72 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
- /* Call load, reload or upgrade:
+ /* Call load or upgrade:
*/
-
- lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif));
- lib->handle = handle;
- lib->entry = entry;
- erts_refc_init(&lib->rt_cnt, 0);
- erts_refc_init(&lib->rt_dtor_cnt, 0);
- ASSERT(opened_rt_list == NULL);
- lib->mod = module_p;
env.mod_nif = lib;
- if (this_mi->nif != NULL) { /*************** Reload ******************/
- /*
- * Repeated load_nif calls from same Erlang module instance ("reload")
- * is deprecated and was only ment as a development feature not to
- * be used in production systems. (See warning below)
- */
- int k, old_incr = 0;
- ErlNifFunc* old_func;
- lib->priv_data = this_mi->nif->priv_data;
-
- ASSERT(this_mi->nif->entry != NULL);
- if (entry->reload == NULL) {
- ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
- goto error;
- }
- /* Check that no NIF is removed */
- old_func = this_mi->nif->entry->funcs;
- for (k=0; k < this_mi->nif->entry->num_of_funcs; k++) {
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == f->arity
- && sys_strcmp(old_func->name, f->name) == 0) {
- break;
- }
- f = next_func(entry, &incr, f);
- }
- if (i == entry->num_of_funcs) {
- ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
- "function %T:%s/%u\r\n", mod_atom,
- old_func->name, old_func->arity);
- goto error;
- }
- old_func = next_func(this_mi->nif->entry, &old_incr, old_func);
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful (%d).", veto);
- }
- else {
- commit_opened_resource_types(lib);
- this_mi->nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(this_mi->nif);
- reload_warning = 1;
- }
+
+ lib->priv_data = NULL;
+ if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
+ void* prev_old_data = prev_mi->nif->priv_data;
+ if (entry->upgrade == NULL) {
+ ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ goto error;
+ }
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ prev_mi->nif->priv_data = prev_old_data;
+ ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
+ }
}
- else {
- lib->priv_data = NULL;
- if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
- void* prev_old_data = prev_mi->nif->priv_data;
- if (entry->upgrade == NULL) {
- ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
- goto error;
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- prev_mi->nif->priv_data = prev_old_data;
- ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
- }
- else
- commit_opened_resource_types(lib);
- }
- else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
- }
- else
- commit_opened_resource_types(lib);
- }
+ else if (entry->load != NULL) { /********* Initial load ***********/
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
+ }
}
if (ret == am_ok) {
+ commit_opened_resource_types(lib);
+
/*
** Everything ok, patch the beam code with op_call_nif
*/
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
-
this_mi->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
- BeamInstr* code_ptr;
+ ErlNifFunc* f = &entry->funcs[i];
+ ErtsCodeInfo* ci;
+ BeamInstr *code_ptr;
+
erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ code_ptr = erts_codeinfo_to_code(ci);
- if (code_ptr[1] == 0) {
- code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
+ if (ci->native == 0) {
+ code_ptr[0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- GenericBp* g = (GenericBp *) code_ptr[1];
- ASSERT(code_ptr[5+0] ==
+ GenericBp* g = (GenericBp *) ci->native;
+ ASSERT(code_ptr[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
- code_ptr[5+3] = (BeamInstr) f->fptr;
- code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
- (BeamInstr) schedule_dirty_io_nif :
- (BeamInstr) schedule_dirty_cpu_nif;
+ if (f->flags) {
+ code_ptr[3] = (BeamInstr) f->fptr;
+ code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) static_schedule_dirty_io_nif :
+ (BeamInstr) static_schedule_dirty_cpu_nif;
}
else
#endif
- code_ptr[5+1] = (BeamInstr) f->fptr;
- code_ptr[5+2] = (BeamInstr) lib;
- f = next_func(entry, &incr, f);
+ code_ptr[1] = (BeamInstr) f->fptr;
+ code_ptr[2] = (BeamInstr) lib;
}
}
else {
@@ -3468,14 +3695,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
- if (reload_warning) {
- erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Repeated calls to erlang:load_nif from module '%T'.\n\n"
- "The NIF reload mechanism is deprecated and must not "
- "be used in production systems.\n", mod_atom);
- erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
- }
BIF_RET(ret);
}
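
The per-function flags consumed by the loop above come from the library's ErlNifFunc table. A sketch of such a table (hypothetical module and function names); a non-zero flag makes the loader patch the call_nif instruction to dispatch through static_schedule_dirty_io_nif/static_schedule_dirty_cpu_nif as shown in the hunk:

#include <erl_nif.h>

static ERL_NIF_TERM fast_op(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{ return argv[0]; }

static ERL_NIF_TERM slow_io(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{ return argv[0]; }   /* would block on I/O in a real library */

static ErlNifFunc nif_funcs[] = {
    {"fast_op", 1, fast_op, 0},                          /* normal scheduler */
    {"slow_io", 1, slow_io, ERL_NIF_DIRTY_JOB_IO_BOUND}  /* dirty I/O scheduler */
};

ERL_NIF_INIT(my_mod, nif_funcs, NULL, NULL, NULL, NULL)
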
@@ -3525,7 +3744,8 @@ erts_unload_nif(struct erl_module_nif* lib)
void erl_nif_init()
{
- ERTS_CT_ASSERT((offsetof(ErlNifResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
+ ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
+ == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
resource_type_list.next = &resource_type_list;
resource_type_list.prev = &resource_type_list;
@@ -3539,8 +3759,8 @@ void erl_nif_init()
int erts_nif_get_funcs(struct erl_module_nif* mod,
ErlNifFunc **funcs)
{
- *funcs = mod->entry->funcs;
- return mod->entry->num_of_funcs;
+ *funcs = mod->entry.funcs;
+ return mod->entry.num_of_funcs;
}
Eterm erts_nif_call_function(Process *p, Process *tracee,
@@ -3551,10 +3771,10 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#ifdef DEBUG
/* Verify that function is part of this module */
int i;
- for (i = 0; i < mod->entry->num_of_funcs; i++)
- if (fun == &(mod->entry->funcs[i]))
+ for (i = 0; i < mod->entry.num_of_funcs; i++)
+ if (fun == &(mod->entry.funcs[i]))
break;
- ASSERT(i < mod->entry->num_of_funcs);
+ ASSERT(i < mod->entry.num_of_funcs);
if (p)
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
|| erts_smp_thr_progress_is_blocking());
@@ -3576,6 +3796,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
clear_offheap(&MSO(p));
erts_pre_nif(&env, p, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&env, argc, argv);
if (env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3598,6 +3821,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
so we create a phony one. */
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env.env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
if (msg_env.env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3662,6 +3888,55 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size)
#endif /* READONLY_CHECK */
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
+ int nr, const char* type, const char* func)
+{
+ Uint saved_used_size;
+ Eterm* real_htop;
+
+ if (is_immed(term)
+ || (is_non_value(term) && env->exception_thrown)
+ || erts_is_literal(term, ptr_val(term)))
+ return;
+
+ if (env->dbg_disable_assert_in_env) {
+ /*
+ * Trace nifs may cheat as built terms are discarded after return.
+ * ToDo: Check if 'term' is part of argv[].
+ */
+ return;
+ }
+
+ if (env->heap_frag) {
+ ASSERT(env->heap_frag == MBUF(env->proc));
+ ASSERT(env->hp >= env->heap_frag->mem);
+ ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
+ saved_used_size = env->heap_frag->used_size;
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ real_htop = NULL;
+ }
+ else {
+ real_htop = env->hp;
+ }
+ if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
+ fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
+ if (nr) {
+ fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.",
+ nr, type);
+ }
+ else {
+ fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
+ }
+ fprintf(stderr, "\r\nABORTING\r\n");
+ abort();
+ }
+ if (env->heap_frag) {
+ env->heap_frag->used_size = saved_used_size;
+ }
+}
+#endif
+
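
For illustration, the class of bug this helper is meant to catch (hypothetical NIF; it only aborts in a debug emulator built with ERTS_NIF_ASSERT_IN_ENV): a boxed term built in one environment used while building a term in another.

#include <erl_nif.h>

static ERL_NIF_TERM wrong_env(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifEnv* other = enif_alloc_env();
    /* A float is heap-allocated, so it is neither an immediate nor a
     * literal and really does belong to 'other'. */
    ERL_NIF_TERM leaked = enif_make_double(other, 3.14);

    /* Mixing environments: with these assertions compiled into the term
     * constructors the emulator aborts here and reports which argument is
     * not from the same ErlNifEnv; without them it may crash much later.
     * ('other' is leaked as well -- the bug is intentional.) */
    return enif_make_tuple2(env, leaked, argv[0]);
}
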
#ifdef HAVE_USE_DTRACE
#define MESSAGE_BUFSIZ 1024
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 494971e118..ac45f3ac81 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -49,10 +49,10 @@
** 2.8: 18.0 add enif_has_pending_exception
** 2.9: 18.2 enif_getenv
** 2.10: Time API
-** 2.11: 19.0 enif_snprintf
+** 2.11: 19.0 enif_snprintf
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 11
+#define ERL_NIF_MINOR_VERSION 12
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -116,12 +116,16 @@ typedef struct enif_entry_t
int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
+
+ /* Added in 2.1 */
const char* vm_variant;
- unsigned options;
-}ErlNifEntry;
-/* Field bits for ErlNifEntry options */
-#define ERL_NIF_DIRTY_NIF_OPTION 1
+ /* Added in 2.7 */
+ unsigned options; /* Unused. Can be set to 0 or 1 (dirty sched config) */
+
+ /* Added in 2.12 */
+ size_t sizeof_ErlNifResourceTypeInit;
+}ErlNifEntry;
typedef struct
@@ -134,8 +138,18 @@ typedef struct
void* ref_bin;
}ErlNifBinary;
-typedef struct enif_resource_type_t ErlNifResourceType;
-typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef void* ErlNifEvent; /* FIXME: Use 'HANDLE' somehow without breaking existing source */
+#else
+typedef int ErlNifEvent;
+#endif
+
+/* Return bits from enif_select: */
+#define ERL_NIF_SELECT_STOP_CALLED (1 << 0)
+#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
+#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
+#define ERL_NIF_SELECT_FAILED (1 << 3)
+
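
The return bits above report what enif_select() did; the request flags (ERL_NIF_SELECT_READ/WRITE/STOP) come from the ErlNifSelectFlags enum, which this hunk does not show. A caller sketch (hypothetical resource type wrapping a POSIX file descriptor; not from this patch):

#include <erl_nif.h>

typedef struct { int fd; } FdRes;
static ErlNifResourceType* fd_res_type;  /* assumed opened with a 'stop' callback */

static ERL_NIF_TERM await_readable(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    FdRes* r;
    ErlNifPid self;
    int rv;

    /* argv[0]: the resource; argv[1]: a reference made by the caller. */
    if (argc != 2 || !enif_get_resource(env, argv[0], fd_res_type, (void**) &r))
        return enif_make_badarg(env);

    enif_self(env, &self);
    rv = enif_select(env, r->fd, ERL_NIF_SELECT_READ, r, &self, argv[1]);
    if (rv & (ERL_NIF_SELECT_INVALID_EVENT | ERL_NIF_SELECT_FAILED))
        return enif_make_atom(env, "error");
    /* The VM later sends {select, Resource, Ref, ready_input} to 'self'. */
    return enif_make_atom(env, "ok");
}
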
typedef enum
{
ERL_NIF_RT_CREATE = 1,
@@ -157,6 +171,19 @@ typedef struct
ERL_NIF_TERM port_id; /* internal, may change */
}ErlNifPort;
+typedef ErlDrvMonitor ErlNifMonitor;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef void ErlNifResourceStop(ErlNifEnv*, void*, ErlNifEvent, int is_direct_call);
+typedef void ErlNifResourceDown(ErlNifEnv*, void*, ErlNifPid*, ErlNifMonitor*);
+
+typedef struct {
+ ErlNifResourceDtor* dtor;
+ ErlNifResourceStop* stop; /* at ERL_NIF_SELECT_STOP event */
+ ErlNifResourceDown* down; /* enif_monitor_process */
+} ErlNifResourceTypeInit;
+
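
A sketch of how a library fills in this struct and registers the type (hypothetical names; enif_open_resource_type_x() is declared in the api_funcs hunk below). A NULL member means the corresponding callback is not supported by the type, e.g. enif_monitor_process() above returns -1 when 'down' is NULL.

#include <erl_nif.h>
#include <unistd.h>   /* close() */

static ErlNifResourceType* fd_res_type;

static void fd_dtor(ErlNifEnv* env, void* obj) { /* free library-owned memory */ }

static void fd_stop(ErlNifEnv* env, void* obj, ErlNifEvent e, int is_direct_call)
{ close((int) e); }   /* ErlNifEvent is a file descriptor on POSIX systems */

static void fd_down(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon)
{ /* a process monitored via enif_monitor_process() terminated */ }

static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ErlNifResourceTypeInit init = { fd_dtor, fd_stop, fd_down };
    fd_res_type = enif_open_resource_type_x(env, "fd_res", &init,
                                            ERL_NIF_RT_CREATE, NULL);
    return fd_res_type ? 0 : -1;
}
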
typedef ErlDrvSysInfo ErlNifSysInfo;
typedef struct ErlDrvTid_ *ErlNifTid;
@@ -266,8 +293,6 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
#endif
-#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
-
#ifdef __cplusplus
}
# define ERL_NIF_INIT_PROLOGUE extern "C" {
@@ -293,7 +318,8 @@ ERL_NIF_INIT_DECL(NAME) \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
ERL_NIF_VM_VARIANT, \
- ERL_NIF_ENTRY_OPTIONS \
+ 1, \
+ sizeof(ErlNifResourceTypeInit) \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 9a8f216773..01d9e386ed 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -175,6 +175,11 @@ ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsign
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
ERL_NIF_API_FUNC_DECL(int,enif_thread_type,(void));
ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_select,(ErlNifEnv* env, ErlNifEvent e, enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, ERL_NIF_TERM ref));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type_x,(ErlNifEnv*, const char* name_str, const ErlNifResourceTypeInit*, ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(int, enif_monitor_process,(ErlNifEnv*,void* obj,const ErlNifPid*,ErlDrvMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_demonitor_process,(ErlNifEnv*,void* obj,const ErlDrvMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_compare_monitors,(const ErlNifMonitor*,const ErlNifMonitor*));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
@@ -332,6 +337,11 @@ ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
# define enif_thread_type ERL_NIF_API_FUNC_MACRO(enif_thread_type)
# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
+# define enif_select ERL_NIF_API_FUNC_MACRO(enif_select)
+# define enif_open_resource_type_x ERL_NIF_API_FUNC_MACRO(enif_open_resource_type_x)
+# define enif_monitor_process ERL_NIF_API_FUNC_MACRO(enif_monitor_process)
+# define enif_demonitor_process ERL_NIF_API_FUNC_MACRO(enif_demonitor_process)
+# define enif_compare_monitors ERL_NIF_API_FUNC_MACRO(enif_compare_monitors)
/*
** ADD NEW ENTRIES HERE (before this comment)
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 0c76c6fe7d..e27f1d121a 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -255,19 +255,16 @@ extern ErtsPTab erts_port;
* Refs *
\* */
+#define internal_ref_no_numbers(x) ERTS_REF_NUMBERS
+#define internal_ref_numbers(x) (is_internal_ordinary_ref((x)) \
+ ? internal_ordinary_ref_numbers((x)) \
+ : (ASSERT(is_internal_magic_ref((x))), \
+ internal_magic_ref_numbers((x))))
#if defined(ARCH_64)
-#define internal_ref_no_of_numbers(x) \
- (internal_ref_data((x))[0])
-#define internal_thing_ref_no_of_numbers(thing) \
- (internal_thing_ref_data(thing)[0])
-#define internal_ref_numbers(x) \
- (&internal_ref_data((x))[1])
-#define internal_thing_ref_numbers(thing) \
- (&internal_thing_ref_data(thing)[1])
-#define external_ref_no_of_numbers(x) \
+#define external_ref_no_numbers(x) \
(external_ref_data((x))[0])
-#define external_thing_ref_no_of_numbers(thing) \
+#define external_thing_ref_no_numbers(thing) \
(external_thing_ref_data(thing)[0])
#define external_ref_numbers(x) \
(&external_ref_data((x))[1])
@@ -277,12 +274,8 @@ extern ErtsPTab erts_port;
#else
-#define internal_ref_no_of_numbers(x) (internal_ref_data_words((x)))
-#define internal_thing_ref_no_of_numbers(t) (internal_thing_ref_data_words(t))
-#define internal_ref_numbers(x) (internal_ref_data((x)))
-#define internal_thing_ref_numbers(t) (internal_thing_ref_data(t))
-#define external_ref_no_of_numbers(x) (external_ref_data_words((x)))
-#define external_thing_ref_no_of_numbers(t) (external_thing_ref_data_words((t)))
+#define external_ref_no_numbers(x) (external_ref_data_words((x)))
+#define external_thing_ref_no_numbers(t) (external_thing_ref_data_words((t)))
#define external_ref_numbers(x) (external_ref_data((x)))
#define external_thing_ref_numbers(t) (external_thing_ref_data((t)))
@@ -299,15 +292,9 @@ extern ErtsPTab erts_port;
#define internal_ref_channel_no(x) (internal_channel_no((x)))
#define external_ref_channel_no(x) (external_channel_no((x)))
-#define ref_data_words(x) (is_internal_ref((x)) \
- ? internal_ref_data_words((x)) \
- : external_ref_data_words((x)))
-#define ref_data(x) (is_internal_ref((x)) \
- ? internal_ref_data((x)) \
- : external_ref_data((x)))
-#define ref_no_of_numbers(x) (is_internal_ref((x)) \
- ? internal_ref_no_of_numbers((x))\
- : external_ref_no_of_numbers((x)))
+#define ref_no_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_no_numbers((x))\
+ : external_ref_no_numbers((x)))
#define ref_numbers(x) (is_internal_ref((x)) \
? internal_ref_numbers((x)) \
: external_ref_numbers((x)))
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 467a05f950..e2072fe30f 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1126,12 +1126,12 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
- case REFC_BINARY_SUBTAG:
- if(IsMatchProgBinary(u.pb->val)) {
+ case REF_SUBTAG:
+ if(IsMatchProgBinary(u.mref->mb)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == u.pb->val) {
+ if(ib->bin_val == (Binary *) u.mref->mb) {
insert_bin = 0;
break;
}
@@ -1140,18 +1140,19 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
Uint *hp = &id_heap[0];
InsertedBin *nib;
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
- erts_match_prog_foreach_offheap(u.pb->val,
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.mref->mb);
+ erts_match_prog_foreach_offheap((Binary *) u.mref->mb,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = u.pb->val;
+ nib->bin_val = (Binary *) u.mref->mb;
nib->next = inserted_bins;
inserted_bins = nib;
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
}
}
break;
+ case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
break; /* No need to */
default:
@@ -1165,8 +1166,8 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
{
Eterm *idp = p;
- if(is_external(monitor->pid))
- insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
+ if(monitor->type != MON_NIF_TARGET && is_external(monitor->u.pid))
+ insert_node(external_thing_ptr(monitor->u.pid)->node, MONITOR_REF, *idp);
if(is_external(monitor->ref))
insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
}
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index c59b42cdae..206078903d 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -733,7 +733,7 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
if (lock_pdl && prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
-#if ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_CHECK
if (!ERTS_IS_CRASH_DUMPING) {
if (erts_lc_is_emu_thr()) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 3102e44c11..4836b9e2d3 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -936,7 +936,7 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
#ifdef ERTS_SMP
- if (runq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
erts_non_empty_runq(runq);
#endif
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 2a6bd165a3..e3550e878e 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -188,13 +188,11 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 1
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
#endif
- );
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
#endif
}
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 1a579704a8..6b64bbd2f1 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -382,12 +382,15 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
break;
}
case REF_DEF:
+ if (!ERTS_IS_CRASH_DUMPING)
+ erts_magic_ref_save_bin(obj);
+ /* fall through... */
case EXTERNAL_REF_DEF:
PRINT_STRING(res, fn, arg, "#Ref<");
PRINT_UWORD(res, fn, arg, 'u', 0, 1,
(ErlPfUWord) ref_channel_no(wobj));
ref_num = ref_numbers(wobj);
- for (i = ref_no_of_numbers(wobj)-1; i >= 0; i--) {
+ for (i = ref_no_numbers(wobj)-1; i >= 0; i--) {
PRINT_CHAR(res, fn, arg, '.');
PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) ref_num[i]);
}
@@ -526,8 +529,8 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(wobj) + 1));
- Atom* module = atom_tab(atom_val(ep->code[0]));
- Atom* name = atom_tab(atom_val(ep->code[1]));
+ Atom* module = atom_tab(atom_val(ep->info.mfa.module));
+ Atom* name = atom_tab(atom_val(ep->info.mfa.function));
PRINT_STRING(res, fn, arg, "#Fun<");
PRINT_BUF(res, fn, arg, module->name, module->len);
@@ -535,7 +538,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
PRINT_BUF(res, fn, arg, name->name, name->len);
PRINT_CHAR(res, fn, arg, '.');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
- (ErlPfSWord) ep->code[2]);
+ (ErlPfSWord) ep->info.mfa.arity);
PRINT_CHAR(res, fn, arg, '>');
}
break;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 485949c84a..52d9f9ddf7 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -48,6 +48,7 @@
#include "erl_bif_unique.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_nfunc_sched.h"
#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -106,9 +107,33 @@
#define LOW_BIT (1 << PRIORITY_LOW)
#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL)
-#define ERTS_EMPTY_RUNQ(RQ) \
- ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \
- && (RQ)->misc.start == NULL)
+#define ERTS_IS_RUNQ_EMPTY_FLGS(FLGS) \
+ (!((FLGS) & (ERTS_RUNQ_FLGS_QMASK|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(FLGS) \
+ (!((FLGS) & (PORT_BIT|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+static ERTS_INLINE int
+runq_got_work_to_execute_flags(Uint32 flags)
+{
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ return !ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(flags);
+ return !ERTS_IS_RUNQ_EMPTY_FLGS(flags);
+}
+
+#ifdef ERTS_SMP
+static ERTS_INLINE int
+runq_got_work_to_execute(ErtsRunQueue *rq)
+{
+ return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
+}
+#endif
#undef RUNQ_READ_RQ
#undef RUNQ_SET_RQ
@@ -140,9 +165,6 @@ do { \
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
#endif
-#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
- (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
-
const Process erts_invalid_process = {{ERTS_INVALID_PID}};
extern BeamInstr beam_apply[];
@@ -193,170 +215,145 @@ static ErtsAuxWorkData *aux_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3)
-typedef enum {
- ERTS_SCHED_NORMAL,
- ERTS_SCHED_DIRTY_CPU,
- ERTS_SCHED_DIRTY_IO
-} ErtsSchedType;
-
typedef struct {
int ongoing;
ErtsProcList *blckrs;
ErtsProcList *chngq;
} ErtsMultiSchedulingBlock;
+typedef struct {
+ Uint32 normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Uint32 dirty_cpu;
+ Uint32 dirty_io;
+#endif
+} ErtsSchedTypeCounters;
+
static struct {
erts_smp_mtx_t mtx;
- Uint32 online;
- Uint32 curr_online;
- Uint32 active;
+ ErtsSchedTypeCounters online;
+ ErtsSchedTypeCounters curr_online;
+ ErtsSchedTypeCounters active;
erts_smp_atomic32_t changing;
ErtsProcList *chngq;
Eterm changer;
ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */
ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedType last_msb_dirty_type;
+#endif
} schdlr_sspnd;
-#define ERTS_SCHDLR_SSPND_S_BITS 10
-#define ERTS_SCHDLR_SSPND_DCS_BITS 11
-#define ERTS_SCHDLR_SSPND_DIS_BITS 11
-
-#define ERTS_SCHDLR_SSPND_S_MASK ((1 << ERTS_SCHDLR_SSPND_S_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DCS_MASK ((1 << ERTS_SCHDLR_SSPND_DCS_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DIS_MASK ((1 << ERTS_SCHDLR_SSPND_DIS_BITS)-1)
-
-#define ERTS_SCHDLR_SSPND_S_SHIFT 0
-#define ERTS_SCHDLR_SSPND_DCS_SHIFT (ERTS_SCHDLR_SSPND_S_SHIFT \
- + ERTS_SCHDLR_SSPND_S_BITS)
-#define ERTS_SCHDLR_SSPND_DIS_SHIFT (ERTS_SCHDLR_SSPND_DCS_SHIFT \
- + ERTS_SCHDLR_SSPND_DCS_BITS)
-
-#if (ERTS_SCHDLR_SSPND_S_BITS \
- + ERTS_SCHDLR_SSPND_DCS_BITS \
- + ERTS_SCHDLR_SSPND_DIS_BITS) > 32
-# error Wont fit in Uint32
-#endif
+static void init_scheduler_suspend(void);
-#if (ERTS_MAX_NO_OF_SCHEDULERS-1) > ERTS_SCHDLR_SSPND_S_MASK
-# error Max no schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS > ERTS_SCHDLR_SSPND_DCS_MASK
-# error Max no dirty cpu schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS > ERTS_SCHDLR_SSPND_DIS_MASK
-# error Max no dirty io schedulers wont fit in its bit-field
+static ERTS_INLINE Uint32
+schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p)
+{
+ int res = val1p->normal == val2p->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ res &= val1p->dirty_cpu == val2p->dirty_cpu;
+ res &= val1p->dirty_io == val2p->dirty_io;
#endif
-
-#define ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(S, DCS, DIS) \
- ((((Uint32) (((S) & ERTS_SCHDLR_SSPND_S_MASK))-1) \
- << ERTS_SCHDLR_SSPND_S_SHIFT) \
- | ((((Uint32) ((DCS) & ERTS_SCHDLR_SSPND_DCS_MASK)) \
- << ERTS_SCHDLR_SSPND_DCS_SHIFT)) \
- | ((((Uint32) ((DIS) & ERTS_SCHDLR_SSPND_DIS_MASK)) \
- << ERTS_SCHDLR_SSPND_DIS_SHIFT)))
-
-static void init_scheduler_suspend(void);
+ return res;
+}
static ERTS_INLINE Uint32
-schdlr_sspnd_get_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
- Uint32 res = (Uint32) (*valp);
switch (type) {
case ERTS_SCHED_NORMAL:
- res >>= ERTS_SCHDLR_SSPND_S_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_S_MASK;
- res++;
- break;
+ return valp->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- res >>= ERTS_SCHDLR_SSPND_DCS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DCS_MASK;
- break;
+ return valp->dirty_cpu;
case ERTS_SCHED_DIRTY_IO:
- res >>= ERTS_SCHDLR_SSPND_DIS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DIS_MASK;
- break;
+ return valp->dirty_io;
+#else
+ case ERTS_SCHED_DIRTY_CPU:
+ case ERTS_SCHED_DIRTY_IO:
+ return 0;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return 0;
}
+}
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp)
+{
+ Uint32 res = valp->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ res += valp->dirty_cpu;
+ res += valp->dirty_io;
+#endif
return res;
}
static ERTS_INLINE void
-schdlr_sspnd_dec_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
ASSERT(schdlr_sspnd_get_nscheds(valp, type) > 0);
switch (type) {
case ERTS_SCHED_NORMAL:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal--;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu--;
break;
case ERTS_SCHED_DIRTY_IO:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io--;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
}
static ERTS_INLINE void
-schdlr_sspnd_inc_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_SCHEDULERS-1);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal++;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu++;
break;
case ERTS_SCHED_DIRTY_IO:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io++;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
}
static ERTS_INLINE void
-schdlr_sspnd_set_nscheds(Uint32 *valp, ErtsSchedType type, Uint32 no)
+schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type, Uint32 no)
{
- Uint32 val = *valp;
-
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(no > 0);
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_S_MASK)
- << ERTS_SCHDLR_SSPND_S_SHIFT);
- val |= (((no-1) & ((Uint32) ERTS_SCHDLR_SSPND_S_MASK))
- << ERTS_SCHDLR_SSPND_S_SHIFT);
+ valp->normal = no;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK)
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK))
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
+ valp->dirty_cpu = no;
break;
case ERTS_SCHED_DIRTY_IO:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK)
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK))
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
+ valp->dirty_io = no;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
-
- *valp = val;
}
static struct {
@@ -447,11 +444,10 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#endif
typedef enum {
- ERTS_PSTT_GC, /* Garbage Collect */
+ ERTS_PSTT_GC_MAJOR, /* Garbage Collect: Fullsweep */
+ ERTS_PSTT_GC_MINOR, /* Garbage Collect: Generational */
ERTS_PSTT_CPC, /* Check Process Code */
-#ifdef ERTS_NEW_PURGE_STRATEGY
ERTS_PSTT_CLA, /* Copy Literal Area */
-#endif
ERTS_PSTT_COHMQ, /* Change off heap message queue */
ERTS_PSTT_FTMQ /* Flush trace msg queue */
} ErtsProcSysTaskType;
@@ -810,15 +806,28 @@ erts_late_init_process(void)
}
+#define ERTS_SCHED_WTIME_IDLE ~((Uint64) 0)
+
static void
-init_sched_wall_time(ErtsSchedWallTime *swtp)
+init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
{
- swtp->need = erts_sched_balance_util;
- swtp->enabled = 0;
- swtp->start = 0;
- swtp->working.total = 0;
- swtp->working.start = 0;
- swtp->working.currently = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (esdp->type != ERTS_SCHED_NORMAL) {
+ erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0);
+ esdp->sched_wall_time.enabled = 1;
+ esdp->sched_wall_time.start = time_stamp;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+ }
+ else
+#endif
+ {
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
+ esdp->sched_wall_time.enabled = 0;
+ esdp->sched_wall_time.start = 0;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = 0;
+ }
}
static ERTS_INLINE Uint64
@@ -1009,31 +1018,145 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-static ERTS_INLINE void
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+typedef struct {
+ Uint64 working;
+ Uint64 total;
+} ErtsDirtySchedWallTime;
+
+static void
+read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info)
+{
+ erts_aint32_t mod1;
+ Uint64 working, start, ts;
+
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+
+ while (1) {
+ erts_aint32_t mod2;
+
+ /* Spin until values are not written... */
+ while (1) {
+ if ((mod1 & 1) == 0)
+ break;
+ ERTS_SPIN_BODY;
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ }
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ working = esdp->sched_wall_time.working.total;
+ start = esdp->sched_wall_time.working.start;
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ mod2 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ if (mod1 == mod2)
+ break;
+ mod1 = mod2;
+ }
+
+ ts = sched_wall_time_ts();
+ ts -= esdp->sched_wall_time.start;
+
+ info->total = ts;
+
+ if (start == ERTS_SCHED_WTIME_IDLE || ts < start)
+ info->working = working;
+ else
+ info->working = working + (ts - start);
+
+ if (info->working > info->total)
+ info->working = info->total;
+}
+
+#endif
+
+#ifdef ERTS_SMP
+
+static void
+dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
+{
+ erts_aint32_t mod;
+ Uint64 ts = sched_wall_time_ts();
+
+ ts -= esdp->sched_wall_time.start;
+
+ /*
+ * This thread is the only thread writing in
+ * this sched_wall_time struct. We set 'mod' to
+ * an odd value while writing...
+ */
+ mod = erts_atomic32_read_dirty(&esdp->sched_wall_time.u.mod);
+ ASSERT((mod & 1) == 0);
+ mod++;
+
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+
+ if (working) {
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
+
+ esdp->sched_wall_time.working.start = ts;
+
+ }
+ else {
+ Uint64 total;
+
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
+
+ total = esdp->sched_wall_time.working.total;
+ total += ts - esdp->sched_wall_time.working.start;
+
+ esdp->sched_wall_time.working.total = total;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+
+
+ }
+
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ mod++;
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+
+#if 0
+ if (!working) {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ }
+#endif
+}
+
+#endif /* ERTS_SMP */
+
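
The odd/even 'mod' counter used above is a single-writer seqlock: the writer makes the counter odd while the two wall-time words are updated, and readers (read_dirty_sched_wall_time() earlier in the hunk) retry until they observe the same even value before and after their loads. A generic sketch of the pattern, using C11 atomics in place of the erts_atomic32/ERTS_THR_*_MEMORY_BARRIER primitives (not ERTS code):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { atomic_uint seq; uint64_t working; uint64_t start; } SeqSample;

static void writer_update(SeqSample* d, uint64_t working, uint64_t start)
{
    unsigned seq = atomic_load_explicit(&d->seq, memory_order_relaxed);
    atomic_store_explicit(&d->seq, seq + 1, memory_order_relaxed); /* odd: write in progress */
    atomic_thread_fence(memory_order_seq_cst);                     /* full barrier, as in ERTS */
    d->working = working;
    d->start = start;
    atomic_thread_fence(memory_order_seq_cst);
    atomic_store_explicit(&d->seq, seq + 2, memory_order_relaxed); /* even: consistent again */
}

static void reader_snapshot(SeqSample* d, uint64_t* working, uint64_t* start)
{
    unsigned s1, s2;
    do {
        do {                                                        /* spin past odd values */
            s1 = atomic_load_explicit(&d->seq, memory_order_relaxed);
        } while (s1 & 1);
        atomic_thread_fence(memory_order_seq_cst);
        *working = d->working;
        *start = d->start;
        atomic_thread_fence(memory_order_seq_cst);
        s2 = atomic_load_explicit(&d->seq, memory_order_relaxed);
    } while (s1 != s2);                                             /* retry if the writer interfered */
}
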
+static void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.need) {
+ if (esdp->sched_wall_time.u.need) {
Uint64 ts = sched_wall_time_ts();
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
- update_avg_sched_util(esdp, ts, working);
+ update_avg_sched_util(esdp, ts, working);
#endif
if (esdp->sched_wall_time.enabled) {
if (working) {
-#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
esdp->sched_wall_time.working.start = ts;
}
else {
-#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
ts -= esdp->sched_wall_time.working.start;
esdp->sched_wall_time.working.total += ts;
+#ifdef DEBUG
+ esdp->sched_wall_time.working.start
+ = ERTS_SCHED_WTIME_IDLE;
+#endif
}
}
}
@@ -1050,15 +1173,19 @@ typedef struct {
int enable;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int want_dirty_cpu;
+ int want_dirty_io;
+#endif
} ErtsSchedWallTimeReq;
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsSystemCheckReq;
@@ -1074,6 +1201,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(screq,
5,
ERTS_ALC_T_SYS_CHECK_REQ)
+
static void
reply_sched_wall_time(void *vswtrp)
{
@@ -1097,23 +1225,23 @@ reply_sched_wall_time(void *vswtrp)
#endif
if (swtrp->set) {
if (!swtrp->enable && esdp->sched_wall_time.enabled) {
- esdp->sched_wall_time.need = erts_sched_balance_util;
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
}
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- esdp->sched_wall_time.need = 1;
+ esdp->sched_wall_time.u.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
esdp->sched_wall_time.working.start = 0;
- esdp->sched_wall_time.working.currently = 1;
}
}
if (esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- ASSERT(esdp->sched_wall_time.working.currently);
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
total = ts;
ts -= esdp->sched_wall_time.working.start;
@@ -1124,30 +1252,117 @@ reply_sched_wall_time(void *vswtrp)
hpp = NULL;
szp = &sz;
- while (1) {
- if (hpp)
- ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
- else
- *szp += REF_THING_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (esdp->sched_wall_time.enabled
+ && swtrp->req_sched == esdp->no
+ && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
+ /* Reply with info about this scheduler and all dirty schedulers... */
+ ErtsDirtySchedWallTime *dswt;
+ int ix, no_dirty_scheds, want_dcpu, want_dio, soffset;
+
+ want_dcpu = swtrp->want_dirty_cpu;
+ want_dio = swtrp->want_dirty_io;
+
+ no_dirty_scheds = 0;
+ if (want_dcpu)
+ no_dirty_scheds += erts_no_dirty_cpu_schedulers;
+ if (want_dio)
+ no_dirty_scheds += erts_no_dirty_io_schedulers;
+
+ ASSERT(no_dirty_scheds);
+
+ dswt = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDirtySchedWallTime)
+ * no_dirty_scheds);
+
+ for (ix = 0; ix < no_dirty_scheds; ix++) {
+ ErtsSchedulerData *esdp;
+ if (want_dcpu && ix < erts_no_dirty_cpu_schedulers)
+ esdp = &erts_aligned_dirty_cpu_scheduler_data[ix].esd;
+ else {
+ int dio_ix = ix - erts_no_dirty_cpu_schedulers;
+ esdp = &erts_aligned_dirty_io_scheduler_data[dio_ix].esd;
+ }
+ read_dirty_sched_wall_time(esdp, &dswt[ix]);
+ }
- if (swtrp->set)
- msg = ref_copy;
- else {
- msg = (!esdp->sched_wall_time.enabled
- ? am_notsup
- : erts_bld_tuple(hpp, szp, 3,
- make_small(esdp->no),
- erts_bld_uint64(hpp, szp, working),
- erts_bld_uint64(hpp, szp, total)));
+ soffset = erts_no_schedulers + 1;
- msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
- }
- if (hpp)
- break;
+ if (!want_dcpu) {
+ ASSERT(want_dio);
+ soffset += erts_no_dirty_cpu_schedulers;
+ }
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ ASSERT(!swtrp->set);
+
+ /* info about dirty schedulers... */
+ msg = NIL;
+ for (ix = no_dirty_scheds-1; ix >= 0; ix--) {
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(ix+soffset),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].working),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].total)),
+ msg);
+ }
+ /* info about this scheduler... */
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)),
+ msg);
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, dswt);
+ }
+ else
+#endif
+ {
+ /* Reply with info about this scheduler only... */
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ if (swtrp->set)
+ msg = ref_copy;
+ else {
+ msg = (!esdp->sched_wall_time.enabled
+ ? am_undefined
+ : erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
- mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
- szp = NULL;
- hpp = &hp;
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
}
erts_queue_message(rp, rp_locks, mp, msg, am_system);
@@ -1165,7 +1380,8 @@ reply_sched_wall_time(void *vswtrp)
}
Eterm
-erts_sched_wall_time_request(Process *c_p, int set, int enable)
+erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int want_dirty_cpu, int want_dirty_io)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
@@ -1187,6 +1403,10 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
swtrp->proc = c_p;
swtrp->ref = STORE_NC(&hp, NULL, ref);
swtrp->req_sched = esdp->no;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ swtrp->want_dirty_cpu = want_dirty_cpu;
+ swtrp->want_dirty_io = want_dirty_io;
+#endif
erts_smp_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
@@ -1224,7 +1444,7 @@ reply_system_check(void *vscrp)
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
#endif
- sz = REF_THING_SIZE;
+ sz = ERTS_REF_THING_SIZE;
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
@@ -2299,7 +2519,8 @@ static ERTS_INLINE erts_aint32_t
handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
- awdp->esdp->run_queue->halt_in_progress = 1;
+ ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING);
+
if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
int i, max = erts_ptab_max(&erts_port);
erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
@@ -2839,11 +3060,12 @@ static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t nflgs;
erts_aint32_t xflgs = 0;
do {
+ nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC);
+ nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
@@ -2865,7 +3087,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
} while (oflgs & ERTS_SSI_FLG_WAITING);
return oflgs;
}
@@ -2915,7 +3137,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
return oflgs;
}
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
}
}
@@ -3050,6 +3272,8 @@ aux_thread(void *unused)
return NULL;
}
+static void suspend_scheduler(ErtsSchedulerData *esdp);
+
#endif /* ERTS_SMP */
static void
@@ -3108,8 +3332,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working)
- sched_wall_time_change(esdp, thr_prgr_active);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 0);
+ else if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, working = thr_prgr_active);
while (1) {
ErtsMonotonicTime current_time = 0;
@@ -3215,13 +3441,17 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 1);
+ else if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
- }
+ }
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
@@ -3415,8 +3645,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_runq_lock(rq);
}
clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
#endif
if (!working)
sched_wall_time_change(esdp, working = 1);
@@ -3439,17 +3671,54 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
- nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
xflgs = oflgs;
}
}
+static ERTS_INLINE void
+ssi_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi));
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static void
+dcpu_sched_ix_suspend_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+
static void
-wake_scheduler(ErtsRunQueue *rq)
+dio_sched_ix_suspend_wake(Uint ix)
{
- ErtsSchedulerSleepInfo *ssi;
- erts_aint32_t flgs;
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+static void
+dcpu_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#if 0
+static void
+dio_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+#endif
+
+#endif
+
+static void
+wake_scheduler(ErtsRunQueue *rq)
+{
/*
* The unlocked run queue is not strictly necessary
* from a thread safety or deadlock prevention
@@ -3461,10 +3730,7 @@ wake_scheduler(ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)
|| ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- ssi = rq->scheduler->ssi;
-
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
+ ssi_wake(rq->scheduler->ssi);
}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -3511,6 +3777,13 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
} while (ssi);
}
}
+
+static void
+wake_dirty_scheduler(ErtsRunQueue *rq)
+{
+ wake_dirty_schedulers(rq, 1);
+}
+
#endif
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -3651,7 +3924,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
if (runq) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_schedulers(runq, 1);
+ wake_dirty_scheduler(runq);
else
#endif
wake_scheduler(runq);
@@ -3955,8 +4228,7 @@ suspend_run_queue(ErtsRunQueue *rq)
wake_scheduler(rq);
}
-static void scheduler_ix_resume_wake(Uint ix);
-static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
+static void nrml_sched_ix_resume_wake(Uint ix);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -3964,16 +4236,19 @@ resume_run_queue(ErtsRunQueue *rq)
int pix;
Uint32 oflgs;
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
erts_smp_runq_lock(rq);
oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq,
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
- | ERTS_RUNQ_FLG_SUSPENDED),
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC),
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- if (oflgs & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (oflgs & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
erts_aint32_t len;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
@@ -3992,10 +4267,7 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- scheduler_ix_resume_wake(rq->ix);
+ nrml_sched_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -4053,28 +4325,22 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp = NULL;
+ ErtsMigrationPath *mp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
- }
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -4083,6 +4349,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
end = rq->misc.end;
rq->misc.start = NULL;
rq->misc.end = NULL;
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
erts_smp_runq_unlock(rq);
erts_smp_runq_lock(to_rq);
@@ -4103,9 +4370,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (rq->ports.start) {
Port *prt;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -4143,15 +4407,10 @@ evacuate_run_queue(ErtsRunQueue *rq,
int notify = 0;
to_rq = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
- }
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
@@ -4207,11 +4466,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
goto handle_next_proc;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- clear_proc_dirty_queue_bit(real_proc, rq, qbit);
-#endif
-
if (ERTS_PSFLG_BOUND & real_state) {
/* Bound processes get stuck here... */
proc->next = NULL;
@@ -4225,16 +4479,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- /*
- * dirty run queues evacuate only to run
- * queue 0 during multi-scheduling blocking
- */
- to_rq = ERTS_RUNQ_IX(0);
- else
-#endif
- to_rq = mp->prio[prio].runq;
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_smp_runq_lock(to_rq);
@@ -4269,7 +4514,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
erts_smp_runq_lock(vrq);
- if (rq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING)
goto no_procs;
/*
@@ -4368,8 +4613,7 @@ check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq);
- if ((flags & (ERTS_RUNQ_FLG_NONEMPTY
- | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY)
+ if (runq_got_work_to_execute_flags(flags) & (!(flags & ERTS_RUNQ_FLG_PROTECTED)))
return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags);
else
return 0;
@@ -4437,11 +4681,9 @@ try_steal_task(ErtsRunQueue *rq)
if (!rq_locked)
erts_smp_runq_lock(rq);
- if (!res)
- res = rq->halt_in_progress ?
- !ERTS_EMPTY_RUNQ_PORTS(rq) : !ERTS_EMPTY_RUNQ(rq);
-
- return res;
+ if (res)
+ return res;
+ return runq_got_work_to_execute(rq);
}
/* Run queue balancing */
@@ -4808,7 +5050,7 @@ check_balance(ErtsRunQueue *c_rq)
sched_util_balancing = 1;
/*
* In order to avoid renaming a large amount of fields
- * we write utilization values instead of lenght values
+ * we write utilization values instead of length values
* in the 'max_len' and 'migration_limit' fields...
*/
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -5353,7 +5595,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
if (rq->waiting) {
- wake_dirty_schedulers(rq, 1);
+ wake_dirty_scheduler(rq);
}
} else
#endif
@@ -5700,7 +5942,8 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
char** daww_ptr, size_t daww_sz,
- Process *shadow_proc)
+ Process *shadow_proc,
+ Uint64 time_stamp)
{
esdp->timer_wheel = NULL;
#ifdef ERTS_SMP
@@ -5716,13 +5959,29 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp->run_queue = runq;
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
+ if (runq == ERTS_DIRTY_CPU_RUNQ)
+ esdp->type = ERTS_SCHED_DIRTY_CPU;
+ else {
+ ASSERT(runq == ERTS_DIRTY_IO_RUNQ);
+ esdp->type = ERTS_SCHED_DIRTY_IO;
+ }
ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
+ if (num == 1) {
+ /*
+ * Multi-scheduling block functionality depends
+ * on finding dirty scheduler number 1 here...
+ */
+ runq->scheduler = esdp;
+ }
}
else {
+ esdp->type = ERTS_SCHED_NORMAL;
esdp->no = (Uint) num;
ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
+ runq->scheduler = esdp;
}
esdp->dirty_shadow_process = shadow_proc;
if (shadow_proc) {
@@ -5734,6 +5993,8 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
}
#else
+ runq->scheduler = esdp;
+ esdp->run_queue = runq;
esdp->no = (Uint) num;
#endif
@@ -5746,9 +6007,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
erts_init_atom_cache_map(&esdp->atom_cache_map);
- esdp->run_queue = runq;
- esdp->run_queue->scheduler = esdp;
-
esdp->last_monotonic_time = 0;
esdp->check_time_reds = 0;
@@ -5767,7 +6025,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->reductions = 0;
- init_sched_wall_time(&esdp->sched_wall_time);
+ init_sched_wall_time(esdp, time_stamp);
erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
}
@@ -5864,7 +6122,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_smp_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- rq->halt_in_progress = 0;
rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
@@ -5982,17 +6239,18 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
- n*sizeof(ErtsAlignedSchedulerData));
+ n*sizeof(ErtsAlignedSchedulerData));
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
- NULL);
+ NULL, 0);
}
#ifdef ERTS_DIRTY_SCHEDULERS
{
+ Uint64 ts = sched_wall_time_ts();
int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
int adspix = 0;
ErtsAlignedDirtyShadowProcess *adsp =
@@ -6012,13 +6270,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_CPU_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_IO_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
}
#endif
@@ -6123,7 +6381,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_tsd_set(sched_data_key, (void *) esdp);
#endif
}
- erts_no_schedulers = 1;
erts_no_dirty_cpu_schedulers = 0;
erts_no_dirty_io_schedulers = 0;
#endif
@@ -6445,10 +6702,30 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
- if (is_normal_sched)
- running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
- else
+ if (!is_normal_sched)
running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ else {
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ /*
+ * Delay dirty GC; will be enabled automatically
+ * again by next GC...
+ */
+
+ /*
+ * No normal execution until dirty CLA or hibernation has
+ * been handled...
+ */
+ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE)));
+
+ state = erts_smp_atomic32_read_band_nob(&p->state,
+ ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+ state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ }
+#endif
+ }
a = state;
@@ -6960,15 +7237,8 @@ resume_process(Process *p, ErtsProcLocks locks)
#ifdef ERTS_SMP
-static void
-scheduler_ix_resume_wake(Uint ix)
-{
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
-}
-
-static void
-scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+static ERTS_INLINE void
+sched_resume_wake__(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
@@ -6982,9 +7252,31 @@ scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
break;
}
xflgs = oflgs;
- } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+ } while (oflgs & (ERTS_SSI_FLG_MSB_EXEC|ERTS_SSI_FLG_SUSPENDED));
}
+static void
+nrml_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static void
+dcpu_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+static void
+dio_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#endif
+
static erts_aint32_t
sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
@@ -7064,9 +7356,18 @@ static void
init_scheduler_suspend(void)
{
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
- schdlr_sspnd.online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.curr_online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.active = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
+ schdlr_sspnd.online.normal = 1;
+ schdlr_sspnd.curr_online.normal = 1;
+ schdlr_sspnd.active.normal = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.online.dirty_cpu = 0;
+ schdlr_sspnd.curr_online.dirty_cpu = 0;
+ schdlr_sspnd.active.dirty_cpu = 0;
+ schdlr_sspnd.online.dirty_io = 0;
+ schdlr_sspnd.curr_online.dirty_io = 0;
+ schdlr_sspnd.active.dirty_io = 0;
+ schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO;
+#endif
erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.chngq = NULL;
schdlr_sspnd.changer = am_false;
@@ -7127,6 +7428,252 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE int
+have_dirty_work(void)
+{
+ return !(ERTS_EMPTY_RUNQ(ERTS_DIRTY_CPU_RUNQ)
+ | ERTS_EMPTY_RUNQ(ERTS_DIRTY_IO_RUNQ));
+}
+
+#define ERTS_MSB_NONE_PRIO_BIT PORT_BIT
+
+static ERTS_INLINE Uint32
+msb_runq_prio_bit(Uint32 flgs)
+{
+ int pbit;
+
+ pbit = (int) (flgs & ERTS_RUNQ_FLGS_PROCS_QMASK);
+ if (flgs & PORT_BIT) {
+ /* rate ports as proc prio high */
+ pbit |= HIGH_BIT;
+ }
+ if (flgs & ERTS_RUNQ_FLG_MISC_OP) {
+ /* rate misc ops as proc prio normal */
+ pbit |= NORMAL_BIT;
+ }
+ if (flgs & LOW_BIT) {
+ /* rate low prio as normal (avoid starvation) */
+ pbit |= NORMAL_BIT;
+ }
+ if (!pbit)
+ pbit = (int) ERTS_MSB_NONE_PRIO_BIT;
+ else
+ pbit &= -pbit; /* least significant bit set... */
+ ASSERT(pbit);
+
+ /* High prio low value; low prio high value... */
+ return (Uint32) pbit;
+}
+
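/*
 * A standalone sketch (not part of the patch above) of how the
 * priority-bit ranking in msb_runq_prio_bit() behaves, assuming the
 * usual layout where MAX_BIT < HIGH_BIT < NORMAL_BIT < LOW_BIT <
 * PORT_BIT numerically, so a smaller returned value means more urgent
 * work. All SK_* names below are hypothetical stand-ins for the real
 * ERTS definitions.
 */
#include <stdio.h>

#define SK_MAX_BIT    (1u << 0)
#define SK_HIGH_BIT   (1u << 1)
#define SK_NORMAL_BIT (1u << 2)
#define SK_LOW_BIT    (1u << 3)
#define SK_PORT_BIT   (1u << 4)
#define SK_QMASK      (SK_MAX_BIT|SK_HIGH_BIT|SK_NORMAL_BIT|SK_LOW_BIT)
#define SK_MISC_OP    (1u << 5)
#define SK_NONE_PRIO  SK_PORT_BIT  /* "no work" sentinel (lowest priority) */

static unsigned sk_prio_bit(unsigned flgs)
{
    int pbit = (int) (flgs & SK_QMASK);
    if (flgs & SK_PORT_BIT) pbit |= SK_HIGH_BIT;   /* ports rated as high prio */
    if (flgs & SK_MISC_OP)  pbit |= SK_NORMAL_BIT; /* misc ops rated as normal */
    if (flgs & SK_LOW_BIT)  pbit |= SK_NORMAL_BIT; /* low rated as normal      */
    if (!pbit)
        return SK_NONE_PRIO;
    pbit &= -pbit;              /* keep only the least significant bit set */
    return (unsigned) pbit;
}

int main(void)
{
    /* A queue holding only low-prio processes plus a port job ranks as
     * "high" work, because the port job is rated as high priority. */
    printf("%u\n", sk_prio_bit(SK_LOW_BIT | SK_PORT_BIT)); /* 2 (SK_HIGH_BIT)   */
    /* An empty queue yields the sentinel, i.e. the least urgent value. */
    printf("%u\n", sk_prio_bit(0));                        /* 16 (SK_NONE_PRIO) */
    return 0;
}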
+static ERTS_INLINE void
+msb_runq_prio_bits(Uint32 *nrmlp, Uint32 *dcpup, Uint32 *diop)
+{
+ Uint32 flgs = ERTS_RUNQ_FLGS_GET(ERTS_RUNQ_IX(0));
+ if (flgs & ERTS_RUNQ_FLG_HALTING) {
+ /*
+ * Emulator is halting; only execute port jobs
+ * on normal scheduler. Ensure that we switch
+ * to the normal scheduler.
+ */
+ *nrmlp = HIGH_BIT;
+ *dcpup = ERTS_MSB_NONE_PRIO_BIT;
+ *diop = ERTS_MSB_NONE_PRIO_BIT;
+ }
+ else {
+ *nrmlp = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_CPU_RUNQ);
+ *dcpup = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_IO_RUNQ);
+ *diop = msb_runq_prio_bit(flgs);
+ }
+}
+
+static int
+msb_scheduler_type_switch(ErtsSchedType sched_type,
+ ErtsSchedulerData *esdp,
+ long no)
+{
+ Uint32 nrml_prio, dcpu_prio, dio_prio;
+ ErtsSchedType exec_type;
+ ErtsRunQueue *exec_rq;
+#ifdef DEBUG
+ erts_aint32_t dbg_val;
+#endif
+
+ ASSERT(schdlr_sspnd.msb.ongoing);
+
+ /*
+ * This function determines how to switch
+ * between scheduler types when multi-scheduling
+ * is blocked.
+ *
+ * If no dirty work exists, we always select
+ * execution on the normal scheduler. If nothing
+ * executes, normal scheduler 1 should be waiting
+ * in sys_schedule(); otherwise we cannot react
+ * to I/O events.
+ *
+ * We unconditionally switch back to the normal
+ * scheduler after executing dirty work in order
+ * to make sure we check for I/O...
+ */
+
+ msb_runq_prio_bits(&nrml_prio, &dcpu_prio, &dio_prio);
+
+ exec_type = ERTS_SCHED_NORMAL;
+ if (sched_type == ERTS_SCHED_NORMAL) {
+
+ /*
+ * Check priorities of work in the
+ * different run-queues and determine
+ * run-queue with highest prio job...
+ */
+
+ if ((dcpu_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & (dio_prio == ERTS_MSB_NONE_PRIO_BIT)) {
+ /*
+ * No dirty work exists; continue on the
+ * normal scheduler...
+ */
+ return 0;
+ }
+
+ if (dcpu_prio < nrml_prio) {
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ if (dio_prio < dcpu_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ else {
+ if (dio_prio < nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+
+ /*
+ * Make sure to alternate between dirty types
+ * inbetween normal execution if highest
+ * priorities are equal.
+ */
+
+ if (exec_type == ERTS_SCHED_NORMAL) {
+ if (dcpu_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else if (dio_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ else {
+ /*
+ * Normal work has higher prio than
+ * dirty work; continue on normal
+ * scheduler...
+ */
+ return 0;
+ }
+ }
+
+ ASSERT(exec_type != ERTS_SCHED_NORMAL);
+ if (dio_prio == dcpu_prio) {
+ /* Alternate between dirty types... */
+ if (schdlr_sspnd.last_msb_dirty_type == ERTS_SCHED_DIRTY_IO)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ }
+
+ ASSERT(sched_type != exec_type);
+
+ if (exec_type != ERTS_SCHED_NORMAL)
+ schdlr_sspnd.last_msb_dirty_type = exec_type;
+ else {
+ erts_aint32_t calls;
+ /*
+ * Going back to normal scheduler after
+ * dirty execution; make sure it will check
+ * for I/O...
+ */
+ if (ERTS_USE_MODIFIED_TIMING())
+ calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1;
+ else
+ calls = INPUT_REDUCTIONS + 1;
+ erts_smp_atomic32_set_nob(&function_calls, calls);
+
+ if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT)
+ | (dio_prio != ERTS_MSB_NONE_PRIO_BIT))) {
+ /*
+ * We have dirty work, but an empty
+ * normal run-queue.
+ *
+ * Since the normal run-queue is
+ * empty, the normal scheduler will
+ * go to sleep when selected for
+ * execution. We have dirty work to
+ * do, so we only want it to check
+ * I/O, and then come back here and
+ * switch to dirty execution.
+ *
+ * To prevent the scheduler from going
+ * to sleep we trick it into believing
+ * it has work to do...
+ */
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0),
+ ERTS_RUNQ_FLG_MISC_OP);
+ }
+ }
+
+ /*
+ * Suspend this scheduler and wake up scheduler
+ * number one of another type...
+ */
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_bset_mb(&esdp->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+
+ switch (exec_type) {
+ case ERTS_SCHED_NORMAL:
+ exec_rq = ERTS_RUNQ_IX(0);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ exec_rq = ERTS_DIRTY_CPU_RUNQ;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ exec_rq = ERTS_DIRTY_IO_RUNQ;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ exec_rq = NULL;
+ break;
+ }
+
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_SUSPENDED);
+
+ wake_scheduler(exec_rq);
+
+ return 1; /* suspend this scheduler... */
+
+}
+
+#endif
+
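/*
 * A standalone sketch (hypothetical names, not part of the patch) of
 * the selection rule msb_scheduler_type_switch() applies when the
 * normal scheduler decides whether to hand execution over to a dirty
 * scheduler. A numerically smaller priority-bit value means more
 * urgent work and SK_NONE_PRIO marks an empty queue; dirty work of
 * equal priority to normal work still gets a turn, and a tie between
 * the two dirty types is broken by alternating with the previously
 * chosen dirty type so neither starves.
 */
#define SK_NONE_PRIO 16u  /* hypothetical "no work" sentinel */

enum sk_sched_type { SK_NORMAL, SK_DIRTY_CPU, SK_DIRTY_IO };

static enum sk_sched_type
sk_pick_exec_type(unsigned nrml, unsigned dcpu, unsigned dio,
                  enum sk_sched_type *last_dirty)
{
    enum sk_sched_type pick;

    if (dcpu == SK_NONE_PRIO && dio == SK_NONE_PRIO)
        return SK_NORMAL;              /* no dirty work; stay on normal */

    if (dcpu < nrml)
        pick = (dio < dcpu) ? SK_DIRTY_IO : SK_DIRTY_CPU;
    else if (dio < nrml)
        pick = SK_DIRTY_IO;
    else if (dcpu == nrml)
        pick = SK_DIRTY_CPU;
    else if (dio == nrml)
        pick = SK_DIRTY_IO;
    else
        return SK_NORMAL;              /* normal work is strictly more urgent */

    if (dcpu == dio)                   /* equal dirty priorities: alternate */
        pick = (*last_dirty == SK_DIRTY_IO) ? SK_DIRTY_CPU : SK_DIRTY_IO;
    *last_dirty = pick;
    return pick;
}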
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
@@ -7152,32 +7699,51 @@ suspend_scheduler(ErtsSchedulerData *esdp)
* Regardless of why a scheduler is suspended, it ends up here.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+
+#if !defined(ERTS_DIRTY_SCHEDULERS)
+
+ sched_type = ERTS_SCHED_NORMAL;
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ ASSERT(no != 1);
+
+#else
+
+ sched_type = esdp->type;
+ switch (sched_type) {
+ case ERTS_SCHED_NORMAL:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ online_flag = 0;
no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
- sched_type = ERTS_SCHED_DIRTY_CPU;
- }
- else {
- online_flag = 0;
- sched_type = ERTS_SCHED_DIRTY_IO;
- }
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return;
}
- else
-#endif
- {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
- no = esdp->no;
- sched_type = ERTS_SCHED_NORMAL;
+
+ if (erts_smp_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+ ASSERT(no == 1);
+ if (!msb_scheduler_type_switch(sched_type, esdp, no))
+ return;
+ /* Suspend and let scheduler 1 of another type execute... */
}
- ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
+#endif
- if (sched_type != ERTS_SCHED_NORMAL)
+ if (sched_type != ERTS_SCHED_NORMAL) {
erts_smp_runq_unlock(esdp->run_queue);
+ dirty_sched_wall_time_change(esdp, 0);
+ }
else {
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (no != 1)
+ evacuate_run_queue(esdp->run_queue, &sbp);
erts_smp_runq_unlock(esdp->run_queue);
@@ -7185,10 +7751,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
-
- sched_wall_time_change(esdp, 0);
-
}
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
@@ -7196,9 +7760,6 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type);
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) >= 1);
-
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
while (1) {
@@ -7220,9 +7781,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ERTS_SCHED_NORMAL) == 1) {
clr_flg = ERTS_SCHDLR_SSPND_CHNG_NMSB;
}
- else if (schdlr_sspnd.active
- == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)) {
- clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
+ else if (schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL) == 1
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU) == 0
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO) == 0) {
+ clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
}
if (clr_flg) {
@@ -7294,10 +7859,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
- if (curr_online
- && (sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing)) {
+ if (curr_online) {
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
@@ -7313,13 +7875,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
else {
- erts_aint32_t qmask;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
+ int evacuate = no == 1 ? 0 : !ERTS_EMPTY_RUNQ(esdp->run_queue);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work|qmask) {
+ if (aux_work|evacuate) {
if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
@@ -7331,7 +7891,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
- if (qmask) {
+ if (evacuate) {
erts_smp_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
erts_smp_runq_unlock(esdp->run_queue);
@@ -7446,7 +8006,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (changing) {
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& !schdlr_sspnd.msb.ongoing
- && schdlr_sspnd.online == schdlr_sspnd.active) {
+ && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online,
+ &schdlr_sspnd.active)) {
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
@@ -7461,30 +8022,27 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
- ASSERT((sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing));
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- ASSERT(!resume.msb.chngrs);
schdlr_sspnd_resume_procs(sched_type, &resume);
ASSERT(curr_online);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_active);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_sched_wall_time_change(esdp, 1);
+ else {
+ (void) erts_get_monotonic_time(esdp);
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
}
- if (sched_type == ERTS_SCHED_NORMAL)
- (void) erts_get_monotonic_time(esdp);
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -7754,10 +8312,8 @@ erts_set_schedulers_online(Process *p,
erts_sched_poke(ssi);
}
} else {
- for (ix = dirty_online; ix < dirty_no; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = dirty_online; ix < dirty_no; ix++)
+ dcpu_sched_ix_resume_wake(ix);
}
}
if (!dirty_only)
@@ -7785,19 +8341,19 @@ erts_set_schedulers_online(Process *p,
else /* if decrease */ {
#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
- ErtsSchedulerSleepInfo* ssi;
if (schdlr_sspnd.msb.ongoing) {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_sched_poke(ssi);
- }
- } else {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ erts_sched_poke(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ /*
+ * A newly suspended scheduler may have just been
+ * about to handle a task. Make sure someone takes
+ * care of such a task...
+ */
+ dcpu_sched_ix_wake(0);
}
}
if (!dirty_only)
@@ -7860,9 +8416,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
{
int resume_proc, ix, res, have_unlocked_plocks = 0;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerSleepInfo* ssi;
-#endif
ErtsMultiSchedulingBlock *msbp;
erts_aint32_t chng_flg;
int have_blckd_flg;
@@ -7909,8 +8462,9 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
erts_proclist_store_last(&msbp->blckrs, plp);
p->flags |= have_blckd_flg;
ASSERT(normal
- ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL)
- : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL)
+ : schdlr_sspnd_get_nscheds_tot(&schdlr_sspnd.active) == 1);
ASSERT(erts_proc_sched_data(p)->no == 1);
if (schdlr_sspnd.msb.ongoing)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
@@ -7928,59 +8482,41 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
ASSERT(!msbp->ongoing);
msbp->ongoing = 1;
- if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)
- || (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1)) {
- ASSERT(erts_proc_sched_data(p)->no == 1);
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->blckrs, plp);
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
- }
- else {
- erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
- chng_flg);
- change_no_used_runqs(1);
- for (ix = 1; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
- for (ix = 1; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
+ erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ chng_flg);
+ change_no_used_runqs(1);
+ for (ix = 1; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = 1; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
- }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal) {
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC);
+ erts_smp_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags,
+ ERTS_SSI_FLG_MSB_EXEC);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_suspend_wake(ix);
+ }
#endif
- wait_until_msb:
+ wait_until_msb:
- ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing));
+ ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing));
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->chngq, plp);
- resume_proc = 0;
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
- }
+ plp = proclist_create(p);
+ erts_proclist_store_last(&msbp->chngq, plp);
+ resume_proc = 0;
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
ASSERT(erts_proc_sched_data(p));
}
}
@@ -8012,27 +8548,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
}
if (!msbp->blckrs && !msbp->chngq) {
- int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
- ERTS_SCHED_NORMAL);
+ int online;
erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
chng_flg);
p->flags &= ~have_blckd_flg;
msbp->ongoing = 0;
- if (online == 1) {
- /* No normal schedulers to resume */
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1);
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~chng_flg);
-#endif
- }
- else if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
- if (plocks) {
+ if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
+ if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
change_no_used_runqs(online);
/* Resume all online run queues */
@@ -8043,19 +8571,15 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- ASSERT(!schdlr_sspnd.msb.ongoing);
+ if (!schdlr_sspnd.msb.ongoing) {
+ /* Get rid of msb-exec flag in run-queue of scheduler 1 */
+ resume_run_queue(ERTS_RUNQ_IX(0));
online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU);
- for (ix = 0; ix < online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
-
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = 0; ix < online; ix++)
+ dcpu_sched_ix_resume_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_resume_wake(ix);
}
#endif
}
@@ -8203,7 +8727,8 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
- process_main();
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
+
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Scheduler thread number %beu terminated\n",
@@ -8228,6 +8753,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers;
erts_msacc_init_thread("dirty_cpu_scheduler", no, 0);
@@ -8275,6 +8802,8 @@ sched_dirty_io_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
erts_msacc_init_thread("dirty_io_scheduler", no, 0);
@@ -8647,8 +9176,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
erts_aint32_t state;
state = erts_smp_atomic32_read_nob(&rp->state);
ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
- || !(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ || !(state & ERTS_PSFLG_RUNNING));
}
#endif
@@ -9721,7 +10249,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!is_normal_sched) {
if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED) {
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
suspend_scheduler(esdp);
}
}
@@ -9731,8 +10259,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ASSERT(is_normal_sched);
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC)) {
+ if (flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
(void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
@@ -9771,13 +10301,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (!is_normal_sched && rq->halt_in_progress) {
+ if (!is_normal_sched & !!(flags & ERTS_RUNQ_FLG_HALTING)) {
/* Wait for emulator to terminate... */
while (1)
erts_milli_sleep(1000*1000);
}
- else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if (!runq_got_work_to_execute_flags(flags)) {
/* Prepare for scheduler wait */
#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -9791,21 +10320,44 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (is_normal_sched && try_steal_task(rq))
- goto continue_check_activities_to_run;
-
- empty_runq(rq);
-
- /*
- * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
- * after trying to steal a task.
- */
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
- flags |= ERTS_RUNQ_FLG_NONEMPTY;
- goto continue_check_activities_to_run_known_flags;
+ ASSERT(!runq_got_work_to_execute(rq));
+ if (!is_normal_sched) {
+ /* Dirty scheduler */
+ if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
+ /* Go suspend... */
+ goto continue_check_activities_to_run_known_flags;
+ }
+ }
+ else {
+ /* Normal scheduler */
+ if (try_steal_task(rq))
+ goto continue_check_activities_to_run;
+ /*
+ * Check for suspend has to be done after trying
+ * to steal a task...
+ */
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if ((flags & ERTS_RUNQ_FLG_SUSPENDED)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /* If multi-scheduling is blocked and we have
+ * dirty work, suspend and let a dirty
+ * scheduler handle the work... */
+ || ((((flags & (ERTS_RUNQ_FLG_HALTING
+ | ERTS_RUNQ_FLG_MSB_EXEC))
+ == ERTS_RUNQ_FLG_MSB_EXEC))
+ && have_dirty_work())
+#endif
+ ) {
+ non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
+ /*
+ * Go suspend...
+ */
+ goto continue_check_activities_to_run_known_flags;
+ }
}
+ empty_runq(rq);
}
#endif
@@ -9857,7 +10409,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#endif
}
- if (rq->misc.start)
+ if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
#ifdef ERTS_SMP
@@ -9868,13 +10420,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
* Find a new port to run.
*/
- if (RUNQ_READ_LEN(&rq->ports.info.len)) {
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+
+ if (flags & PORT_BIT) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
if ((!erts_eager_check_io
&& have_outstanding_io
&& fcalls > 2*input_reductions)
- || rq->halt_in_progress) {
+ || (flags & ERTS_RUNQ_FLG_HALTING)) {
/*
* If we have performed more than 2*INPUT_REDUCTIONS since
* last call to erl_sys_schedule() and we still haven't
@@ -9987,6 +10541,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_EXITING))
== ERTS_PSFLG_EXITING)
& (!!is_normal_sched))
@@ -9998,7 +10553,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- != ERTS_PSFLG_SUSPENDED));
+ != ERTS_PSFLG_SUSPENDED)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ & (!(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))
+ | (!!is_normal_sched))
+#endif
+ );
+
if (run_process) {
if (state & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
@@ -10104,9 +10666,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto sunlock_sched_out_proc;
}
- ASSERT((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
- || *p->i == (BeamInstr) em_call_nif);
-
ASSERT(rq == ERTS_DIRTY_CPU_RUNQ
? (state & (ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
@@ -10143,65 +10702,70 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- if (state & (ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- /*
- * GC is normally never delayed when a process
- * is scheduled out, but might be when executing
- * hand written beam assembly in
- * prim_eval:'receive'. If GC is delayed we are
- * not allowed to execute system tasks.
- */
- if (!(p->flags & F_DELAY_GC)) {
- int cost = execute_sys_tasks(p, &state, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0
+ if (is_normal_sched) {
+
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+ /*
+ * GC is normally never delayed when a process
+ * is scheduled out, but might be when executing
+ * hand written beam assembly in
+ * prim_eval:'receive'. If GC is delayed we are
+ * not allowed to execute system tasks.
+ */
+ if (!(p->flags & F_DELAY_GC)) {
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
#ifdef ERTS_DIRTY_SCHEDULERS
- || !is_normal_sched
- || (state & ERTS_PSFLGS_DIRTY_WORK)
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ goto sched_out_proc;
#endif
- ) {
- goto sched_out_proc;
- }
- }
+ }
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
- while (1) {
- erts_aint32_t n, e;
+ while (1) {
+ erts_aint32_t n, e;
- if (((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING)) {
- goto sched_out_proc;
- }
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ && !(state & ERTS_PSFLG_EXITING)) {
+ goto sched_out_proc;
+ }
- n = e = state;
- n &= ~psflg_running_sys;
- n |= psflg_running;
+ n = e = state;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
- state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
- if (state == e) {
- state = n;
- break;
- }
+ state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
- }
- }
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
+ }
+ }
- if (ERTS_IS_GC_DESIRED(p) && !ERTS_SCHEDULER_IS_DIRTY_IO(esdp)) {
- if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
- int cost = scheduler_gc_proc(p, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0)
- goto sched_out_proc;
- }
- }
+ if (ERTS_IS_GC_DESIRED(p)) {
+ if (!(state & ERTS_PSFLG_EXITING)
+ && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ int cost = scheduler_gc_proc(p, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto sched_out_proc;
+#endif
+ }
+ }
+ }
if (proxy_p) {
free_proxy_proc(proxy_p);
@@ -10213,7 +10777,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t dstate = erts_smp_atomic32_read_nob(&p->state);
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
+ || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
+ }
+#endif
ASSERT(erts_proc_read_refc(p) > 0);
@@ -10234,9 +10804,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
static int
-notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
+ Eterm st_result, int normal_sched)
{
- Process *rp = erts_proc_lookup(st->requester);
+ Process *rp;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal_sched)
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ st->requester, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ else
+#endif
+ rp = erts_proc_lookup(st->requester);
if (rp) {
ErtsProcLocks rp_locks;
ErlOffHeap *ohp;
@@ -10284,6 +10863,11 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal_sched)
+ erts_proc_dec_refc(rp);
+#endif
}
erts_cleanup_offheap(&st->off_heap);
@@ -10439,18 +11023,23 @@ done:
}
static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
+#ifdef ERTS_DIRTY_SCHEDULERS
+static void save_dirty_task(Process *c_p, ErtsProcSysTask *st);
+#endif
static int
execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
- int garbage_collected = 0;
+ int minor_gc = 0, major_gc = 0;
erts_aint32_t state = *statep;
int reds = in_reds;
int qmask = 0;
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
+ ErtsProcSysTaskType type;
ErtsProcSysTask *st;
int st_prio;
Eterm st_res;
@@ -10468,18 +11057,34 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
if (!st)
break;
- switch (st->type) {
- case ERTS_PSTT_GC:
+ type = st->type;
+
+ switch (type) {
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
reds--;
}
else {
- if (!garbage_collected) {
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ if (!minor_gc
+ || (!major_gc && type == ERTS_PSTT_GC_MAJOR)) {
+ if (type == ERTS_PSTT_GC_MAJOR) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ }
reds -= scheduler_gc_proc(c_p, reds);
- garbage_collected = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+#endif
+ if (type == ERTS_PSTT_GC_MAJOR)
+ minor_gc = major_gc = 1;
+ else
+ minor_gc = 1;
}
st_res = am_true;
}
@@ -10493,7 +11098,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
fcalls = reds - CONTEXT_REDS;
st_res = erts_check_process_code(c_p,
st->arg[0],
- unsigned_val(st->arg[1]),
&cpc_reds,
fcalls);
reds -= cpc_reds;
@@ -10504,27 +11108,36 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
break;
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA: {
int fcalls;
int cla_reds = 0;
+ int do_gc;
+
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
fcalls = reds;
else
fcalls = reds - CONTEXT_REDS;
- st_res = erts_proc_copy_literal_area(c_p,
- &cla_reds,
- fcalls,
- st->arg[0] == am_true);
+ do_gc = st->arg[0] == am_true;
+ st_res = erts_proc_copy_literal_area(c_p, &cla_reds,
+ fcalls, do_gc);
reds -= cla_reds;
if (is_non_value(st_res)) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & F_DIRTY_CLA) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+#endif
/* Needed gc, but gc was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
+ break;
}
+ if (do_gc) /* We did a major gc */
+ minor_gc = major_gc = 1;
break;
}
-#endif
case ERTS_PSTT_COHMQ:
reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
@@ -10541,7 +11154,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
if (st)
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
state = erts_smp_atomic32_read_acqb(&c_p->state);
} while (qmask && reds > 0);
@@ -10569,21 +11182,29 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
Eterm st_res;
int st_prio;
- st = fetch_sys_task(c_p, state, &qmask, &st_prio);
- if (!st)
- break;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->dirty_sys_tasks) {
+ st = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st->next;
+ }
+ else
+#endif
+ {
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+ }
switch (st->type) {
- case ERTS_PSTT_GC:
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
case ERTS_PSTT_CPC:
case ERTS_PSTT_COHMQ:
st_res = am_false;
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA:
st_res = am_ok;
break;
-#endif
#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
@@ -10596,7 +11217,7 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
break;
}
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
state = erts_smp_atomic32_read_acqb(&c_p->state);
} while (qmask && reds < max_reds);
@@ -10606,6 +11227,92 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
#ifdef ERTS_DIRTY_SCHEDULERS
+void
+erts_execute_dirty_system_task(Process *c_p)
+{
+ Eterm cla_res = THE_NON_VALUE;
+ ErtsProcSysTask *stasks;
+
+ /*
+ * If multiple operations, perform them in the following
+ * order (in order to avoid unnecessary GC):
+ * 1. Copy Literal Area (implies major GC).
+ * 2. GC Hibernate (implies major GC if not woken).
+ * 3. Major GC (implies minor GC).
+ * 4. Minor GC.
+ *
+ * System task requests are handled after the actual
+ * operations have been performed...
+ */
+
+ ASSERT(!(c_p->flags & (F_DELAY_GC|F_DISABLE_GC)));
+
+ if (c_p->flags & F_DIRTY_CLA) {
+ int cla_reds = 0;
+ cla_res = erts_proc_copy_literal_area(c_p, &cla_reds, c_p->fcalls, 1);
+ ASSERT(is_value(cla_res));
+ }
+
+ if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ if (c_p->msg.len)
+ c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
+ else {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->fvalue = NIL;
+ erts_garbage_collect_hibernate(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ if (c_p->flags & F_DIRTY_MAJOR_GC)
+ c_p->flags |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg,
+ c_p->arity, c_p->fcalls);
+ }
+
+ ASSERT(!(c_p->flags & (F_DIRTY_CLA
+ | F_DIRTY_GC_HIBERNATE
+ | F_DIRTY_MAJOR_GC
+ | F_DIRTY_MINOR_GC)));
+
+ stasks = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = NULL;
+
+ while (stasks) {
+ Eterm st_res;
+ ErtsProcSysTask *st = stasks;
+ stasks = st->next;
+
+ switch (st->type) {
+ case ERTS_PSTT_CLA:
+ st_res = cla_res;
+ ASSERT(is_value(st_res));
+ break;
+ case ERTS_PSTT_GC_MAJOR:
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_GC_MINOR:
+ st_res = am_true;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Not supported dirty system task");
+ break;
+ }
+
+ (void) notify_sys_task_executed(c_p, st, st_res, 0);
+
+ }
+
+ erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+}
+
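/*
 * A standalone sketch (hypothetical names and flags, not part of the
 * patch) of the ordering rule described at the top of
 * erts_execute_dirty_system_task(): pending operations are performed
 * from the one implying the most GC work down to the least, so a
 * single pass never does a redundant collection.
 */
enum {
    SK_DO_CLA       = 1 << 0,  /* copy literal area (implies major GC)          */
    SK_DO_HIBERNATE = 1 << 1,  /* GC hibernate (implies major GC if not woken)  */
    SK_DO_MAJOR_GC  = 1 << 2,  /* major GC (implies minor GC)                   */
    SK_DO_MINOR_GC  = 1 << 3
};

static void
sk_run_dirty_ops(unsigned ops,
                 void (*copy_literals)(void),
                 void (*gc_hibernate)(void),
                 void (*garbage_collect)(int major))
{
    if (ops & SK_DO_CLA)
        copy_literals();
    if (ops & SK_DO_HIBERNATE)
        gc_hibernate();
    if (ops & (SK_DO_MAJOR_GC | SK_DO_MINOR_GC))
        garbage_collect(!!(ops & SK_DO_MAJOR_GC)); /* major also satisfies minor */
}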
static BIF_RETTYPE
dispatch_system_task(Process *c_p, erts_aint_t fail_state,
ErtsProcSysTask *st, Eterm target,
@@ -10749,17 +11456,19 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
switch (req_type) {
case am_garbage_collect:
- st->type = ERTS_PSTT_GC;
- noproc_res = am_false;
- if (!rp)
+ switch (st->arg[0]) {
+ case am_minor: st->type = ERTS_PSTT_GC_MINOR; break;
+ case am_major: st->type = ERTS_PSTT_GC_MAJOR; break;
+ default: goto badarg;
+ }
+ noproc_res = am_false;
+ if (!rp)
goto noproc;
break;
case am_check_process_code:
if (is_not_atom(st->arg[0]))
goto badarg;
- if (is_not_small(st->arg[1]) || (unsigned_val(st->arg[1]) & ~ERTS_CPC_ALL))
- goto badarg;
noproc_res = am_false;
st->type = ERTS_PSTT_CPC;
if (!rp)
@@ -10775,7 +11484,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
#endif
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case am_copy_literals:
if (st->arg[0] != am_true && st->arg[0] != am_false)
goto badarg;
@@ -10784,7 +11492,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
if (!rp)
goto noproc;
break;
-#endif
default:
goto badarg;
@@ -10808,7 +11515,7 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()");
failure = am_internal_error;
}
- notify_sys_task_executed(c_p, st, failure);
+ notify_sys_task_executed(c_p, st, failure, 1);
}
ERTS_BIF_PREP_RET(ret, am_ok);
@@ -11023,6 +11730,15 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+save_dirty_task(Process *c_p, ErtsProcSysTask *st)
+{
+ st->next = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st;
+}
+#endif
+
int
erts_set_gc_state(Process *c_p, int enable)
{
@@ -11245,6 +11961,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
non_empty_runq(rq);
#endif
+ ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
erts_smp_runq_unlock(rq);
smp_notify_inc_runq(rq);
@@ -11275,6 +11993,9 @@ exec_misc_ops(ErtsRunQueue *rq)
rq->misc.end = NULL;
}
+ if (!rq->misc.start)
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
erts_smp_runq_unlock(rq);
while (molp) {
@@ -11359,6 +12080,7 @@ static void early_init_process_struct(void *varg, Eterm data)
proc->common.id = make_internal_pid(data);
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_atomic32_init_nob(&proc->dirty_state, 0);
+ proc->dirty_sys_tasks = NULL;
#endif
erts_smp_atomic32_init_relb(&proc->state, arg->state);
@@ -11434,6 +12156,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -11492,7 +12217,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
- arg_size = size_object(args);
+ arg_size = size_object_litopt(args, &litarea);
#endif
heap_need = arg_size;
@@ -11516,10 +12241,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
}
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
-
- p->u.initial[INITIAL_MOD] = mod;
- p->u.initial[INITIAL_FUN] = func;
- p->u.initial[INITIAL_ARI] = (Uint) arity;
+
+ p->u.initial.module = mod;
+ p->u.initial.function = func;
+ p->u.initial.arity = (Uint) arity;
/*
* Must initialize binary lists here before copying binaries to process.
@@ -11561,7 +12286,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/* No need to initialize p->fcalls. */
- p->current = p->u.initial+INITIAL_MOD;
+ p->current = &p->u.initial;
p->i = (BeamInstr *) beam_apply;
p->cp = (BeamInstr *) beam_apply+1;
@@ -11574,7 +12299,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
DESTROY_SHCOPY(info);
#else
- p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+ p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea);
#endif
p->arity = 3;
@@ -11743,11 +12468,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
+ ErtsCodeMFA cmfa = {mod, func, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(p, mod, func, arity, process_name, mfa);
- DTRACE2(process_spawn, process_name, mfa);
+ dtrace_fun_decode(p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_spawn, process_name, mfa_buf);
}
#endif
return res;
@@ -11822,9 +12548,9 @@ void erts_init_empty_process(Process *p)
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
p->seq_trace_token = NIL;
- p->u.initial[0] = 0;
- p->u.initial[1] = 0;
- p->u.initial[2] = 0;
+ p->u.initial.module = 0;
+ p->u.initial.function = 0;
+ p->u.initial.arity = 0;
p->catches = 0;
p->cp = NULL;
p->i = NULL;
@@ -11863,6 +12589,7 @@ void erts_init_empty_process(Process *p)
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_atomic32_init_nob(&p->dirty_state, 0);
+ p->dirty_sys_tasks = NULL;
#endif
erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
@@ -11974,7 +12701,6 @@ delete_process(Process* p)
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
- void *nif_export;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
@@ -11991,9 +12717,7 @@ delete_process(Process* p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
- nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
- if (nif_export)
- erts_destroy_nif_export(nif_export);
+ erts_destroy_nif_export(p);
/* Cleanup psd */
@@ -12437,7 +13161,9 @@ send_exit_signal(Process *c_p, /* current process if and only
}
set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
+ else if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
ErlHeapFragment *bp = NULL;
@@ -12464,8 +13190,7 @@ send_exit_signal(Process *c_p, /* current process if and only
ErlOffHeap *ohp;
Uint rsn_sz = size_object(rsn);
#ifdef ERTS_DIRTY_SCHEDULERS
- if (state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (state & ERTS_PSFLG_DIRTY_RUNNING) {
bp = new_message_buffer(rsn_sz);
ohp = &bp->off_heap;
hp = &bp->mem[0];
@@ -12482,7 +13207,7 @@ send_exit_signal(Process *c_p, /* current process if and only
set_proc_exiting(rp, state, rsn_cpy, bp);
}
else { /* Process running... */
-
+
/*
* The pending exit will be discovered when the process
* is scheduled out if not discovered earlier.
@@ -12504,8 +13229,35 @@ send_exit_signal(Process *c_p, /* current process if and only
&bp->off_heap);
rp->pending_exit.bp = bp;
}
- erts_smp_atomic32_read_bor_relb(&rp->state,
- ERTS_PSFLG_PENDING_EXIT);
+
+ /*
+ * If no dirty work has been scheduled, pending exit will
+ * be discovered when the process is scheduled. If dirty work
+ * has been scheduled, we may need to add it to a normal run
+ * queue...
+ */
+#ifndef ERTS_DIRTY_SCHEDULERS
+ (void) erts_smp_atomic32_read_bor_relb(&rp->state,
+ ERTS_PSFLG_PENDING_EXIT);
+#else
+ {
+ erts_aint32_t a = erts_smp_atomic32_read_nob(&rp->state);
+ while (1) {
+ erts_aint32_t n, e;
+ int dwork;
+ n = e = a;
+ n |= ERTS_PSFLG_PENDING_EXIT;
+ dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK);
+ n &= ~ERTS_PSFLGS_DIRTY_WORK;
+ a = erts_smp_atomic32_cmpxchg_mb(&rp->state, n, e);
+ if (a == e) {
+ if (dwork)
+ erts_schedule_process(rp, n, *rp_locks);
+ break;
+ }
+ }
+ }
+#endif
}
}
/* else:
@@ -12571,9 +13323,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
switch (mon->type) {
case MON_ORIGIN:
/* We are monitoring someone else, we need to demonitor that one.. */
- if (is_atom(mon->pid)) { /* remote by name */
- ASSERT(is_node_name_atom(mon->pid));
- dep = erts_sysname_to_connected_dist_entry(mon->pid);
+ if (is_atom(mon->u.pid)) { /* remote by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
+ dep = erts_sysname_to_connected_dist_entry(mon->u.pid);
if (dep) {
erts_smp_de_links_lock(dep);
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
@@ -12584,7 +13336,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
+ rmon->u.pid,
mon->name,
mon->ref,
1);
@@ -12595,10 +13347,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_deref_dist_entry(dep);
}
} else {
- ASSERT(is_pid(mon->pid) || is_port(mon->pid));
+ ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid));
/* if is local by pid or name */
- if (is_internal_pid(mon->pid)) {
- Process *rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ if (is_internal_pid(mon->u.pid)) {
+ Process *rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
@@ -12608,9 +13360,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
goto done;
}
erts_destroy_monitor(rmon);
- } else if (is_internal_port(mon->pid)) {
+ } else if (is_internal_port(mon->u.pid)) {
/* Is a local port */
- Port *prt = erts_port_lookup_raw(mon->pid);
+ Port *prt = erts_port_lookup_raw(mon->u.pid);
if (!prt) {
goto done;
}
@@ -12618,8 +13370,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
prt, mon->ref, NULL);
} else { /* remote by pid */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
erts_smp_de_links_lock(dep);
@@ -12631,8 +13383,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
- mon->pid,
+ rmon->u.pid,
+ mon->u.pid,
mon->ref,
1);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -12644,22 +13396,21 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
break;
case MON_TARGET:
- ASSERT(mon->type == MON_TARGET);
- ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
- if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid);
+ ASSERT(is_pid(mon->u.pid) || is_internal_port(mon->u.pid));
+ if (is_internal_port(mon->u.pid)) {
+ Port *prt = erts_id2port(mon->u.pid);
if (prt == NULL) {
goto done;
}
erts_fire_port_monitor(prt, mon->ref);
erts_port_release(prt);
- } else if (is_internal_pid(mon->pid)) {/* local by name or pid */
+ } else if (is_internal_pid(mon->u.pid)) {/* local by name or pid */
Eterm watched;
Process *rp;
DeclareTmpHeapNoproc(lhp,3);
ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCKS_MSG_SEND);
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (rp == NULL) {
goto done;
}
@@ -12678,8 +13429,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
/* else: demonitor while we exited, i.e. do nothing... */
erts_smp_proc_unlock(rp, rp_locks);
} else { /* external by pid or name */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
erts_smp_de_links_lock(dep);
@@ -12691,10 +13442,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_m_exit(&dsd,
- mon->pid,
+ mon->u.pid,
(rmon->name != NIL
? rmon->name
- : rmon->pid),
+ : rmon->u.pid),
mon->ref,
pcontext->reason);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -12704,6 +13455,11 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
break;
+ case MON_NIF_TARGET:
+ erts_fire_nif_monitor(mon->u.resource,
+ pcontext->p->common.id,
+ mon->ref);
+ break;
case MON_TIME_OFFSET:
erts_demonitor_time_offset(mon->ref);
break;
@@ -13278,8 +14034,8 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
static void
print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA *cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -13293,7 +14049,8 @@ print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%d + %d",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
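Editor's sketch (not part of the patch): the call sites above now read cmfa->module, cmfa->function and cmfa->arity, and compute the byte offset from the start of the MFA block. The layout below is what these sites assume; ErtsCodeMFA itself is introduced elsewhere in this change set (code_ix.h), so treat this as illustrative rather than the authoritative definition.

    typedef struct {
        Eterm module;    /* tagged atom */
        Eterm function;  /* tagged atom */
        Uint  arity;     /* untagged integer */
    } ErtsCodeMFA;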
@@ -13354,6 +14111,8 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "WAITING"); break;
case ERTS_SSI_FLG_SUSPENDED:
erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_SSI_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13463,6 +14222,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "NONEMPTY"); break;
case ERTS_RUNQ_FLG_PROTECTED:
erts_print(to, to_arg, "PROTECTED"); break;
+ case ERTS_RUNQ_FLG_EXEC:
+ erts_print(to, to_arg, "EXEC"); break;
+ case ERTS_RUNQ_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
+ case ERTS_RUNQ_FLG_MISC_OP:
+ erts_print(to, to_arg, "MISC_OP"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13512,11 +14277,11 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
* A nice system halt closing all open port goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
- * 2) All shedulers detect this and set the flag halt_in_progress
+ * 2) All schedulers detect this and set the flag ERTS_RUNQ_FLG_HALTING
* on their run queue. The last scheduler sets all non-closed ports
* ERTS_PORT_SFLG_HALT. Global atomic erts_halt_progress is used
* as refcount to determine which is last.
- * 3) While the run ques has flag halt_in_progress no processes
+ * 3) While a run queue has the flag ERTS_RUNQ_FLG_HALTING set, no processes
* will be scheduled, only ports.
* 4) When the last port closes that scheduler calls erlang:halt/1.
* The same global atomic is used as refcount.
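Editor's sketch (not part of the patch): the run-queue flag replaces the old halt_in_progress field. ERTS_RUNQ_FLGS_SET and ERTS_RUNQ_FLG_HALTING come from the patch; the helper name and the way the flag word is obtained are assumptions for illustration only.

    /* Step 2 sets the flag on each run queue, as erts_halt() below does for
     * the dirty queues:  ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_HALTING);
     * Step 3 then boils down to a check like this before picking a process: */
    static int runq_is_halting(Uint32 rq_flags)   /* rq_flags: run-queue flag word */
    {
        return (rq_flags & ERTS_RUNQ_FLG_HALTING) != 0;  /* only ports run while set */
    }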
@@ -13531,8 +14296,8 @@ void erts_halt(int code)
erts_no_schedulers,
-1)) {
#ifdef ERTS_DIRTY_SCHEDULERS
- ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
- ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING);
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING);
#endif
erts_halt_code = code;
notify_reap_ports_relb();
@@ -13546,6 +14311,9 @@ erts_dbg_check_halloc_lock(Process *p)
ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
+ if ((p->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ && ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()))
+ return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
esdp = erts_proc_sched_data(p);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 68fbb10602..8bf372dad5 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -21,6 +21,8 @@
#ifndef __PROCESS_H__
#define __PROCESS_H__
+#include "sys.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -37,8 +39,6 @@
typedef struct process Process;
-#include "sys.h"
-
#define ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
@@ -93,10 +93,6 @@ struct ErtsNodesMonitor_;
#define ERTS_HEAP_FREE(Type, Ptr, Size) \
erts_free((Type), (Ptr))
-#define INITIAL_MOD 0
-#define INITIAL_FUN 1
-#define INITIAL_ARI 2
-
#include "export.h"
struct saved_calls {
@@ -174,8 +170,14 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
#define ERTS_RUNQ_FLG_EXEC \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 7))
+#define ERTS_RUNQ_FLG_MSB_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 8))
+#define ERTS_RUNQ_FLG_MISC_OP \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
+#define ERTS_RUNQ_FLG_HALTING \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 8)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 11)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -266,8 +268,9 @@ typedef enum {
#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLG_MSB_EXEC (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_FLGS_MAX 5
+#define ERTS_SSI_FLGS_MAX 6
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -278,7 +281,8 @@ typedef enum {
#define ERTS_SSI_FLGS_ALL \
(ERTS_SSI_FLGS_SLEEP \
| ERTS_SSI_FLG_WAITING \
- | ERTS_SSI_FLG_SUSPENDED)
+ | ERTS_SSI_FLG_SUSPENDED \
+ | ERTS_SSI_FLG_MSB_EXEC)
/*
* Keep ERTS_SSI_AUX_WORK flags ordered in expected frequency
@@ -397,6 +401,12 @@ typedef struct {
Process* last;
} ErtsRunPrioQueue;
+typedef enum {
+ ERTS_SCHED_NORMAL,
+ ERTS_SCHED_DIRTY_CPU,
+ ERTS_SCHED_DIRTY_IO
+} ErtsSchedType;
+
typedef struct ErtsSchedulerData_ ErtsSchedulerData;
typedef struct ErtsRunQueue_ ErtsRunQueue;
@@ -482,7 +492,6 @@ struct ErtsRunQueue_ {
erts_smp_atomic32_t len;
int wakeup_other;
int wakeup_other_reds;
- int halt_in_progress;
struct {
ErtsProcList *pending_exiters;
@@ -541,13 +550,15 @@ do { \
} while (0)
typedef struct {
- int need; /* "+sbu true" or scheduler_wall_time enabled */
+ union {
+ erts_atomic32_t mod; /* on dirty schedulers */
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
+ } u;
int enabled;
Uint64 start;
struct {
Uint64 total;
Uint64 start;
- int currently;
} working;
} ErtsSchedWallTime;
@@ -646,6 +657,7 @@ struct ErtsSchedulerData_ {
#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
+ ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
@@ -842,8 +854,8 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ((ErtsProcLocks) 0)
-#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ((ErtsProcLocks) 0)
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -946,9 +958,11 @@ struct process {
Eterm* stop; /* Stack top */
Eterm* heap; /* Heap start */
Eterm* hend; /* Heap end */
+ Eterm* abandoned_heap;
Uint heap_sz; /* Size of heap in words */
Uint min_heap_size; /* Minimum size of heap (in words). */
Uint min_vheap_size; /* Minimum size of virtual heap (in words). */
+ Uint max_heap_size; /* Maximum size of heap (in words). */
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
volatile unsigned long fp_exception;
@@ -961,16 +975,6 @@ struct process {
#endif
/*
- * Moved to after "struct hipe_process_state hipe", as a temporary fix for
- * LLVM hard-coding offsetof(struct process, hipe.nstack) (sic!)
- * (see void X86FrameLowering::adjustForHiPEPrologue(...) in
- * lib/Target/X86/X86FrameLowering.cpp).
- *
- * Used to be below "Eterm* hend".
- */
- Eterm* abandoned_heap;
-
- /*
* Saved x registers.
*/
Uint arity; /* Number of live argument registers (only valid
@@ -1023,15 +1027,16 @@ struct process {
#endif
union {
void *terminate;
- BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
- of pointer to funcinfo instruction, hence the BeamInstr datatype */
+ ErtsCodeMFA initial; /* Initial module, function, and arity;
+ often used instead of a pointer to the
+ funcinfo instruction. */
} u;
- BeamInstr* current; /* Current Erlang function, part of the funcinfo:
+ ErtsCodeMFA* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
- * arity an untagged integer). BeamInstr * because it references code
+ * arity an untagged integer).
*/
-
+
/*
* Information mainly for post-mortem use (erl crash dump).
*/
@@ -1048,7 +1053,6 @@ struct process {
Eterm *old_hend; /* Heap pointers for generational GC. */
Eterm *old_htop;
Eterm *old_heap;
- Uint max_heap_size; /* Maximum size of heap (in words). */
Uint16 gen_gcs; /* Number of (minor) generational GCs. */
Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
@@ -1063,6 +1067,9 @@ struct process {
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
ErtsProcSysTaskQs *sys_task_qs;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsProcSysTask *dirty_sys_tasks;
+#endif
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -1387,14 +1394,18 @@ extern int erts_system_profile_ts_type;
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
-#define F_ON_HEAP_MSGQ (1 << 13) /* Off heap msg queue */
+#define F_ON_HEAP_MSGQ (1 << 13) /* On heap msg queue */
#define F_OFF_HEAP_MSGQ_CHNG (1 << 14) /* Off heap msg queue changing */
#define F_ABANDONED_HEAP_USE (1 << 15) /* Have usage of abandoned heap */
#define F_DELAY_GC (1 << 16) /* Similar to disable GC (see below) */
#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
-#define F_HIPE_MODE (1 << 19)
+#define F_HIPE_MODE (1 << 19) /* Process is executing in HiPE mode */
#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
+#define F_DIRTY_CLA (1 << 21) /* Dirty copy literal area scheduled */
+#define F_DIRTY_GC_HIBERNATE (1 << 22) /* Dirty GC hibernate scheduled */
+#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
+#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1576,19 +1587,18 @@ void erts_init_scheduling(int, int
, int, int, int
#endif
);
-
+#ifdef ERTS_DIRTY_SCHEDULERS
+void erts_execute_dirty_system_task(Process *c_p);
+#endif
int erts_set_gc_state(Process *c_p, int enable);
-Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int dirty_cpu, int want_dirty_io);
Eterm erts_system_check_request(Process *c_p);
Eterm erts_gc_info_request(Process *c_p);
Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
-int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
-void erts_destroy_nif_export(void *); /* see erl_nif.c */
-int erts_check_nif_export_in_area(Process *p, char *start, Uint size);
-
ErtsProcList *erts_proclist_create(Process *);
ErtsProcList *erts_proclist_copy(ErtsProcList *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1925,6 +1935,8 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
+ERTS_GLB_INLINE void erts_schedule_dirty_sys_execution(Process *c_p);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
@@ -1934,6 +1946,34 @@ erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
if (!(state & ERTS_PSFLG_ACTIVE))
erts_schedule_process(p, state, locks);
}
+
+ERTS_GLB_INLINE void
+erts_schedule_dirty_sys_execution(Process *c_p)
+{
+ erts_aint32_t a, n, e;
+
+ a = erts_smp_atomic32_read_nob(&c_p->state);
+
+ /*
+ * Only a currently executing process schedules
+ * itself for dirty-sys execution...
+ */
+
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+ /* Don't set dirty-active-sys if we are about to exit... */
+
+ while (!(a & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))) {
+ e = a;
+ n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ a = erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break; /* dirty-active-sys set */
+ }
+}
+
#endif
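Editor's note: a short worked trace of the cmpxchg retry loop in erts_schedule_dirty_sys_execution() above, assuming a single uncontended pass; all names are from the code just added.

    /* a = erts_smp_atomic32_read_nob(&c_p->state)  -> RUNNING, not exiting,
     *                                                 DIRTY_ACTIVE_SYS clear
     * e = a                                         -> value we expect to swap out
     * n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS           -> value we want to install
     * erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e)
     *     returns e        -> a == e, the flag is set and the loop exits;
     *     returns anything else -> another thread changed the state, so the
     *     loop retries with the freshly returned value.                      */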
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 42654604cb..8311fde025 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -56,7 +56,7 @@
#define MAKE_HASH(Term) \
((is_small(Term)) ? unsigned_val(Term) : \
((is_atom(Term)) ? \
- (atom_tab(atom_val(Term))->slot.bucket.hvalue) : \
+ atom_val(Term) : \
make_internal_hash(Term)))
#define PD_SZ2BYTES(Sz) (sizeof(ProcDict) + ((Sz) - 1)*sizeof(Eterm))
@@ -408,6 +408,11 @@ static void pd_hash_erase_all(Process *p)
}
}
+Uint32 erts_pd_make_hx(Eterm key)
+{
+ return MAKE_HASH(key);
+}
+
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id)
{
unsigned int hval;
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
index b50a2af72c..ab58f3c239 100644
--- a/erts/emulator/beam/erl_process_dict.h
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -43,6 +43,7 @@ void erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
Eterm erts_pd_hash_get(struct process *p, Eterm id);
+Uint32 erts_pd_make_hx(Eterm key);
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id);
#endif
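Editor's sketch (not part of the patch): intended use of the new export, with p and key standing in for a live Process* and an Eterm key respectively; the hash is computed once and then reused by the _with_hx lookup declared above.

    Uint32 hx  = erts_pd_make_hx(key);                 /* hash the key once */
    Eterm  val = erts_pd_hash_get_with_hx(p, hx, key); /* lookup reusing hx */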
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 4a74cc2b82..77c7c5e73c 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -337,8 +337,8 @@ stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
static void
print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA* cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -350,7 +350,8 @@ print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%bpu + %bpu",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index a69185bc5c..a93f1755c8 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -123,7 +123,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i), 1);
+ "pix_lock", make_small(i));
#else
erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
@@ -1006,6 +1006,41 @@ erts_pid2proc_opt(Process *c_p,
return proc;
}
+static ERTS_INLINE
+Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
+{
+ Process *proc;
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+
+ proc = erts_proc_lookup_raw(pid);
+ if (proc) {
+ if (!allow_exit && ERTS_PROC_IS_EXITING(proc))
+ proc = NULL;
+ else
+ erts_proc_inc_refc(proc);
+ }
+
+#ifdef ERTS_SMP
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+
+ return proc;
+}
+
+Process *erts_proc_lookup_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 0);
+}
+
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 1);
+}
+
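Editor's sketch (not part of the patch): how a caller is expected to use the new lookup helpers. The matching erts_proc_dec_refc() is the existing ERTS reference-count release and is assumed here rather than shown in this hunk.

    Process *rp = erts_proc_lookup_inc_refc(pid);  /* NULL if no such live process */
    if (rp) {
        /* ... rp stays valid here without holding any process lock ... */
        erts_proc_dec_refc(rp);                    /* drop the reference when done */
    }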
void
erts_proc_lock_init(Process *p)
{
@@ -1027,39 +1062,32 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int do_lock_count = 1;
-#else
- int do_lock_count = 0;
-#endif
-
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id, do_lock_count);
+ erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id);
ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.btm.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
- do_lock_count);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
- erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id, do_lock_count);
+ erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id);
ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.trace.lc);
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 2cccf0697a..46a72fcb0c 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -219,6 +219,10 @@ typedef struct erts_proc_lock_t_ {
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
& ~ERTS_PROC_LOCK_MAIN)
+/* All locks that we must first unlock in order to lock L */
+#define ERTS_PROC_LOCKS_HIGHER_THAN(L) \
+ (ERTS_PROC_LOCKS_ALL & (~(L) & ~((L)-1)))
+
#define ERTS_PIX_LOCKS_BITS 10
#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
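Editor's note: a worked example of the bit arithmetic in ERTS_PROC_LOCKS_HIGHER_THAN, assuming L is a single lock bit (lower bits rank earlier in the locking order).

    /* For L = 0x4:
     *   (L) - 1          = 0x3
     *   ~(L) & ~((L)-1)  = ~0x7   -> every bit strictly above 0x4
     * masked with ERTS_PROC_LOCKS_ALL this selects exactly the locks ranked
     * above L, i.e. those that must be released before L may be (re)taken. */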
@@ -940,6 +944,8 @@ void erts_proc_safelock(Process *a_proc,
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
+Process *erts_proc_lookup_inc_refc(Eterm pid);
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 22830a19c4..abf2fe44f3 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -474,7 +474,7 @@ erts_ptab_init_table(ErtsPTab *ptab,
* we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
*
* In order to fix this, we insert a pointer from the table
- * to the invalid_element, wich will be interpreted as a
+ * to the invalid_element, which will be interpreted as a
* slot currently being modified. This way we will be able to
* have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
* still having a table size of the power of 2.
@@ -733,7 +733,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
* erts_ptab_list() implements BIFs listing the content of the table,
* e.g. erlang:processes/0.
*/
-static void cleanup_ptab_list_bif_data(Binary *bp);
+static int cleanup_ptab_list_bif_data(Binary *bp);
static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
@@ -771,23 +771,23 @@ erts_ptab_list(Process *c_p, ErtsPTab *ptab)
}
else {
Eterm *hp;
- Eterm magic_bin;
+ Eterm magic_ref;
ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
ERTS_BIF_PREP_YIELD2(ret_val,
&ptab_list_continue_export,
c_p,
res_acc,
- magic_bin);
+ magic_ref);
}
return ret_val;
}
-static void
+static int
cleanup_ptab_list_bif_data(Binary *bp)
{
ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
@@ -875,6 +875,8 @@ cleanup_ptab_list_bif_data(Binary *bp)
ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+
+ return 1;
}
static int
@@ -1287,9 +1289,7 @@ static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
res_acc = BIF_ARG_1;
- ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_2);
ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_ptab_list_bif_data);
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 713ed50b86..14be511f86 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1065,7 +1065,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra, 1);
+ erts_mtx_init_x(mtx, name, extra);
#endif
}
@@ -1073,7 +1073,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra, 1);
+ erts_mtx_init_locked_x(mtx, name, extra);
#endif
}
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 7d857ad326..6b25728af7 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -59,6 +59,42 @@ erts_set_literal_tag(Eterm *term, Eterm *hp_start, Eterm hsz)
#endif
}
+void
+erts_term_init(void)
+{
+#ifdef ERTS_ORDINARY_REF_MARKER
+ /* Ordinary and magic references of same size... */
+
+ ErtsRefThing ref_thing;
+
+ ERTS_CT_ASSERT(ERTS_ORDINARY_REF_MARKER == ~((Uint32)0));
+ ref_thing.m.header = ERTS_REF_THING_HEADER;
+ ref_thing.m.mb = (ErtsMagicBinary *) ~((UWord) 3);
+ ref_thing.m.next = (struct erl_off_heap_header *) ~((UWord) 3);
+ if (ref_thing.o.marker == ERTS_ORDINARY_REF_MARKER)
+ ERTS_INTERNAL_ERROR("Cannot differentiate between magic and ordinary references");
+
+ ERTS_CT_ASSERT(offsetof(ErtsORefThing,marker) != 0);
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) == sizeof(ErtsMRefThing));
+# ifdef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should not have been defined...
+# endif
+
+#else
+ /* Ordinary and magic references of different sizes... */
+
+# ifndef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should have been defined...
+# endif
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) != sizeof(ErtsMRefThing));
+
+#endif
+
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsORefThing));
+ ERTS_CT_ASSERT(ERTS_MAGIC_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsMRefThing));
+
+}
+
/*
* XXX: define NUMBER_CODE() here when new representation is used
*/
@@ -95,8 +131,8 @@ ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint32*,internal_ref_data,Wterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm,is_internal_magic_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm,is_internal_ordinary_ref);
ET_DEFINE_CHECKED(struct erl_node_*,internal_ref_node,Eterm,is_internal_ref);
ET_DEFINE_CHECKED(Eterm*,external_val,Wterm,is_external);
ET_DEFINE_CHECKED(Uint,external_data_words,Wterm,is_external);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index c3234ee349..a602a8f7c6 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -23,6 +23,8 @@
#include "erl_mmap.h"
+void erts_term_init(void);
+
typedef UWord Wterm; /* Full word terms */
struct erl_node_; /* Declared in erl_node_tables.h */
@@ -718,73 +720,224 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define ERTS_MAX_REF_NUMBERS 3
#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
-#if defined(ARCH_64)
-# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
-# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
-#else
-# define ERTS_REF_WORDS ERTS_REF_NUMBERS
-# define ERTS_REF_32BIT_WORDS ERTS_REF_NUMBERS
+#ifndef ERTS_ENDIANNESS
+# error ERTS_ENDIANNESS not defined...
#endif
-typedef struct {
- Eterm header;
- union {
- Uint32 ui32[ERTS_REF_32BIT_WORDS];
- Uint ui[ERTS_REF_WORDS];
- } data;
-} RefThing;
-
-#define REF_THING_SIZE (sizeof(RefThing)/sizeof(Uint))
-#define REF_THING_HEAD_SIZE (sizeof(Eterm)/sizeof(Uint))
+#if ERTS_REF_NUMBERS != 3
+# error "A new reference layout for 64-bit needs to be implemented..."
+#endif
-#define make_ref_thing_header(DW) \
- _make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
+struct magic_binary;
#if defined(ARCH_64)
+# define ERTS_ORDINARY_REF_MARKER (~((Uint32) 0))
+
+typedef struct {
+ Eterm header;
+#if ERTS_ENDIANNESS <= 0
+ Uint32 marker;
+#endif
+ Uint32 num[ERTS_REF_NUMBERS];
+#if ERTS_ENDIANNESS > 0
+ Uint32 marker;
+#endif
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+#if !ERTS_ENDIANNESS
+ Uint32 num[ERTS_REF_NUMBERS];
+ Uint32 marker;
+#endif
+} ErtsMRefThing;
+
/*
- * Ref layout on a 64-bit little endian machine:
+ * Ordinary ref layout on a 64-bit little endian machine:
*
* 63 31 0
* +--------------+--------------+
* | Thing word |
* +--------------+--------------+
- * | Data 0 | 32-bit arity |
+ * | Data 0 | 0xffffffff |
* +--------------+--------------+
* | Data 2 | Data 1 |
* +--------------+--------------+
*
- * Data is stored as an Uint32 array with 32-bit arity as first number.
+ * Ordinary ref layout on a 64-bit big endian machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Data 0 | Data 1 |
+ * +--------------+--------------+
+ * | Data 2 | 0xffffffff |
+ * +--------------+--------------+
+ *
+ * Magic Ref layout on a 64-bit machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Magic Binary Pointer |
+ * +--------------+--------------+
+ * | Next Off Heap Pointer |
+ * +--------------+--------------+
+ *
+ * Both pointers in the magic ref are 64-bit aligned, so their least
+ * significant bits are zero. The 32-bit marker word is placed over the
+ * least significant bits of one of the pointers, which lets us
+ * distinguish magic refs from ordinary refs by looking at the marker
+ * field.
+ *
*/
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = ERTS_REF_NUMBERS; \
- ((RefThing *) (Hp))->data.ui32[1] = (R0); \
- ((RefThing *) (Hp))->data.ui32[2] = (R1); \
- ((RefThing *) (Hp))->data.ui32[3] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->marker = ERTS_ORDINARY_REF_MARKER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
-#else
+#if ERTS_ENDIANNESS
+/* Known big or little endian */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#else /* !ERTS_ENDIANNESS */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ((ErtsMRefThing *) (Hp))->marker = 0; \
+ ((ErtsMRefThing *) (Hp))->num[0] = (Binp)->refn[0]; \
+ ((ErtsMRefThing *) (Hp))->num[1] = (Binp)->refn[1]; \
+ ((ErtsMRefThing *) (Hp))->num[2] = (Binp)->refn[2]; \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#endif /* !ERTS_ENDIANNESS */
+
+#else /* ARCH_32 */
+
+typedef struct {
+ Eterm header;
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+} ErtsMRefThing;
+
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = (R0); \
- ((RefThing *) (Hp))->data.ui32[1] = (R1); \
- ((RefThing *) (Hp))->data.ui32[2] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic(&(Binp)->refn)); \
+} while (0)
+
+#endif /* ARCH_32 */
+
+typedef union {
+ ErtsMRefThing m;
+ ErtsORefThing o;
+} ErtsRefThing;
+
+#define ERTS_REF_THING_SIZE (sizeof(ErtsORefThing)/sizeof(Uint))
+#define ERTS_MAGIC_REF_THING_SIZE (sizeof(ErtsMRefThing)/sizeof(Uint))
+#define ERTS_MAX_INTERNAL_REF_SIZE (sizeof(ErtsRefThing)/sizeof(Uint))
+
+#define make_ref_thing_header(Words) \
+ _make_header((Words)-1,_TAG_HEADER_REF)
+
+#define ERTS_REF_THING_HEADER _make_header(ERTS_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+#if defined(ARCH_64) && ERTS_ENDIANNESS /* All internal refs of same size... */
+
+# undef ERTS_MAGIC_REF_THING_HEADER
+
+# define is_ref_thing_header(x) ((x) == ERTS_REF_THING_HEADER)
+
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ ((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+
+#define is_magic_ref_thing(x) \
+ (!is_ordinary_ref_thing((x)))
+
+#define is_internal_magic_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_magic_ref_thing(boxed_val((x))))
+
+#define is_internal_ordinary_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_ordinary_ref_thing(boxed_val((x))))
+
+#else /* Ordinary and magic references of different sizes... */
+
+# define ERTS_MAGIC_REF_THING_HEADER \
+ _make_header(ERTS_MAGIC_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+# define is_ref_thing_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
+
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_REF_THING_HEADER)
+
+#define is_magic_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_magic_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_ordinary_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER)
+
#endif
-#define is_ref_thing_header(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
#define make_internal_ref(x) make_boxed((Eterm*)(x))
-#define _unchecked_ref_thing_ptr(x) \
- ((RefThing*) _unchecked_internal_ref_val(x))
-#define ref_thing_ptr(x) \
- ((RefThing*) internal_ref_val(x))
+#define _unchecked_ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) _unchecked_internal_ref_val(x))
+#define ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) internal_ref_val(x))
+
+#define _unchecked_magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) _unchecked_internal_ref_val(x))
+#define magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) internal_ref_val(x))
#define is_internal_ref(x) \
(_unchecked_is_boxed((x)) && is_ref_thing_header(*boxed_val((x))))
@@ -796,16 +949,21 @@ do { \
_ET_DECLARE_CHECKED(Eterm*,internal_ref_val,Wterm)
#define internal_ref_val(x) _ET_APPLY(internal_ref_val,(x))
-#define internal_thing_ref_data_words(t) (thing_arityval(*(Eterm*)(t)))
-#define _unchecked_internal_ref_data_words(x) \
- (_unchecked_thing_arityval(*_unchecked_internal_ref_val(x)))
-_ET_DECLARE_CHECKED(Uint,internal_ref_data_words,Wterm)
-#define internal_ref_data_words(x) _ET_APPLY(internal_ref_data_words,(x))
+#define internal_ordinary_thing_ref_numbers(ort) (((ErtsORefThing *)(ort))->num)
+#define _unchecked_internal_ordinary_ref_numbers(x) (internal_ordinary_thing_ref_numbers(_unchecked_ordinary_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm)
+#define internal_ordinary_ref_numbers(x) _ET_APPLY(internal_ordinary_ref_numbers,(x))
+
+#if defined(ARCH_64) && !ERTS_ENDIANNESS
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->num)
+#else
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->mb->refn)
+#endif
+
+#define _unchecked_internal_magic_ref_numbers(x) (internal_magic_thing_ref_numbers(_unchecked_magic_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm)
+#define internal_magic_ref_numbers(x) _ET_APPLY(internal_magic_ref_numbers,(x))
-#define internal_thing_ref_data(thing) ((thing)->data.ui32)
-#define _unchecked_internal_ref_data(x) (internal_thing_ref_data(_unchecked_ref_thing_ptr(x)))
-_ET_DECLARE_CHECKED(Uint32*,internal_ref_data,Wterm)
-#define internal_ref_data(x) _ET_APPLY(internal_ref_data,(x))
#define _unchecked_internal_ref_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
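Editor's sketch (not part of the patch): how the new predicates and accessors from the hunks above are meant to combine, assuming ref is a boxed internal reference term.

    Uint32 *num;
    if (is_internal_ordinary_ref(ref))
        num = internal_ordinary_ref_numbers(ref);  /* numbers stored in the heap thing */
    else if (is_internal_magic_ref(ref))
        num = internal_magic_ref_numbers(ref);     /* numbers taken from the magic binary
                                                      (or from the thing itself on 64-bit
                                                      builds without known endianness) */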
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index eccd49f2a9..9e75f6fee5 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -479,14 +479,9 @@ ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
- Uint16 opt, int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
- char *name,
- Eterm extra,
- int enable_lcnt);
+ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
+ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
+ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
@@ -2164,7 +2159,7 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
+erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -2174,17 +2169,13 @@ erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
- int enable_lcnt)
+erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -2194,17 +2185,14 @@ erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
+erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -2214,10 +2202,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 6aa2a7500f..cf9d3adc86 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1502,7 +1502,7 @@ static time_t gregday(int year, int month, int day)
pyear = gyear - 1;
ndays = (pyear/4) - (pyear/100) + (pyear/400) + pyear*365 + 366;
}
- /* number of days in all months preceeding month */
+ /* number of days in all months preceding month */
for (m = 1; m < month; m++)
ndays += mdays[m];
/* Extra day if leap year and March or later */
@@ -1831,7 +1831,10 @@ erts_demonitor_time_offset(Eterm ref)
ErtsMonitor *mon;
ASSERT(is_internal_ref(ref));
erts_smp_mtx_lock(&erts_get_time_mtx);
- mon = erts_remove_monitor(&time_offset_monitors, ref);
+ if (is_internal_ordinary_ref(ref))
+ mon = erts_remove_monitor(&time_offset_monitors, ref);
+ else
+ mon = NULL;
if (!mon)
res = 0;
else {
@@ -1848,7 +1851,7 @@ erts_demonitor_time_offset(Eterm ref)
typedef struct {
Eterm pid;
Eterm ref;
- Eterm heap[REF_THING_SIZE];
+ Eterm heap[ERTS_REF_THING_SIZE];
} ErtsTimeOffsetMonitorInfo;
typedef struct {
@@ -1866,14 +1869,14 @@ save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
cntxt = (ErtsTimeOffsetMonitorContext *) vcntxt;
mix = (cntxt->ix)++;
- cntxt->to_mon_info[mix].pid = mon->pid;
+ cntxt->to_mon_info[mix].pid = mon->u.pid;
to_hp = &cntxt->to_mon_info[mix].heap[0];
- ASSERT(is_internal_ref(mon->ref));
+ ASSERT(is_internal_ordinary_ref(mon->ref));
from_hp = internal_ref_val(mon->ref);
- ASSERT(thing_arityval(*from_hp) + 1 == REF_THING_SIZE);
+ ASSERT(thing_arityval(*from_hp) + 1 == ERTS_REF_THING_SIZE);
- for (hix = 0; hix < REF_THING_SIZE; hix++)
+ for (hix = 0; hix < ERTS_REF_THING_SIZE; hix++)
to_hp[hix] = from_hp[hix];
cntxt->to_mon_info[mix].ref
@@ -1933,7 +1936,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
hp = (Eterm *) (tmp + no_monitors*sizeof(ErtsTimeOffsetMonitorInfo));
hsz = 6; /* 5-tuple */
- hsz += REF_THING_SIZE;
+ hsz += ERTS_REF_THING_SIZE;
hsz += ERTS_SINT64_HEAP_SIZE(new_offset);
if (IS_SSMALL(new_offset))
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ac9e91e31f..04f3160d42 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -781,7 +781,8 @@ trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
tmp = make_small(0);
} else {
hp = HAlloc(p, 4);
- tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
+ tmp = TUPLE3(hp,p->current->module,p->current->function,
+ make_small(p->current->arity));
hp += 4;
}
@@ -1027,14 +1028,14 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
{
Eterm mfa;
- BeamInstr *code_ptr = find_function_from_pc(pc);
+ ErtsCodeMFA *cmfa = find_function_from_pc(pc);
-
- if (!code_ptr) {
+ if (!cmfa) {
mfa = am_undefined;
} else {
Eterm *hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
}
send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
@@ -1046,11 +1047,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
+erts_trace_return(Process* p, ErtsCodeMFA *mfa,
+ Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa, mod, name;
- int arity;
+ Eterm mfa_tuple;
Uint meta_flags, *tracee_flags;
ASSERT(tracer);
@@ -1084,15 +1085,13 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
tracee_flags = &meta_flags;
}
- mod = fi[0];
- name = fi[1];
- arity = fi[2];
-
hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, mod, name, make_small(arity));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function,
+ make_small(mfa->arity));
hp += 4;
send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
- NULL, TRACE_FUN_T_CALL, am_return_from, mfa, retval, am_true);
+ NULL, TRACE_FUN_T_CALL, am_return_from, mfa_tuple,
+ retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1103,7 +1102,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
* Where Class is atomic but Value is any term.
*/
void
-erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
+erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer)
{
Eterm* hp;
@@ -1142,7 +1141,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
}
hp = HAlloc(p, 7);;
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function, make_small(mfa->arity));
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
@@ -1165,7 +1164,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
* if it is a pid or port we do a meta trace.
*/
Uint32
-erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
+erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
@@ -1244,7 +1243,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* such as size_object() and copy_struct(), we must make sure that we
* temporarily convert any match contexts to sub binaries.
*/
- arity = (Eterm) mfa[2];
+ arity = info->mfa.arity;
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
@@ -1339,7 +1338,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
hp += 2;
}
}
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
+ mfa_tuple = TUPLE3(hp, info->mfa.module, info->mfa.function, mfa_tuple);
hp += 4;
/*
@@ -1462,7 +1461,8 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
}
void
-monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint time)
+monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
+ ErtsCodeMFA *out_fp, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
@@ -1493,11 +1493,13 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
tmo = erts_bld_uint(&hp, NULL, time);
if (in_fp != NULL) {
- in_mfa = TUPLE3(hp,(Eterm) in_fp[0], (Eterm) in_fp[1], make_small(in_fp[2]));
+ in_mfa = TUPLE3(hp, in_fp->module, in_fp->function,
+ make_small(in_fp->arity));
hp +=4;
}
if (out_fp != NULL) {
- out_mfa = TUPLE3(hp,(Eterm) out_fp[0], (Eterm) out_fp[1], make_small(out_fp[2]));
+ out_mfa = TUPLE3(hp, out_fp->module, out_fp->function,
+ make_small(out_fp->arity));
hp +=4;
}
tmo_tpl = TUPLE2(hp,am_timeout, tmo);
@@ -2132,7 +2134,7 @@ profile_runnable_proc(Process *p, Eterm status){
Eterm *hp, msg;
Eterm where = am_undefined;
ErlHeapFragment *bp = NULL;
- BeamInstr *current = NULL;
+ ErtsCodeMFA *cmfa = NULL;
#ifndef ERTS_SMP
#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
@@ -2154,14 +2156,14 @@ profile_runnable_proc(Process *p, Eterm status){
if (!ERTS_PROC_IS_EXITING(p)) {
if (p->current) {
- current = p->current;
+ cmfa = p->current;
} else {
- current = find_function_from_pc(p->i);
+ cmfa = find_function_from_pc(p->i);
}
}
#ifdef ERTS_SMP
- if (!current) {
+ if (!cmfa) {
hsz -= 4;
}
@@ -2169,8 +2171,10 @@ profile_runnable_proc(Process *p, Eterm status){
hp = bp->mem;
#endif
- if (current) {
- where = TUPLE3(hp, current[0], current[1], make_small(current[2])); hp += 4;
+ if (cmfa) {
+ where = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
+ hp += 4;
} else {
where = make_small(0);
}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 0095d4386b..01fe1e5e23 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -101,11 +101,11 @@ void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
void trace_send(Process*, Eterm, Eterm);
void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
+Uint32 erts_call_trace(Process *p, ErtsCodeInfo *info, struct binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
+void erts_trace_return(Process* p, ErtsCodeMFA *mfa, Eterm retval,
ErtsTracer *tracer);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
+void erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
void trace_sched(Process*, ErtsProcLocks, Eterm);
@@ -134,7 +134,8 @@ void erts_system_profile_setup_active_schedulers(void);
/* system_monitor */
void monitor_long_gc(Process *p, Uint time);
-void monitor_long_schedule_proc(Process *p, BeamInstr *in_i, BeamInstr *out_i, Uint time);
+void monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_i,
+ ErtsCodeMFA *out_i, Uint time);
void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time);
void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
@@ -142,6 +143,11 @@ Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
@@ -176,7 +182,7 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index bd5e1482fb..01629db9ad 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -123,19 +123,16 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
-static RestartContext *get_rc_from_bin(Eterm bin)
+static RestartContext *get_rc_from_bin(Eterm mref)
{
- Binary *mbp;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin));
-
- mbp = ((ProcBin *) binary_val(bin))->val;
-
+ Binary *mbp = erts_magic_ref2bin(mref);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
return (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
@@ -148,8 +145,8 @@ static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc)
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
Eterm *hp;
memcpy(restartp,rc,sizeof(RestartContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(p), mbp);
}
@@ -1890,74 +1887,57 @@ binary_to_atom(Process* proc, Eterm bin, Eterm enc, int must_exist)
byte* bytes;
byte *temp_alloc = NULL;
Uint bin_size;
+ Eterm a;
if ((bytes = erts_get_aligned_binary_bytes(bin, &temp_alloc)) == 0) {
BIF_ERROR(proc, BADARG);
}
bin_size = binary_size(bin);
+
if (enc == am_latin1) {
- Eterm a;
- if (bin_size > MAX_ATOM_CHARACTERS) {
- system_limit:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, SYSTEM_LIMIT);
- }
if (!must_exist) {
- a = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_LATIN1,
- 0);
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(a))
- goto badarg;
- BIF_RET(a);
- } else if (erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_RET(a);
- } else {
+ int lix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_LATIN1,
+ 0);
+ if (lix == ATOM_BAD_ENCODING_ERROR) {
+ badarg:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, BADARG);
+ } else if (lix == ATOM_MAX_CHARS_ERROR) {
+ system_limit:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, SYSTEM_LIMIT);
+ }
+
+ a = make_atom(lix);
+ } else if (!erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
goto badarg;
}
- } else if (enc == am_utf8 || enc == am_unicode) {
- Eterm res;
- Uint num_chars = 0;
- const byte* p = bytes;
- Uint left = bin_size;
- while (left) {
- if (++num_chars > MAX_ATOM_CHARACTERS) {
+ } else if (enc == am_utf8 || enc == am_unicode) {
+ if (!must_exist) {
+ int uix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_UTF8,
+ 0);
+ if (uix == ATOM_BAD_ENCODING_ERROR) {
+ goto badarg;
+ } else if (uix == ATOM_MAX_CHARS_ERROR) {
goto system_limit;
}
- if ((p[0] & 0x80) == 0) {
- ++p;
- --left;
- }
- else if (left >= 2
- && (p[0] & 0xFE) == 0xC2 /* only allow latin1 subset */
- && (p[1] & 0xC0) == 0x80) {
- p += 2;
- left -= 2;
- }
- else goto badarg;
- }
- if (!must_exist) {
- res = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_UTF8,
- 0);
+ a = make_atom(uix);
}
- else if (!erts_atom_get((char*)bytes, bin_size, &res, ERTS_ATOM_ENC_UTF8)) {
+ else if (!erts_atom_get((char*)bytes, bin_size, &a, ERTS_ATOM_ENC_UTF8)) {
goto badarg;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(res))
- goto badarg;
- BIF_RET(res);
} else {
- badarg:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, BADARG);
+ goto badarg;
}
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
}
BIF_RETTYPE binary_to_atom_2(BIF_ALIST_2)
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 81800752f0..47289a0af1 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -117,7 +117,6 @@ int erts_fit_in_bits_int32(Sint32);
int erts_fit_in_bits_uint(Uint);
Sint erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 899e2a275a..5366ee3644 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -36,7 +36,7 @@
#define EMULATOR "BEAM"
#define SEQ_TRACE 1
-#define CONTEXT_REDS 2000 /* Swap process out after this number */
+#define CONTEXT_REDS 4000 /* Swap process out after this number */
#define MAX_ARG 255 /* Max number of arguments allowed */
#define MAX_REG 1024 /* Max number of x(N) registers used */
@@ -159,11 +159,12 @@ typedef struct op_entry {
int sz; /* Number of loaded words. */
char* pack; /* Instructions for packing engine. */
char* sign; /* Signature string. */
- unsigned count; /* Number of times executed. */
} OpEntry;
-extern OpEntry opc[]; /* Description of all instructions. */
-extern int num_instructions; /* Number of instruction in opc[]. */
+extern const OpEntry opc[]; /* Description of all instructions. */
+extern const int num_instructions; /* Number of instructions in opc[]. */
+
+extern Uint erts_instr_count[];
/* some constants for various table sizes etc */
@@ -198,4 +199,11 @@ extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#include "erl_term.h"
+#ifdef NO_JUMP_TABLE
+#define BeamOp(Op) (Op)
+#else
+extern void** beam_ops;
+#define BeamOp(Op) beam_ops[(Op)]
+#endif
+
#endif /* __ERL_VM_H__ */
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
index 6c33b12dd0..64c08b1570 100644
--- a/erts/emulator/beam/error.h
+++ b/erts/emulator/beam/error.h
@@ -21,6 +21,8 @@
#ifndef __ERROR_H__
#define __ERROR_H__
+#include "code_ix.h"
+
/*
* There are three primary exception classes:
*
@@ -37,14 +39,11 @@
*/
/*
- * Bits 0-1 index the 'exception class tag' table.
- */
-#define EXC_CLASSBITS 3
-#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
-
-/*
* Exception class tags (indices into the 'exception_tag' array)
*/
+#define EXTAG_OFFSET 0
+#define EXTAG_BITS 2
+
#define EXTAG_ERROR 0
#define EXTAG_EXIT 1
#define EXTAG_THROWN 2
@@ -52,20 +51,31 @@
#define NUMBER_EXC_TAGS 3 /* The number of exception class tags */
/*
- * Exit code flags (bits 2-7)
+ * Index to the 'exception class tag' table.
+ */
+#define EXC_CLASSBITS ((1<<EXTAG_BITS)-1)
+#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
+
+/*
+ * Exit code flags
*
* These flags make is easier and quicker to decide what to do with the
* exception in the early stages, before a handler is found, and also
* maintains some separation between the class tag and the actions.
*/
-#define EXF_PANIC (1<<2) /* ignore catches */
-#define EXF_THROWN (1<<3) /* nonlocal return */
-#define EXF_LOG (1<<4) /* write to logger on termination */
-#define EXF_NATIVE (1<<5) /* occurred in native code */
-#define EXF_SAVETRACE (1<<6) /* save stack trace in internal form */
-#define EXF_ARGLIST (1<<7) /* has arglist for top of trace */
+#define EXF_OFFSET EXTAG_BITS
+#define EXF_BITS 7
-#define EXC_FLAGBITS 0x00fc
+#define EXF_PANIC (1<<(0+EXF_OFFSET)) /* ignore catches */
+#define EXF_THROWN (1<<(1+EXF_OFFSET)) /* nonlocal return */
+#define EXF_LOG (1<<(2+EXF_OFFSET)) /* write to logger on termination */
+#define EXF_NATIVE (1<<(3+EXF_OFFSET)) /* occurred in native code */
+#define EXF_SAVETRACE (1<<(4+EXF_OFFSET)) /* save stack trace in internal form */
+#define EXF_ARGLIST (1<<(5+EXF_OFFSET)) /* has arglist for top of trace */
+#define EXF_RESTORE_NIF (1<<(6+EXF_OFFSET)) /* restore original bif/nif */
+
+#define EXC_FLAGBITS (((1<<(EXF_BITS+EXF_OFFSET))-1) \
+ & ~((1<<(EXF_OFFSET))-1))
/*
* The primary fields of an exception code
@@ -75,11 +85,16 @@
#define NATIVE_EXCEPTION(x) ((x) | EXF_NATIVE)
/*
- * Bits 8-12 of the error code are used for indexing into
+ * Error code used for indexing into
* the short-hand error descriptor table.
*/
-#define EXC_INDEXBITS 0x1f00
-#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> 8)
+#define EXC_OFFSET (EXF_OFFSET+EXF_BITS)
+#define EXC_BITS 5
+
+#define EXC_INDEXBITS (((1<<(EXC_BITS+EXC_OFFSET))-1) \
+ & ~((1<<(EXC_OFFSET))-1))
+
+#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> EXC_OFFSET)
/*
* Exit codes used for raising a fresh exception. The primary exceptions
@@ -105,46 +120,46 @@
/* Error with given arglist term
* (exit reason in p->fvalue) */
-#define EXC_NORMAL ((1 << 8) | EXC_EXIT)
+#define EXC_NORMAL ((1 << EXC_OFFSET) | EXC_EXIT)
/* Normal exit (reason 'normal') */
-#define EXC_INTERNAL_ERROR ((2 << 8) | EXC_ERROR | EXF_PANIC)
+#define EXC_INTERNAL_ERROR ((2 << EXC_OFFSET) | EXC_ERROR | EXF_PANIC)
/* Things that shouldn't happen */
-#define EXC_BADARG ((3 << 8) | EXC_ERROR)
+#define EXC_BADARG ((3 << EXC_OFFSET) | EXC_ERROR)
/* Bad argument to a BIF */
-#define EXC_BADARITH ((4 << 8) | EXC_ERROR)
+#define EXC_BADARITH ((4 << EXC_OFFSET) | EXC_ERROR)
/* Bad arithmetic */
-#define EXC_BADMATCH ((5 << 8) | EXC_ERROR)
+#define EXC_BADMATCH ((5 << EXC_OFFSET) | EXC_ERROR)
/* Bad match in function body */
-#define EXC_FUNCTION_CLAUSE ((6 << 8) | EXC_ERROR)
+#define EXC_FUNCTION_CLAUSE ((6 << EXC_OFFSET) | EXC_ERROR)
/* No matching function head */
-#define EXC_CASE_CLAUSE ((7 << 8) | EXC_ERROR)
+#define EXC_CASE_CLAUSE ((7 << EXC_OFFSET) | EXC_ERROR)
/* No matching case clause */
-#define EXC_IF_CLAUSE ((8 << 8) | EXC_ERROR)
+#define EXC_IF_CLAUSE ((8 << EXC_OFFSET) | EXC_ERROR)
/* No matching if clause */
-#define EXC_UNDEF ((9 << 8) | EXC_ERROR)
+#define EXC_UNDEF ((9 << EXC_OFFSET) | EXC_ERROR)
/* No function of that arity matches */
-#define EXC_BADFUN ((10 << 8) | EXC_ERROR)
+#define EXC_BADFUN ((10 << EXC_OFFSET) | EXC_ERROR)
/* Not an existing fun */
-#define EXC_BADARITY ((11 << 8) | EXC_ERROR)
+#define EXC_BADARITY ((11 << EXC_OFFSET) | EXC_ERROR)
/* Attempt to call fun with
* wrong number of arguments. */
-#define EXC_TIMEOUT_VALUE ((12 << 8) | EXC_ERROR)
+#define EXC_TIMEOUT_VALUE ((12 << EXC_OFFSET) | EXC_ERROR)
/* Bad time out value */
-#define EXC_NOPROC ((13 << 8) | EXC_ERROR)
+#define EXC_NOPROC ((13 << EXC_OFFSET) | EXC_ERROR)
/* No process or port */
-#define EXC_NOTALIVE ((14 << 8) | EXC_ERROR)
+#define EXC_NOTALIVE ((14 << EXC_OFFSET) | EXC_ERROR)
/* Not distributed */
-#define EXC_SYSTEM_LIMIT ((15 << 8) | EXC_ERROR)
+#define EXC_SYSTEM_LIMIT ((15 << EXC_OFFSET) | EXC_ERROR)
/* Ran out of something */
-#define EXC_TRY_CLAUSE ((16 << 8) | EXC_ERROR)
+#define EXC_TRY_CLAUSE ((16 << EXC_OFFSET) | EXC_ERROR)
/* No matching try clause */
-#define EXC_NOTSUP ((17 << 8) | EXC_ERROR)
+#define EXC_NOTSUP ((17 << EXC_OFFSET) | EXC_ERROR)
/* Not supported */
-#define EXC_BADMAP ((18 << 8) | EXC_ERROR)
+#define EXC_BADMAP ((18 << EXC_OFFSET) | EXC_ERROR)
/* Bad map */
-#define EXC_BADKEY ((19 << 8) | EXC_ERROR)
+#define EXC_BADKEY ((19 << EXC_OFFSET) | EXC_ERROR)
/* Bad key in map */
#define NUMBER_EXIT_CODES 20 /* The number of exit code indices */
@@ -152,7 +167,7 @@
/*
* Internal pseudo-error codes.
*/
-#define TRAP (1 << 8) /* BIF Trap to erlang code */
+#define TRAP (1 << EXC_OFFSET) /* BIF Trap to erlang code */
/*
* Aliases for some common exit codes.
@@ -197,7 +212,7 @@ struct StackTrace {
Eterm header; /* bignum header - must be first in struct */
Eterm freason; /* original exception reason is saved in the struct */
BeamInstr* pc;
- BeamInstr* current;
+ ErtsCodeMFA* current;
int depth; /* number of saved pointers in trace[] */
BeamInstr *trace[1]; /* varying size - must be last in struct */
};
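
The reworked exception codes above derive every mask from field widths (2 tag bits, 7 flag bits, 5 index bits) instead of the old hard-coded 0x00fc/0x1f00 constants, which is what made room for the new EXF_RESTORE_NIF flag. A small self-contained sketch, reproducing just those macros, that packs and unpacks a synthetic exit code; the concrete values are illustrative, not the real EXC_* definitions:

    /* exc_fields.c -- sketch of the class/flags/index packing in error.h. */
    #include <stdio.h>

    #define EXTAG_BITS     2
    #define EXF_OFFSET     EXTAG_BITS
    #define EXF_BITS       7
    #define EXC_OFFSET     (EXF_OFFSET + EXF_BITS)      /* == 9 */
    #define EXC_BITS       5

    #define EXC_CLASSBITS  ((1 << EXTAG_BITS) - 1)
    #define EXC_FLAGBITS   (((1 << (EXF_BITS + EXF_OFFSET)) - 1) \
                            & ~((1 << EXF_OFFSET) - 1))
    #define EXC_INDEXBITS  (((1 << (EXC_BITS + EXC_OFFSET)) - 1) \
                            & ~((1 << EXC_OFFSET) - 1))

    #define GET_EXC_CLASS(x)  ((x) & EXC_CLASSBITS)
    #define GET_EXC_INDEX(x)  (((x) & EXC_INDEXBITS) >> EXC_OFFSET)

    int main(void)
    {
        /* hypothetical code: index 3, one flag bit set, class tag 0 */
        unsigned code = (3u << EXC_OFFSET) | (1u << (2 + EXF_OFFSET)) | 0u;

        printf("class %u, flags 0x%x, index %u\n",
               GET_EXC_CLASS(code),
               code & EXC_FLAGBITS,
               GET_EXC_INDEX(code));       /* class 0, flags 0x10, index 3 */
        return 0;
    }
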
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 00fa452f79..33ed6d7ec1 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -103,7 +103,8 @@ static HashValue
export_hash(struct export_entry* ee)
{
Export* x = ee->ep;
- return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
+ return EXPORT_HASH(x->info.mfa.module, x->info.mfa.function,
+ x->info.mfa.arity);
}
static int
@@ -111,9 +112,9 @@ export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
Export* tmpl = tmpl_e->ep;
Export* obj = obj_e->ep;
- return !(tmpl->code[0] == obj->code[0] &&
- tmpl->code[1] == obj->code[1] &&
- tmpl->code[2] == obj->code[2]);
+ return !(tmpl->info.mfa.module == obj->info.mfa.module &&
+ tmpl->info.mfa.function == obj->info.mfa.function &&
+ tmpl->info.mfa.arity == obj->info.mfa.arity);
}
@@ -130,21 +131,23 @@ export_alloc(struct export_entry* tmpl_e)
blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
obj = &blob->exp;
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
+ obj->info.op = 0;
+ obj->info.native = 0;
+ obj->info.mfa.module = tmpl->info.mfa.module;
+ obj->info.mfa.function = tmpl->info.mfa.function;
+ obj->info.mfa.arity = tmpl->info.mfa.arity;
+ obj->beam[0] = (BeamInstr) em_call_error_handler;
+ obj->beam[1] = 0;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
- obj->addressv[ix] = obj->code+3;
+ obj->addressv[ix] = obj->beam;
blob->entryv[ix].slot.index = -1;
blob->entryv[ix].ep = &blob->exp;
}
ix = 0;
+
+ DBG_TRACE_MFA_P(&obj->info.mfa, "export allocation at %p", obj);
}
else { /* Existing entry in another table, use free entry in blob */
blob = entry_to_blob(tmpl_e);
@@ -163,9 +166,12 @@ export_free(struct export_entry* obj)
obj->slot.index = -1;
for (i=0; i < ERTS_NUM_CODE_IX; i++) {
if (blob->entryv[i].slot.index >= 0) {
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export entry slot %u freed for %p",
+ (obj - blob->entryv), &blob->exp);
return;
}
}
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp);
erts_free(ERTS_ALC_T_EXPORT, blob);
erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
@@ -224,7 +230,9 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
while (b != (HashBucket*) 0) {
Export* ep = ((struct export_entry*) b)->ep;
- if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
+ if (ep->info.mfa.module == m &&
+ ep->info.mfa.function == f &&
+ ep->info.mfa.arity == a) {
return ep;
}
b = b->next;
@@ -237,9 +245,9 @@ static struct export_entry* init_template(struct export_templ* templ,
{
templ->entry.ep = &templ->exp;
templ->entry.slot.index = -1;
- templ->exp.code[0] = m;
- templ->exp.code[1] = f;
- templ->exp.code[2] = a;
+ templ->exp.info.mfa.module = m;
+ templ->exp.info.mfa.function = f;
+ templ->exp.info.mfa.arity = a;
return &templ->entry;
}
@@ -263,8 +271,8 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
if (ee == NULL ||
- (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ (ee->ep->addressv[code_ix] == ee->ep->beam &&
+ ee->ep->beam[0] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 17fc4828ca..7c812b306c 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -33,22 +33,20 @@ typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ ErtsCodeInfo info; /* MUST be just before beam[] */
+
/*
- * code[0]: Tagged atom for module.
- * code[1]: Tagged atom for function.
- * code[2]: Arity (untagged integer).
- * code[3]: This entry is 0 unless the 'address' field points to it.
+ * beam[0]: This entry is 0 unless the 'addressv' field points to it.
* Threaded code instruction to load function
* (em_call_error_handler), execute BIF (em_apply_bif),
* or a breakpoint instruction (op_i_generic_breakpoint).
- * code[4]: Function pointer to BIF function (for BIFs only),
+ * beam[1]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
* on_load function that has not been run yet, or pointer
- * to code for function code[3] is a breakpont instruction.
+ * to code if function beam[0] is a breakpoint instruction.
* Otherwise: 0.
*/
- BeamInstr code[5];
+ BeamInstr beam[2];
} Export;
@@ -74,8 +72,8 @@ extern erts_smp_mtx_t export_staging_lock;
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
- ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->beam) && \
+ ((EntryPtr)->beam[0] == (BeamInstr) em_apply_bif))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
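
With the old layout an export entry kept module, function and arity in code[0..2] and the trampoline words in code[3..4]; the new layout groups the MFA into the ErtsCodeInfo header followed by a two-word beam[] trampoline, which is what export_hash and export_cmp above now read. A rough model with simplified, hypothetical types (the real ErtsCodeInfo carries more than is shown here):

    /* export_sketch.c -- simplified model of the new Export layout.
     * Types and field names only mimic the diff; they are not the real
     * ERTS definitions. */
    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t Eterm;
    typedef uintptr_t BeamInstr;

    typedef struct { Eterm module; Eterm function; unsigned arity; } CodeMFA;
    typedef struct { BeamInstr op; void *native; CodeMFA mfa; } CodeInfo;

    typedef struct {
        CodeInfo  info;     /* MFA header, sits just before beam[] */
        BeamInstr beam[2];  /* trampoline: instruction word + extra word */
    } ExportSketch;

    /* Same test export_cmp performs: equal iff all three MFA fields match. */
    static int mfa_equal(const ExportSketch *a, const ExportSketch *b)
    {
        return a->info.mfa.module   == b->info.mfa.module &&
               a->info.mfa.function == b->info.mfa.function &&
               a->info.mfa.arity    == b->info.mfa.arity;
    }

    int main(void)
    {
        ExportSketch x = { { 0, NULL, { 11, 22, 1 } }, { 0, 0 } };
        ExportSketch y = x;
        printf("equal: %d\n", mfa_equal(&x, &y));   /* equal: 1 */
        return 0;
    }
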
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 0ef8114bac..205a7711ec 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1079,7 +1079,7 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
Eterm *tp = tuple_val(BIF_ARG_1);
Eterm Term = tp[1];
Eterm bt = tp[2];
- Binary *bin = ((ProcBin *) binary_val(bt))->val;
+ Binary *bin = erts_magic_ref2bin(bt);
Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
if (is_tuple(res)) {
ASSERT(BIF_P->flags & F_DISABLE_GC);
@@ -1398,17 +1398,18 @@ static void b2t_destroy_context(B2TContext* context)
}
}
-static void b2t_context_destructor(Binary *context_bin)
+static int b2t_context_destructor(Binary *context_bin)
{
B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
b2t_destroy_context(ctx);
+ return 1;
}
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
{
- Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
@@ -1442,8 +1443,8 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) {
ctx->u.dc.next = &ctx->u.dc.res;
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), context_b);
return ctx;
}
@@ -1808,7 +1809,7 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
#endif
#define TERM_TO_BINARY_MEMCPY_FACTOR 8
-static void ttb_context_destructor(Binary *context_bin)
+static int ttb_context_destructor(Binary *context_bin)
{
TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
if (context->alive) {
@@ -1842,6 +1843,7 @@ static void ttb_context_destructor(Binary *context_bin)
break;
}
}
+ return 1;
}
static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
@@ -1871,8 +1873,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, PROC_BIN_SIZE+3); \
- c_term = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
return res; \
@@ -2174,12 +2176,8 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
* We use this atom as sysname in local pid/port/refs
* for the ETS compressed format (DFLAG_INTERNAL_TAGS).
*
- * We used atom '' earlier but that turned out to cause problems
- * for buggy erl_interface/ic usage of c-nodes with empty node names.
- * A long atom reduces risk of nodes actually called this and the length
- * does not matter anyway as it's encoded with atom index (ATOM_INTERNAL_REF2).
*/
-#define INTERNAL_LOCAL_SYSNAME am_await_microstate_accounting_modifications
+#define INTERNAL_LOCAL_SYSNAME am_ErtsSecretAtom
static byte*
enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
@@ -2583,7 +2581,9 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ erts_magic_ref_save_bin(obj);
+
+ i = ref_no_numbers(obj);
put_int16(i, ep);
ep += 2;
ep = enc_atom(acmp, sysname, ep, dflags);
@@ -2837,9 +2837,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Export* exp = *((Export **) (export_val(obj) + 1));
if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
*ep++ = EXPORT_EXT;
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
- ep = enc_term(acmp, make_small(exp->code[2]), ep, dflags, off_heap);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
+ ep = enc_term(acmp, make_small(exp->info.mfa.arity),
+ ep, dflags, off_heap);
} else {
/* Tag, arity */
*ep++ = SMALL_TUPLE_EXT;
@@ -2847,10 +2848,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 1;
/* Module name */
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
/* Function name */
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
}
break;
}
@@ -3438,26 +3439,35 @@ dec_term_atom_common:
r0 = get_int32(ep); /* allow full word */
ep += 4;
- ref_ext_common:
+ ref_ext_common: {
+ ErtsORefThing *rtp;
+
if (ref_words > ERTS_MAX_REF_NUMBERS)
goto error;
node = dec_get_node(sysname, cre);
if(node == erts_this_node) {
- RefThing *rtp = (RefThing *) hp;
- ref_num = (Uint32 *) (hp + REF_THING_HEAD_SIZE);
+
+ rtp = (ErtsORefThing *) hp;
+ ref_num = &rtp->num[0];
+ if (ref_words != ERTS_REF_NUMBERS) {
+ int i;
+ if (ref_words > ERTS_REF_NUMBERS)
+ goto error; /* Not a ref that we created... */
+ for (i = ref_words; i < ERTS_REF_NUMBERS; i++)
+ ref_num[i] = 0;
+ }
-#if defined(ARCH_64)
- hp += REF_THING_HEAD_SIZE + ref_words/2 + 1;
- rtp->header = make_ref_thing_header(ref_words/2 + 1);
-#else
- hp += REF_THING_HEAD_SIZE + ref_words;
- rtp->header = make_ref_thing_header(ref_words);
+#ifdef ERTS_ORDINARY_REF_MARKER
+ rtp->marker = ERTS_ORDINARY_REF_MARKER;
#endif
+ hp += ERTS_REF_THING_SIZE;
+ rtp->header = ERTS_REF_THING_HEADER;
*objp = make_internal_ref(rtp);
}
else {
ExternalThing *etp = (ExternalThing *) hp;
+ rtp = NULL;
#if defined(ARCH_64)
hp += EXTERNAL_THING_HEAD_SIZE + ref_words/2 + 1;
#else
@@ -3475,12 +3485,13 @@ dec_term_atom_common:
factory->off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
- }
-
#if defined(ARCH_64)
- *(ref_num++) = ref_words /* 32-bit arity */;
+ *(ref_num++) = ref_words /* 32-bit arity */;
#endif
+ }
+
ref_num[0] = r0;
+
for(i = 1; i < ref_words; i++) {
ref_num[i] = get_int32(ep);
ep += 4;
@@ -3489,8 +3500,26 @@ dec_term_atom_common:
if ((1 + ref_words) % 2)
ref_num[ref_words] = 0;
#endif
+ if (node == erts_this_node) {
+ /* Check if it was a magic reference... */
+ ErtsMagicBinary *mb = erts_magic_ref_lookup_bin(ref_num);
+ if (mb) {
+ /*
+ * Was a magic ref; adjust it...
+ *
+ * Refc on binary was increased by lookup above...
+ */
+ ASSERT(rtp);
+ hp = (Eterm *) rtp;
+ write_magic_ref_thing(hp, factory->off_heap, mb);
+ OH_OVERHEAD(factory->off_heap,
+ mb->orig_size / sizeof(Eterm));
+ hp += ERTS_MAGIC_REF_THING_SIZE;
+ }
+ }
break;
}
+ }
case BINARY_EXT:
{
n = get_int32(ep);
@@ -3758,9 +3787,8 @@ dec_term_atom_common:
funp->arity = arity;
#ifdef HIPE
if (funp->fe->native_address == NULL) {
- hipe_set_closure_stub(funp->fe, num_free);
+ hipe_set_closure_stub(funp->fe);
}
- funp->native_address = funp->fe->native_address;
#endif
hp = factory->hp;
@@ -3832,9 +3860,6 @@ dec_term_atom_common:
funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
funp->arity = funp->fe->address[-1] - num_free;
-#ifdef HIPE
- funp->native_address = funp->fe->native_address;
-#endif
hp = factory->hp;
/* Environment */
@@ -4116,7 +4141,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
/*fall through*/
case REF_DEF:
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ i = ref_no_numbers(obj);
result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
1 + 4*i);
break;
@@ -4273,9 +4298,9 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
{
Export* ep = *((Export **) (export_val(obj) + 1));
result += 1;
- result += encode_size_struct2(acmp, ep->code[0], dflags);
- result += encode_size_struct2(acmp, ep->code[1], dflags);
- result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.module, dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.function, dflags);
+ result += encode_size_struct2(acmp, make_small(ep->info.mfa.arity), dflags);
}
break;
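
term_to_binary and binary_to_term keep their yielded state in a magic Binary; after this change the heap holds a magic ref created with erts_mk_magic_ref instead of a ProcBin, and the trap BIFs recover the context with erts_magic_ref2bin. The control flow itself is chunked work resumed across traps, sketched below with invented names and no ERTS types at all:

    /* trap_state_sketch.c -- generic model of the yield/trap pattern used by
     * term_to_binary / binary_to_term: work is done in slices, the loop state
     * lives in a heap-allocated context, and a handle to it lets the next
     * call pick up where the last one stopped.  Names are invented; this is
     * not the real magic-binary API. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        long pos;           /* where the previous slice stopped */
        long limit;         /* total amount of "work"           */
    } Ctx;

    /* Do at most `budget` units of work; return 1 when finished. */
    static int run_slice(Ctx *ctx, long budget)
    {
        long end = ctx->pos + budget;
        if (end > ctx->limit)
            end = ctx->limit;
        ctx->pos = end;
        return ctx->pos == ctx->limit;
    }

    int main(void)
    {
        Ctx *ctx = malloc(sizeof(Ctx));
        int resumptions = 0;
        ctx->pos = 0;
        ctx->limit = 1000;

        while (!run_slice(ctx, 300))   /* each iteration == one trap/resume */
            resumptions++;
        printf("finished after %d resumptions\n", resumptions);  /* 3 */

        free(ctx);   /* freed directly here; ERTS leaves that to the
                        magic binary's destructor once the refc drops */
        return 0;
    }
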
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 26aa39b65a..c4c848f49f 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -42,9 +42,16 @@
#include "erl_utils.h"
#include "erl_port.h"
#include "erl_gc.h"
+#include "erl_nif.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
struct enif_func_t;
+#ifdef DEBUG
+# define ERTS_NIF_ASSERT_IN_ENV
+#endif
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -57,16 +64,59 @@ struct enif_environment_t /* ErlNifEnv */
int exception_thrown; /* boolean */
Process *tracee;
int exiting; /* boolean (dirty nifs might return in exiting state) */
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int dbg_disable_assert_in_env;
+#endif
+};
+struct enif_resource_type_t
+{
+ struct enif_resource_type_t* next; /* list of all resource types */
+ struct enif_resource_type_t* prev;
+ struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
+ ErlNifResourceDtor* dtor; /* user destructor function */
+ ErlNifResourceStop* stop;
+ ErlNifResourceDown* down;
+ erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+ +1 for active erl_module_nif */
+ Eterm module;
+ Eterm name;
};
+
+typedef struct
+{
+ erts_smp_mtx_t lock;
+ ErtsMonitor* root;
+ int pending_failed_fire;
+ int is_dying;
+
+ size_t user_data_sz;
+} ErtsResourceMonitors;
+
+typedef struct ErtsResource_
+{
+ struct enif_resource_type_t* type;
+ ErtsResourceMonitors* monitors;
+#ifdef DEBUG
+ erts_refc_t nif_refc;
+#else
+# ifdef ARCH_32
+ byte align__[4];
+# endif
+#endif
+ char data[1];
+}ErtsResource;
+
+#define DATA_TO_RESOURCE(PTR) ErtsContainerStruct(PTR, ErtsResource, data)
+#define erts_resource_ref_size(P) ERTS_MAGIC_REF_THING_SIZE
+
+extern Eterm erts_bld_resource_ref(Eterm** hp, ErlOffHeap*, ErtsResource*);
+
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern void erts_pre_dirty_nif(ErtsSchedulerData *,
- struct enif_environment_t*, Process*,
- struct erl_module_nif*);
-extern void erts_post_dirty_nif(struct enif_environment_t* env);
-#endif
+extern void erts_resource_stop(ErtsResource*, ErlNifEvent, int is_direct_call);
+void erts_fire_nif_monitor(ErtsResource*, Eterm pid, Eterm ref);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
@@ -78,6 +128,12 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee,
struct enif_func_t *,
int argc, Eterm *argv);
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
#define ERL_DE_UNLOAD 1
@@ -119,7 +175,7 @@ typedef struct de_proc_entry {
PROC_AWAIT_LOAD == Wants to be notified when we
reloaded the driver (old was locked) */
Uint flags; /* ERL_FL_DE_DEREFERENCED when reload in progress */
- Eterm heap[REF_THING_SIZE]; /* "ref heap" */
+ Eterm heap[ERTS_REF_THING_SIZE]; /* "ref heap" */
struct de_proc_entry *next;
} DE_ProcEntry;
@@ -207,118 +263,6 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
ErtsProcLocks plocks);
/*
-** Just like the driver binary but with initial flags
-** Note that the two structures Binary and ErlDrvBinary HAVE to
-** be equal except for extra fields in the beginning of the struct.
-** ErlDrvBinary is defined in erl_driver.h.
-** When driver_alloc_binary is called, a Binary is allocated, but
-** the pointer returned is to the address of the first element that
-** also occurs in the ErlDrvBinary struct (driver.*binary takes care if this).
-** The driver need never know about additions to the internal Binary of the
-** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
-** and Binary, the macros below can convert one type to the other, as they both
-** in reality are equal.
-*/
-
-#ifdef ARCH_32
- /* *DO NOT USE* only for alignment. */
-#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
-#else
-#define ERTS_BINARY_STRUCT_ALIGNMENT
-#endif
-
-/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
-#define ERTS_INTERNAL_BINARY_FIELDS \
- UWord flags; \
- erts_refc_t refc; \
- ERTS_BINARY_STRUCT_ALIGNMENT
-
-typedef struct binary {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- char orig_bytes[1]; /* to be continued */
-} Binary;
-
-#define ERTS_SIZEOF_Binary(Sz) \
- (offsetof(Binary,orig_bytes) + (Sz))
-
-typedef struct {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- void (*destructor)(Binary *);
- union {
- struct {
- ERTS_BINARY_STRUCT_ALIGNMENT
- char data[1];
- } aligned;
- struct {
- char data[1];
- } unaligned;
- } u;
-} ErtsMagicBinary;
-
-#ifdef ARCH_32
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 4
-#else
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 0
-#endif
-
-typedef union {
- Binary binary;
- ErtsMagicBinary magic_binary;
- struct {
- ERTS_INTERNAL_BINARY_FIELDS
- ErlDrvBinary binary;
- } driver;
-} ErtsBinary;
-
-/*
- * 'Binary' alignment:
- * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
- * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
- * 32-bits architectures and 8 bytes on 64-bits architectures.
- */
-
-#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
- ((ErtsBinary *) (BP))->magic_binary.destructor
-#define ERTS_MAGIC_BIN_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
-#define ERTS_MAGIC_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
-
-/* On 32-bit arch these macro variants will save memory
- by not forcing 8-byte alignment for the magic payload.
-*/
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
-#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
- ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
-#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
-#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
- ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
-
-
-#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
-#define ErlDrvBinary2Binary(D) ((Binary *) \
- (((char *) (D)) \
- - offsetof(ErtsBinary, driver.binary)))
-
-/* A "magic" binary flag */
-#define BIN_FLAG_MAGIC 1
-#define BIN_FLAG_USR1 2 /* Reserved for use by different modules too mark */
-#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
-#define BIN_FLAG_DRV 8
-
-/*
* This structure represents one type of a binary in a process.
*/
@@ -339,46 +283,12 @@ typedef struct proc_bin {
*/
#define PROC_BIN_SIZE (sizeof(ProcBin)/sizeof(Eterm))
-ERTS_GLB_INLINE Eterm erts_mk_magic_binary_term(Eterm **hpp,
- ErlOffHeap *ohp,
- Binary *mbp);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
-{
- ProcBin *pb = (ProcBin *) *hpp;
- *hpp += PROC_BIN_SIZE;
-
- ASSERT(mbp->flags & BIN_FLAG_MAGIC);
-
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = 0;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*) pb;
- pb->val = mbp;
- pb->bytes = (byte *) mbp->orig_bytes;
- pb->flags = 0;
-
- erts_refc_inc(&mbp->refc, 1);
-
- return make_binary(pb);
-}
-
-#endif
-
-#define ERTS_TERM_IS_MAGIC_BINARY(T) \
- (is_binary((T)) \
- && (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
- && (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
-
-
union erl_off_heap_ptr {
struct erl_off_heap_header* hdr;
ProcBin *pb;
struct erl_fun_thing* fun;
struct external_thing_* ext;
+ ErtsMRefThing *mref;
Eterm* ep;
void* voidp;
};
@@ -773,8 +683,8 @@ do { \
typedef struct ErtsPStack_ {
byte* pstart;
- byte* psp;
- byte* pend;
+ int offs; /* "stack pointer" as byte offset from pstart */
+ int size; /* allocated size in bytes */
ErtsAlcType_t alloc_type;
}ErtsPStack;
@@ -785,8 +695,8 @@ void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes);
#define PSTACK_DECLARE(s, DEF_PSTACK_SIZE) \
PSTACK_TYPE PSTK_DEF_STACK(s)[DEF_PSTACK_SIZE]; \
ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
- (byte*)(PSTK_DEF_STACK(s) - 1), /* psp */ \
- (byte*)(PSTK_DEF_STACK(s) + (DEF_PSTACK_SIZE)), /* pend */\
+ -(int)sizeof(PSTACK_TYPE), /* offs */ \
+ DEF_PSTACK_SIZE*sizeof(PSTACK_TYPE), /* size */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
@@ -806,19 +716,21 @@ do { \
} \
} while(0)
-#define PSTACK_IS_EMPTY(s) (s.psp < s.pstart)
+#define PSTACK_IS_EMPTY(s) (s.offs < 0)
-#define PSTACK_COUNT(s) (((PSTACK_TYPE*)s.psp + 1) - (PSTACK_TYPE*)s.pstart)
+#define PSTACK_COUNT(s) ((s.offs + sizeof(PSTACK_TYPE)) / sizeof(PSTACK_TYPE))
-#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp))
+#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
-#define PSTACK_PUSH(s) \
- (s.psp += sizeof(PSTACK_TYPE), \
- ((s.psp == s.pend) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
- sizeof(PSTACK_TYPE)) : (void)0), \
- ((PSTACK_TYPE*) s.psp))
+#define PSTACK_PUSH(s) \
+ (s.offs += sizeof(PSTACK_TYPE), \
+ ((s.offs == s.size) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
+ sizeof(PSTACK_TYPE)) : (void)0), \
+ ((PSTACK_TYPE*) (s.pstart + s.offs)))
-#define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE)))
+#define PSTACK_POP(s) ((s.offs -= sizeof(PSTACK_TYPE)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
/*
* Do not free the stack after this, it may have pointers into what
@@ -831,8 +743,8 @@ do {\
(dst)->pstart = erts_alloc(s.alloc_type,\
sizeof(PSTK_DEF_STACK(s)));\
sys_memcpy((dst)->pstart, s.pstart, _pbytes);\
- (dst)->psp = (dst)->pstart + _pbytes - sizeof(PSTACK_TYPE);\
- (dst)->pend = (dst)->pstart + sizeof(PSTK_DEF_STACK(s));\
+ (dst)->offs = s.offs;\
+ (dst)->size = s.size;\
(dst)->alloc_type = s.alloc_type;\
} else\
*(dst) = s;\
@@ -847,8 +759,8 @@ do { \
ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \
s = *(src); /* struct copy */ \
(src)->pstart = NULL; \
- ASSERT(s.psp >= (s.pstart - sizeof(PSTACK_TYPE))); \
- ASSERT(s.psp < s.pend); \
+ ASSERT(s.offs >= -(int)sizeof(PSTACK_TYPE)); \
+ ASSERT(s.offs < s.size); \
} while (0)
#define PSTACK_DESTROY_SAVED(pstack)\
@@ -969,21 +881,6 @@ void erts_bif_info_init(void);
/* bif.c */
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-#endif
-
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
@@ -991,7 +888,7 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(Process*,Eterm*));
+ Eterm (*bif)(Process*, Eterm*, BeamInstr*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
@@ -1000,12 +897,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* beam_bif_load.c */
-#define ERTS_CPC_ALLOW_GC (1 << 0)
-#define ERTS_CPC_ALL ERTS_CPC_ALLOW_GC
-Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls);
-#ifdef ERTS_NEW_PURGE_STRATEGY
+Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls);
Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed);
-#endif
typedef struct ErtsLiteralArea_ {
struct erl_off_heap_header *off_heap;
@@ -1019,10 +912,7 @@ typedef struct ErtsLiteralArea_ {
extern erts_smp_atomic_t erts_copy_literal_area__;
#define ERTS_COPY_LITERAL_AREA() \
((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__))
-
-#ifdef ERTS_NEW_PURGE_STRATEGY
extern Process *erts_literal_area_collector;
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
extern Process *erts_dirty_process_code_checker;
#endif
@@ -1031,7 +921,7 @@ extern Process *erts_code_purger;
/* beam_load.c */
typedef struct {
- BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ ErtsCodeMFA* mfa; /* Pointer to: Mod, Name, Arity */
Uint needed; /* Heap space needed for entire tuple */
Uint32 loc; /* Location in source code */
Eterm* fname_ptr; /* Pointer to fname table */
@@ -1048,13 +938,14 @@ Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
-BeamInstr* find_function_from_pc(BeamInstr* pc);
+ErtsCodeMFA* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
+void erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin);
/* beam_ranges.c */
void erts_init_ranges(void);
@@ -1074,6 +965,9 @@ void print_process_info(fmtfn_t, void *, Process*);
void info(fmtfn_t, void *);
void loaded(fmtfn_t, void *);
+/* sighandler sys.c */
+int erts_set_signal(Eterm signal, Eterm type);
+
/* erl_arith.c */
double erts_get_positive_zero_float(void);
@@ -1105,26 +999,26 @@ typedef struct {
Eterm* shtable_start;
ErtsAlcType_t shtable_alloc_type;
Uint literal_size;
- Eterm *range_ptr;
- Uint range_sz;
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
} erts_shcopy_t;
-#define INITIALIZE_SHCOPY(info) \
-do { \
- ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA();\
- info.queue_start = info.queue_default; \
- info.bitstore_start = info.bitstore_default; \
- info.shtable_start = info.shtable_default; \
- info.literal_size = 0; \
- if (larea__) { \
- info.range_ptr = &larea__->start[0]; \
- info.range_sz = larea__->end - info.range_ptr; \
- } \
- else { \
- info.range_ptr = NULL; \
- info.range_sz = 0; \
- } \
-} while(0)
+#define INITIALIZE_SHCOPY(info) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ info.queue_start = info.queue_default; \
+ info.bitstore_start = info.bitstore_default; \
+ info.shtable_start = info.shtable_default; \
+ info.literal_size = 0; \
+ if (larea__) { \
+ info.lit_purge_ptr = &larea__->start[0]; \
+ info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
+ } \
+ else { \
+ info.lit_purge_ptr = NULL; \
+ info.lit_purge_sz = 0; \
+ } \
+ } while(0)
#define DESTROY_SHCOPY(info) \
do { \
@@ -1140,18 +1034,42 @@ do { \
} while(0)
/* copy.c */
+typedef struct {
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_literal_area_t;
+
+#define INITIALIZE_LITERAL_PURGE_AREA(Area) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ if (larea__) { \
+ (Area).lit_purge_ptr = &larea__->start[0]; \
+ (Area).lit_purge_sz = larea__->end - (Area).lit_purge_ptr; \
+ } \
+ else { \
+ (Area).lit_purge_ptr = NULL; \
+ (Area).lit_purge_sz = 0; \
+ } \
+ } while(0)
+
Eterm copy_object_x(Eterm, Process*, Uint);
#define copy_object(Term, Proc) copy_object_x(Term,Proc,0)
-Uint size_object(Eterm);
+Uint size_object_x(Eterm, erts_literal_area_t*);
+#define size_object(Term) size_object_x(Term,NULL)
+#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
+
Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
Uint size_shared(Eterm);
-Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz);
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
#define copy_struct(Obj,Sz,HPP,OH) \
- copy_struct_x(Obj,Sz,HPP,OH,NULL)
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
+
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
@@ -1180,7 +1098,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
void init_emulator(void);
-void process_main(void);
+void process_main(Eterm* x_reg_array, FloatDef* f_reg_array);
void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1257,6 +1175,8 @@ void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
void erts_emergency_close_ports(void);
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
+Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_enable_io_lock_count(int enable);
@@ -1353,6 +1273,7 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
void bin_write(fmtfn_t, void*, byte*, size_t);
Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
+Sint erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len);
struct Sint_buf {
#if defined(ARCH_64)
@@ -1453,18 +1374,6 @@ Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live);
-
Uint erts_current_reductions(Process* current, Process *p);
int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p);
@@ -1573,8 +1482,7 @@ int erts_beam_jump_table(void);
ERTS_GLB_INLINE void dtrace_pid_str(Eterm pid, char *process_buf);
ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf);
ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf);
-ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+ERTS_GLB_INLINE void dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1608,8 +1516,7 @@ dtrace_port_str(Port *port, char *port_buf)
}
ERTS_GLB_INLINE void
-dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf)
{
if (process_buf) {
@@ -1617,7 +1524,7 @@ dtrace_fun_decode(Process *process,
}
erts_snprintf(mfa_buf, DTRACE_TERM_BUF_SIZE, "%T:%T/%d",
- module, function, arity);
+ mfa->module, mfa->function, mfa->arity);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
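
The ErtsPStack rework above replaces the raw psp/pend pointers with a byte offset plus an allocated size, so erl_grow_pstack can move the backing buffer without leaving a dangling stack pointer. A standalone sketch of the same idea with a plain int stack (growth doubling and error handling simplified; no relation to the ERTS allocator):

    /* offstack_sketch.c -- offset-based growable stack, the idea behind the
     * new ErtsPStack fields.  Because only `offs` and `size` are kept, a
     * realloc of `start` never invalidates the "stack pointer". */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        char *start;
        int   offs;   /* byte offset of the top element, -sizeof(elem) if empty */
        int   size;   /* allocated size in bytes */
    } OffStack;

    static void push_int(OffStack *s, int v)
    {
        s->offs += (int)sizeof(int);
        if (s->offs == s->size) {             /* full: double the buffer */
            s->size *= 2;
            s->start = realloc(s->start, (size_t)s->size);
        }
        memcpy(s->start + s->offs, &v, sizeof(int));
    }

    static int pop_int(OffStack *s)
    {
        int v;
        memcpy(&v, s->start + s->offs, sizeof(int));
        s->offs -= (int)sizeof(int);
        return v;
    }

    int main(void)
    {
        OffStack s = { malloc(2 * sizeof(int)), -(int)sizeof(int), 2 * sizeof(int) };
        int top, i;
        for (i = 0; i < 10; i++)
            push_int(&s, i);
        top = pop_int(&s);
        printf("top: %d, empty: %d\n", top, s.offs < 0);  /* top: 9, empty: 0 */
        free(s.start);
        return 0;
    }
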
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 38e403d91a..ddff862607 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -259,13 +259,11 @@ static ERTS_INLINE void port_init_instr(Port *prt
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
char *lock_str = "port_lock";
- erts_mtx_init_locked_x(prt->lock, lock_str, id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 0
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
#endif
- );
+ erts_mtx_init_locked_x(prt->lock, lock_str, id);
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -1447,7 +1445,7 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_unblock_fpe(sp->fpe_was_unmasked);
}
-#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (ERTS_REF_THING_SIZE + 3)
static ERTS_INLINE void
queue_port_sched_op_reply(Process *rp,
@@ -1462,7 +1460,7 @@ queue_port_sched_op_reply(Process *rp,
ref= make_internal_ref(hp);
write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
msg = TUPLE2(hp, ref, msg);
@@ -3101,7 +3099,7 @@ port_monitor(Port *prt, erts_aint32_t state, Eterm origin,
ASSERT(is_pid(origin));
ASSERT(is_atom(name) || is_port(name) || name == NIL);
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
@@ -3126,7 +3124,7 @@ static int
port_sig_monitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
+ Eterm hp[ERTS_REF_THING_SIZE];
Eterm ref = make_internal_ref(&hp);
write_ref_thing(hp, sigdp->ref[0], sigdp->ref[1], sigdp->ref[2]);
@@ -3247,7 +3245,7 @@ static int
port_sig_demonitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
+ Eterm hp[ERTS_REF_THING_SIZE];
Eterm ref = make_internal_ref(&hp);
write_ref_thing(hp, sigdp->u.demonitor.ref[0],
sigdp->u.demonitor.ref[1],
@@ -3304,10 +3302,10 @@ ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
sigdp->u.demonitor.origin = origin->common.id;
sigdp->u.demonitor.name = target->common.id;
{
- RefThing *reft = ref_thing_ptr(ref);
+ Uint32 *nums = internal_ref_numbers(ref);
/* Start from 1 skip ref arity */
sys_memcpy(sigdp->u.demonitor.ref,
- internal_thing_ref_numbers(reft),
+ nums,
sizeof(sigdp->u.demonitor.ref));
}
@@ -3433,7 +3431,7 @@ void erts_init_io(int port_tab_size,
NULL,
(ErtsPTabElementCommon *) &erts_invalid_port.common,
port_tab_size,
- common_element_size, /* Doesn't need to be excact */
+ common_element_size, /* Doesn't need to be exact */
"port_table",
legacy_port_tab,
1);
@@ -4194,8 +4192,8 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
ErtsMonitor *rmon;
Process *rp;
- ASSERT(is_internal_pid(mon->pid));
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ ASSERT(is_internal_pid(mon->u.pid));
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
@@ -4296,7 +4294,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
Process *origin;
ErtsProcLocks origin_locks;
- if (mon->type != MON_TARGET || ! is_pid(mon->pid)) {
+ if (mon->type != MON_TARGET || ! is_pid(mon->u.pid)) {
return;
}
/*
@@ -4305,7 +4303,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
*/
origin_locks = ERTS_PROC_LOCKS_MSG_SEND | ERTS_PROC_LOCK_LINK;
- origin = erts_pid2proc(NULL, 0, mon->pid, origin_locks);
+ origin = erts_pid2proc(NULL, 0, mon->u.pid, origin_locks);
if (origin) {
DeclareTmpHeapNoproc(lhp,3);
SweepContext *ctx = (SweepContext *)ctx0;
@@ -5429,7 +5427,7 @@ reply_io_bytes(void *vreq)
rp_locks = ERTS_PROC_LOCK_MAIN;
}
- hsz = 5 /* 4-tuple */ + REF_THING_SIZE;
+ hsz = 5 /* 4-tuple */ + ERTS_REF_THING_SIZE;
erts_bld_uint64(NULL, &hsz, in);
erts_bld_uint64(NULL, &hsz, out);
@@ -5438,7 +5436,7 @@ reply_io_bytes(void *vreq)
ref = make_internal_ref(hp);
write_ref_thing(hp, req->refn[0], req->refn[1], req->refn[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
ein = erts_bld_uint64(&hp, NULL, in);
eout = erts_bld_uint64(&hp, NULL, out);
@@ -5467,7 +5465,7 @@ erts_request_io_bytes(Process *c_p)
ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
sizeof(ErtsIOBytesReq));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
refn = internal_ref_numbers(ref);
@@ -5501,7 +5499,7 @@ typedef struct {
static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
{
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
- erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->pid,mon->ref);
+ erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->u.pid, mon->ref);
}
static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
@@ -7098,7 +7096,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
+ erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7623,22 +7621,29 @@ erl_drv_convert_time_unit(ErlDrvTime val,
(int) to);
}
-static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
- RefThing *refp;
- ASSERT(is_internal_ref(ref));
- ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
- refp = ref_thing_ptr(ref);
- memset(mon,0,sizeof(ErlDrvMonitor));
- memcpy(mon,refp,sizeof(RefThing));
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ ASSERT(is_internal_ordinary_ref(ref));
+ sys_memcpy((void *) mon, (void *) internal_ref_val(ref),
+ ERTS_REF_THING_SIZE*sizeof(Uint));
}
+Eterm erts_driver_monitor_to_ref(Eterm *hp, const ErlDrvMonitor *mon)
+{
+ Eterm ref;
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ sys_memcpy((void *) hp, (void *) mon, ERTS_REF_THING_SIZE*sizeof(Uint));
+ ref = make_internal_ref(hp);
+ ASSERT(is_internal_ordinary_ref(ref));
+ return ref;
+}
static int do_driver_monitor_process(Port *prt,
- Eterm *buf,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
+ Eterm buf[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
@@ -7657,7 +7662,7 @@ static int do_driver_monitor_process(Port *prt,
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- ref_to_driver_monitor(ref,monitor);
+ erts_ref_to_driver_monitor(ref,monitor);
return 0;
}
@@ -7681,32 +7686,27 @@ int driver_monitor_process(ErlDrvPort drvport,
/* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_monitor_process(prt,buf,process,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_monitor_process(prt,process,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static int do_driver_demonitor_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor)
{
+ Eterm heap[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
ErtsMonitor *mon;
Eterm to;
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
+ ref = erts_driver_monitor_to_ref(heap, monitor);
+
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return 1;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
rp = erts_pid2proc_opt(NULL,
0,
@@ -7744,31 +7744,26 @@ int driver_demonitor_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_demonitor_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_demonitor_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static ErlDrvTermData do_driver_get_monitored_process(Port *prt,const ErlDrvMonitor *monitor)
{
Eterm ref;
ErtsMonitor *mon;
Eterm to;
+ Eterm heap[ERTS_REF_THING_SIZE];
+
+ ref = erts_driver_monitor_to_ref(heap, monitor);
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return driver_term_nil;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
return (ErlDrvTermData) to;
}
@@ -7790,21 +7785,16 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_get_monitored_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_get_monitored_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-
int driver_compare_monitors(const ErlDrvMonitor *monitor1,
const ErlDrvMonitor *monitor2)
{
- return memcmp(monitor1,monitor2,sizeof(ErlDrvMonitor));
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
}
void erts_fire_port_monitor(Port *prt, Eterm ref)
@@ -7824,7 +7814,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
}
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
- ref_to_driver_monitor(ref,&drv_monitor);
+ erts_ref_to_driver_monitor(ref,&drv_monitor);
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
@@ -8281,14 +8271,13 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
erts_mtx_init_x(drv->lock,
"driver_lock",
#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1),
+ erts_atom_put((byte *) drv->name,
+ sys_strlen(drv->name),
+ ERTS_ATOM_ENC_LATIN1,
+ 1)
#else
- NIL,
+ NIL
#endif
- 1
);
}
#endif
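
An ErlDrvMonitor is treated as an opaque buffer just large enough to hold an ordinary internal ref, so erts_ref_to_driver_monitor and erts_driver_monitor_to_ref above are a memcpy in each direction and driver_compare_monitors is a memcmp over the same ERTS_REF_THING_SIZE words. A stand-alone sketch of that round trip with invented sizes and types (the real code copies a tagged heap ref, not a C struct):

    /* monitor_roundtrip_sketch.c -- packing a fixed-size reference into an
     * opaque buffer and back, as the driver monitor API does.  Sizes and
     * types are invented for the illustration. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define REF_WORDS 4                      /* stand-in for ERTS_REF_THING_SIZE */

    typedef struct { uintptr_t w[REF_WORDS]; } Ref;
    typedef struct { char opaque[64]; } Monitor;  /* stand-in for ErlDrvMonitor */

    static void ref_to_monitor(const Ref *r, Monitor *m)
    {
        /* the opaque buffer must be at least as large as the ref */
        _Static_assert(sizeof(Ref) <= sizeof(Monitor), "monitor too small");
        memcpy(m, r, sizeof(Ref));
    }

    static Ref monitor_to_ref(const Monitor *m)
    {
        Ref r;
        memcpy(&r, m, sizeof(Ref));
        return r;
    }

    static int monitor_cmp(const Monitor *a, const Monitor *b)
    {
        return memcmp(a, b, sizeof(Ref));    /* only the ref words matter */
    }

    int main(void)
    {
        Ref r = { { 1, 2, 3, 4 } };
        Monitor m1 = { { 0 } }, m2;
        ref_to_monitor(&r, &m1);
        m2 = m1;
        printf("equal: %d, word0: %lu\n",
               monitor_cmp(&m1, &m2) == 0,
               (unsigned long) monitor_to_ref(&m2).w[0]);  /* equal: 1, word0: 1 */
        return 0;
    }
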
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 5978478323..8ab6c713d6 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -26,6 +26,7 @@
#include "erl_vm.h"
#include "global.h"
#include "module.h"
+#include "beam_catches.h"
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -67,6 +68,18 @@ static int module_cmp(Module* tmpl, Module* obj)
return tmpl->module != obj->module;
}
+void erts_module_instance_init(struct erl_module_instance* modi)
+{
+ modi->code_hdr = 0;
+ modi->code_length = 0;
+ modi->catches = BEAM_CATCHES_NIL;
+ modi->nif = NULL;
+ modi->num_breakpoints = 0;
+ modi->num_traced_exports = 0;
+#ifdef HIPE
+ modi->hipe_code = NULL;
+#endif
+}
static Module* module_alloc(Module* tmpl)
{
@@ -74,18 +87,11 @@ static Module* module_alloc(Module* tmpl)
erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->curr.code_hdr = 0;
- obj->old.code_hdr = 0;
- obj->curr.code_length = 0;
- obj->old.code_length = 0;
obj->slot.index = -1;
- obj->curr.nif = NULL;
- obj->old.nif = NULL;
- obj->curr.num_breakpoints = 0;
- obj->old.num_breakpoints = 0;
- obj->curr.num_traced_exports = 0;
- obj->old.num_traced_exports = 0;
+ erts_module_instance_init(&obj->curr);
+ erts_module_instance_init(&obj->old);
obj->on_load = 0;
+ DBG_TRACE_MFA(make_atom(obj->module), 0, 0, "module_alloc");
return obj;
}
@@ -119,6 +125,7 @@ void init_module_table(void)
erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
+
Module*
erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
@@ -139,19 +146,14 @@ erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
}
}
-Module*
-erts_put_module(Eterm mod)
+
+static Module* put_module(Eterm mod, IndexTable* mod_tab)
{
Module e;
- IndexTable* mod_tab;
int oldsz, newsz;
Module* res;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_has_code_write_permission());
-
- mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
oldsz = index_table_sz(mod_tab);
res = (Module*) index_put_entry(mod_tab, (void*) &e);
@@ -160,6 +162,15 @@ erts_put_module(Eterm mod)
return res;
}
+Module*
+erts_put_module(Eterm mod)
+{
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_has_code_write_permission());
+
+ return put_module(mod, &module_tables[erts_staging_code_ix()]);
+}
+
Module *module_code(int i, ErtsCodeIndex code_ix)
{
return (Module*) erts_index_lookup(&module_tables[code_ix], i);
@@ -181,6 +192,13 @@ static ErtsCodeIndex dbg_load_code_ix = 0;
static int entries_at_start_staging = 0;
+static ERTS_INLINE void copy_module(Module* dst_mod, Module* src_mod)
+{
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ dst_mod->on_load = src_mod->on_load;
+}
+
void module_start_staging(void)
{
IndexTable* src = &module_tables[erts_active_code_ix()];
@@ -199,10 +217,7 @@ void module_start_staging(void)
src_mod = (Module*) erts_index_lookup(src, i);
dst_mod = (Module*) erts_index_lookup(dst, i);
ASSERT(src_mod->module == dst_mod->module);
-
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
/*
@@ -214,9 +229,7 @@ void module_start_staging(void)
dst_mod = (Module*) index_put_entry(dst, src_mod);
ASSERT(dst_mod != src_mod);
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
newsz = index_table_sz(dst);
erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index f105b3f401..9d258d5dbf 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -23,6 +23,10 @@
#include "index.h"
+#ifdef HIPE
+#include "hipe_module.h"
+#endif
+
struct erl_module_instance {
BeamCodeHeader* code_hdr;
int code_length; /* Length of loaded code in bytes. */
@@ -30,6 +34,9 @@ struct erl_module_instance {
struct erl_module_nif* nif;
int num_breakpoints;
int num_traced_exports;
+#ifdef HIPE
+ HipeModule *hipe_code;
+#endif
};
typedef struct erl_module {
@@ -42,6 +49,7 @@ typedef struct erl_module {
struct erl_module_instance* on_load;
} Module;
+void erts_module_instance_init(struct erl_module_instance* modi);
Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 879daaca0a..ec36b23059 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -792,24 +792,23 @@ allocate_init t I y
#################################################################
#
-# The BIFs erts_internal:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/1 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0 must be called like a function,
+# The BIF erts_internal:garbage_collect/1 must be called like a function,
# to allow them to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
-
-call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
-call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
-call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index acda51c9fc..7f60710124 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -272,13 +272,13 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
int ix;
HashBucket* b;
#ifdef ERTS_SMP
- ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
-#endif
-
+ ErtsProcLocks c_p_locks = 0;
+ if (c_p) {
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ }
reg_safe_read_lock(c_p, &c_p_locks);
+
if (c_p && !c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#endif
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 79ec34e717..c6ea8049c3 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -99,6 +99,12 @@
#define ErtsContainerStruct(ptr, type, member) \
((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
+/* Use this variant when the member is an array */
+#define ErtsContainerStruct_(ptr, type, memberv) \
+ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv)))
+
+#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member)
+
#if defined (__WIN32__)
# include "erl_win_sys.h"
#else
@@ -481,20 +487,6 @@ extern volatile int erts_break_requested;
void erts_do_break_handling(void);
#endif
-#ifdef ERTS_WANT_GOT_SIGUSR1
-# ifndef UNIX
-# define ERTS_GOT_SIGUSR1 0
-# else
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
-# else
-extern volatile int erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
-# endif
-# endif
-#endif
-
#ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
extern erts_tsd_key_t erts_is_crash_dumping_key;
@@ -901,6 +893,9 @@ typedef erts_atomic_t erts_refc_t;
ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
@@ -934,6 +929,30 @@ erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
}
ERTS_GLB_INLINE erts_aint_t
+erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_atomic_cmpxchg_nob((erts_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
+}
+
+ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
@@ -1006,6 +1025,9 @@ typedef erts_smp_atomic_t erts_smp_refc_t;
ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inc_unless(erts_smp_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val);
@@ -1039,6 +1061,31 @@ erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val)
}
ERTS_GLB_INLINE erts_aint_t
+erts_smp_refc_inc_unless(erts_smp_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_smp_atomic_cmpxchg_nob((erts_smp_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
+}
+
+
+ERTS_GLB_INLINE erts_aint_t
erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val)
{
erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
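The new erts_refc_inc_unless()/erts_smp_refc_inc_unless() helpers above implement an "increment unless equal to a given value" operation with a compare-and-swap retry loop. A minimal standalone sketch of the same pattern, using standard C11 atomics instead of the ERTS atomic API (illustrative only, not part of the patch):

/* Returns the pre-existing value if it equalled unless_val (no increment),
 * otherwise the incremented value. */
#include <stdatomic.h>

static long inc_unless(atomic_long *refc, long unless_val)
{
    long val = atomic_load_explicit(refc, memory_order_relaxed);
    for (;;) {
        long exp = val;
        if (val == unless_val)
            return val;                 /* leave the counter untouched */
        if (atomic_compare_exchange_weak_explicit(refc, &exp, val + 1,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed))
            return val + 1;             /* we won the race */
        val = exp;                      /* someone else changed it; retry */
    }
}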
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 7f7e38c22a..092a5320ba 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -56,6 +56,8 @@
#ifdef HIPE
# include "hipe_mode_switch.h"
#endif
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -204,9 +206,8 @@ erl_grow_wstack(ErtsWStack* s, Uint need)
void
erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
{
- Uint old_size = s->pend - s->pstart;
+ Uint old_size = s->size;
Uint new_size;
- Uint sp_offs = s->psp - s->pstart;
if (need_bytes < old_size)
new_size = 2 * old_size;
@@ -220,8 +221,7 @@ erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
sys_memcpy(new_ptr, s->pstart, old_size);
s->pstart = new_ptr;
}
- s->pend = s->pstart + new_size;
- s->psp = s->pstart + sp_offs;
+ s->size = new_size;
}
/*
@@ -700,12 +700,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
/* make a hash index from an erlang term */
/*
-** There are three hash functions.
-** make_broken_hash: the one used for backward compatibility
-** is called from the bif erlang:hash/2. Should never be used
-** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
-** c) hashes bignums differently on different endian processors and d) hashes
-** small integers with different weights on different bytes.
+** There are two hash functions.
**
** make_hash: A hash function that will give the same values for the same
** terms regardless of the internal representation. Small integers are
@@ -894,11 +889,11 @@ tail_recur:
{
Export* ep = *((Export **) (export_val(term) + 1));
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
+ hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
break;
}
@@ -1045,6 +1040,10 @@ tail_recur:
DESTROY_WSTACK(stack);
return hash;
+#undef MAKE_HASH_TUPLE_OP
+#undef MAKE_HASH_TERM_ARRAY_OP
+#undef MAKE_HASH_CDR_PRE_OP
+#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
@@ -1331,11 +1330,11 @@ make_hash2(Eterm term)
{
Export* ep = *((Export **) (export_val(term) + 1));
UINT32_HASH_2
- (ep->code[2],
- atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
+ (ep->info.mfa.arity,
+ atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
HCONST_14);
goto hash2_common;
}
@@ -1839,7 +1838,7 @@ make_internal_hash(Eterm term)
break;
case REF_SUBTAG:
UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
- ASSERT(internal_ref_no_of_numbers(term) == 3);
+ ASSERT(internal_ref_no_numbers(term) == 3);
UINT32_HASH_2(internal_ref_numbers(term)[1],
internal_ref_numbers(term)[2], HCONST_8);
goto pop_next;
@@ -1848,7 +1847,7 @@ make_internal_hash(Eterm term)
{
ExternalThing* thing = external_thing_ptr(term);
- ASSERT(external_thing_ref_no_of_numbers(thing) == 3);
+ ASSERT(external_thing_ref_no_numbers(thing) == 3);
/* See limitation #2 */
#ifdef ARCH_64
POINTER_HASH(thing->node, HCONST_7);
@@ -1954,259 +1953,6 @@ make_internal_hash(Eterm term)
#undef HCONST
#undef MIX
-
-Uint32 make_broken_hash(Eterm term)
-{
- Uint32 hash = 0;
- DECLARE_WSTACK(stack);
- unsigned op;
-tail_recur:
- op = tag_val_def(term);
- for (;;) {
- switch (op) {
- case NIL_DEF:
- hash = hash*FUNNY_NUMBER3 + 1;
- break;
- case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(term))->slot.bucket.hvalue);
- break;
- case SMALL_DEF:
-#if defined(ARCH_64)
- {
- Sint y1 = signed_val(term);
- Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
- Uint32 y3 = (Uint32) (y2 >> 32);
- int arity = 1;
-
-#if defined(WORDS_BIGENDIAN)
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- Uint32 y4 = (Uint32) y2;
- hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
- if (y3) {
- hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#else
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
- if (y3)
- {
- hash = hash*FUNNY_NUMBER2 + y3;
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#endif
- }
-#else
- hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
-#endif
- break;
-
- case BINARY_DEF:
- {
- size_t sz = binary_size(term);
- size_t i = (sz < 15) ? sz : 15;
-
- hash = hash_binary_bytes(term, i, hash);
- hash = hash*FUNNY_NUMBER4 + sz;
- break;
- }
-
- case EXPORT_DEF:
- {
- Export* ep = *((Export **) (export_val(term) + 1));
-
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
- break;
- }
-
- case FUN_DEF:
- {
- ErlFunThing* funp = (ErlFunThing *) fun_val(term);
- Uint num_free = funp->num_free;
-
- hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
- if (num_free > 0) {
- if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
- }
- term = funp->env[0];
- goto tail_recur;
- }
- break;
- }
-
- case PID_DEF:
- hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
- break;
- case EXTERNAL_PID_DEF:
- hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
- break;
- case PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
- break;
- case EXTERNAL_PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + external_port_number(term);
- break;
- case REF_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
- break;
- case EXTERNAL_REF_DEF:
- hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
- break;
- case FLOAT_DEF:
- {
- FloatDef ff;
- GET_DOUBLE(term, ff);
- if (ff.fd == 0.0f) {
- /* ensure positive 0.0 */
- ff.fd = erts_get_positive_zero_float();
- }
- hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
- }
- break;
- case MAKE_HASH_CDR_PRE_OP:
- term = (Eterm) WSTACK_POP(stack);
- if (is_not_list(term)) {
- WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
- goto tail_recur;
- }
- /*fall through*/
- case LIST_DEF:
- {
- Eterm* list = list_val(term);
- WSTACK_PUSH2(stack, (UWord) CDR(list),
- (UWord) MAKE_HASH_CDR_PRE_OP);
- term = CAR(list);
- goto tail_recur;
- }
-
- case MAKE_HASH_CDR_POST_OP:
- hash *= FUNNY_NUMBER8;
- break;
-
- case BIG_DEF:
- {
- Eterm* ptr = big_val(term);
- int is_neg = BIG_SIGN(ptr);
- Uint arity = BIG_ARITY(ptr);
- Uint i = arity;
- ptr++;
-#if D_EXP == 16
- /* hash over 32 bit LE */
-
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#elif D_EXP == 32
-
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
- }
-#else
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#endif
-
-#elif D_EXP == 64
- {
- Uint32 h = 0, l;
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
- }
-#else
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + l;
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + h;
- }
-#endif
- /* adjust arity to match 32 bit mode */
- arity = (arity << 1) - (h == 0);
- }
-
-#else
-#error "unsupported D_EXP size"
-#endif
- hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- }
- break;
-
- case MAP_DEF:
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
- break;
- case TUPLE_DEF:
- {
- Eterm* ptr = tuple_val(term);
- Uint arity = arityval(*ptr);
-
- WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
- }/*fall through*/
- case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_TERM_ARRAY_OP:
- {
- Uint i = (Uint) WSTACK_POP(stack);
- Eterm* ptr = (Eterm*) WSTACK_POP(stack);
- if (i != 0) {
- term = *ptr;
- WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
- goto tail_recur;
- }
- if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = (UWord) WSTACK_POP(stack);
- hash = hash*FUNNY_NUMBER9 + arity;
- }
- break;
- }
-
- default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_broken_hash\n");
- return 0;
- }
- if (WSTACK_ISEMPTY(stack)) break;
- op = (Uint) WSTACK_POP(stack);
- }
-
- DESTROY_WSTACK(stack);
- return hash;
-
-#undef MAKE_HASH_TUPLE_OP
-#undef MAKE_HASH_TERM_ARRAY_OP
-#undef MAKE_HASH_CDR_PRE_OP
-#undef MAKE_HASH_CDR_POST_OP
-}
-
static Eterm
do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
ErlHeapFragment **bp, Process **p, Uint sz)
@@ -2740,22 +2486,20 @@ tailrecur_ne:
anum = external_thing_ref_numbers(athing);
bnum = external_thing_ref_numbers(bthing);
- alen = external_thing_ref_no_of_numbers(athing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ alen = external_thing_ref_no_numbers(athing);
+ blen = external_thing_ref_no_numbers(bthing);
goto ref_common;
+
case REF_SUBTAG:
- if (!is_internal_ref(b))
- goto not_equal;
- {
- RefThing* athing = ref_thing_ptr(a);
- RefThing* bthing = ref_thing_ptr(b);
- alen = internal_thing_ref_no_of_numbers(athing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- anum = internal_thing_ref_numbers(athing);
- bnum = internal_thing_ref_numbers(bthing);
- }
+ if (!is_internal_ref(b))
+ goto not_equal;
+
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
ref_common:
ASSERT(alen > 0 && blen > 0);
@@ -3307,13 +3051,15 @@ tailrecur_ne:
Export* a_exp = *((Export **) (export_val(a) + 1));
Export* b_exp = *((Export **) (export_val(b) + 1));
- if ((j = erts_cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
+ b_exp->info.mfa.module)) != 0) {
RETURN_NEQ(j);
}
- if ((j = erts_cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
+ b_exp->info.mfa.function)) != 0) {
RETURN_NEQ(j);
}
- ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
+ ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
}
break;
case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
@@ -3385,25 +3131,21 @@ tailrecur_ne:
*/
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if(is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = REF_DEF;
goto mixed_types;
}
- {
- RefThing* athing = ref_thing_ptr(a);
- anode = erts_this_node;
- anum = internal_thing_ref_numbers(athing);
- alen = internal_thing_ref_no_of_numbers(athing);
- }
+ anode = erts_this_node;
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
ref_common:
CMP_NODES(anode, bnode);
@@ -3433,15 +3175,14 @@ tailrecur_ne:
goto pop_next;
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if (is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = EXTERNAL_REF_DEF;
goto mixed_types;
@@ -3450,7 +3191,7 @@ tailrecur_ne:
ExternalThing* athing = external_thing_ptr(a);
anode = athing->node;
anum = external_thing_ref_numbers(athing);
- alen = external_thing_ref_no_of_numbers(athing);
+ alen = external_thing_ref_no_numbers(athing);
}
goto ref_common;
default:
@@ -3827,40 +3568,41 @@ not_equal:
Eterm
store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
+ struct erl_off_heap_header *ohhp;
Uint i;
Uint size;
- Uint *from_hp;
- Uint *to_hp = *hpp;
+ Eterm *from_hp;
+ Eterm *to_hp = *hpp;
ASSERT(is_external(ns) || is_internal_ref(ns));
- if(is_external(ns)) {
- from_hp = external_val(ns);
- size = thing_arityval(*from_hp) + 1;
- *hpp += size;
-
- for(i = 0; i < size; i++)
- to_hp[i] = from_hp[i];
-
- erts_smp_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
-
- ((struct erl_off_heap_header*) to_hp)->next = oh->first;
- oh->first = (struct erl_off_heap_header*) to_hp;
-
- return make_external(to_hp);
- }
-
- /* Internal ref */
- from_hp = internal_ref_val(ns);
-
+ from_hp = boxed_val(ns);
size = thing_arityval(*from_hp) + 1;
-
*hpp += size;
for(i = 0; i < size; i++)
to_hp[i] = from_hp[i];
- return make_internal_ref(to_hp);
+ if (is_external_header(*from_hp)) {
+ ExternalThing *etp = (ExternalThing *) from_hp;
+ ASSERT(is_external(ns));
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ else if (is_ordinary_ref_thing(from_hp))
+ return make_internal_ref(to_hp);
+ else {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
+ ErtsMagicBinary *mb = mreft->mb;
+ ASSERT(is_magic_ref_thing(from_hp));
+ erts_refc_inc(&mb->refc, 2);
+ OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
+ }
+
+ ohhp = (struct erl_off_heap_header*) to_hp;
+ ohhp->next = oh->first;
+ oh->first = ohhp;
+
+ return make_boxed(to_hp);
}
Eterm
@@ -3923,6 +3665,68 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -2; /* not enough space */
}
+/* Fill buf with the contents of the unicode list.
+ * Return the number of bytes in the buffer,
+ * or -1 for type error,
+ * or -2 for not enough buffer space (buffer contains truncated result).
+ */
+Sint
+erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+
+ if (is_nil(list)) {
+ return 0;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+
+ while (len-- > 0) {
+ Sint val;
+
+ if (is_not_small(CAR(listptr))) {
+ return -1;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ buf[sz] = val;
+ sz++;
+ } else if (val < 0x800) {
+ buf[sz+0] = 0xC0 | (val >> 6);
+ buf[sz+1] = 0x80 | (val & 0x3F);
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ return -1;
+ }
+ buf[sz+0] = 0xE0 | (val >> 12);
+ buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+2] = 0x80 | (val & 0x3F);
+ sz += 3;
+ } else if (val < 0x110000) {
+ buf[sz+0] = 0xF0 | (val >> 18);
+ buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
+ buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+3] = 0x80 | (val & 0x3F);
+ sz += 4;
+ } else {
+ return -1;
+ }
+ list = CDR(listptr);
+ if (is_nil(list)) {
+ return sz;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+ }
+ return -2; /* not enough space */
+}
+
/*
** Convert an integer to a byte list
** return pointer to converted stuff (need not to be at start of buf!)
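The new erts_unicode_list_to_buf() above encodes each list element as UTF-8, rejecting UTF-16 surrogates and values above 0x10FFFF. A minimal standalone sketch of the same code-point-to-byte mapping, using a hypothetical utf8_encode() helper (illustrative only, not part of the patch):

/* Returns the number of bytes written, or -1 for values that are not
 * valid Unicode scalar values. */
static int utf8_encode(long cp, unsigned char *out)
{
    if (cp >= 0 && cp < 0x80) {
        out[0] = (unsigned char) cp;                /* 1 byte: ASCII */
        return 1;
    } else if (cp < 0x800) {
        out[0] = 0xC0 | (cp >> 6);                  /* 2 bytes */
        out[1] = 0x80 | (cp & 0x3F);
        return 2;
    } else if (cp < 0x10000) {
        if (cp >= 0xD800 && cp <= 0xDFFF)
            return -1;                              /* surrogates are invalid */
        out[0] = 0xE0 | (cp >> 12);                 /* 3 bytes */
        out[1] = 0x80 | ((cp >> 6) & 0x3F);
        out[2] = 0x80 | (cp & 0x3F);
        return 3;
    } else if (cp < 0x110000) {
        out[0] = 0xF0 | (cp >> 18);                 /* 4 bytes */
        out[1] = 0x80 | ((cp >> 12) & 0x3F);
        out[2] = 0x80 | ((cp >> 6) & 0x3F);
        out[3] = 0x80 | (cp & 0x3F);
        return 4;
    }
    return -1;
}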
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 3adb8db661..173a39533d 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -1937,7 +1937,8 @@ static void free_sendfile(void *data) {
MUTEX_LOCK(d->c.sendfile.q_mtx);
driver_deq(d->c.sendfile.port,1);
MUTEX_UNLOCK(d->c.sendfile.q_mtx);
- driver_select(d->c.sendfile.port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 0);
+ driver_select(d->c.sendfile.port, (ErlDrvEvent)(long)d->c.sendfile.out_fd,
+ ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 0);
}
EF_FREE(data);
}
@@ -2330,9 +2331,9 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
free_read(data);
break;
case FILE_READ_LINE:
- /* The read_line stucture differs from the read structure.
- The data->read_offset and d->c.read_line.read_offset are copies, as are
- data->read_size and d->c.read_line.read_size
+ /* The read_line structure differs from the read structure.
+ The data->read_offset and d->c.read_line.read_offset are copies, as are
+ data->read_size and d->c.read_line.read_size
The read_line function does not know in advance how large the binary has to be,
why new allocation (but not reallocation of the old binary, for obvious reasons)
may happen in the worker thread. */
@@ -2555,7 +2556,7 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
desc->sendfile_state = sending;
desc->d = d;
driver_select(desc->port, (ErlDrvEvent)(long)d->c.sendfile.out_fd,
- ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 1);
+ ERL_DRV_USE|ERL_DRV_WRITE, 1);
}
break;
#endif
diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c
index 1ef1602ec9..f60c781894 100644
--- a/erts/emulator/drivers/common/gzio.c
+++ b/erts/emulator/drivers/common/gzio.c
@@ -308,7 +308,7 @@ ErtsGzFile erts_gzopen (path, mode)
/* ===========================================================================
Read a byte from a gz_stream; update next_in and avail_in. Return EOF
for end of file.
- IN assertion: the stream s has been sucessfully opened for reading.
+ IN assertion: the stream s has been successfully opened for reading.
*/
local int get_byte(s)
gz_stream *s;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 1885338ce5..278abe4e00 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -728,7 +728,7 @@ static int is_nonzero(const char *s, size_t n)
#define TCP_ADDF_PENDING_SHUTDOWN \
(TCP_ADDF_PENDING_SHUT_WR | TCP_ADDF_PENDING_SHUT_RDWR)
#define TCP_ADDF_SHOW_ECONNRESET 64 /* Tell user about incoming RST */
-#define TCP_ADDF_DELAYED_ECONNRESET 128 /* An ECONNRESET error occured on send or shutdown */
+#define TCP_ADDF_DELAYED_ECONNRESET 128 /* An ECONNRESET error occurred on send or shutdown */
#define TCP_ADDF_SHUTDOWN_WR_DONE 256 /* A shutdown(sock, SHUT_WR) or SHUT_RDWR was made */
#define TCP_ADDF_LINGER_ZERO 512 /* Discard driver queue on port close */
@@ -4132,8 +4132,8 @@ static char *inet_set_faddress(int family, inet_address* dst,
/* Get a inaddr structure
** src = inaddr structure
-** *len is the lenght of structure
** dst is filled with [F,P1,P0,X1,....]
+** *len is the length of structure
** where F is the family code (coded)
** and *len is the length of dst on return
** (suitable to deliver to erlang)
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index 440ba956d8..e8afddb01b 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -44,30 +44,34 @@
#define INFLATE_INIT 8
#define INFLATE_INIT2 9
#define INFLATE_SETDICT 10
-#define INFLATE_SYNC 11
-#define INFLATE_RESET 12
-#define INFLATE_END 13
-#define INFLATE 14
+#define INFLATE_GETDICT 11
+#define INFLATE_SYNC 12
+#define INFLATE_RESET 13
+#define INFLATE_END 14
+#define INFLATE 15
-#define CRC32_0 15
-#define CRC32_1 16
-#define CRC32_2 17
+#define CRC32_0 16
+#define CRC32_1 17
+#define CRC32_2 18
-#define SET_BUFSZ 18
-#define GET_BUFSZ 19
-#define GET_QSIZE 20
+#define SET_BUFSZ 19
+#define GET_BUFSZ 20
+#define GET_QSIZE 21
-#define ADLER32_1 21
-#define ADLER32_2 22
+#define ADLER32_1 22
+#define ADLER32_2 23
-#define CRC32_COMBINE 23
-#define ADLER32_COMBINE 24
+#define CRC32_COMBINE 24
+#define ADLER32_COMBINE 25
-#define INFLATE_CHUNK 25
+#define INFLATE_CHUNK 26
#define DEFAULT_BUFSZ 4000
+/* According to zlib documentation, it can never exceed this */
+#define INFL_DICT_SZ 32768
+
/* This flag is used in the same places, where zlib return codes
* (Z_OK, Z_STREAM_END, Z_NEED_DICT) are. So, we need to set it to
* relatively large value to avoid possible value clashes in future.
@@ -248,6 +252,23 @@ static int zlib_output(ZLibData* d)
return zlib_output_init(d);
}
+static int zlib_inflate_get_dictionary(ZLibData* d)
+{
+#ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
+ ErlDrvBinary* dbin = driver_alloc_binary(INFL_DICT_SZ);
+ uInt dlen = 0;
+ int res = inflateGetDictionary(&d->s, (unsigned char*)dbin->orig_bytes, &dlen);
+ if ((res == Z_OK) && (driver_output_binary(d->port, NULL, 0, dbin, 0, dlen) < 0)) {
+ res = Z_ERRNO;
+ }
+ driver_free_binary(dbin);
+ return res;
+#else
+ abort(); /* never called, just to silence 'unresolved symbol'
+ for non-optimizing compiler */
+#endif
+}
+
static int zlib_inflate(ZLibData* d, int flush)
{
int res = Z_OK;
@@ -430,10 +451,35 @@ static void zlib_free(void* data, void* addr)
driver_free(addr);
}
+#if defined(__APPLE__) && defined(__MACH__) && defined(HAVE_ZLIB_INFLATEGETDICTIONARY)
+
+/* Work around broken build system with runtime version test */
+static int have_inflateGetDictionary;
+
+static int zlib_init()
+{
+ unsigned int v[4] = {0, 0, 0, 0};
+ unsigned hexver;
+
+ sscanf(zlibVersion(), "%u.%u.%u.%u", &v[0], &v[1], &v[2], &v[3]);
+
+ hexver = (v[0] << (8*3)) | (v[1] << (8*2)) | (v[2] << (8)) | v[3];
+
+ have_inflateGetDictionary = (hexver >= 0x1020701); /* 1.2.7.1 */
+
+ return 0;
+}
+#else /* trust configure got it right */
+# ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
+# define have_inflateGetDictionary 1
+# else
+# define have_inflateGetDictionary 0
+# endif
static int zlib_init()
{
return 0;
}
+#endif
static ErlDrvData zlib_start(ErlDrvPort port, char* buf)
{
@@ -586,6 +632,16 @@ static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *bu
res = inflateSetDictionary(&d->s, (unsigned char*)buf, len);
return zlib_return(res, rbuf, rlen);
+ case INFLATE_GETDICT:
+ if (have_inflateGetDictionary) {
+ if (d->state != ST_INFLATE) goto badarg;
+ res = zlib_inflate_get_dictionary(d);
+ } else {
+ errno = ENOTSUP;
+ res = Z_ERRNO;
+ }
+ return zlib_return(res, rbuf, rlen);
+
case INFLATE_SYNC:
if (d->state != ST_INFLATE) goto badarg;
if (len != 0) goto badarg;
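The Darwin workaround above compares the runtime zlib version against 1.2.7.1 by packing the dotted version string from zlibVersion() into a single comparable integer, one byte per component. A minimal sketch of that packing, using a hypothetical pack_version() helper (illustrative only, not part of the patch):

#include <stdio.h>

static unsigned pack_version(const char *vstr)
{
    unsigned v[4] = {0, 0, 0, 0};
    /* missing trailing components stay 0 */
    sscanf(vstr, "%u.%u.%u.%u", &v[0], &v[1], &v[2], &v[3]);
    return (v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];
}

/* pack_version("1.2.11") == 0x01020B00, which compares greater than
 * pack_version("1.2.7.1") == 0x01020701, so inflateGetDictionary()
 * would be considered available. */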
diff --git a/erts/emulator/drivers/unix/sig_drv.c b/erts/emulator/drivers/unix/sig_drv.c
index 68ad6b9156..18f2038431 100644
--- a/erts/emulator/drivers/unix/sig_drv.c
+++ b/erts/emulator/drivers/unix/sig_drv.c
@@ -18,7 +18,7 @@
* %CopyrightEnd%
*/
-/* Purpose: demonstrate how to include interupt handlers in erlang */
+/* Purpose: demonstrate how to include interrupt handlers in erlang */
#ifdef HAVE_CONFIG_H
# include "config.h"
diff --git a/erts/emulator/hipe/hipe_amd64.c b/erts/emulator/hipe/hipe_amd64.c
index 62739d2a78..e3cff4a4ba 100644
--- a/erts/emulator/hipe/hipe_amd64.c
+++ b/erts/emulator/hipe/hipe_amd64.c
@@ -73,8 +73,8 @@ int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
{
Sint rel32;
- if (trampoline)
- return -1;
+ ASSERT(trampoline == NULL);
+
rel32 = (Sint)destAddress - (Sint)callAddress - 4;
if ((Sint)(Sint32)rel32 != rel32)
return -1;
@@ -83,29 +83,6 @@ int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
return 0;
}
-#if 0 /* change to non-zero to get allocation statistics at exit() */
-static unsigned int total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs, nr_large, total_lost;
-static unsigned int atexit_done;
-
-static void alloc_code_stats(void)
-{
- printf("\r\nalloc_code_stats: %u bytes mapped, %u joins, %u splits, %u bytes allocated, %u average alloc, %u large allocs, %u bytes lost\r\n",
- total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs ? total_alloc/nr_allocs : 0, nr_large, total_lost);
-}
-
-static void atexit_alloc_code_stats(void)
-{
- if (!atexit_done) {
- atexit_done = 1;
- (void)atexit(alloc_code_stats);
- }
-}
-
-#define ALLOC_CODE_STATS(X) do{X;}while(0)
-#else
-#define ALLOC_CODE_STATS(X) do{}while(0)
-#endif
-
/*
* Memory allocator for executable code.
*
@@ -116,9 +93,6 @@ static void atexit_alloc_code_stats(void)
*/
static void *alloc_code(unsigned int alloc_bytes)
{
- ALLOC_CODE_STATS(++nr_allocs);
- ALLOC_CODE_STATS(total_alloc += alloc_bytes);
-
return erts_alloc(ERTS_ALC_T_HIPE_EXEC, alloc_bytes);
}
@@ -130,6 +104,11 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
+void hipe_free_code(void* code, unsigned int bytes)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, code);
+}
+
/* Make stub for native code calling exported beam function.
*/
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
@@ -234,6 +213,11 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
return code;
}
+void hipe_free_native_stub(void* stub)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, stub);
+}
+
void hipe_arch_print_pcb(struct hipe_process_state *p)
{
#define U(n,x) \
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index 21739726bb..dca3887564 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -41,11 +41,11 @@ define(HANDLE_GOT_MBUF,`
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define CALL_BIF(F) \
- movq CSYM(F)@GOTPCREL(%rip), %r11; \
+ movq CSYM(nbif_impl_##F)@GOTPCREL(%rip), %r11; \
movq %r11, P_BIF_CALLEE(P); \
call CSYM(hipe_debug_bif_wrapper)
#else
-# define CALL_BIF(F) call CSYM(F)
+# define CALL_BIF(F) call CSYM(nbif_impl_##F)
#endif'
/*
@@ -595,13 +595,9 @@ noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu)
#endif /* NO_FPE_SIGNALS */
/*
- * Implement gc_bif_interface_0 as nofail_primop_interface_0.
- */
-define(gc_bif_interface_0,`nofail_primop_interface_0($1, $2)')
-
-/*
- * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2,3).
+ * Implement gc_bif_interface_N as standard_bif_interface_N.
*/
+define(gc_bif_interface_0,`standard_bif_interface_0($1, $2)')
define(gc_bif_interface_1,`standard_bif_interface_1($1, $2)')
define(gc_bif_interface_2,`standard_bif_interface_2($1, $2)')
define(gc_bif_interface_3,`standard_bif_interface_3($1, $2)')
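The CALL_BIF(F) changes above rely on preprocessor token pasting to redirect a call from F to its nbif_impl_##F wrapper. A minimal C sketch of the idiom, with hypothetical names (illustrative only, not part of the patch):

#include <stdio.h>

static void nbif_impl_demo_bif(void) { puts("wrapped BIF body"); }

/* Token pasting builds the wrapper name from the BIF name. */
#define CALL_BIF(F) nbif_impl_##F()

int main(void)
{
    CALL_BIF(demo_bif);   /* expands to nbif_impl_demo_bif() */
    return 0;
}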
diff --git a/erts/emulator/hipe/hipe_arch.h b/erts/emulator/hipe/hipe_arch.h
index 6f959815bb..059b8e7f29 100644
--- a/erts/emulator/hipe/hipe_arch.h
+++ b/erts/emulator/hipe/hipe_arch.h
@@ -30,23 +30,31 @@ extern void hipe_patch_load_fe(Uint *address, Uint value);
extern int hipe_patch_insn(void *address, Uint value, Eterm type);
extern int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline);
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-extern void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity);
+extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, struct process *p);
+extern void hipe_free_code(void*, unsigned int);
+extern void *hipe_make_native_stub(void *exp, unsigned int beamArity);
+extern void hipe_free_native_stub(void*);
+
#if defined(__sparc__)
#include "hipe_sparc.h"
+#include "hipe_sparc_asm.h"
#endif
#if defined(__i386__)
#include "hipe_x86.h"
+#include "hipe_x86_asm.h"
#endif
#if defined(__x86_64__)
#include "hipe_amd64.h"
+#include "hipe_amd64_asm.h"
#endif
#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)
#include "hipe_ppc.h"
+#include "hipe_ppc_asm.h"
#endif
#if defined(__arm__)
#include "hipe_arm.h"
+#include "hipe_arm_asm.h"
#endif
#if !defined(AEXTERN)
diff --git a/erts/emulator/hipe/hipe_arm.c b/erts/emulator/hipe/hipe_arm.c
index f8ef468341..b61939724c 100644
--- a/erts/emulator/hipe/hipe_arm.c
+++ b/erts/emulator/hipe/hipe_arm.c
@@ -25,7 +25,6 @@
#endif
#include "global.h"
#include "erl_binary.h"
-#include <sys/mman.h>
#include "hipe_arch.h"
#include "hipe_native_bif.h" /* nbif_callemu() */
@@ -57,30 +56,6 @@ void hipe_flush_icache_word(void *address)
hipe_flush_icache_range(address, 4);
}
-/*
- * Management of 32MB code segments for regular code and trampolines.
- */
-
-#define SEGMENT_NRBYTES (32*1024*1024) /* named constant, _not_ a tunable */
-
-static struct segment {
- unsigned int *base; /* [base,base+32MB[ */
- unsigned int *code_pos; /* INV: base <= code_pos <= tramp_pos */
- unsigned int *tramp_pos; /* INV: tramp_pos <= base+32MB */
- /* On ARM we always allocate a trampoline at base+32MB-8 for
- nbif_callemu, so tramp_pos <= base+32MB-8. */
-} curseg;
-
-#define in_area(ptr,start,nbytes) \
- ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
-
-static void *new_code_mapping(void)
-{
- return mmap(0, SEGMENT_NRBYTES,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
-}
static int check_callees(Eterm callees)
{
@@ -107,126 +82,53 @@ static int check_callees(Eterm callees)
return arity;
}
-static unsigned int *try_alloc(Uint nrwords, int nrcallees, Eterm callees, unsigned int **trampvec)
-{
- unsigned int *base, *address, *tramp_pos, nrfreewords;
- int trampnr;
- Eterm mfa, m, f;
- unsigned int a, *trampoline;
-
- m = NIL; f = NIL; a = 0; /* silence stupid compiler warning */
- tramp_pos = curseg.tramp_pos;
- address = curseg.code_pos;
- nrfreewords = tramp_pos - address;
- if (nrwords > nrfreewords)
- return NULL;
- curseg.code_pos = address + nrwords;
- nrfreewords -= nrwords;
+#define TRAMPOLINE_WORDS 2
- base = curseg.base;
- for (trampnr = 1; trampnr <= nrcallees; ++trampnr) {
- mfa = tuple_val(callees)[trampnr];
- if (is_atom(mfa))
- trampoline = hipe_primop_get_trampoline(mfa);
- else {
- m = tuple_val(mfa)[1];
- f = tuple_val(mfa)[2];
- a = unsigned_val(tuple_val(mfa)[3]);
- trampoline = hipe_mfa_get_trampoline(m, f, a);
- }
- if (!in_area(trampoline, base, SEGMENT_NRBYTES)) {
- if (nrfreewords < 2)
- return NULL;
- nrfreewords -= 2;
- tramp_pos = trampoline = tramp_pos - 2;
- trampoline[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
- trampoline[1] = 0; /* callee's address */
- hipe_flush_icache_range(trampoline, 2*sizeof(int));
- if (is_atom(mfa))
- hipe_primop_set_trampoline(mfa, trampoline);
- else
- hipe_mfa_set_trampoline(m, f, a, trampoline);
- }
- trampvec[trampnr-1] = trampoline;
+static void generate_trampolines(Uint32* address,
+ int nrcallees, Eterm callees,
+ Uint32** trampvec)
+{
+ Uint32* trampoline = address;
+ int i;
+
+ for (i = 0; i < nrcallees; ++i) {
+ trampoline[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
+ trampoline[1] = 0; /* callee's address */
+ trampvec[i] = trampoline;
+ trampoline += TRAMPOLINE_WORDS;
}
- curseg.tramp_pos = tramp_pos;
- return address;
+ hipe_flush_icache_range(address, nrcallees*2*sizeof(Uint32));
}
void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p)
{
- Uint nrwords;
+ Uint code_words;
int nrcallees;
Eterm trampvecbin;
- unsigned int **trampvec;
- unsigned int *address;
- unsigned int *base;
- struct segment oldseg;
+ Uint32 **trampvec;
+ Uint32 *address;
if (nrbytes & 0x3)
return NULL;
- nrwords = nrbytes >> 2;
+ code_words = nrbytes / sizeof(Uint32);
nrcallees = check_callees(callees);
if (nrcallees < 0)
return NULL;
- trampvecbin = new_binary(p, NULL, nrcallees*sizeof(unsigned int*));
- trampvec = (unsigned int**)binary_bytes(trampvecbin);
+ trampvecbin = new_binary(p, NULL, nrcallees*sizeof(Uint32*));
+ trampvec = (Uint32**)binary_bytes(trampvecbin);
- address = try_alloc(nrwords, nrcallees, callees, trampvec);
- if (!address) {
- base = new_code_mapping();
- if (base == MAP_FAILED)
- return NULL;
- oldseg = curseg;
- curseg.base = base;
- curseg.code_pos = base;
- curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
- curseg.tramp_pos -= 2;
- curseg.tramp_pos[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
- curseg.tramp_pos[1] = (unsigned int)&nbif_callemu;
+ address = erts_alloc(ERTS_ALC_T_HIPE_EXEC,
+ (code_words + nrcallees*TRAMPOLINE_WORDS)*sizeof(Uint32));
- address = try_alloc(nrwords, nrcallees, callees, trampvec);
- if (!address) {
- munmap(base, SEGMENT_NRBYTES);
- curseg = oldseg;
- return NULL;
- }
- /* commit to new segment, ignore leftover space in old segment */
- }
+ generate_trampolines(address + code_words, nrcallees, callees, trampvec);
*trampolines = trampvecbin;
return address;
}
-static unsigned int *alloc_stub(Uint nrwords, unsigned int **tramp_callemu)
+void hipe_free_code(void* code, unsigned int bytes)
{
- unsigned int *address;
- unsigned int *base;
- struct segment oldseg;
-
- address = try_alloc(nrwords, 0, NIL, NULL);
- if (!address) {
- base = new_code_mapping();
- if (base == MAP_FAILED)
- return NULL;
- oldseg = curseg;
- curseg.base = base;
- curseg.code_pos = base;
- curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
- curseg.tramp_pos -= 2;
- curseg.tramp_pos[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
- curseg.tramp_pos[1] = (unsigned int)&nbif_callemu;
-
- address = try_alloc(nrwords, 0, NIL, NULL);
- if (!address) {
- munmap(base, SEGMENT_NRBYTES);
- curseg = oldseg;
- return NULL;
- }
- /* commit to new segment, ignore leftover space in old segment */
- }
- *tramp_callemu = (unsigned int*)((char*)curseg.base + SEGMENT_NRBYTES) - 2;
- return address;
+ erts_free(ERTS_ALC_T_HIPE_EXEC, code);
}
/*
@@ -266,8 +168,8 @@ int hipe_patch_insn(void *address, Uint32 value, Eterm type)
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
- unsigned int *tramp_callemu;
int callemu_offset;
+ int is_short_jmp;
/*
* Native code calls BEAM via a stub looking as follows:
@@ -277,36 +179,57 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
* b nbif_callemu
* .long callee_exp
*
+ * or if nbif_callemu is too far away:
+ *
+ * mov r0, #beamArity
+ * ldr r8, [pc,#0] // callee_exp
+ * ldr pc, [pc,#0] // nbif_callemu
+ * .long callee_exp
+ * .long nbif_callemu
+ *
* I'm using r0 and r8 since they aren't used for
- * parameter passing in native code. The branch to
- * nbif_callemu may need to go via a trampoline.
- * (Trampolines are allowed to modify r12, but they don't.)
+ * parameter passing in native code.
*/
- code = alloc_stub(4, &tramp_callemu);
+ code = erts_alloc(ERTS_ALC_T_HIPE_EXEC, 5*sizeof(Uint32));
if (!code)
return NULL;
callemu_offset = ((int)&nbif_callemu - ((int)&code[2] + 8)) >> 2;
- if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF)) {
- callemu_offset = ((int)tramp_callemu - ((int)&code[2] + 8)) >> 2;
- if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF))
- abort();
+ is_short_jmp = (callemu_offset >= -0x00800000 &&
+ callemu_offset <= 0x007FFFFF);
+#ifdef DEBUG
+ if (is_short_jmp && (callemu_offset % 3)==0) {
+ is_short_jmp = 0;
}
+#endif
/* mov r0, #beamArity */
code[0] = 0xE3A00000 | (beamArity & 0xFF);
/* ldr r8, [pc,#0] // callee_exp */
code[1] = 0xE59F8000;
- /* b nbif_callemu */
- code[2] = 0xEA000000 | (callemu_offset & 0x00FFFFFF);
+ if (is_short_jmp) {
+ /* b nbif_callemu */
+ code[2] = 0xEA000000 | (callemu_offset & 0x00FFFFFF);
+ }
+ else {
+ /* ldr pc, [pc,#0] // nbif_callemu */
+ code[2] = 0xE59FF000;
+ /* .long nbif_callemu */
+ code[4] = (unsigned int)&nbif_callemu;
+ }
/* .long callee_exp */
code[3] = (unsigned int)callee_exp;
- hipe_flush_icache_range(code, 4*sizeof(int));
+ hipe_flush_icache_range(code, 5*sizeof(Uint32));
return code;
}
+void hipe_free_native_stub(void* stub)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, stub);
+}
+
static void patch_b(Uint32 *address, Sint32 offset, Uint32 AA)
{
Uint32 oldI = *address;
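The reworked hipe_make_native_stub() above falls back to a literal-pool long jump when nbif_callemu lies outside the reach of an ARM branch, which encodes a signed 24-bit word offset relative to PC+8 (roughly +/-32MB). A minimal sketch of that range test, using a hypothetical fits_arm_branch() helper (illustrative only, not part of the patch):

#include <stdint.h>

static int fits_arm_branch(uintptr_t branch_addr, uintptr_t target_addr)
{
    /* word offset relative to the pipelined value of PC (branch + 8) */
    intptr_t offset =
        ((intptr_t) target_addr - (intptr_t) (branch_addr + 8)) >> 2;
    return offset >= -0x00800000 && offset <= 0x007FFFFF;
}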
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index d7a2fec04a..a9097dabde 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -30,9 +30,9 @@ include(`hipe/hipe_arm_asm.m4')
.arm
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) ldr r14, =F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
+# define CALL_BIF(F) ldr r14, =nbif_impl_##F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
#else
-# define CALL_BIF(F) bl F
+# define CALL_BIF(F) bl nbif_impl_##F
#endif'
define(TEST_GOT_MBUF,`ldr r1, [P, #P_MBUF] /* `TEST_GOT_MBUF' */
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 165f33eecd..688c82ab7a 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -44,6 +44,7 @@
#include "hipe_mode_switch.h"
#include "hipe_native_bif.h"
#include "hipe_bif0.h"
+#include "hipe_load.h"
/* We need hipe_literals.h for HIPE_SYSTEM_CRC, but it redefines
a few constants. #undef them here to avoid warnings. */
#undef F_TIMO
@@ -54,6 +55,7 @@
#define BeamOpCode(Op) ((Uint)BeamOp(Op))
+
int term_to_Sint32(Eterm term, Sint *sp)
{
Sint val;
@@ -374,15 +376,28 @@ BIF_RETTYPE hipe_bifs_ref_set_2(BIF_ALIST_2)
}
/*
+ * BIFs for loading code.
+ */
+
+static HipeLoaderState *get_loader_state(Eterm term)
+{
+ if (!is_internal_magic_ref(term)) return NULL;
+
+ return hipe_get_loader_state(erts_magic_ref2bin(term));
+}
+
+
+/*
* Allocate memory and copy machine code to it.
*/
-BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
+BIF_RETTYPE hipe_bifs_enter_code_3(BIF_ALIST_3)
{
Uint nrbytes;
void *bytes;
void *address;
Eterm trampolines;
Eterm *hp;
+ HipeLoaderState *stp;
#ifndef DEBUG
ERTS_DECLARE_DUMMY(Uint bitoffs);
ERTS_DECLARE_DUMMY(Uint bitsize);
@@ -391,7 +406,8 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
Uint bitsize;
#endif
- if (is_not_binary(BIF_ARG_1))
+ if (is_not_binary(BIF_ARG_1) ||
+ (!(stp = get_loader_state(BIF_ARG_3))))
BIF_ERROR(BIF_P, BADARG);
nrbytes = binary_size(BIF_ARG_1);
ERTS_GET_BINARY_BYTES(BIF_ARG_1, bytes, bitoffs, bitsize);
@@ -406,11 +422,15 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
nrcallees = arityval(tuple_val(BIF_ARG_2)[0]);
else
nrcallees = 0;
+ // XXX: Is there any reason to not just BIF_ERROR, so that the runtime
+ // survives?
erts_exit(ERTS_ERROR_EXIT, "%s: failed to allocate %lu bytes and %lu trampolines\r\n",
__func__, (unsigned long)nrbytes, (unsigned long)nrcallees);
}
memcpy(address, bytes, nrbytes);
hipe_flush_icache_range(address, nrbytes);
+ stp->text_segment = address;
+ stp->text_segment_size = nrbytes;
hp = HAlloc(BIF_P, 3);
hp[0] = make_arityval(2);
hp[1] = address_to_term(address, BIF_P);
@@ -423,25 +443,34 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
/*
* Allocate memory for arbitrary non-Erlang data.
*/
-BIF_RETTYPE hipe_bifs_alloc_data_2(BIF_ALIST_2)
+BIF_RETTYPE hipe_bifs_alloc_data_3(BIF_ALIST_3)
{
- Uint align, nrbytes;
- void *block;
+ Uint align;
+ HipeLoaderState *stp;
if (is_not_small(BIF_ARG_1) || is_not_small(BIF_ARG_2) ||
+ (!(stp = get_loader_state(BIF_ARG_3))) ||
(align = unsigned_val(BIF_ARG_1), !IS_POWER_OF_TWO(align)))
BIF_ERROR(BIF_P, BADARG);
- nrbytes = unsigned_val(BIF_ARG_2);
- if (nrbytes == 0)
+
+ if (stp->data_segment_size || stp->data_segment)
+ BIF_ERROR(BIF_P, BADARG);
+
+ stp->data_segment_size = unsigned_val(BIF_ARG_2);
+ if (stp->data_segment_size == 0)
BIF_RET(make_small(0));
- block = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
- if ((unsigned long)block & (align-1)) {
- fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte aligned\r\n",
- __FUNCTION__, (unsigned long)nrbytes, block, (unsigned long)align);
- erts_free(ERTS_ALC_T_HIPE, block);
+ stp->data_segment = erts_alloc(ERTS_ALC_T_HIPE, stp->data_segment_size);
+ if ((unsigned long)stp->data_segment & (align-1)) {
+ fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte "
+ "aligned\r\n",
+ __FUNCTION__, (unsigned long)stp->data_segment_size,
+ stp->data_segment, (unsigned long)align);
+ erts_free(ERTS_ALC_T_HIPE, stp->data_segment);
+ stp->data_segment = NULL;
+ stp->data_segment_size = 0;
BIF_ERROR(BIF_P, EXC_NOTSUP);
}
- BIF_RET(address_to_term(block, BIF_P));
+ BIF_RET(address_to_term(stp->data_segment, BIF_P));
}
/*
@@ -537,13 +566,13 @@ BIF_RETTYPE hipe_bifs_merge_term_1(BIF_ALIST_1)
BIF_RET(val);
}
-struct mfa_t {
+struct hipe_mfa {
Eterm mod;
Eterm fun;
Uint ari;
};
-static int term_to_mfa(Eterm term, struct mfa_t *mfa)
+static int term_to_mfa(Eterm term, struct hipe_mfa *mfa)
{
Eterm mod, fun, a;
Uint ari;
@@ -601,7 +630,7 @@ static Uint *hipe_find_emu_address(Eterm mod, Eterm name, unsigned int arity)
Uint *hipe_bifs_find_pc_from_mfa(Eterm term)
{
- struct mfa_t mfa;
+ struct hipe_mfa mfa;
if (!term_to_mfa(term, &mfa))
return NULL;
@@ -616,12 +645,20 @@ BIF_RETTYPE hipe_bifs_fun_to_address_1(BIF_ALIST_1)
BIF_RET(address_to_term(pc, BIF_P));
}
+BIF_RETTYPE hipe_bifs_commit_patch_load_1(BIF_ALIST_1)
+{
+ if (!erts_commit_hipe_patch_load(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(am_ok);
+}
+
BIF_RETTYPE hipe_bifs_set_native_address_3(BIF_ALIST_3)
{
Eterm *pc;
void *address;
int is_closure;
- struct mfa_t mfa;
+ struct hipe_mfa mfa;
switch (BIF_ARG_3) {
case am_false:
@@ -644,28 +681,22 @@ BIF_RETTYPE hipe_bifs_set_native_address_3(BIF_ALIST_3)
pc = hipe_find_emu_address(mfa.mod, mfa.fun, mfa.ari);
if (pc) {
- hipe_mfa_save_orig_beam_op(mfa.mod, mfa.fun, mfa.ari, pc);
-#if HIPE
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mfa.mod, mfa.fun, mfa.ari);
- printf(": planting call trap to %p at BEAM pc %p\r\n", address, pc);
-#endif
+ DBG_TRACE_MFA(mfa.mod,mfa.fun,mfa.ari, "set beam call trap at %p -> %p", pc, address);
hipe_set_call_trap(pc, address, is_closure);
BIF_RET(am_true);
-#endif
}
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mfa.mod, mfa.fun, mfa.ari);
- printf(": no BEAM pc found\r\n");
-#endif
+ DBG_TRACE_MFA(mfa.mod,mfa.fun,mfa.ari, "failed set call trap to %p, no beam code found", address);
BIF_RET(am_false);
}
-BIF_RETTYPE hipe_bifs_enter_sdesc_1(BIF_ALIST_1)
+BIF_RETTYPE hipe_bifs_enter_sdesc_2(BIF_ALIST_2)
{
- struct sdesc *sdesc;
+ struct hipe_sdesc *sdesc;
+ HipeLoaderState* stp;
+
+ stp = get_loader_state(BIF_ARG_2);
+ if (!stp)
+ BIF_ERROR(BIF_P, BADARG);
sdesc = hipe_decode_sdesc(BIF_ARG_1);
if (!sdesc) {
@@ -676,6 +707,13 @@ BIF_RETTYPE hipe_bifs_enter_sdesc_1(BIF_ALIST_1)
fprintf(stderr, "%s: duplicate entry!\r\n", __FUNCTION__);
BIF_ERROR(BIF_P, BADARG);
}
+
+ /*
+ * Link into list of sdesc's in same module instance
+ */
+ sdesc->next_in_modi = stp->new_hipe_sdesc;
+ stp->new_hipe_sdesc = sdesc;
+
BIF_RET(NIL);
}
@@ -691,7 +729,7 @@ struct nbif {
};
static struct nbif nbifs[BIF_SIZE] = {
-#define BIF_LIST(MOD,FUN,ARY,CFUN,IX) \
+#define BIF_LIST(MOD,FUN,ARY,BIF,CFUN,IX) \
{ {0,0}, MOD, FUN, ARY, &nbif_##CFUN },
#include "erl_bif_list.h"
#undef BIF_LIST
@@ -778,9 +816,6 @@ BIF_RETTYPE hipe_bifs_bif_address_3(BIF_ALIST_3)
struct primop {
HashBucket bucket; /* bucket.hvalue == atom_val(name) */
const void *address;
-#if defined(__arm__)
- void *trampoline;
-#endif
};
static struct primop primops[] = {
@@ -839,29 +874,6 @@ static struct primop *primop_table_get(Eterm name)
return hash_get(&primop_table, &tmpl);
}
-#if defined(__arm__)
-static struct primop *primop_table_put(Eterm name)
-{
- struct primop tmpl;
-
- init_primop_table();
- tmpl.bucket.hvalue = atom_val(name);
- return hash_put(&primop_table, &tmpl);
-}
-
-void *hipe_primop_get_trampoline(Eterm name)
-{
- struct primop *primop = primop_table_get(name);
- return primop ? primop->trampoline : NULL;
-}
-
-void hipe_primop_set_trampoline(Eterm name, void *trampoline)
-{
- struct primop *primop = primop_table_put(name);
- primop->trampoline = trampoline;
-}
-#endif
-
/*
* hipe_bifs_primop_address(Atom) -> address or false
*/
@@ -890,7 +902,8 @@ BIF_RETTYPE hipe_bifs_term_to_word_1(BIF_ALIST_1)
}
/* XXX: this is really a primop, not a BIF */
-BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1)
+/* Called via standard_bif_interface_1 */
+BIF_RETTYPE nbif_impl_hipe_conv_big_to_float(NBIF_ALIST_1)
{
Eterm res;
Eterm *hp;
@@ -970,7 +983,7 @@ BIF_RETTYPE hipe_bifs_get_fe_2(BIF_ALIST_2)
atom_buf[0] = '\0';
strncat(atom_buf, (char*)atom_tab(i)->name, atom_tab(i)->len);
- printf("no fun entry for %s %ld:%ld\n", atom_buf, uniq, index);
+ printf("no fun entry for %s %ld:%ld\n", atom_buf, (unsigned long)uniq, (unsigned long)index);
BIF_ERROR(BIF_P, BADARG);
}
BIF_RET(address_to_term((void *)fe, BIF_P));
@@ -997,30 +1010,35 @@ BIF_RETTYPE hipe_bifs_set_native_address_in_fe_2(BIF_ALIST_2)
BIF_RET(am_true);
}
+struct hipe_ref_head {
+ struct hipe_ref_head* next;
+ struct hipe_ref_head* prev;
+};
+
/*
- * MFA info hash table:
+ * An exported function called from or implemented by native code
* - maps MFA to native code entry point
- * - the MFAs it calls (refers_to)
- * - the references to it (referred_from)
+ * - all references to it (callers)
* - maps MFA to most recent trampoline [if powerpc or arm]
*/
struct hipe_mfa_info {
+ HashBucket mod2mfa;
struct {
unsigned long hvalue;
struct hipe_mfa_info *next;
} bucket;
Eterm m; /* atom */
Eterm f; /* atom */
- unsigned int a;
+ unsigned int a : sizeof(int)*8 - 1;
+ unsigned int is_stub : 1; /* if beam or not (yet) loaded */
void *remote_address;
- void *local_address;
- Eterm *beam_code;
- Uint orig_beam_op;
- struct hipe_mfa_info_list *refers_to;
- struct ref *referred_from;
-#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
- void *trampoline;
+ void *new_address;
+ struct hipe_ref_head callers; /* sentinel in list of hipe_ref's */
+ struct hipe_mfa_info* next_in_mod;
+#ifdef DEBUG
+ Export* dbg_export;
#endif
+
};
static struct {
@@ -1038,6 +1056,82 @@ static struct {
erts_smp_rwmtx_t lock;
} hipe_mfa_info_table;
+Hash mod2mfa_tab; /* map from module atom to list of hipe_mfa_info */
+
+static HashValue mod2mfa_hash(struct hipe_mfa_info* mfa)
+{
+ return mfa->mod2mfa.hvalue;
+}
+
+static int mod2mfa_cmp(HashBucket* tmpl, struct hipe_mfa_info* mfa)
+{
+ return tmpl->hvalue != mfa->mod2mfa.hvalue;
+}
+
+static struct hipe_mfa_info* mod2mfa_alloc(struct hipe_mfa_info* tmpl)
+{
+ return tmpl; /* hash_put always uses the mfa itself as the template */
+}
+
+static void mod2mfa_free(struct hipe_mfa_info* mfa)
+{
+}
+
+static void mod2mfa_tab_init(void)
+{
+ HashFunctions f;
+ static int init_done = 0;
+
+ if (init_done)
+ return;
+ init_done = 1;
+
+ f.hash = (H_FUN) mod2mfa_hash;
+ f.cmp = (HCMP_FUN) mod2mfa_cmp;
+ f.alloc = (HALLOC_FUN) mod2mfa_alloc;
+ f.free = (HFREE_FUN) mod2mfa_free;
+ f.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ f.meta_free = (HMFREE_FUN) erts_free;
+ f.meta_print = (HMPRINT_FUN) erts_print;
+
+ hash_init(ERTS_ALC_T_HIPE, &mod2mfa_tab, "mod2mfa_tab", 50, f);
+}
+
+static struct hipe_mfa_info* mod2mfa_get(Module* modp)
+{
+ HashBucket tmpl;
+ tmpl.hvalue = modp->module;
+ return hash_get(&mod2mfa_tab, &tmpl);
+}
+
+static struct hipe_mfa_info* mod2mfa_put(struct hipe_mfa_info* mfa)
+{
+ mfa->mod2mfa.hvalue = atom_val(mfa->m);
+ return hash_put(&mod2mfa_tab, mfa);
+}
+
+
+
+/*
+ * An external native call site M:F(...)
+ * to be patched when the callee changes.
+ */
+struct hipe_ref {
+ struct hipe_ref_head head; /* list of refs to same callee */
+ void *address;
+#if defined(__arm__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)
+ void *trampoline;
+#endif
+ unsigned int flags;
+ struct hipe_ref* next_from_modi; /* list of refs from same module instance */
+#if defined(DEBUG)
+ struct hipe_mfa_info* callee;
+ Eterm caller_m, caller_f, caller_a;
+#endif
+};
+#define REF_FLAG_IS_LOAD_MFA 1 /* bit 0: 0 == call, 1 == load_mfa */
+
+
static inline void hipe_mfa_info_table_init_lock(void)
{
erts_smp_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock");
@@ -1063,6 +1157,16 @@ static inline void hipe_mfa_info_table_rwunlock(void)
erts_smp_rwmtx_rwunlock(&hipe_mfa_info_table.lock);
}
+static ERTS_INLINE
+struct hipe_mfa_info* mod2mfa_get_safe(Module* modp)
+{
+ struct hipe_mfa_info* mfa;
+ hipe_mfa_info_table_rlock();
+ mfa = mod2mfa_get(modp);
+ hipe_mfa_info_table_runlock();
+ return mfa;
+}
+
#define HIPE_MFA_HASH(M,F,A) (atom_val(M) ^ atom_val(F) ^ (A))
static struct hipe_mfa_info **hipe_mfa_info_table_alloc_bucket(unsigned int size)
@@ -1108,14 +1212,14 @@ static struct hipe_mfa_info *hipe_mfa_info_table_alloc(Eterm m, Eterm f, unsigne
res->m = m;
res->f = f;
res->a = arity;
+ res->is_stub = 0;
res->remote_address = NULL;
- res->local_address = NULL;
- res->beam_code = NULL;
- res->orig_beam_op = 0;
- res->refers_to = NULL;
- res->referred_from = NULL;
-#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
- res->trampoline = NULL;
+ res->new_address = NULL;
+ res->callers.next = &res->callers;
+ res->callers.prev = &res->callers;
+ res->next_in_mod = NULL;
+#ifdef DEBUG
+ res->dbg_export = NULL;
#endif
return res;
@@ -1133,6 +1237,8 @@ void hipe_mfa_info_table_init(void)
hipe_mfa_info_table.bucket = hipe_mfa_info_table_alloc_bucket(size);
hipe_mfa_info_table_init_lock();
+
+ mod2mfa_tab_init();
}
static inline struct hipe_mfa_info *hipe_mfa_info_table_get_locked(Eterm m, Eterm f, unsigned int arity)
@@ -1154,21 +1260,12 @@ static inline struct hipe_mfa_info *hipe_mfa_info_table_get_locked(Eterm m, Eter
return NULL;
}
-#if 0 /* XXX: unused */
-void *hipe_mfa_find_na(Eterm m, Eterm f, unsigned int arity)
-{
- const struct hipe_mfa_info *p;
-
- p = hipe_mfa_info_table_get(m, f, arity);
- return p ? p->address : NULL;
-}
-#endif
-
static struct hipe_mfa_info *hipe_mfa_info_table_put_rwlocked(Eterm m, Eterm f, unsigned int arity)
{
unsigned long h;
unsigned int i;
struct hipe_mfa_info *p;
+ struct hipe_mfa_info *first_in_mod;
unsigned int size;
h = HIPE_MFA_HASH(m, f, arity);
@@ -1189,216 +1286,137 @@ static struct hipe_mfa_info *hipe_mfa_info_table_put_rwlocked(Eterm m, Eterm f,
size = 1 << hipe_mfa_info_table.log2size;
if (hipe_mfa_info_table.used > (4*size/5)) /* rehash at 80% */
hipe_mfa_info_table_grow();
- return p;
-}
-static void hipe_mfa_set_na(Eterm m, Eterm f, unsigned int arity, void *address, int is_exported)
-{
- struct hipe_mfa_info *p;
+ first_in_mod = mod2mfa_put(p);
+ if (p != first_in_mod) {
+ p->next_in_mod = first_in_mod->next_in_mod;
+ first_in_mod->next_in_mod = p;
+ }
+ else {
+ p->next_in_mod = NULL;
+ }
- hipe_mfa_info_table_rwlock();
- p = hipe_mfa_info_table_put_rwlocked(m, f, arity);
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(m, f, arity);
- printf(": changing address from %p to %p\r\n", p->local_address, address);
-#endif
- p->local_address = address;
- if (is_exported)
- p->remote_address = address;
- hipe_mfa_info_table_rwunlock();
+ DBG_TRACE_MFA(m,f,arity, "hipe_mfa_info allocated at %p", p);
+
+ return p;
}
-#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
-void *hipe_mfa_get_trampoline(Eterm m, Eterm f, unsigned int arity)
+static void remove_mfa_info(struct hipe_mfa_info* rm)
{
+ unsigned int i;
struct hipe_mfa_info *p;
- void *trampoline;
-
- hipe_mfa_info_table_rlock();
- p = hipe_mfa_info_table_get_locked(m, f, arity);
- trampoline = p ? p->trampoline : NULL;
- hipe_mfa_info_table_runlock();
- return trampoline;
+ struct hipe_mfa_info **prevp;
+
+ i = rm->bucket.hvalue & hipe_mfa_info_table.mask;
+ prevp = &hipe_mfa_info_table.bucket[i];
+ for (;;) {
+ p = *prevp;
+ ASSERT(p);
+ if (p == rm) {
+ *prevp = p->bucket.next;
+ ASSERT(hipe_mfa_info_table.used > 0);
+ hipe_mfa_info_table.used--;
+ return;
+ }
+ prevp = &p->bucket.next;
+ }
}
-void hipe_mfa_set_trampoline(Eterm m, Eterm f, unsigned int arity, void *trampoline)
+static void hipe_mfa_set_na(Eterm m, Eterm f, unsigned int arity, void *address)
{
struct hipe_mfa_info *p;
hipe_mfa_info_table_rwlock();
p = hipe_mfa_info_table_put_rwlocked(m, f, arity);
- p->trampoline = trampoline;
+ DBG_TRACE_MFA(m,f,arity,"set native address in hipe_mfa_info at %p", p);
+ p->new_address = address;
+
hipe_mfa_info_table_rwunlock();
}
-#endif
BIF_RETTYPE hipe_bifs_set_funinfo_native_address_3(BIF_ALIST_3)
{
- struct mfa_t mfa;
+ struct hipe_mfa mfa;
void *address;
- int is_exported;
- if (!term_to_mfa(BIF_ARG_1, &mfa))
- BIF_ERROR(BIF_P, BADARG);
- address = term_to_address(BIF_ARG_2);
- if (!address)
- BIF_ERROR(BIF_P, BADARG);
- if (BIF_ARG_3 == am_true)
- is_exported = 1;
- else if (BIF_ARG_3 == am_false)
- is_exported = 0;
- else
+ switch (BIF_ARG_3) {
+ case am_true: /* is_exported */
+ if (!term_to_mfa(BIF_ARG_1, &mfa))
+ BIF_ERROR(BIF_P, BADARG);
+ address = term_to_address(BIF_ARG_2);
+ if (!address)
+ BIF_ERROR(BIF_P, BADARG);
+ hipe_mfa_set_na(mfa.mod, mfa.fun, mfa.ari, address);
+ break;
+ case am_false:
+ break; /* ignore local functions */
+ default:
BIF_ERROR(BIF_P, BADARG);
- hipe_mfa_set_na(mfa.mod, mfa.fun, mfa.ari, address, is_exported);
- BIF_RET(NIL);
-}
-
-BIF_RETTYPE hipe_bifs_invalidate_funinfo_native_addresses_1(BIF_ALIST_1)
-{
- Eterm lst;
- struct mfa_t mfa;
- struct hipe_mfa_info *p;
-
- hipe_mfa_info_table_rwlock();
- lst = BIF_ARG_1;
- while (is_list(lst)) {
- if (!term_to_mfa(CAR(list_val(lst)), &mfa))
- break;
- lst = CDR(list_val(lst));
- p = hipe_mfa_info_table_get_locked(mfa.mod, mfa.fun, mfa.ari);
- if (p) {
- p->remote_address = NULL;
- p->local_address = NULL;
- if (p->beam_code) {
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mfa.mod, mfa.fun, mfa.ari);
- printf(": removing call trap from BEAM pc %p (new op %#lx)\r\n",
- p->beam_code, p->orig_beam_op);
-#endif
- p->beam_code[0] = p->orig_beam_op;
- p->beam_code = NULL;
- p->orig_beam_op = 0;
- } else {
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mfa.mod, mfa.fun, mfa.ari);
- printf(": no call trap to remove\r\n");
-#endif
- }
- }
}
- hipe_mfa_info_table_rwunlock();
- if (is_not_nil(lst))
- BIF_ERROR(BIF_P, BADARG);
BIF_RET(NIL);
}
-void hipe_mfa_save_orig_beam_op(Eterm mod, Eterm fun, unsigned int ari, Eterm *pc)
+
+/* Ask if we need to block all threads
+ * while loading/deleting code for this module?
+ */
+int hipe_need_blocking(Module* modp)
{
- Uint orig_beam_op;
struct hipe_mfa_info *p;
- orig_beam_op = pc[0];
- if (orig_beam_op != BeamOpCode(op_hipe_trap_call_closure) &&
- orig_beam_op != BeamOpCode(op_hipe_trap_call)) {
- hipe_mfa_info_table_rwlock();
- p = hipe_mfa_info_table_put_rwlocked(mod, fun, ari);
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mod, fun, ari);
- printf(": saving orig op %#lx from BEAM pc %p\r\n", orig_beam_op, pc);
-#endif
- p->beam_code = pc;
- p->orig_beam_op = orig_beam_op;
- hipe_mfa_info_table_rwunlock();
- } else {
-#ifdef DEBUG_LINKER
- printf("%s: ", __FUNCTION__);
- print_mfa(mod, fun, ari);
- printf(": orig op %#lx already saved\r\n", orig_beam_op);
-#endif
+ /* Need to block if we have at least one native caller to this module
+ * or native code to make inaccessible.
+ */
+ hipe_mfa_info_table_rlock();
+ for (p = mod2mfa_get(modp); p; p = p->next_in_mod) {
+ ASSERT(!p->new_address);
+ if (p->callers.next != &p->callers || !p->is_stub) {
+ break;
+ }
}
+ hipe_mfa_info_table_runlock();
+ return (p != NULL);
}
-static void *hipe_make_stub(Eterm m, Eterm f, unsigned int arity, int is_remote)
-{
- Export *export_entry;
- void *StubAddress;
-
- ASSERT(is_remote);
-
- export_entry = erts_export_get_or_make_stub(m, f, arity);
- StubAddress = hipe_make_native_stub(export_entry, arity);
- if (!StubAddress)
- erts_exit(ERTS_ERROR_EXIT, "hipe_make_stub: code allocation failed\r\n");
- return StubAddress;
-}
-
-static void *hipe_get_na_try_locked(Eterm m, Eterm f, unsigned int a, int is_remote, struct hipe_mfa_info **pp)
+static void *hipe_get_na_try_locked(Eterm m, Eterm f, unsigned int a)
{
struct hipe_mfa_info *p;
- void *address;
p = hipe_mfa_info_table_get_locked(m, f, a);
- if (p) {
- /* find address, predicting for a runtime apply call */
- address = p->remote_address;
- if (!is_remote)
- address = p->local_address;
- if (address)
- return address;
-
- /* bummer, install stub, checking if one already existed */
- address = p->remote_address;
- if (address)
- return address;
- }
- /* Caller must take the slow path with the write lock held, but allow
- it to avoid some work if it already holds the write lock. */
- if (pp)
- *pp = p;
- return NULL;
-}
-
-static void *hipe_get_na_slow_rwlocked(Eterm m, Eterm f, unsigned int a, int is_remote, struct hipe_mfa_info *p)
-{
- void *address;
-
- if (!p)
- p = hipe_mfa_info_table_put_rwlocked(m, f, a);
- address = hipe_make_stub(m, f, a, is_remote);
- /* XXX: how to tell if a BEAM MFA is exported or not? */
- p->remote_address = address;
- return address;
+ return p ? p->remote_address : NULL;
}
-static void *hipe_get_na_nofail_rwlocked(Eterm m, Eterm f, unsigned int a, int is_remote)
+static void *hipe_get_na_slow_rwlocked(Eterm m, Eterm f, unsigned int a)
{
- struct hipe_mfa_info *p;
- void *address;
+ struct hipe_mfa_info *p = hipe_mfa_info_table_put_rwlocked(m, f, a);
- address = hipe_get_na_try_locked(m, f, a, is_remote, &p);
- if (address)
- return address;
+ if (!p->remote_address) {
+ Export* export_entry = erts_export_get_or_make_stub(m, f, a);
+ void* stubAddress = hipe_make_native_stub(export_entry, a);
+ if (!stubAddress)
+ erts_exit(ERTS_ERROR_EXIT, "hipe_make_stub: code allocation failed\r\n");
- address = hipe_get_na_slow_rwlocked(m, f, a, is_remote, p);
- return address;
+ p->remote_address = stubAddress;
+ p->is_stub = 1;
+#ifdef DEBUG
+ p->dbg_export = export_entry;
+#endif
+ }
+ return p->remote_address;
}
-static void *hipe_get_na_nofail(Eterm m, Eterm f, unsigned int a, int is_remote)
+static void *hipe_get_na_nofail(Eterm m, Eterm f, unsigned int a)
{
void *address;
hipe_mfa_info_table_rlock();
- address = hipe_get_na_try_locked(m, f, a, is_remote, NULL);
+ address = hipe_get_na_try_locked(m, f, a);
hipe_mfa_info_table_runlock();
if (address)
return address;
hipe_mfa_info_table_rwlock();
- address = hipe_get_na_slow_rwlocked(m, f, a, is_remote, NULL);
+ address = hipe_get_na_slow_rwlocked(m, f, a);
hipe_mfa_info_table_rwunlock();
return address;
}
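
   hipe_get_na_nofail() first probes the table under the read lock and only
   retries under the write lock when no address is found. Because the table
   may change between dropping the read lock and taking the write lock, the
   slow path must look up (or insert) again while write-locked, which
   hipe_get_na_slow_rwlocked() does via hipe_mfa_info_table_put_rwlocked().
   A hedged sketch of the same pattern with plain pthreads (lookup_or_make,
   table_get and table_put are hypothetical; the real code uses the erts
   rwlock wrappers):

       #include <pthread.h>
       #include <stdint.h>

       static pthread_rwlock_t tab_lock = PTHREAD_RWLOCK_INITIALIZER;

       extern void *table_get(uintptr_t key);   /* hypothetical lookup */
       extern void *table_put(uintptr_t key);   /* hypothetical get-or-insert */

       void *lookup_or_make(uintptr_t key)
       {
           void *v;

           pthread_rwlock_rdlock(&tab_lock);    /* fast path: readers only */
           v = table_get(key);
           pthread_rwlock_unlock(&tab_lock);
           if (v)
               return v;

           pthread_rwlock_wrlock(&tab_lock);    /* slow path: may insert */
           v = table_put(key);                  /* re-checks, since another
                                                   writer may have won the race */
           pthread_rwlock_unlock(&tab_lock);
           return v;
       }
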
@@ -1408,11 +1426,12 @@ void *hipe_get_remote_na(Eterm m, Eterm f, unsigned int a)
{
if (is_not_atom(m) || is_not_atom(f) || a > 255)
return NULL;
- return hipe_get_na_nofail(m, f, a, 1);
+ return hipe_get_na_nofail(m, f, a);
}
/* primop, but called like a BIF for error handling purposes */
-BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3)
+/* Called via standard_bif_interface_3 */
+BIF_RETTYPE nbif_impl_hipe_find_na_or_make_stub(NBIF_ALIST_3)
{
Uint arity;
void *address;
@@ -1420,30 +1439,25 @@ BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3)
if (is_not_atom(BIF_ARG_1) || is_not_atom(BIF_ARG_2))
BIF_ERROR(BIF_P, BADARG);
arity = unsigned_val(BIF_ARG_3); /* no error check */
- address = hipe_get_na_nofail(BIF_ARG_1, BIF_ARG_2, arity, 1);
+ address = hipe_get_na_nofail(BIF_ARG_1, BIF_ARG_2, arity);
BIF_RET((Eterm)address); /* semi-Ok */
}
-BIF_RETTYPE hipe_bifs_find_na_or_make_stub_2(BIF_ALIST_2)
+BIF_RETTYPE hipe_bifs_find_na_or_make_stub_1(BIF_ALIST_1)
{
- struct mfa_t mfa;
+ struct hipe_mfa mfa;
void *address;
- int is_remote;
if (!term_to_mfa(BIF_ARG_1, &mfa))
BIF_ERROR(BIF_P, BADARG);
- if (BIF_ARG_2 == am_true)
- is_remote = 1;
- else if (BIF_ARG_2 == am_false)
- is_remote = 0;
- else
- BIF_ERROR(BIF_P, BADARG);
- address = hipe_get_na_nofail(mfa.mod, mfa.fun, mfa.ari, is_remote);
+
+ address = hipe_get_na_nofail(mfa.mod, mfa.fun, mfa.ari);
BIF_RET(address_to_term(address, BIF_P));
}
/* primop, but called like a BIF for error handling purposes */
-BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2)
+/* Called via standard_bif_interface_2 */
+BIF_RETTYPE nbif_impl_hipe_nonclosure_address(NBIF_ALIST_2)
{
Eterm hdr, m, f;
void *address;
@@ -1453,14 +1467,14 @@ BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2)
hdr = *boxed_val(BIF_ARG_1);
if (is_export_header(hdr)) {
Export *ep = (Export*)(export_val(BIF_ARG_1)[1]);
- unsigned int actual_arity = ep->code[2];
+ unsigned int actual_arity = ep->info.mfa.arity;
if (actual_arity != BIF_ARG_2)
goto badfun;
- m = ep->code[0];
- f = ep->code[1];
+ m = ep->info.mfa.module;
+ f = ep->info.mfa.function;
} else
goto badfun;
- address = hipe_get_na_nofail(m, f, BIF_ARG_2, 1);
+ address = hipe_get_na_nofail(m, f, BIF_ARG_2);
BIF_RET((Eterm)address);
badfun:
@@ -1471,77 +1485,31 @@ BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2)
int hipe_find_mfa_from_ra(const void *ra, Eterm *m, Eterm *f, unsigned int *a)
{
- struct hipe_mfa_info *mfa;
- long mfa_offset, ra_offset;
- struct hipe_mfa_info **bucket;
- unsigned int i, nrbuckets;
+ const struct hipe_sdesc* sdesc = hipe_find_sdesc((unsigned long)ra);
- if (hipe_is_ra_mode_switch(ra)) {
+ if (!sdesc || sdesc->m_aix == atom_val(am_Empty))
return 0;
- }
- /* Note about locking: the table is only updated from the
- loader, which runs with the rest of the system suspended. */
- /* XXX: alas not true; see comment at hipe_mfa_info_table.lock */
- hipe_mfa_info_table_rlock();
- bucket = hipe_mfa_info_table.bucket;
- nrbuckets = 1 << hipe_mfa_info_table.log2size;
- mfa = NULL;
- mfa_offset = LONG_MAX;
- for (i = 0; i < nrbuckets; ++i) {
- struct hipe_mfa_info *b = bucket[i];
- while (b != NULL) {
- ra_offset = (char*)ra - (char*)b->local_address;
- if (ra_offset > 0 && ra_offset < mfa_offset) {
- mfa_offset = ra_offset;
- mfa = b;
- }
- b = b->bucket.next;
- }
- }
- if (mfa) {
- *m = mfa->m;
- *f = mfa->f;
- *a = mfa->a;
- }
- hipe_mfa_info_table_runlock();
- return mfa ? 1 : 0;
+ *m = make_atom(sdesc->m_aix);
+ *f = make_atom(sdesc->f_aix);
+ *a = sdesc->a;
+ return 1;
}
-/*
- * Patch Reference Handling.
- */
-struct hipe_mfa_info_list {
- struct hipe_mfa_info *mfa;
- struct hipe_mfa_info_list *next;
-};
-
-struct ref {
- struct hipe_mfa_info *caller_mfa;
- void *address;
- void *trampoline;
- unsigned int flags;
- struct ref *next;
-};
-#define REF_FLAG_IS_LOAD_MFA 1 /* bit 0: 0 == call, 1 == load_mfa */
-#define REF_FLAG_IS_REMOTE 2 /* bit 1: 0 == local, 1 == remote */
-#define REF_FLAG_PENDING_REDIRECT 4 /* bit 2: 1 == pending redirect */
-#define REF_FLAG_PENDING_REMOVE 8 /* bit 3: 1 == pending remove */
-/* add_ref(CalleeMFA, {CallerMFA,Address,'call'|'load_mfa',Trampoline,'remote'|'local'})
+/* add_ref(CalleeMFA, {CallerMFA,Address,'call'|'load_mfa',Trampoline,LoaderState})
*/
BIF_RETTYPE hipe_bifs_add_ref_2(BIF_ALIST_2)
{
- struct mfa_t callee;
+ struct hipe_mfa callee;
Eterm *tuple;
- struct mfa_t caller;
+ struct hipe_mfa caller;
void *address;
void *trampoline;
unsigned int flags;
struct hipe_mfa_info *callee_mfa;
- struct hipe_mfa_info *caller_mfa;
- struct hipe_mfa_info_list *refers_to;
- struct ref *ref;
+ struct hipe_ref *ref;
+ HipeLoaderState* stp;
if (!term_to_mfa(BIF_ARG_1, &callee))
goto badarg;
@@ -1572,63 +1540,90 @@ BIF_RETTYPE hipe_bifs_add_ref_2(BIF_ALIST_2)
if (!trampoline)
goto badarg;
}
- switch (tuple[5]) {
- case am_local:
- break;
- case am_remote:
- flags |= REF_FLAG_IS_REMOTE;
- break;
- default:
- goto badarg;
- }
+ stp = get_loader_state(tuple[5]);
+ if (!stp)
+ goto badarg;
+
hipe_mfa_info_table_rwlock();
callee_mfa = hipe_mfa_info_table_put_rwlocked(callee.mod, callee.fun, callee.ari);
- caller_mfa = hipe_mfa_info_table_put_rwlocked(caller.mod, caller.fun, caller.ari);
-
- refers_to = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*refers_to));
- refers_to->mfa = callee_mfa;
- refers_to->next = caller_mfa->refers_to;
- caller_mfa->refers_to = refers_to;
- ref = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*ref));
- ref->caller_mfa = caller_mfa;
+ ref = erts_alloc(ERTS_ALC_T_HIPE, sizeof(struct hipe_ref));
ref->address = address;
+#if defined(__arm__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)
ref->trampoline = trampoline;
+#endif
ref->flags = flags;
- ref->next = callee_mfa->referred_from;
- callee_mfa->referred_from = ref;
+
+ /*
+ * Link into list of refs to same callee
+ */
+ ASSERT(callee_mfa->callers.next->prev == &callee_mfa->callers);
+ ASSERT(callee_mfa->callers.prev->next == &callee_mfa->callers);
+ ref->head.next = callee_mfa->callers.next;
+ ref->head.prev = &callee_mfa->callers;
+ ref->head.next->prev = &ref->head;
+ ref->head.prev->next = &ref->head;
+
+ /*
+ * Link into list of refs from same module instance
+ */
+ ref->next_from_modi = stp->new_hipe_refs;
+ stp->new_hipe_refs = ref;
+
+#if defined(DEBUG)
+ ref->callee = callee_mfa;
+ ref->caller_m = caller.mod;
+ ref->caller_f = caller.fun;
+ ref->caller_a = caller.ari;
+#endif
hipe_mfa_info_table_rwunlock();
- BIF_RET(NIL);
+ DBG_TRACE_MFA(caller.mod, caller.fun, caller.ari, "add_ref at %p TO %T:%T/%u (from %p)",
+ ref, callee.mod, callee.fun, callee.ari, ref->address);
+ DBG_TRACE_MFA(callee.mod, callee.fun, callee.ari, "add_ref at %p FROM %T:%T/%u (from %p)",
+ ref, caller.mod, caller.fun, caller.ari, ref->address);
+ BIF_RET(am_ok);
badarg:
BIF_ERROR(BIF_P, BADARG);
}
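
   The four pointer assignments on ref->head above splice the new ref in
   directly after the callee's sentinel, i.e. at the front of the callers
   list. Expressed as a hypothetical helper with the same logic:

       struct ref_head { struct ref_head *next, *prev; };

       /* Insert 'n' right after the sentinel 'h'; this mirrors the four
        * assignments performed on ref->head in hipe_bifs_add_ref_2(). */
       static void refs_insert_front(struct ref_head *h, struct ref_head *n)
       {
           n->next       = h->next;
           n->prev       = h;
           n->next->prev = n;
           n->prev->next = n;
       }
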
-/* Given a CalleeMFA, mark each ref to it as pending-redirect.
- * This ensures that remove_refs_from() won't remove them: any
- * removal is instead done at the end of redirect_referred_from().
- */
-BIF_RETTYPE hipe_bifs_mark_referred_from_1(BIF_ALIST_1) /* get_refs_from */
+
+static void unlink_mfa_from_mod(struct hipe_mfa_info* unlink_me)
{
- struct mfa_t mfa;
- const struct hipe_mfa_info *p;
- struct ref *ref;
+ struct hipe_mfa_info* p;
- if (!term_to_mfa(BIF_ARG_1, &mfa))
- BIF_ERROR(BIF_P, BADARG);
- hipe_mfa_info_table_rwlock();
- p = hipe_mfa_info_table_get_locked(mfa.mod, mfa.fun, mfa.ari);
- if (p)
- for (ref = p->referred_from; ref != NULL; ref = ref->next)
- ref->flags |= REF_FLAG_PENDING_REDIRECT;
- hipe_mfa_info_table_rwunlock();
- BIF_RET(NIL);
+ p = hash_get(&mod2mfa_tab, unlink_me);
+ ASSERT(p);
+ if (p == unlink_me) {
+ hash_erase(&mod2mfa_tab, p);
+ if (p->next_in_mod)
+ mod2mfa_put(p->next_in_mod);
+ }
+ else {
+ struct hipe_mfa_info** prevp;
+
+ do {
+ prevp = &p->next_in_mod;
+ p = *prevp;
+ ASSERT(p && p->m == unlink_me->m);
+ } while (p != unlink_me);
+
+ *prevp = p->next_in_mod;
+ }
+}
+
+static void purge_mfa(struct hipe_mfa_info* p)
+{
+ ASSERT(p->is_stub);
+ remove_mfa_info(p);
+ hipe_free_native_stub(p->remote_address);
+ erts_free(ERTS_ALC_T_HIPE, p);
}
/* Called by init:restart after unloading all hipe compiled modules
- * to work around bug causing execution of deallocated beam code.
- * Can be removed when delete/purge of native modules works better.
+ * to work around an old bug that caused execution of deallocated beam code.
+ * Can be removed now that delete/purge of native modules works better.
* Test: Do init:restart in debug compiled vm with hipe compiled kernel.
*/
static void hipe_purge_all_refs(void)
@@ -1638,126 +1633,249 @@ static void hipe_purge_all_refs(void)
hipe_mfa_info_table_rwlock();
+ ASSERT(hipe_mfa_info_table.used == 0);
bucket = hipe_mfa_info_table.bucket;
nrbuckets = 1 << hipe_mfa_info_table.log2size;
for (i = 0; i < nrbuckets; ++i) {
+ ASSERT(bucket[i] == NULL);
while (bucket[i] != NULL) {
struct hipe_mfa_info* mfa = bucket[i];
bucket[i] = mfa->bucket.next;
- while (mfa->refers_to) {
- struct hipe_mfa_info_list *to = mfa->refers_to;
- mfa->refers_to = to->next;
- erts_free(ERTS_ALC_T_HIPE, to);
- }
- while (mfa->referred_from) {
- struct ref* from = mfa->referred_from;
- mfa->referred_from = from->next;
- erts_free(ERTS_ALC_T_HIPE, from);
- }
- erts_free(ERTS_ALC_T_HIPE, mfa);
+ hash_erase(&mod2mfa_tab, mfa);
+ erts_free(ERTS_ALC_T_HIPE, mfa);
}
}
+ hipe_mfa_info_table.used = 0;
hipe_mfa_info_table_rwunlock();
}
BIF_RETTYPE hipe_bifs_remove_refs_from_1(BIF_ALIST_1)
{
- struct mfa_t mfa;
- struct hipe_mfa_info *caller_mfa, *callee_mfa;
- struct hipe_mfa_info_list *refers_to, *tmp_refers_to;
- struct ref **prev, *ref;
-
if (BIF_ARG_1 == am_all) {
hipe_purge_all_refs();
BIF_RET(am_ok);
}
- if (!term_to_mfa(BIF_ARG_1, &mfa))
- BIF_ERROR(BIF_P, BADARG);
- hipe_mfa_info_table_rwlock();
- caller_mfa = hipe_mfa_info_table_get_locked(mfa.mod, mfa.fun, mfa.ari);
- if (caller_mfa) {
- refers_to = caller_mfa->refers_to;
- while (refers_to) {
- callee_mfa = refers_to->mfa;
- prev = &callee_mfa->referred_from;
- ref = *prev;
- while (ref) {
- if (ref->caller_mfa == caller_mfa) {
- if (ref->flags & REF_FLAG_PENDING_REDIRECT) {
- ref->flags |= REF_FLAG_PENDING_REMOVE;
- prev = &ref->next;
- ref = ref->next;
- } else {
- struct ref *tmp = ref;
- ref = ref->next;
- *prev = ref;
- erts_free(ERTS_ALC_T_HIPE, tmp);
- }
- } else {
- prev = &ref->next;
- ref = ref->next;
- }
- }
- tmp_refers_to = refers_to;
- refers_to = refers_to->next;
- erts_free(ERTS_ALC_T_HIPE, tmp_refers_to);
- }
- caller_mfa->refers_to = NULL;
+ ASSERT(!"hipe_bifs_remove_refs_from_1() called");
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+int hipe_purge_need_blocking(Module* modp)
+{
+ /* SVERK: Verify if this is really necessary */
+ if (modp->old.hipe_code) {
+ if (modp->old.hipe_code->first_hipe_ref ||
+ modp->old.hipe_code->first_hipe_sdesc)
+ return 1;
+ }
+ if (!modp->curr.code_hdr) {
+ return mod2mfa_get_safe(modp) != NULL;
+ }
+ return 0;
+}
+
+void hipe_purge_refs(struct hipe_ref* first_ref, Eterm caller_module,
+ int is_blocking)
+{
+ struct hipe_ref* ref = first_ref;
+
+ ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking());
+
+ while (ref) {
+ struct hipe_ref* free_ref = ref;
+
+ DBG_TRACE_MFA(ref->caller_m, ref->caller_f, ref->caller_a, "PURGE ref at %p to %T:%T/%u", ref,
+ ref->callee->m, ref->callee->f, ref->callee->a);
+ DBG_TRACE_MFA(ref->callee->m, ref->callee->f, ref->callee->a, "PURGE ref at %p from %T:%T/%u", ref,
+ ref->caller_m, ref->caller_f, ref->caller_a);
+ ASSERT(ref->caller_m == caller_module);
+
+ /*
+ * Unlink from other refs to same callee
+ */
+ ASSERT(ref->head.next->prev == &ref->head);
+ ASSERT(ref->head.prev->next == &ref->head);
+ ASSERT(ref->head.next != &ref->head);
+ ASSERT(ref->head.prev != &ref->head);
+ ref->head.next->prev = ref->head.prev;
+ ref->head.prev->next = ref->head.next;
+
+ /*
+ * Was this the last ref to that callee?
+ */
+ if (ref->head.next == ref->head.prev) {
+ struct hipe_mfa_info* p = ErtsContainerStruct(ref->head.next, struct hipe_mfa_info, callers);
+ if (p->is_stub) {
+ if (!is_blocking)
+ hipe_mfa_info_table_rwlock();
+ unlink_mfa_from_mod(p);
+ purge_mfa(p);
+ if (!is_blocking)
+ hipe_mfa_info_table_rwunlock();
+ }
+ }
+
+ ref = ref->next_from_modi;
+ erts_free(ERTS_ALC_T_HIPE, free_ref);
+ }
+}
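
   The test ref->head.next == ref->head.prev above is the subtle part: after
   the ref has been unlinked, its stale next/prev pointers still name its old
   neighbours, and they coincide only when the single remaining node is the
   sentinel itself, i.e. this was the callee's last caller. A hedged sketch of
   unlink plus that check (hypothetical helper, not part of the patch):

       struct ref_head { struct ref_head *next, *prev; };

       /* Unlink 'n' and report whether it was the last element, using the
        * same trick as hipe_purge_refs(): after unlinking, n->next and
        * n->prev still point at the old neighbours, and they are equal only
        * when the sole survivor is the sentinel head. */
       static int refs_unlink_was_last(struct ref_head *n)
       {
           n->next->prev = n->prev;
           n->prev->next = n->next;
           return n->next == n->prev;
       }
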
+
+void hipe_purge_sdescs(struct hipe_sdesc* first_sdesc, Eterm module,
+ int is_blocking)
+{
+ struct hipe_sdesc* sdesc = first_sdesc;
+
+ ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking());
+
+ ERTS_SMP_LC_ASSERT(is_blocking); /*XXX Fix safe sdesc destruction */
+
+ while (sdesc) {
+ struct hipe_sdesc* free_sdesc = sdesc;
+
+ DBG_TRACE_MFA(make_atom(sdesc->m_aix), make_atom(sdesc->f_aix), sdesc->a, "PURGE sdesc at %p", (void*)sdesc->bucket.hvalue);
+ ASSERT(make_atom(sdesc->m_aix) == module);
+
+ sdesc = sdesc->next_in_modi;
+ hipe_destruct_sdesc(free_sdesc);
+ }
+}
+
+
+void hipe_purge_module(Module* modp, int is_blocking)
+{
+ ASSERT(modp);
+
+ ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking());
+
+ DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "hipe_purge_module");
+
+ if (modp->old.hipe_code) {
+ /*
+ * Remove all hipe_ref's (external calls) from the old module instance
+ */
+ if (modp->old.hipe_code->first_hipe_ref) {
+ ERTS_SMP_LC_ASSERT(is_blocking);
+
+ hipe_purge_refs(modp->old.hipe_code->first_hipe_ref,
+ make_atom(modp->module), is_blocking);
+ modp->old.hipe_code->first_hipe_ref = NULL;
+ }
+
+ /*
+ * Remove all hipe_sdesc's for the old module instance
+ */
+ if (modp->old.hipe_code->first_hipe_sdesc) {
+ ERTS_SMP_LC_ASSERT(is_blocking);
+
+ hipe_purge_sdescs(modp->old.hipe_code->first_hipe_sdesc,
+ make_atom(modp->module), is_blocking);
+ modp->old.hipe_code->first_hipe_sdesc = NULL;
+ }
+
+ hipe_free_module(modp->old.hipe_code);
+ modp->old.hipe_code = NULL;
+ }
+
+
+ /*
+     * Remove unreferenced hipe_mfa_info's
+ * when all module instances are removed (like in init:restart)
+ */
+ if (is_blocking && modp->curr.code_hdr == NULL) {
+ struct hipe_mfa_info* was_first = mod2mfa_get(modp);
+ struct hipe_mfa_info* is_first = was_first;
+ struct hipe_mfa_info** prevp = &is_first;
+ struct hipe_mfa_info *p;
+
+ if (was_first) {
+ for (p = was_first ; p; p = *prevp) {
+ if (p->callers.next == &p->callers) {
+ *prevp = p->next_in_mod;
+ if (p != was_first)
+ purge_mfa(p);
+ }
+ else
+ prevp = &p->next_in_mod;
+ }
+ if (was_first != is_first) {
+ hash_erase(&mod2mfa_tab, was_first);
+ purge_mfa(was_first);
+ if (is_first)
+ mod2mfa_put(is_first);
+ }
+ }
}
- hipe_mfa_info_table_rwunlock();
- BIF_RET(am_ok);
}
-/* redirect_referred_from(CalleeMFA)
- * Redirect all pending-redirect refs in CalleeMFA's referred_from.
- * Then remove any pending-redirect && pending-remove refs from CalleeMFA's referred_from.
+/*
+ * Redirect all existing native calls to this module
*/
-BIF_RETTYPE hipe_bifs_redirect_referred_from_1(BIF_ALIST_1)
+void hipe_redirect_to_module(Module* modp)
{
- struct mfa_t mfa;
struct hipe_mfa_info *p;
- struct ref **prev, *ref;
- int is_remote, res;
- void *new_address;
+ struct hipe_ref_head* refh;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+
+ for (p = mod2mfa_get(modp); p; p = p->next_in_mod) {
+ if (p->new_address) {
+ if (p->is_stub) {
+ hipe_free_native_stub(p->remote_address);
+ p->is_stub = 0;
+ }
+ DBG_TRACE_MFA(p->m, p->f, p->a, "Commit new_address %p", p->new_address);
+ p->remote_address = p->new_address;
+ p->new_address = NULL;
+#ifdef DEBUG
+ p->dbg_export = NULL;
+#endif
+ }
+ else if (!p->is_stub) {
+ Export* exp = erts_export_get_or_make_stub(p->m, p->f, p->a);
+ p->remote_address = hipe_make_native_stub(exp, p->a);
+ DBG_TRACE_MFA(p->m, p->f, p->a, "Commit stub %p", p->remote_address);
+ if (!p->remote_address)
+ erts_exit(ERTS_ERROR_EXIT, "hipe_make_stub: code allocation failed\r\n");
+ p->is_stub = 1;
+#ifdef DEBUG
+ p->dbg_export = exp;
+#endif
+ }
+ else {
+ DBG_TRACE_MFA(p->m, p->f, p->a, "Commit no-op, already stub");
+ ASSERT(p->remote_address && p->dbg_export);
+ }
- if (!term_to_mfa(BIF_ARG_1, &mfa))
- BIF_ERROR(BIF_P, BADARG);
- hipe_mfa_info_table_rwlock();
- p = hipe_mfa_info_table_get_locked(mfa.mod, mfa.fun, mfa.ari);
- if (p) {
- prev = &p->referred_from;
- ref = *prev;
- while (ref) {
- if (ref->flags & REF_FLAG_PENDING_REDIRECT) {
- is_remote = ref->flags & REF_FLAG_IS_REMOTE;
- new_address = hipe_get_na_nofail_rwlocked(p->m, p->f, p->a, is_remote);
- if (ref->flags & REF_FLAG_IS_LOAD_MFA)
- res = hipe_patch_insn(ref->address, (Uint)new_address, am_load_mfa);
- else
- res = hipe_patch_call(ref->address, new_address, ref->trampoline);
- if (res)
- fprintf(stderr, "%s: patch failed\r\n", __FUNCTION__);
- ref->flags &= ~REF_FLAG_PENDING_REDIRECT;
- if (ref->flags & REF_FLAG_PENDING_REMOVE) {
- struct ref *tmp = ref;
- ref = ref->next;
- *prev = ref;
- erts_free(ERTS_ALC_T_HIPE, tmp);
- } else {
- prev = &ref->next;
- ref = ref->next;
- }
- } else {
- prev = &ref->next;
- ref = ref->next;
- }
+ DBG_TRACE_MFA(p->m,p->f,p->a,"START REDIRECT towards hipe_mfa_info at %p", p);
+ for (refh = p->callers.next; refh != &p->callers; refh = refh->next) {
+ struct hipe_ref* ref = (struct hipe_ref*) refh;
+ int res;
+
+ DBG_TRACE_MFA(p->m,p->f,p->a, " REDIRECT ref at %p FROM %T:%T/%u (%p -> %p)",
+ ref, ref->caller_m, ref->caller_f, ref->caller_a,
+ ref->address, p->remote_address);
+
+ DBG_TRACE_MFA(ref->caller_m, ref->caller_f, ref->caller_a,
+ " REDIRECT ref at %p TO %T:%T/%u (%p -> %p)",
+ ref, p->m,p->f,p->a, ref->address, p->remote_address);
+
+ if (ref->flags & REF_FLAG_IS_LOAD_MFA)
+ res = hipe_patch_insn(ref->address, (Uint)p->remote_address, am_load_mfa);
+ else {
+#if defined(__arm__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)
+ void* trampoline = ref->trampoline;
+#else
+ void* trampoline = NULL;
+#endif
+ res = hipe_patch_call(ref->address, p->remote_address, trampoline);
+ }
+ if (res)
+ fprintf(stderr, "%s: patch failed\r\n", __FUNCTION__);
}
+ DBG_TRACE_MFA(p->m,p->f,p->a,"DONE REDIRECT towards hipe_mfa_info at %p", p);
}
- hipe_mfa_info_table_rwunlock();
- BIF_RET(NIL);
}
BIF_RETTYPE hipe_bifs_check_crc_1(BIF_ALIST_1)
@@ -1811,89 +1929,6 @@ void hipe_patch_address(Uint *address, Eterm patchtype, Uint value)
}
}
-struct modinfo {
- HashBucket bucket; /* bucket.hvalue == atom_val(the module name) */
- unsigned int code_size;
-};
-
-static Hash modinfo_table;
-
-static HashValue modinfo_hash(void *tmpl)
-{
- Eterm mod = (Eterm)tmpl;
- return atom_val(mod);
-}
-
-static int modinfo_cmp(void *tmpl, void *bucket)
-{
- /* bucket->hvalue == modinfo_hash(tmpl), so just return 0 (match) */
- return 0;
-}
-
-static void *modinfo_alloc(void *tmpl)
-{
- struct modinfo *p;
-
- p = (struct modinfo*)erts_alloc(ERTS_ALC_T_HIPE, sizeof(*p));
- p->code_size = 0;
- return &p->bucket;
-}
-
-static void init_modinfo_table(void)
-{
- HashFunctions f;
- static int init_done = 0;
-
- if (init_done)
- return;
- init_done = 1;
- f.hash = (H_FUN) modinfo_hash;
- f.cmp = (HCMP_FUN) modinfo_cmp;
- f.alloc = (HALLOC_FUN) modinfo_alloc;
- f.free = (HFREE_FUN) NULL;
- f.meta_alloc = (HMALLOC_FUN) erts_alloc;
- f.meta_free = (HMFREE_FUN) erts_free;
- f.meta_print = (HMPRINT_FUN) erts_print;
- hash_init(ERTS_ALC_T_HIPE, &modinfo_table, "modinfo_table", 11, f);
-}
-
-BIF_RETTYPE hipe_bifs_update_code_size_3(BIF_ALIST_3)
-{
- struct modinfo *p;
- Sint code_size;
-
- init_modinfo_table();
-
- if (is_not_atom(BIF_ARG_1) ||
- is_not_small(BIF_ARG_3) ||
- (code_size = signed_val(BIF_ARG_3)) < 0)
- BIF_ERROR(BIF_P, BADARG);
-
- p = (struct modinfo*)hash_put(&modinfo_table, (void*)BIF_ARG_1);
-
- if (is_nil(BIF_ARG_2)) /* some MFAs, not whole module */
- p->code_size += code_size;
- else /* whole module */
- p->code_size = code_size;
- BIF_RET(NIL);
-}
-
-BIF_RETTYPE hipe_bifs_code_size_1(BIF_ALIST_1)
-{
- struct modinfo *p;
- unsigned int code_size;
-
- init_modinfo_table();
-
- if (is_not_atom(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
-
- p = (struct modinfo*)hash_get(&modinfo_table, (void*)BIF_ARG_1);
-
- code_size = p ? p->code_size : 0;
- BIF_RET(make_small(code_size));
-}
-
BIF_RETTYPE hipe_bifs_patch_insn_3(BIF_ALIST_3)
{
Uint *address, value;
@@ -1929,3 +1964,23 @@ BIF_RETTYPE hipe_bifs_patch_call_3(BIF_ALIST_3)
BIF_ERROR(BIF_P, BADARG);
BIF_RET(NIL);
}
+
+BIF_RETTYPE hipe_bifs_alloc_loader_state_1(BIF_ALIST_1)
+{
+ Binary *magic;
+ Eterm *hp;
+ Eterm res;
+
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ magic = hipe_alloc_loader_state(BIF_ARG_1);
+
+ if (!magic)
+ BIF_ERROR(BIF_P, BADARG);
+
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->refc, 1);
+ BIF_RET(res);
+}
diff --git a/erts/emulator/hipe/hipe_bif0.h b/erts/emulator/hipe/hipe_bif0.h
index c9a8216368..811c3801c1 100644
--- a/erts/emulator/hipe/hipe_bif0.h
+++ b/erts/emulator/hipe/hipe_bif0.h
@@ -30,19 +30,16 @@ extern Uint *hipe_bifs_find_pc_from_mfa(Eterm mfa);
extern void hipe_mfa_info_table_init(void);
extern void *hipe_get_remote_na(Eterm m, Eterm f, unsigned int a);
-extern BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3);
+extern BIF_RETTYPE nbif_impl_hipe_find_na_or_make_stub(NBIF_ALIST_3);
extern int hipe_find_mfa_from_ra(const void *ra, Eterm *m, Eterm *f, unsigned int *a);
-#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
-extern void *hipe_mfa_get_trampoline(Eterm m, Eterm f, unsigned int a);
-extern void hipe_mfa_set_trampoline(Eterm m, Eterm f, unsigned int a, void *trampoline);
-#endif
-#if defined(__arm__)
-extern void *hipe_primop_get_trampoline(Eterm name);
-extern void hipe_primop_set_trampoline(Eterm name, void *trampoline);
-#endif
/* needed in beam_load.c */
-void hipe_mfa_save_orig_beam_op(Eterm m, Eterm f, unsigned int a, Eterm *pc);
+int hipe_need_blocking(Module*);
+int hipe_purge_need_blocking(Module*);
+void hipe_purge_refs(struct hipe_ref*, Eterm, int is_blocking);
+void hipe_purge_sdescs(struct hipe_sdesc*, Eterm, int is_blocking);
+void hipe_purge_module(Module*, int is_blocking);
+void hipe_redirect_to_module(Module* modp);
/* these are also needed in hipe_amd64.c */
extern void *term_to_address(Eterm);
diff --git a/erts/emulator/hipe/hipe_bif0.tab b/erts/emulator/hipe/hipe_bif0.tab
index 99237aae05..264ea2c34a 100644
--- a/erts/emulator/hipe/hipe_bif0.tab
+++ b/erts/emulator/hipe/hipe_bif0.tab
@@ -44,22 +44,20 @@ bif hipe_bifs:ref/1
bif hipe_bifs:ref_get/1
bif hipe_bifs:ref_set/2
-bif hipe_bifs:enter_code/2
-bif hipe_bifs:alloc_data/2
+bif hipe_bifs:enter_code/3
+bif hipe_bifs:alloc_data/3
bif hipe_bifs:constants_size/0
bif hipe_bifs:merge_term/1
bif hipe_bifs:fun_to_address/1
+bif hipe_bifs:commit_patch_load/1
bif hipe_bifs:set_native_address/3
#bif hipe_bifs:address_to_fun/1
bif hipe_bifs:set_funinfo_native_address/3
-bif hipe_bifs:invalidate_funinfo_native_addresses/1
+#bif hipe_bifs:invalidate_funinfo_native_addresses/1
-bif hipe_bifs:update_code_size/3
-bif hipe_bifs:code_size/1
-
-bif hipe_bifs:enter_sdesc/1
+bif hipe_bifs:enter_sdesc/2
bif hipe_bifs:bif_address/3
bif hipe_bifs:primop_address/1
@@ -72,7 +70,7 @@ bif hipe_bifs:term_to_word/1
bif hipe_bifs:get_fe/2
bif hipe_bifs:set_native_address_in_fe/2
-bif hipe_bifs:find_na_or_make_stub/2
+bif hipe_bifs:find_na_or_make_stub/1
bif hipe_bifs:check_crc/1
bif hipe_bifs:system_crc/0
@@ -84,9 +82,9 @@ bif hipe_bifs:patch_insn/3
bif hipe_bifs:patch_call/3
bif hipe_bifs:add_ref/2
-bif hipe_bifs:mark_referred_from/1
bif hipe_bifs:remove_refs_from/1
-bif hipe_bifs:redirect_referred_from/1
+
+bif hipe_bifs:alloc_loader_state/1
# atoms used by add_ref/2
atom call
diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c
index 5e127755c6..0c66eb6abe 100644
--- a/erts/emulator/hipe/hipe_bif1.c
+++ b/erts/emulator/hipe/hipe_bif1.c
@@ -45,7 +45,7 @@ BIF_RETTYPE hipe_bifs_call_count_on_1(BIF_ALIST_1)
pc = hipe_bifs_find_pc_from_mfa(BIF_ARG_1);
if (!pc)
BIF_ERROR(BIF_P, BADARG);
- ASSERT(pc[-5] == BeamOpCode(op_i_func_info_IaaI));
+ ASSERT(pc[-6] == BeamOpCode(op_i_func_info_IaaI));
if (pc[0] == BeamOpCode(op_hipe_trap_call))
BIF_ERROR(BIF_P, BADARG);
if (pc[0] == BeamOpCode(op_hipe_call_count))
@@ -67,7 +67,7 @@ BIF_RETTYPE hipe_bifs_call_count_off_1(BIF_ALIST_1)
pc = hipe_bifs_find_pc_from_mfa(BIF_ARG_1);
if (!pc)
BIF_ERROR(BIF_P, BADARG);
- ASSERT(pc[-5] == BeamOpCode(op_i_func_info_IaaI));
+ ASSERT(pc[-6] == BeamOpCode(op_i_func_info_IaaI));
if (pc[0] != BeamOpCode(op_hipe_call_count))
BIF_RET(am_false);
hcc = (struct hipe_call_count*)pc[-4];
@@ -86,7 +86,7 @@ BIF_RETTYPE hipe_bifs_call_count_get_1(BIF_ALIST_1)
pc = hipe_bifs_find_pc_from_mfa(BIF_ARG_1);
if (!pc)
BIF_ERROR(BIF_P, BADARG);
- ASSERT(pc[-5] == BeamOpCode(op_i_func_info_IaaI));
+ ASSERT(pc[-6] == BeamOpCode(op_i_func_info_IaaI));
if (pc[0] != BeamOpCode(op_hipe_call_count))
BIF_RET(am_false);
hcc = (struct hipe_call_count*)pc[-4];
@@ -102,7 +102,7 @@ BIF_RETTYPE hipe_bifs_call_count_clear_1(BIF_ALIST_1)
pc = hipe_bifs_find_pc_from_mfa(BIF_ARG_1);
if (!pc)
BIF_ERROR(BIF_P, BADARG);
- ASSERT(pc[-5] == BeamOpCode(op_i_func_info_IaaI));
+ ASSERT(pc[-6] == BeamOpCode(op_i_func_info_IaaI));
if (pc[0] != BeamOpCode(op_hipe_call_count))
BIF_RET(am_false);
hcc = (struct hipe_call_count*)pc[-4];
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index dfd34e31d4..e04d3d32d1 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -155,7 +155,7 @@ BIF_RETTYPE hipe_bifs_modeswitch_debug_off_0(BIF_ALIST_0)
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1);
+BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1);
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
@@ -163,13 +163,13 @@ BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1);
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
-BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1)
+BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1)
{
- typedef BIF_RETTYPE Bif(BIF_ALIST_1);
- Bif* fp = (Bif*) (BIF_P->hipe.bif_callee);
+ typedef BIF_RETTYPE nBif(NBIF_ALIST_1);
+ nBif* fp = (nBif*) (BIF_P->hipe.bif_callee);
BIF_RETTYPE res;
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(BIF_P);
- res = (*fp)(BIF_P, BIF__ARGS);
+ res = (*fp)(NBIF_CALL_ARGS);
ERTS_SMP_REQ_PROC_MAIN_LOCK(BIF_P);
return res;
}
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index dcf3447af9..f034c4700c 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -71,6 +71,32 @@
****************************************************************/
/*
+ * NOTE:
+ * Beam BIFs have the prototype:
+ * Eterm (*BIF)(Process *c_p, Eterm *regs, UWord *I)
+ * Native BIFs have the prototype:
+ * Eterm (*BIF)(Process *c_p, Eterm *regs)
+ *
+ * Beam BIFs expect 'I' to contain the current instruction
+ * pointer when called from beam, and expect 'I' to
+ * contain a pointer to the export entry of the BIF
+ * when called from native code. In order to facilitate
+ * this, beam BIFs are called via wrapper functions
+ * when called from native code. These wrapper functions
+ * are auto-generated (by utils/make_tables) and have
+ * the function names nbif_impl_<BIF>.
+ *
+ * The standard_bif_interface_*() and
+ * gc_bif_interface_*() macros add the prefix and
+ * thus call nbif_impl_<cbif_name>. That is, all
+ * functions (true BIFs as well as other c-functions)
+ * called via these interfaces have to be named
+ * nbif_impl_<FUNC>.
+ */
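
   As a concrete illustration of the naming rule (hypothetical BIF foo/1;
   the real wrappers are generated by utils/make_tables, not written by hand):

       /* BEAM entry point: takes the instruction pointer / export entry 'I' */
       Eterm foo_1(Process *c_p, Eterm *regs, UWord *I);

       /* What native code reaches via standard_bif_interface_1(nbif_foo_1, foo_1):
        * the generated wrapper named by adding the nbif_impl_ prefix. */
       Eterm nbif_impl_foo_1(Process *c_p, Eterm *regs);
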
+
+/*
+ * See NOTE above!
+ *
* standard_bif_interface_0(nbif_name, cbif_name)
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
@@ -93,6 +119,8 @@
*/
/*
+ * See NOTE above!
+ *
* gc_bif_interface_0(nbif_name, cbif_name)
* gc_bif_interface_1(nbif_name, cbif_name)
* gc_bif_interface_2(nbif_name, cbif_name)
@@ -153,9 +181,9 @@ standard_bif_interface_0(nbif_ports_0, ports_0)
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
* XXX: erase/1 and put/2 cannot fail
*/
-gc_bif_interface_2(nbif_erts_internal_check_process_code_2, hipe_erts_internal_check_process_code_2)
+gc_bif_interface_1(nbif_erts_internal_check_process_code_1, hipe_erts_internal_check_process_code_1)
gc_bif_interface_1(nbif_erase_1, erase_1)
-gc_bif_interface_0(nbif_garbage_collect_0, garbage_collect_0)
+gc_bif_interface_1(nbif_erts_internal_garbage_collect_1, erts_internal_garbage_collect_1)
gc_nofail_primop_interface_1(nbif_gc_1, hipe_gc)
gc_bif_interface_2(nbif_put_2, put_2)
@@ -247,7 +275,7 @@ nocons_nofail_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_intege
noproc_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer)
')dnl
-gc_bif_interface_0(nbif_check_get_msg, hipe_check_get_msg)
+nofail_primop_interface_0(nbif_check_get_msg, hipe_check_get_msg)
#`ifdef' NO_FPE_SIGNALS
nocons_nofail_primop_interface_0(nbif_emulate_fpe, hipe_emulate_fpe)
@@ -291,8 +319,8 @@ gc_bif_interface_2(nbif_maps_merge_2, hipe_wrapper_maps_merge_2)
* BIF_LIST(ModuleAtom,FunctionAtom,Arity,CFun,Index)
*/
-define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, $4)')
-include(TARGET/`erl_bif_list.h')
+define(BIF_LIST,`standard_bif_interface_$3(nbif_$5, $5)')
+include(TTF_DIR/`erl_bif_list.h')
/*
* Guard BIFs.
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index ace489452f..222a11db3d 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -62,10 +62,12 @@ static void print_beam_pc(BeamInstr *pc)
} else if (pc == &beam_apply[1]) {
printf("normal-process-exit");
} else {
- BeamInstr *mfa = find_function_from_pc(pc);
- if (mfa)
+ ErtsCodeMFA *cmfa = find_function_from_pc(pc);
+ if (cmfa)
erts_printf("%T:%T/%bpu + 0x%bpx",
- mfa[0], mfa[1], mfa[2], pc - &mfa[3]);
+ cmfa->module, cmfa->function,
+ cmfa->arity,
+ pc - erts_codemfa_to_code(cmfa));
else
printf("?");
}
@@ -214,10 +216,10 @@ void hipe_print_pcb(Process *p)
U("seq..clock ", seq_trace_clock);
U("seq..astcnt", seq_trace_lastcnt);
U("seq..token ", seq_trace_token);
- U("intial[0] ", u.initial[0]);
- U("intial[1] ", u.initial[1]);
- U("intial[2] ", u.initial[2]);
- P("current ", current);
+ U("intial.mod ", u.initial.module);
+ U("intial.fun ", u.initial.function);
+ U("intial.ari ", u.initial.arity);
+ U("current ", current);
P("cp ", cp);
P("i ", i);
U("catches ", catches);
diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c
index 68c65dea27..2311beb34a 100644
--- a/erts/emulator/hipe/hipe_gc.c
+++ b/erts/emulator/hipe/hipe_gc.c
@@ -38,7 +38,7 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
/* known nstack walk state */
Eterm *nsp;
Eterm *nsp_end;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
unsigned int sdesc_size;
unsigned long ra;
unsigned int i;
@@ -91,7 +91,7 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
ASSERT(is_boxed(val));
*nsp_i = val;
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_BOXED(ptr, val, n_htop, nsp_i);
+ move_boxed(&ptr, val, &n_htop, nsp_i);
}
} else if (is_list(gval)) {
Eterm *ptr = list_val(gval);
@@ -99,8 +99,8 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
if (IS_MOVED_CONS(val)) {
*nsp_i = ptr[1];
} else if (!erts_is_literal(gval, ptr)) {
- ASSERT(within(ptr, p));
- MOVE_CONS(ptr, val, n_htop, nsp_i);
+ ASSERT(erts_dbg_within_proc(ptr, p, NULL));
+ move_cons(&ptr, val, &n_htop, nsp_i);
}
}
}
@@ -123,7 +123,7 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
/* known nstack walk state */
Eterm *nsp;
Eterm *nsp_end;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
unsigned int sdesc_size;
unsigned long ra;
unsigned int i;
@@ -206,10 +206,10 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
ASSERT(is_boxed(val));
*nsp_i = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr, val, old_htop, nsp_i);
+ move_boxed(&ptr, val, &old_htop, nsp_i);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- ASSERT(within(ptr, p));
- MOVE_BOXED(ptr, val, n_htop, nsp_i);
+ ASSERT(erts_dbg_within_proc(ptr, p, NULL));
+ move_boxed(&ptr, val, &n_htop, nsp_i);
}
} else if (is_list(gval)) {
Eterm *ptr = list_val(gval);
@@ -217,10 +217,10 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
if (IS_MOVED_CONS(val)) {
*nsp_i = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr, val, old_htop, nsp_i);
+ move_cons(&ptr, val, &old_htop, nsp_i);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- ASSERT(within(ptr, p));
- MOVE_CONS(ptr, val, n_htop, nsp_i);
+ ASSERT(erts_dbg_within_proc(ptr, p, NULL));
+ move_cons(&ptr, val, &n_htop, nsp_i);
}
}
}
@@ -244,7 +244,7 @@ Eterm *sweep_literals_nstack(Process *p, Eterm *old_htop, char *area,
/* known nstack walk state */
Eterm *nsp;
Eterm *nsp_end;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
/* arch-specific nstack walk state */
struct nstack_walk_state walk_state;
@@ -278,7 +278,7 @@ Eterm *sweep_literals_nstack(Process *p, Eterm *old_htop, char *area,
ASSERT(is_boxed(val));
*nsp_i = val;
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_BOXED(ptr, val, old_htop, nsp_i);
+ move_boxed(&ptr, val, &old_htop, nsp_i);
}
} else if (is_list(gval)) {
Eterm *ptr = list_val(gval);
@@ -286,7 +286,7 @@ Eterm *sweep_literals_nstack(Process *p, Eterm *old_htop, char *area,
if (IS_MOVED_CONS(val)) {
*nsp_i = ptr[1];
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_CONS(ptr, val, old_htop, nsp_i);
+ move_cons(&ptr, val, &old_htop, nsp_i);
}
}
}
@@ -311,7 +311,7 @@ nstack_any_heap_ref_ptrs(Process *rp, char* mod_start, Uint mod_size)
{
Eterm *nsp;
Eterm *nsp_end;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
/* arch-specific nstack walk state */
struct nstack_walk_state walk_state;
@@ -356,3 +356,36 @@ nstack_any_heap_ref_ptrs(Process *rp, char* mod_start, Uint mod_size)
}
return 0;
}
+
+int
+nstack_any_cps_in_segment(Process *p, char* seg_start, Uint seg_size)
+{
+ Eterm *nsp;
+ Eterm *nsp_end;
+ const struct hipe_sdesc *sdesc;
+ /* arch-specific nstack walk state */
+ struct nstack_walk_state walk_state;
+
+ if (!p->hipe.nstack || !nstack_walk_init_check(p))
+ return 0;
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
+ nsp = nstack_walk_nsp_begin(p);
+ nsp_end = nstack_walk_nsp_end(p);
+ sdesc = nstack_walk_init_sdesc_ignore_trap(p, &walk_state);
+
+ /* Check the topmost frame */
+ if (ErtsInArea(sdesc->bucket.hvalue, seg_start, seg_size))
+ return 1;
+
+ while (!nstack_walk_nsp_reached_end(nsp, nsp_end)) {
+ unsigned sdesc_size = nstack_walk_frame_size(sdesc);
+ unsigned long ra = nstack_walk_frame_ra(nsp, sdesc);
+ if (ra == (unsigned long)nbif_stack_trap_ra)
+ ra = (unsigned long)p->hipe.ngra;
+ if (ErtsInArea(ra, seg_start, seg_size))
+ return 1;
+ sdesc = hipe_find_sdesc(ra);
+ nsp = nstack_walk_next_frame(nsp, sdesc_size);
+ }
+ return 0;
+}
diff --git a/erts/emulator/hipe/hipe_load.c b/erts/emulator/hipe/hipe_load.c
new file mode 100644
index 0000000000..87c5004d2b
--- /dev/null
+++ b/erts/emulator/hipe/hipe_load.c
@@ -0,0 +1,106 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * hipe_load.c
+ *
+ * HiPE atomic code loader
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include "sys.h"
+#include "global.h"
+#include "erl_binary.h"
+#include "hipe_load.h"
+#include "hipe_bif0.h"
+
+void hipe_free_loader_state(HipeLoaderState *stp)
+{
+ if (stp->module == NIL) return;
+
+ // TODO: Needs to be freed separately. We'd like to have a unified executable
+ // code allocator, so postpone this for now.
+ /* if (stp->text_segment) */
+ /* erts_free(ERTS_ALC_T_HIPE, stp->text_segment); */
+ stp->text_segment = NULL;
+ stp->text_segment_size = 0;
+
+ if (stp->data_segment)
+ erts_free(ERTS_ALC_T_HIPE, stp->data_segment);
+ stp->data_segment = NULL;
+ stp->data_segment_size = 0;
+
+ if (stp->new_hipe_refs) {
+ hipe_purge_refs(stp->new_hipe_refs, stp->module, 0);
+ stp->new_hipe_refs = NULL;
+ }
+ if (stp->new_hipe_sdesc) {
+ hipe_purge_sdescs(stp->new_hipe_sdesc, stp->module, 0);
+ stp->new_hipe_sdesc = NULL;
+ }
+
+ stp->module = NIL;
+}
+
+static int
+hipe_loader_state_dtor(Binary* magic)
+{
+ HipeLoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(magic) == hipe_loader_state_dtor);
+
+ hipe_free_loader_state(stp);
+ return 1;
+}
+
+Binary *hipe_alloc_loader_state(Eterm module)
+{
+ HipeLoaderState *stp;
+ Binary *magic;
+
+ if (is_not_atom(module)) return NULL;
+
+ magic = erts_create_magic_binary(sizeof(HipeLoaderState),
+ hipe_loader_state_dtor);
+ erts_refc_inc(&magic->refc, 1);
+ stp = ERTS_MAGIC_BIN_DATA(magic);
+
+ stp->module = module;
+ stp->text_segment = NULL;
+ stp->text_segment_size = 0;
+ stp->data_segment = NULL;
+ stp->data_segment_size = 0;
+
+ stp->new_hipe_refs = NULL;
+ stp->new_hipe_sdesc = NULL;
+
+ return magic;
+}
+
+HipeLoaderState *
+hipe_get_loader_state(Binary *magic)
+{
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != hipe_loader_state_dtor)
+ return NULL;
+
+ return (HipeLoaderState*) ERTS_MAGIC_BIN_DATA(magic);
+}
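
   hipe_get_loader_state() trusts a binary only if its destructor pointer is
   the one installed by hipe_alloc_loader_state(); comparing the destructor is
   how ERTS code typically type-checks a magic binary before casting its data
   area. A reduced sketch of that pattern (MyState, my_state_dtor and
   my_get_state are hypothetical):

       typedef struct { int dummy; } MyState;

       static int my_state_dtor(Binary *magic)
       {
           /* release anything owned by the MyState here */
           return 1;
       }

       static MyState *my_get_state(Binary *magic)
       {
           if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != my_state_dtor)
               return NULL;                 /* some other kind of magic binary */
           return (MyState *) ERTS_MAGIC_BIN_DATA(magic);
       }
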
diff --git a/erts/emulator/hipe/hipe_load.h b/erts/emulator/hipe/hipe_load.h
new file mode 100644
index 0000000000..40c8a8aa2a
--- /dev/null
+++ b/erts/emulator/hipe/hipe_load.h
@@ -0,0 +1,48 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * hipe_load.h
+ *
+ * HiPE atomic code loader
+ */
+#ifndef HIPE_LOAD_H
+#define HIPE_LOAD_H
+
+#include "global.h"
+
+typedef struct hipe_loader_state {
+ Eterm module; /* Module name, atom */
+
+ void *text_segment;
+ Uint text_segment_size;
+
+ void *data_segment;
+ Uint data_segment_size;
+
+ struct hipe_ref* new_hipe_refs;
+ struct hipe_sdesc* new_hipe_sdesc;
+
+} HipeLoaderState;
+
+extern Binary *hipe_alloc_loader_state(Eterm module);
+extern void hipe_free_loader_state(HipeLoaderState*);
+extern HipeLoaderState *hipe_get_loader_state(Binary *binary);
+
+#endif /* HIPE_LOAD_H */
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index b9d4226705..4573980e1e 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -435,9 +435,6 @@ static const struct rts_param rts_params[] = {
presence or absence of struct erl_fun_thing's "next" field. */
{ 5, "EFT_CREATOR", 1, offsetof(struct erl_fun_thing, creator) },
{ 6, "EFT_FE", 1, offsetof(struct erl_fun_thing, fe) },
-#ifdef HIPE
- { 7, "EFT_NATIVE_ADDRESS", 1, offsetof(struct erl_fun_thing, native_address) },
-#endif
{ 8, "EFT_ARITY", 1, offsetof(struct erl_fun_thing, arity) },
{ 9, "EFT_NUM_FREE", 1, offsetof(struct erl_fun_thing, num_free) },
{ 10, "EFT_ENV", 1, offsetof(struct erl_fun_thing, env[0]) },
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index ed95045292..f11223d8b0 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -174,33 +174,6 @@ void hipe_mode_switch_init(void)
make_catch(beam_catches_cons(hipe_beam_pc_throw, BEAM_CATCHES_NIL));
hipe_mfa_info_table_init();
-
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__linux__)
- /* Verify that the offset of c-p->hipe does not change.
- The ErLLVM hipe backend depends on it being in a specific
- position. Kostis et al has promised to fix this in upstream
- llvm by OTP 20, so it should be possible to remove these asserts
- after that. */
- ERTS_CT_ASSERT(sizeof(ErtsPTabElementCommon) ==
- (sizeof(Eterm) + /* id */
- sizeof(((ErtsPTabElementCommon*)0)->refc) +
- sizeof(ErtsTracer) + /* tracer */
- sizeof(Uint) + /* trace_flags */
- sizeof(erts_smp_atomic_t) + /* timer */
- sizeof(((ErtsPTabElementCommon*)0)->u)));
-
- ERTS_CT_ASSERT(offsetof(Process, hipe) ==
- (sizeof(ErtsPTabElementCommon) + /* common */
- sizeof(Eterm*) + /* htop */
- sizeof(Eterm*) + /* stop */
- sizeof(Eterm*) + /* heap */
- sizeof(Eterm*) + /* hend */
- sizeof(Uint) + /* heap_sz */
- sizeof(Uint) + /* min_heap_size */
- sizeof(Uint) + /* min_vheap_size */
- sizeof(volatile unsigned long))); /* fp_exception */
-#endif
-
}
void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure)
@@ -718,12 +691,9 @@ void hipe_empty_nstack(Process *p)
p->hipe.nstend = NULL;
}
-void hipe_set_closure_stub(ErlFunEntry *fe, unsigned num_free)
+void hipe_set_closure_stub(ErlFunEntry *fe)
{
- unsigned arity;
-
- arity = fe->arity;
- fe->native_address = (Eterm*) hipe_closure_stub_address(arity);
+ fe->native_address = (Eterm*) hipe_closure_stub_address(fe->arity);
}
Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s)
diff --git a/erts/emulator/hipe/hipe_mode_switch.h b/erts/emulator/hipe/hipe_mode_switch.h
index e54b81cf78..334e978307 100644
--- a/erts/emulator/hipe/hipe_mode_switch.h
+++ b/erts/emulator/hipe/hipe_mode_switch.h
@@ -59,12 +59,11 @@ void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure);
Process *hipe_mode_switch(Process*, unsigned, Eterm*);
void hipe_inc_nstack(Process *p);
void hipe_empty_nstack(Process *p);
-void hipe_set_closure_stub(ErlFunEntry *fe, unsigned num_free);
+void hipe_set_closure_stub(ErlFunEntry *fe);
Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s);
ERTS_GLB_INLINE void hipe_reserve_beam_trap_frame(Process*, Eterm reg[], unsigned arity);
ERTS_GLB_INLINE void hipe_unreserve_beam_trap_frame(Process*);
-ERTS_GLB_INLINE int hipe_is_ra_mode_switch(const void* ra);
extern Uint hipe_beam_pc_return[];
extern Uint hipe_beam_pc_throw[];
@@ -113,11 +112,6 @@ ERTS_GLB_INLINE void hipe_unreserve_beam_trap_frame(Process *p)
p->stop += 2;
}
-ERTS_GLB_INLINE int hipe_is_ra_mode_switch(const void* ra)
-{
- return ra == nbif_return;
-}
-
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ASM */
diff --git a/erts/emulator/hipe/hipe_module.c b/erts/emulator/hipe/hipe_module.c
new file mode 100644
index 0000000000..469f077dd2
--- /dev/null
+++ b/erts/emulator/hipe/hipe_module.c
@@ -0,0 +1,35 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include "sys.h"
+#include "hipe_arch.h"
+#include "hipe_module.h"
+
+void hipe_free_module(HipeModule *mod)
+{
+ hipe_free_code(mod->text_segment, mod->text_segment_size);
+ if (mod->data_segment) /* Some modules lack data segments */
+ erts_free(ERTS_ALC_T_HIPE, mod->data_segment);
+
+ erts_free(ERTS_ALC_T_HIPE, mod);
+}
diff --git a/erts/emulator/hipe/hipe_module.h b/erts/emulator/hipe/hipe_module.h
new file mode 100644
index 0000000000..b489f567cb
--- /dev/null
+++ b/erts/emulator/hipe/hipe_module.h
@@ -0,0 +1,45 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * hipe_module.h
+ *
+ *
+ */
+#ifndef HIPE_MODULE_H
+#define HIPE_MODULE_H
+
+/* Forward-declare type to resolve circular dependency with module.h */
+typedef struct hipe_module HipeModule;
+
+#include "global.h"
+
+struct hipe_module {
+ void *text_segment;
+ Uint text_segment_size;
+ void *data_segment;
+
+ struct hipe_ref* first_hipe_ref; /* all external hipe calls from this module */
+ struct hipe_sdesc* first_hipe_sdesc; /* all stack descriptors for this module */
+};
+
+extern void hipe_free_module(HipeModule *mod);
+
+#endif /* HIPE_MODULE_H */
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index a2165958a9..e581f07f56 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -42,8 +42,8 @@
*/
/* for -Wmissing-prototypes :-( */
-extern Eterm hipe_erts_internal_check_process_code_2(BIF_ALIST_2);
-extern Eterm hipe_show_nstack_1(BIF_ALIST_1);
+extern Eterm nbif_impl_hipe_erts_internal_check_process_code_1(NBIF_ALIST_1);
+extern Eterm nbif_impl_hipe_show_nstack_1(NBIF_ALIST_1);
/* Used when a BIF can trigger a stack walk. */
static __inline__ void hipe_set_narity(Process *p, unsigned int arity)
@@ -51,22 +51,24 @@ static __inline__ void hipe_set_narity(Process *p, unsigned int arity)
p->hipe.narity = arity;
}
-Eterm hipe_erts_internal_check_process_code_2(BIF_ALIST_2)
+/* Called via standard_bif_interface_2 */
+Eterm nbif_impl_hipe_erts_internal_check_process_code_1(NBIF_ALIST_1)
{
Eterm ret;
- hipe_set_narity(BIF_P, 2);
- ret = erts_internal_check_process_code_2(BIF_P, BIF__ARGS);
+ hipe_set_narity(BIF_P, 1);
+ ret = nbif_impl_erts_internal_check_process_code_1(NBIF_CALL_ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
-Eterm hipe_show_nstack_1(BIF_ALIST_1)
+/* Called via standard_bif_interface_1 */
+Eterm nbif_impl_hipe_show_nstack_1(NBIF_ALIST_1)
{
Eterm ret;
hipe_set_narity(BIF_P, 1);
- ret = hipe_bifs_show_nstack_1(BIF_P, BIF__ARGS);
+ ret = nbif_impl_hipe_bifs_show_nstack_1(NBIF_CALL_ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -89,7 +91,7 @@ void hipe_gc(Process *p, Eterm need)
* has begun.
* XXX: BUG: native code should check return status
*/
-BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1)
+BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1)
{
Process* p = BIF_P;
Eterm timeout_value = BIF_ARG_1;
@@ -270,10 +272,10 @@ static struct StackTrace *get_trace_from_exc(Eterm exc)
* This does what the (misnamed) Beam instruction 'raise_ss' does,
* namely, a proper re-throw of an exception that was caught by 'try'.
*/
-
-BIF_RETTYPE hipe_rethrow(BIF_ALIST_2)
+/* Called via standard_bif_interface_2 */
+BIF_RETTYPE nbif_impl_hipe_rethrow(NBIF_ALIST_2)
{
- Process* c_p = BIF_P;
+ Process *c_p = BIF_P;
Eterm exc = BIF_ARG_1;
Eterm value = BIF_ARG_2;
@@ -397,7 +399,7 @@ Eterm hipe_bs_utf8_size(Eterm arg)
return make_small(4);
}
-BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3)
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf8(NBIF_ALIST_3)
{
Process* p = BIF_P;
Eterm arg = BIF_ARG_1;
@@ -458,7 +460,7 @@ Eterm hipe_bs_put_utf16(Process *p, Eterm arg, byte *base, unsigned int offset,
return new_offset;
}
-BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3)
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf16be(NBIF_ALIST_3)
{
Process *p = BIF_P;
Eterm arg = BIF_ARG_1;
@@ -467,7 +469,7 @@ BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3)
return hipe_bs_put_utf16(p, arg, base, offset, 0);
}
-BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3)
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf16le(NBIF_ALIST_3)
{
Process *p = BIF_P;
Eterm arg = BIF_ARG_1;
@@ -485,7 +487,7 @@ static int validate_unicode(Eterm arg)
return 1;
}
-BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1)
+BIF_RETTYPE nbif_impl_hipe_bs_validate_unicode(NBIF_ALIST_1)
{
Process *p = BIF_P;
Eterm arg = BIF_ARG_1;
@@ -503,7 +505,8 @@ int hipe_bs_validate_unicode_retract(ErlBinMatchBuffer* mb, Eterm arg)
return 1;
}
-BIF_RETTYPE hipe_is_divisible(BIF_ALIST_2)
+/* Called via standard_bif_interface_2 */
+BIF_RETTYPE nbif_impl_hipe_is_divisible(NBIF_ALIST_2)
{
/* Arguments are Eterm-sized unsigned integers */
Uint dividend = BIF_ARG_1;
diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h
index a02d26087b..38f874888b 100644
--- a/erts/emulator/hipe/hipe_native_bif.h
+++ b/erts/emulator/hipe/hipe_native_bif.h
@@ -74,27 +74,27 @@ AEXTERN(void,nbif_select_msg,(Process*));
AEXTERN(Eterm,nbif_cmp_2,(void));
AEXTERN(Eterm,nbif_eq_2,(void));
-BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2);
-BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1);
+BIF_RETTYPE nbif_impl_hipe_nonclosure_address(NBIF_ALIST_2);
+BIF_RETTYPE nbif_impl_hipe_conv_big_to_float(NBIF_ALIST_1);
void hipe_fclearerror_error(Process*);
void hipe_select_msg(Process*);
void hipe_gc(Process*, Eterm);
-BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1);
+BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1);
void hipe_handle_exception(Process*);
-BIF_RETTYPE hipe_rethrow(BIF_ALIST_2);
+BIF_RETTYPE nbif_impl_hipe_rethrow(NBIF_ALIST_2);
char *hipe_bs_allocate(int);
Binary *hipe_bs_reallocate(Binary*, int);
int hipe_bs_put_small_float(Process*, Eterm, Uint, byte*, unsigned, unsigned);
void hipe_bs_put_bits(Eterm, Uint, byte*, unsigned, unsigned);
Eterm hipe_bs_utf8_size(Eterm);
-BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3);
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf8(NBIF_ALIST_3);
Eterm hipe_bs_utf16_size(Eterm);
-BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3);
-BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3);
-BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1);
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf16be(NBIF_ALIST_3);
+BIF_RETTYPE nbif_impl_hipe_bs_put_utf16le(NBIF_ALIST_3);
+BIF_RETTYPE nbif_impl_hipe_bs_validate_unicode(NBIF_ALIST_1);
struct erl_bin_match_buffer;
int hipe_bs_validate_unicode_retract(struct erl_bin_match_buffer*, Eterm);
-BIF_RETTYPE hipe_is_divisible(BIF_ALIST_2);
+BIF_RETTYPE nbif_impl_hipe_is_divisible(NBIF_ALIST_2);
#ifdef NO_FPE_SIGNALS
AEXTERN(void,nbif_emulate_fpe,(Process*));
@@ -129,7 +129,7 @@ void hipe_atomic_inc(int*);
void hipe_clear_timeout(Process*);
#endif
-#define BIF_LIST(M,F,A,C,I) AEXTERN(Eterm,nbif_##C,(void));
+#define BIF_LIST(M,F,A,B,C,I) AEXTERN(Eterm,nbif_##C,(void));
#include "erl_bif_list.h"
#undef BIF_LIST
diff --git a/erts/emulator/hipe/hipe_ppc.c b/erts/emulator/hipe/hipe_ppc.c
index 9b2048c457..4413748936 100644
--- a/erts/emulator/hipe/hipe_ppc.c
+++ b/erts/emulator/hipe/hipe_ppc.c
@@ -25,7 +25,6 @@
#endif
#include "global.h"
#include "erl_binary.h"
-#include <sys/mman.h>
#include "hipe_arch.h"
#include "hipe_native_bif.h" /* nbif_callemu() */
@@ -68,34 +67,6 @@ void hipe_flush_icache_range(void *address, unsigned int nbytes)
asm volatile("sync\n\tisync");
}
-/*
- * Management of 32MB code segments for regular code and trampolines.
- */
-
-#define SEGMENT_NRBYTES (32*1024*1024) /* named constant, _not_ a tunable */
-
-static struct segment {
- unsigned int *base; /* [base,base+32MB[ */
- unsigned int *code_pos; /* INV: base <= code_pos <= tramp_pos */
- unsigned int *tramp_pos; /* INV: tramp_pos <= base+32MB */
-} curseg;
-
-#define in_area(ptr,start,nbytes) \
- ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
-
-/* Darwin breakage */
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-static void *new_code_mapping(void)
-{
- return mmap(0, SEGMENT_NRBYTES,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
-}
-
static int check_callees(Eterm callees)
{
Eterm *tuple;
@@ -119,126 +90,71 @@ static int check_callees(Eterm callees)
return arity;
}
-static unsigned int *try_alloc(Uint nrwords, int nrcallees, Eterm callees, unsigned int **trampvec)
+
+static void generate_trampolines(Uint32* address,
+ int nrcallees, Eterm callees,
+ Uint32** trampvec)
{
- unsigned int *base, *address, *tramp_pos, nrfreewords;
- int trampnr;
+ Uint32* trampoline = address;
+ int i;
- tramp_pos = curseg.tramp_pos;
- address = curseg.code_pos;
- nrfreewords = tramp_pos - address;
- if (nrwords > nrfreewords)
- return NULL;
- curseg.code_pos = address + nrwords;
- nrfreewords -= nrwords;
-
- base = curseg.base;
- for (trampnr = 1; trampnr <= nrcallees; ++trampnr) {
- Eterm mfa = tuple_val(callees)[trampnr];
- Eterm m = tuple_val(mfa)[1];
- Eterm f = tuple_val(mfa)[2];
- unsigned int a = unsigned_val(tuple_val(mfa)[3]);
- unsigned int *trampoline = hipe_mfa_get_trampoline(m, f, a);
- if (!in_area(trampoline, base, SEGMENT_NRBYTES)) {
+ for (i = 0; i < nrcallees; ++i) {
#if defined(__powerpc64__)
- if (nrfreewords < 7)
- return NULL;
- nrfreewords -= 7;
- tramp_pos = trampoline = tramp_pos - 7;
- trampoline[0] = 0x3D600000; /* addis r11,r0,0 */
- trampoline[1] = 0x616B0000; /* ori r11,r11,0 */
- trampoline[2] = 0x796B07C6; /* rldicr r11,r11,32,31 */
- trampoline[3] = 0x656B0000; /* oris r11,r11,0 */
- trampoline[4] = 0x616B0000; /* ori r11,r11,0 */
- trampoline[5] = 0x7D6903A6; /* mtctr r11 */
- trampoline[6] = 0x4E800420; /* bctr */
- hipe_flush_icache_range(trampoline, 7*sizeof(int));
+# define TRAMPOLINE_WORDS 7
+ trampoline[0] = 0x3D600000; /* addis r11,r0,0 */
+ trampoline[1] = 0x616B0000; /* ori r11,r11,0 */
+ trampoline[2] = 0x796B07C6; /* rldicr r11,r11,32,31 */
+ trampoline[3] = 0x656B0000; /* oris r11,r11,0 */
+ trampoline[4] = 0x616B0000; /* ori r11,r11,0 */
+ trampoline[5] = 0x7D6903A6; /* mtctr r11 */
+ trampoline[6] = 0x4E800420; /* bctr */
#else
- if (nrfreewords < 4)
- return NULL;
- nrfreewords -= 4;
- tramp_pos = trampoline = tramp_pos - 4;
- trampoline[0] = 0x39600000; /* addi r11,r0,0 */
- trampoline[1] = 0x3D6B0000; /* addis r11,r11,0 */
- trampoline[2] = 0x7D6903A6; /* mtctr r11 */
- trampoline[3] = 0x4E800420; /* bctr */
- hipe_flush_icache_range(trampoline, 4*sizeof(int));
+# define TRAMPOLINE_WORDS 4
+ trampoline[0] = 0x39600000; /* addi r11,r0,0 */
+ trampoline[1] = 0x3D6B0000; /* addis r11,r11,0 */
+ trampoline[2] = 0x7D6903A6; /* mtctr r11 */
+ trampoline[3] = 0x4E800420; /* bctr */
#endif
- hipe_mfa_set_trampoline(m, f, a, trampoline);
- }
- trampvec[trampnr-1] = trampoline;
+ trampvec[i] = trampoline;
+ trampoline += TRAMPOLINE_WORDS;
}
- curseg.tramp_pos = tramp_pos;
- return address;
+ hipe_flush_icache_range(address, nrcallees*TRAMPOLINE_WORDS*sizeof(Uint32));
}
void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p)
{
- Uint nrwords;
+ Uint code_words;
int nrcallees;
Eterm trampvecbin;
- unsigned int **trampvec;
- unsigned int *address;
- unsigned int *base;
- struct segment oldseg;
+ Uint32 **trampvec;
+ Uint32 *address;
if (nrbytes & 0x3)
return NULL;
- nrwords = nrbytes >> 2;
+ code_words = nrbytes / sizeof(Uint32);
nrcallees = check_callees(callees);
if (nrcallees < 0)
return NULL;
- trampvecbin = new_binary(p, NULL, nrcallees*sizeof(unsigned int*));
- trampvec = (unsigned int**)binary_bytes(trampvecbin);
-
- address = try_alloc(nrwords, nrcallees, callees, trampvec);
- if (!address) {
- base = new_code_mapping();
- if (base == MAP_FAILED)
- return NULL;
- oldseg = curseg;
- curseg.base = base;
- curseg.code_pos = base;
- curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
-
- address = try_alloc(nrwords, nrcallees, callees, trampvec);
- if (!address) {
- munmap(base, SEGMENT_NRBYTES);
- curseg = oldseg;
- return NULL;
- }
- /* commit to new segment, ignore leftover space in old segment */
- }
+ trampvecbin = new_binary(p, NULL, nrcallees*sizeof(Uint32*));
+ trampvec = (Uint32**)binary_bytes(trampvecbin);
+
+ address = erts_alloc(ERTS_ALC_T_HIPE_EXEC,
+ (code_words + nrcallees*TRAMPOLINE_WORDS)*sizeof(Uint32));
+
+ generate_trampolines(address + code_words, nrcallees, callees, trampvec);
*trampolines = trampvecbin;
return address;
}
-static unsigned int *alloc_stub(Uint nrwords)
+void hipe_free_code(void* code, unsigned int bytes)
{
- unsigned int *address;
- unsigned int *base;
- struct segment oldseg;
-
- address = try_alloc(nrwords, 0, NIL, NULL);
- if (!address) {
- base = new_code_mapping();
- if (base == MAP_FAILED)
- return NULL;
- oldseg = curseg;
- curseg.base = base;
- curseg.code_pos = base;
- curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
-
- address = try_alloc(nrwords, 0, NIL, NULL);
- if (!address) {
- munmap(base, SEGMENT_NRBYTES);
- curseg = oldseg;
- return NULL;
- }
- /* commit to new segment, ignore leftover space in old segment */
- }
- return address;
+ erts_free(ERTS_ALC_T_HIPE_EXEC, code);
+}
+
+void hipe_free_native_stub(void* stub)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, stub);
}
static void patch_imm16(Uint32 *address, unsigned int imm16)
@@ -288,12 +204,12 @@ int hipe_patch_insn(void *address, Uint64 value, Eterm type)
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
- unsigned int *code;
+ Uint32 *code;
if ((unsigned long)&nbif_callemu & ~0x01FFFFFCUL)
abort();
- code = alloc_stub(7);
+ code = erts_alloc(ERTS_ALC_T_HIPE_EXEC, 7*sizeof(Uint32));
if (!code)
return NULL;
@@ -312,7 +228,7 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
/* ba nbif_callemu */
code[6] = 0x48000002 | (unsigned long)&nbif_callemu;
- hipe_flush_icache_range(code, 7*sizeof(int));
+ hipe_flush_icache_range(code, 7*sizeof(Uint32));
return code;
}
@@ -360,7 +276,7 @@ int hipe_patch_insn(void *address, Uint32 value, Eterm type)
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
- unsigned int *code;
+ Uint32 *code;
/*
* Native code calls BEAM via a stub looking as follows:
@@ -383,7 +299,7 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
if ((unsigned long)&nbif_callemu & ~0x01FFFFFCUL)
abort();
- code = alloc_stub(4);
+ code = erts_alloc(ERTS_ALC_T_HIPE_EXEC, 4*sizeof(Uint32));
if (!code)
return NULL;
@@ -396,7 +312,7 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
/* ba nbif_callemu */
code[3] = 0x48000002 | (unsigned long)&nbif_callemu;
- hipe_flush_icache_range(code, 4*sizeof(int));
+ hipe_flush_icache_range(code, 4*sizeof(Uint32));
return code;
}
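
The rewritten hipe_alloc_code() above drops the shared 32MB mmap segments and instead places the per-callee trampolines directly after the code words in one ERTS_ALC_T_HIPE_EXEC block. A minimal standalone sketch of that layout, using plain malloc() and the 4-word ppc32 stub from the hunk above; alloc_code_with_trampolines is an invented name for illustration, not an ERTS function:

    /* Sketch only: lay out native code followed by one trampoline per
     * callee in a single contiguous allocation: [ code | trampolines ]. */
    #include <stdint.h>
    #include <stdlib.h>

    #define TRAMPOLINE_WORDS 4              /* ppc32 stub; ppc64 uses 7 words */

    static uint32_t *alloc_code_with_trampolines(size_t code_words,
                                                 size_t ncallees,
                                                 uint32_t **trampvec)
    {
        uint32_t *base = malloc((code_words + ncallees*TRAMPOLINE_WORDS)
                                * sizeof(uint32_t));
        uint32_t *tramp;
        size_t i;

        if (!base)
            return NULL;
        tramp = base + code_words;          /* trampolines start after the code */
        for (i = 0; i < ncallees; i++) {
            /* unpatched "load address; mtctr; bctr" stub, as in the diff */
            tramp[0] = 0x39600000;  /* addi  r11,r0,0  */
            tramp[1] = 0x3D6B0000;  /* addis r11,r11,0 */
            tramp[2] = 0x7D6903A6;  /* mtctr r11       */
            tramp[3] = 0x4E800420;  /* bctr            */
            trampvec[i] = tramp;
            tramp += TRAMPOLINE_WORDS;
        }
        return base;
    }

Because code and trampolines now live in the same allocation, freeing the code (hipe_free_code above) releases the trampolines with it, and no segment bookkeeping survives across allocations.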
diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4
index b540562185..79a8bef77d 100644
--- a/erts/emulator/hipe/hipe_ppc_bifs.m4
+++ b/erts/emulator/hipe/hipe_ppc_bifs.m4
@@ -26,9 +26,9 @@ include(`hipe/hipe_ppc_asm.m4')
#`include' "hipe_literals.h"
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) STORE_IA(CSYM(F), P_BIF_CALLEE(P), r29); bl CSYM(hipe_debug_bif_wrapper)
+# define CALL_BIF(F) STORE_IA(CSYM(nbif_impl_##F), P_BIF_CALLEE(P), r29); bl CSYM(hipe_debug_bif_wrapper)
#else
-# define CALL_BIF(F) bl CSYM(F)
+# define CALL_BIF(F) bl CSYM(nbif_impl_##F)
#endif'
.text
diff --git a/erts/emulator/hipe/hipe_risc_gc.h b/erts/emulator/hipe/hipe_risc_gc.h
index 09568c140e..f019434f67 100644
--- a/erts/emulator/hipe/hipe_risc_gc.h
+++ b/erts/emulator/hipe/hipe_risc_gc.h
@@ -27,7 +27,7 @@
/* arch wrapper includes hipe_${arch}_asm.h to define NR_ARG_REGS */
struct nstack_walk_state {
- const struct sdesc *sdesc0; /* .sdesc0 must be a pointer rvalue */
+ const struct hipe_sdesc *sdesc0; /* .sdesc0 must be a pointer rvalue */
};
static inline int nstack_walk_init_check(const Process *p)
@@ -43,20 +43,20 @@ static inline Eterm *nstack_walk_nsp_begin(const Process *p)
return p->hipe.nsp + nstkarity;
}
-static inline const struct sdesc*
+static inline const struct hipe_sdesc*
nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state)
{
- const struct sdesc *sdesc = hipe_find_sdesc((unsigned long)p->hipe.nra);
+ const struct hipe_sdesc *sdesc = hipe_find_sdesc((unsigned long)p->hipe.nra);
state->sdesc0 = sdesc;
return sdesc;
}
-static inline const struct sdesc*
+static inline const struct hipe_sdesc*
nstack_walk_init_sdesc_ignore_trap(const Process *p,
struct nstack_walk_state *state)
{
unsigned long ra = (unsigned long)p->hipe.nra;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
if (ra == (unsigned long)&nbif_stack_trap_ra)
ra = (unsigned long)p->hipe.ngra;
sdesc = hipe_find_sdesc(ra);
@@ -64,7 +64,7 @@ nstack_walk_init_sdesc_ignore_trap(const Process *p,
return sdesc;
}
-static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0)
+static inline void nstack_walk_update_trap(Process *p, const struct hipe_sdesc *sdesc0)
{
Eterm *nsp = p->hipe.nsp;
p->hipe.nsp = nstack_walk_nsp_begin(p);
@@ -103,7 +103,7 @@ static inline int nstack_walk_nsp_reached_end(const Eterm *nsp, const Eterm *nsp
return nsp >= nsp_end;
}
-static inline unsigned int nstack_walk_frame_size(const struct sdesc *sdesc)
+static inline unsigned int nstack_walk_frame_size(const struct hipe_sdesc *sdesc)
{
return sdesc_fsize(sdesc) + 1 + sdesc_arity(sdesc);
}
@@ -114,7 +114,7 @@ static inline Eterm *nstack_walk_frame_index(Eterm *nsp, unsigned int i)
}
static inline unsigned long
-nstack_walk_frame_ra(const Eterm *nsp, const struct sdesc *sdesc)
+nstack_walk_frame_ra(const Eterm *nsp, const struct hipe_sdesc *sdesc)
{
return nsp[sdesc_fsize(sdesc)];
}
diff --git a/erts/emulator/hipe/hipe_risc_glue.h b/erts/emulator/hipe/hipe_risc_glue.h
index 0284265307..1369b392fe 100644
--- a/erts/emulator/hipe/hipe_risc_glue.h
+++ b/erts/emulator/hipe/hipe_risc_glue.h
@@ -66,13 +66,17 @@ static __inline__ unsigned int max(unsigned int x, unsigned int y)
static __inline__ void hipe_arch_glue_init(void)
{
- static struct sdesc_with_exnra nbif_return_sdesc = {
- .exnra = (unsigned long)&nbif_fail,
- .sdesc = {
- .bucket = { .hvalue = (unsigned long)&nbif_return },
- .summary = (1<<8),
- },
- };
+ static struct hipe_sdesc_with_exnra nbif_return_sdesc;
+
+ nbif_return_sdesc.exnra = (unsigned long)nbif_fail;
+ nbif_return_sdesc.sdesc.bucket.hvalue = (unsigned long)nbif_return;
+ nbif_return_sdesc.sdesc.fsize = 0;
+ nbif_return_sdesc.sdesc.has_exnra = 1;
+ nbif_return_sdesc.sdesc.stk_nargs = 0;
+ nbif_return_sdesc.sdesc.m_aix = atom_val(am_Empty);
+ nbif_return_sdesc.sdesc.f_aix = atom_val(am_return);
+ nbif_return_sdesc.sdesc.a = 0;
+
hipe_init_sdesc_table(&nbif_return_sdesc.sdesc);
}
diff --git a/erts/emulator/hipe/hipe_risc_stack.c b/erts/emulator/hipe/hipe_risc_stack.c
index dc98c96b8f..4001bedeb6 100644
--- a/erts/emulator/hipe/hipe_risc_stack.c
+++ b/erts/emulator/hipe/hipe_risc_stack.c
@@ -56,8 +56,8 @@ void hipe_print_nstack(Process *p)
{
Eterm *nsp;
Eterm *nsp_end;
- const struct sdesc *sdesc1;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc1;
+ const struct hipe_sdesc *sdesc;
unsigned long ra;
unsigned long exnra;
unsigned int mask;
@@ -175,7 +175,7 @@ void hipe_print_nstack(Process *p)
#define MINSTACK 128
#define NSKIPFRAMES 4
-void hipe_update_stack_trap(Process *p, const struct sdesc *sdesc)
+void hipe_update_stack_trap(Process *p, const struct hipe_sdesc *sdesc)
{
Eterm *nsp;
Eterm *nsp_end;
@@ -216,7 +216,7 @@ void hipe_update_stack_trap(Process *p, const struct sdesc *sdesc)
void (*hipe_handle_stack_trap(Process *p))(void)
{
void (*ngra)(void) = p->hipe.ngra;
- const struct sdesc *sdesc = hipe_find_sdesc((unsigned long)ngra);
+ const struct hipe_sdesc *sdesc = hipe_find_sdesc((unsigned long)ngra);
hipe_update_stack_trap(p, sdesc);
return ngra;
}
@@ -237,7 +237,7 @@ void hipe_find_handler(Process *p)
unsigned long ra;
unsigned long exnra;
unsigned int arity;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
nsp = p->hipe.nsp;
nsp_end = p->hipe.nstend;
@@ -277,7 +277,7 @@ int hipe_fill_stacktrace(Process *p, int depth, Eterm **trace)
Eterm *nsp_end;
unsigned long ra, prev_ra;
unsigned int arity;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
int i;
if (depth < 1)
@@ -292,7 +292,7 @@ int hipe_fill_stacktrace(Process *p, int depth, Eterm **trace)
ra = (unsigned long)p->hipe.nra;
prev_ra = 0;
i = 0;
- for (;;) {
+ while (nsp < nsp_end) {
if (ra == (unsigned long)nbif_stack_trap_ra)
ra = (unsigned long)p->hipe.ngra;
if (ra != prev_ra) {
@@ -302,8 +302,6 @@ int hipe_fill_stacktrace(Process *p, int depth, Eterm **trace)
break;
prev_ra = ra;
}
- if (nsp >= nsp_end)
- break;
sdesc = hipe_find_sdesc(ra);
nsp += arity + sdesc_fsize(sdesc);
arity = sdesc_arity(sdesc);
diff --git a/erts/emulator/hipe/hipe_sparc.c b/erts/emulator/hipe/hipe_sparc.c
index 23020f34ee..876b20bb15 100644
--- a/erts/emulator/hipe/hipe_sparc.c
+++ b/erts/emulator/hipe/hipe_sparc.c
@@ -24,7 +24,6 @@
#include "config.h"
#endif
#include "global.h"
-#include <sys/mman.h>
#include "hipe_arch.h"
#include "hipe_native_bif.h" /* nbif_callemu() */
@@ -88,8 +87,8 @@ int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
{
Uint32 relDest, newI;
- if (trampoline)
- return -1;
+ ASSERT(trampoline == NULL);
+
relDest = (Uint32)((Sint32)destAddress - (Sint32)callAddress);
newI = (1 << 30) | (relDest >> 2);
*(Uint32*)callAddress = newI;
@@ -97,105 +96,9 @@ int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
return 0;
}
-/*
- * Memory allocator for executable code.
- *
- * This is required on x86 because some combinations
- * of Linux kernels and CPU generations default to
- * non-executable memory mappings, causing ordinary
- * malloc() memory to be non-executable.
- */
-static unsigned int code_bytes;
-static char *code_next;
-
-#if 0 /* change to non-zero to get allocation statistics at exit() */
-static unsigned int total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs, nr_large, total_lost;
-static unsigned int atexit_done;
-
-static void alloc_code_stats(void)
-{
- printf("\r\nalloc_code_stats: %u bytes mapped, %u joins, %u splits, %u bytes allocated, %u average alloc, %u large allocs, %u bytes lost\r\n",
- total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs ? total_alloc/nr_allocs : 0, nr_large, total_lost);
-}
-
-static void atexit_alloc_code_stats(void)
-{
- if (!atexit_done) {
- atexit_done = 1;
- (void)atexit(alloc_code_stats);
- }
-}
-
-#define ALLOC_CODE_STATS(X) do{X;}while(0)
-#else
-#define ALLOC_CODE_STATS(X) do{}while(0)
-#endif
-
-static int morecore(unsigned int alloc_bytes)
-{
- unsigned int map_bytes;
- char *map_hint, *map_start;
-
- /* Page-align the amount to allocate. */
- map_bytes = (alloc_bytes + 4095) & ~4095;
-
- /* Round up small allocations. */
- if (map_bytes < 1024*1024)
- map_bytes = 1024*1024;
- else
- ALLOC_CODE_STATS(++nr_large);
-
- /* Create a new memory mapping, ensuring it is executable
- and in the low 2GB of the address space. Also attempt
- to make it adjacent to the previous mapping. */
- map_hint = code_next + code_bytes;
- if ((unsigned long)map_hint & 4095)
- abort();
- map_start = mmap(map_hint, map_bytes,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS
-#ifdef __x86_64__
- |MAP_32BIT
-#endif
- ,
- -1, 0);
- if (map_start == MAP_FAILED)
- return -1;
-
- ALLOC_CODE_STATS(total_mapped += map_bytes);
-
- /* Merge adjacent mappings, so the trailing portion of the previous
- mapping isn't lost. In practice this is quite successful. */
- if (map_start == map_hint) {
- ALLOC_CODE_STATS(++nr_joins);
- code_bytes += map_bytes;
- } else {
- ALLOC_CODE_STATS(++nr_splits);
- ALLOC_CODE_STATS(total_lost += code_bytes);
- code_next = map_start;
- code_bytes = map_bytes;
- }
-
- ALLOC_CODE_STATS(atexit_alloc_code_stats());
-
- return 0;
-}
-
static void *alloc_code(unsigned int alloc_bytes)
{
- void *res;
-
- /* Align function entries. */
- alloc_bytes = (alloc_bytes + 3) & ~3;
-
- if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
- return NULL;
- ALLOC_CODE_STATS(++nr_allocs);
- ALLOC_CODE_STATS(total_alloc += alloc_bytes);
- res = code_next;
- code_next += alloc_bytes;
- code_bytes -= alloc_bytes;
- return res;
+ return erts_alloc(ERTS_ALC_T_HIPE_EXEC, alloc_bytes);
}
void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p)
@@ -206,6 +109,11 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
+void hipe_free_code(void* code, unsigned int nrbytes)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, code);
+}
+
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
@@ -235,6 +143,11 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
return code;
}
+void hipe_free_native_stub(void* stub)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, stub);
+}
+
void hipe_arch_print_pcb(struct hipe_process_state *p)
{
#define U(n,x) \
diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4
index 1389beaa61..14330c2f1c 100644
--- a/erts/emulator/hipe/hipe_sparc_bifs.m4
+++ b/erts/emulator/hipe/hipe_sparc_bifs.m4
@@ -29,9 +29,9 @@ include(`hipe/hipe_sparc_asm.m4')
.align 4
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) set F, %o7; st %o7, [%o0+P_BIF_CALLEE]; call hipe_debug_bif_wrapper
+# define CALL_BIF(F) set nbif_impl_##F, %o7; st %o7, [%o0+P_BIF_CALLEE]; call hipe_debug_bif_wrapper
#else
-# define CALL_BIF(F) call F
+# define CALL_BIF(F) call nbif_impl_##F
#endif'
/*
diff --git a/erts/emulator/hipe/hipe_stack.c b/erts/emulator/hipe/hipe_stack.c
index e2e6eb74b1..b80e44bc37 100644
--- a/erts/emulator/hipe/hipe_stack.c
+++ b/erts/emulator/hipe/hipe_stack.c
@@ -43,10 +43,10 @@
*/
struct hipe_sdesc_table hipe_sdesc_table;
-static struct sdesc **alloc_bucket(unsigned int size)
+static struct hipe_sdesc **alloc_bucket(unsigned int size)
{
- unsigned long nbytes = size * sizeof(struct sdesc*);
- struct sdesc **bucket = erts_alloc(ERTS_ALC_T_HIPE, nbytes);
+ unsigned long nbytes = size * sizeof(struct hipe_sdesc*);
+ struct hipe_sdesc **bucket = erts_alloc(ERTS_ALC_T_HIPE, nbytes);
sys_memzero(bucket, nbytes);
return bucket;
}
@@ -54,7 +54,7 @@ static struct sdesc **alloc_bucket(unsigned int size)
static void hipe_grow_sdesc_table(void)
{
unsigned int old_size, new_size, new_mask;
- struct sdesc **old_bucket, **new_bucket;
+ struct hipe_sdesc **old_bucket, **new_bucket;
unsigned int i;
old_size = 1 << hipe_sdesc_table.log2size;
@@ -66,9 +66,9 @@ static void hipe_grow_sdesc_table(void)
new_bucket = alloc_bucket(new_size);
hipe_sdesc_table.bucket = new_bucket;
for (i = 0; i < old_size; ++i) {
- struct sdesc *b = old_bucket[i];
+ struct hipe_sdesc *b = old_bucket[i];
while (b != NULL) {
- struct sdesc *next = b->bucket.next;
+ struct hipe_sdesc *next = b->bucket.next;
unsigned int j = (b->bucket.hvalue >> HIPE_RA_LSR_COUNT) & new_mask;
b->bucket.next = new_bucket[j];
new_bucket[j] = b;
@@ -78,11 +78,11 @@ static void hipe_grow_sdesc_table(void)
erts_free(ERTS_ALC_T_HIPE, old_bucket);
}
-struct sdesc *hipe_put_sdesc(struct sdesc *sdesc)
+struct hipe_sdesc *hipe_put_sdesc(struct hipe_sdesc *sdesc)
{
unsigned long ra;
unsigned int i;
- struct sdesc *chain;
+ struct hipe_sdesc *chain;
unsigned int size;
ra = sdesc->bucket.hvalue;
@@ -102,7 +102,29 @@ struct sdesc *hipe_put_sdesc(struct sdesc *sdesc)
return sdesc;
}
-void hipe_init_sdesc_table(struct sdesc *sdesc)
+void hipe_destruct_sdesc(struct hipe_sdesc *sdesc)
+{
+ unsigned int i;
+ struct hipe_sdesc** prevp;
+ void* free_me;
+
+ i = (sdesc->bucket.hvalue >> HIPE_RA_LSR_COUNT) & hipe_sdesc_table.mask;
+ prevp = &hipe_sdesc_table.bucket[i];
+
+ for (; *prevp != sdesc; prevp = &(*prevp)->bucket.next)
+ ASSERT(*prevp);
+
+ *prevp = sdesc->bucket.next;
+ hipe_sdesc_table.used -= 1;
+
+ if (sdesc->has_exnra)
+ free_me = ErtsContainerStruct(sdesc, struct hipe_sdesc_with_exnra, sdesc);
+ else
+ free_me = sdesc;
+ erts_free(ERTS_ALC_T_HIPE, free_me);
+}
+
+void hipe_init_sdesc_table(struct hipe_sdesc *sdesc)
{
unsigned int log2size, size;
@@ -121,31 +143,46 @@ void hipe_init_sdesc_table(struct sdesc *sdesc)
* representation. If different representations are needed in
* the future, this code has to be made target dependent.
*/
-struct sdesc *hipe_decode_sdesc(Eterm arg)
+struct hipe_sdesc *hipe_decode_sdesc(Eterm arg)
{
Uint ra, exnra;
Eterm *live;
- Uint fsize, arity, nlive, i, nslots, off;
+ Uint fsize, nargs, stk_nargs, nlive, i, nslots, off;
Uint livebitswords, sdescbytes;
void *p;
- struct sdesc *sdesc;
-
- if (is_not_tuple(arg) ||
- (tuple_val(arg))[0] != make_arityval(6) ||
- term_to_Uint((tuple_val(arg))[1], &ra) == 0 ||
- term_to_Uint((tuple_val(arg))[2], &exnra) == 0 ||
- is_not_small((tuple_val(arg))[3]) ||
- (fsize = unsigned_val((tuple_val(arg))[3])) > 65535 ||
- is_not_small((tuple_val(arg))[4]) ||
- (arity = unsigned_val((tuple_val(arg))[4])) > 255 ||
- is_not_tuple((tuple_val(arg))[5]))
+ struct hipe_sdesc *sdesc;
+ Eterm* mfa_tpl;
+ Eterm* tp;
+
+ if (is_not_tuple(arg))
+ return 0;
+
+ tp = tuple_val(arg);
+ if (tp[0] != make_arityval(6) ||
+ term_to_Uint(tp[1], &ra) == 0 ||
+ term_to_Uint(tp[2], &exnra) == 0 ||
+ is_not_small(tp[3]) ||
+ (fsize = unsigned_val(tp[3])) > 65535 ||
+ is_not_small(tp[4]) ||
+ (stk_nargs = unsigned_val(tp[4])) > 255 ||
+ is_not_tuple(tp[5]) ||
+ is_not_tuple(tp[6]) ||
+ (mfa_tpl = tuple_val(tp[6]))[0] != make_arityval(3) ||
+ is_not_atom(mfa_tpl[1]) ||
+ is_not_atom(mfa_tpl[2]) ||
+ is_not_small(mfa_tpl[3]) ||
+ (nargs = unsigned_val(mfa_tpl[3])) > 255)
return 0;
+
+ if (stk_nargs > nargs)
+ return 0;
+
/* Get tuple with live slots */
- live = tuple_val((tuple_val(arg))[5]) + 1;
+ live = tuple_val(tp[5]) + 1;
/* Get number of live slots */
nlive = arityval(live[-1]);
- /* Calculate size of frame = locals + ra + arguments */
- nslots = fsize + 1 + arity;
+ /* Calculate size of frame = locals + ra + stack arguments */
+ nslots = fsize + 1 + stk_nargs;
/* Check that only valid slots are given. */
for (i = 0; i < nlive; ++i) {
if (is_not_small(live[i]) ||
@@ -155,27 +192,34 @@ struct sdesc *hipe_decode_sdesc(Eterm arg)
}
/* Calculate number of words for the live bitmap. */
- livebitswords = (fsize + arity + 1 + 31) / 32;
+ livebitswords = (fsize + stk_nargs + 1 + 31) / 32;
/* Calculate number of bytes needed for the stack descriptor. */
sdescbytes =
(exnra
- ? offsetof(struct sdesc_with_exnra, sdesc.livebits)
- : offsetof(struct sdesc, livebits))
+ ? offsetof(struct hipe_sdesc_with_exnra, sdesc.livebits)
+ : offsetof(struct hipe_sdesc, livebits))
+ livebitswords * sizeof(int);
p = erts_alloc(ERTS_ALC_T_HIPE, sdescbytes);
/* If we have an exception handler use the
special sdesc_with_exnra structure. */
if (exnra) {
- struct sdesc_with_exnra *sdesc_we = p;
+ struct hipe_sdesc_with_exnra *sdesc_we = p;
sdesc_we->exnra = exnra;
sdesc = &(sdesc_we->sdesc);
} else
sdesc = p;
+ sdesc->m_aix = atom_val(mfa_tpl[1]);
+ sdesc->f_aix = atom_val(mfa_tpl[2]);
+ sdesc->a = nargs;
+
+
/* Initialise head of sdesc. */
sdesc->bucket.next = 0;
sdesc->bucket.hvalue = ra;
- sdesc->summary = (fsize << 9) | (exnra ? (1<<8) : 0) | arity;
+ sdesc->fsize = fsize;
+ sdesc->has_exnra = (exnra ? 1 : 0);
+ sdesc->stk_nargs = stk_nargs;
/* Clear all live-bits */
for (i = 0; i < livebitswords; ++i)
sdesc->livebits[i] = 0;
@@ -184,13 +228,5 @@ struct sdesc *hipe_decode_sdesc(Eterm arg)
off = unsigned_val(live[i]);
sdesc->livebits[off / 32] |= (1 << (off & 31));
}
-#ifdef DEBUG
- {
- Eterm mfa_tpl = tuple_val(arg)[6];
- sdesc->dbg_M = tuple_val(mfa_tpl)[1];
- sdesc->dbg_F = tuple_val(mfa_tpl)[2];
- sdesc->dbg_A = tuple_val(mfa_tpl)[3];
- }
-#endif
return sdesc;
}
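
The new hipe_destruct_sdesc() above removes an entry from its collision chain by walking a pointer-to-pointer (prevp) over the bucket's next fields, so the chain head needs no special case. A minimal sketch of that unlink technique on a generic chained-hash node; unlink_node and struct node are illustrative names, not ERTS types:

    #include <assert.h>
    #include <stddef.h>

    struct node {
        unsigned long key;
        struct node *next;
    };

    static void unlink_node(struct node **bucket, struct node *victim)
    {
        struct node **prevp = bucket;       /* address of the link to rewrite */

        for (; *prevp != victim; prevp = &(*prevp)->next)
            assert(*prevp != NULL);         /* victim must be in this chain */

        *prevp = victim->next;              /* splice it out */
    }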
diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h
index afa0ed4256..7e30358767 100644
--- a/erts/emulator/hipe/hipe_stack.h
+++ b/erts/emulator/hipe/hipe_stack.h
@@ -30,40 +30,43 @@
#include <stddef.h> /* offsetof() */
-struct sdesc {
+struct hipe_sdesc {
struct {
unsigned long hvalue; /* return address */
- struct sdesc *next; /* hash collision chain */
+ struct hipe_sdesc *next; /* hash collision chain */
} bucket;
- unsigned int summary; /* frame size, exn handler presence flag, arity */
-#ifdef DEBUG
- Eterm dbg_M, dbg_F;
- unsigned dbg_A;
-#endif
- unsigned int livebits[1]; /* size depends on arch & data in summary field */
+ unsigned int fsize : 23; /* frame size */
+ unsigned int has_exnra : 1; /* exn handler presence flag */
+ unsigned int stk_nargs : 8; /* arguments on stack */
+ Uint32 m_aix;
+ Uint32 f_aix;
+ Uint32 a;
+ struct hipe_sdesc* next_in_modi;
+ Uint32 livebits[1]; /* size depends on arch & data in summary field */
};
-struct sdesc_with_exnra {
+struct hipe_sdesc_with_exnra {
unsigned long exnra;
- struct sdesc sdesc;
+ struct hipe_sdesc sdesc;
};
-static __inline__ unsigned int sdesc_fsize(const struct sdesc *sdesc)
+static __inline__ unsigned int sdesc_fsize(const struct hipe_sdesc *sdesc)
{
- return sdesc->summary >> 9;
+ return sdesc->fsize;
}
-static __inline__ unsigned int sdesc_arity(const struct sdesc *sdesc)
+/* Nr of arguments pushed on stack */
+static __inline__ unsigned int sdesc_arity(const struct hipe_sdesc *sdesc)
{
- return sdesc->summary & 0xFF;
+ return sdesc->stk_nargs;
}
-static __inline__ unsigned long sdesc_exnra(const struct sdesc *sdesc)
+static __inline__ unsigned long sdesc_exnra(const struct hipe_sdesc *sdesc)
{
- if ((sdesc->summary & (1<<8))) {
+ if (sdesc->has_exnra) {
const char *tmp;
- tmp = (const char*)sdesc - offsetof(struct sdesc_with_exnra, sdesc);
- return ((const struct sdesc_with_exnra*)tmp)->exnra;
+ tmp = (const char*)sdesc - offsetof(struct hipe_sdesc_with_exnra, sdesc);
+ return ((const struct hipe_sdesc_with_exnra*)tmp)->exnra;
}
return 0;
}
@@ -72,13 +75,14 @@ struct hipe_sdesc_table {
unsigned int log2size;
unsigned int mask; /* INV: mask == (1 << log2size)-1 */
unsigned int used;
- struct sdesc **bucket;
+ struct hipe_sdesc **bucket;
};
extern struct hipe_sdesc_table hipe_sdesc_table;
-extern struct sdesc *hipe_put_sdesc(struct sdesc*);
-extern void hipe_init_sdesc_table(struct sdesc*);
-extern struct sdesc *hipe_decode_sdesc(Eterm);
+extern struct hipe_sdesc *hipe_put_sdesc(struct hipe_sdesc*);
+extern void hipe_destruct_sdesc(struct hipe_sdesc*);
+extern void hipe_init_sdesc_table(struct hipe_sdesc*);
+extern struct hipe_sdesc *hipe_decode_sdesc(Eterm);
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
#define __builtin_expect(x, expected_value) (x)
@@ -86,10 +90,10 @@ extern struct sdesc *hipe_decode_sdesc(Eterm);
#define likely(x) __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
-static __inline__ const struct sdesc *hipe_find_sdesc(unsigned long ra)
+static __inline__ const struct hipe_sdesc *hipe_find_sdesc(unsigned long ra)
{
unsigned int i = (ra >> HIPE_RA_LSR_COUNT) & hipe_sdesc_table.mask;
- const struct sdesc *sdesc = hipe_sdesc_table.bucket[i];
+ const struct hipe_sdesc *sdesc = hipe_sdesc_table.bucket[i];
if (likely(sdesc->bucket.hvalue == ra))
return sdesc;
do {
@@ -103,7 +107,7 @@ AEXTERN(void,nbif_stack_trap_ra,(void));
extern void hipe_print_nstack(Process*);
extern void hipe_find_handler(Process*);
extern void (*hipe_handle_stack_trap(Process*))(void);
-extern void hipe_update_stack_trap(Process*, const struct sdesc*);
+extern void hipe_update_stack_trap(Process*, const struct hipe_sdesc*);
extern int hipe_fill_stacktrace(Process*, int, Eterm**);
#if 0 && defined(HIPE_NSTACK_GROWS_UP)
@@ -134,5 +138,7 @@ extern void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop
extern Eterm *sweep_literals_nstack(Process *p, Eterm *n_htop, char *area,
Uint area_size);
extern int nstack_any_heap_ref_ptrs(Process *, char* mod_start, Uint mod_size);
+extern int nstack_any_cps_in_segment(Process *, char* seg_start, Uint seg_size);
+
#endif /* HIPE_STACK_H */
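
For reference, the fsize/has_exnra/stk_nargs bit-fields introduced above carry the same information that the removed summary word packed by hand; the removed sdesc_* accessors decoded it as sketched below (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int fsize = 3, has_exnra = 1, stk_nargs = 2;
        /* old encoding: frame size above bit 9, handler flag at bit 8,
         * stack argument count in the low byte */
        unsigned int summary = (fsize << 9) | (has_exnra << 8) | stk_nargs;

        printf("fsize=%u exnra=%u nargs=%u\n",
               summary >> 9, (summary >> 8) & 1, summary & 0xFF);
        return 0;
    }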
diff --git a/erts/emulator/hipe/hipe_x86.c b/erts/emulator/hipe/hipe_x86.c
index 5f6c8c200e..c7e24673ac 100644
--- a/erts/emulator/hipe/hipe_x86.c
+++ b/erts/emulator/hipe/hipe_x86.c
@@ -24,7 +24,6 @@
#include "config.h"
#endif
#include "global.h"
-#include <sys/mman.h>
#include "hipe_arch.h"
#include "hipe_native_bif.h" /* nbif_callemu() */
@@ -62,118 +61,17 @@ int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
{
Uint rel32;
- if (trampoline)
- return -1;
+ ASSERT(trampoline == NULL);
+
rel32 = (Uint)destAddress - (Uint)callAddress - 4;
*(Uint32*)callAddress = rel32;
hipe_flush_icache_word(callAddress);
return 0;
}
-/*
- * Memory allocator for executable code.
- *
- * This is required on x86 because some combinations
- * of Linux kernels and CPU generations default to
- * non-executable memory mappings, causing ordinary
- * malloc() memory to be non-executable.
- */
-static unsigned int code_bytes;
-static char *code_next;
-
-#if 0 /* change to non-zero to get allocation statistics at exit() */
-static unsigned int total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs, nr_large, total_lost;
-static unsigned int atexit_done;
-
-static void alloc_code_stats(void)
-{
- printf("\r\nalloc_code_stats: %u bytes mapped, %u joins, %u splits, %u bytes allocated, %u average alloc, %u large allocs, %u bytes lost\r\n",
- total_mapped, nr_joins, nr_splits, total_alloc, nr_allocs ? total_alloc/nr_allocs : 0, nr_large, total_lost);
-}
-
-static void atexit_alloc_code_stats(void)
-{
- if (!atexit_done) {
- atexit_done = 1;
- (void)atexit(alloc_code_stats);
- }
-}
-
-#define ALLOC_CODE_STATS(X) do{X;}while(0)
-#else
-#define ALLOC_CODE_STATS(X) do{}while(0)
-#endif
-
-/* FreeBSD 6.1 and Darwin breakage */
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-static int morecore(unsigned int alloc_bytes)
-{
- unsigned int map_bytes;
- char *map_hint, *map_start;
-
- /* Page-align the amount to allocate. */
- map_bytes = (alloc_bytes + 4095) & ~4095;
-
- /* Round up small allocations. */
- if (map_bytes < 1024*1024)
- map_bytes = 1024*1024;
- else
- ALLOC_CODE_STATS(++nr_large);
-
- /* Create a new memory mapping, ensuring it is executable
- and in the low 2GB of the address space. Also attempt
- to make it adjacent to the previous mapping. */
- map_hint = code_next + code_bytes;
- if ((unsigned long)map_hint & 4095)
- abort();
- map_start = mmap(map_hint, map_bytes,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS
-#ifdef __x86_64__
- |MAP_32BIT
-#endif
- ,
- -1, 0);
- if (map_start == MAP_FAILED)
- return -1;
-
- ALLOC_CODE_STATS(total_mapped += map_bytes);
-
- /* Merge adjacent mappings, so the trailing portion of the previous
- mapping isn't lost. In practice this is quite successful. */
- if (map_start == map_hint) {
- ALLOC_CODE_STATS(++nr_joins);
- code_bytes += map_bytes;
- } else {
- ALLOC_CODE_STATS(++nr_splits);
- ALLOC_CODE_STATS(total_lost += code_bytes);
- code_next = map_start;
- code_bytes = map_bytes;
- }
-
- ALLOC_CODE_STATS(atexit_alloc_code_stats());
-
- return 0;
-}
-
static void *alloc_code(unsigned int alloc_bytes)
{
- void *res;
-
- /* Align function entries. */
- alloc_bytes = (alloc_bytes + 3) & ~3;
-
- if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
- return NULL;
- ALLOC_CODE_STATS(++nr_allocs);
- ALLOC_CODE_STATS(total_alloc += alloc_bytes);
- res = code_next;
- code_next += alloc_bytes;
- code_bytes -= alloc_bytes;
- return res;
+ return erts_alloc(ERTS_ALC_T_HIPE_EXEC, alloc_bytes);
}
void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p)
@@ -184,6 +82,11 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
+void hipe_free_code(void* code, unsigned int bytes)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, code);
+}
+
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
/*
@@ -264,6 +167,11 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
return code;
}
+void hipe_free_native_stub(void* stub)
+{
+ erts_free(ERTS_ALC_T_HIPE_EXEC, stub);
+}
+
void hipe_arch_print_pcb(struct hipe_process_state *p)
{
#define U(n,x) \
diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4
index c0c149733c..aecf67dc1b 100644
--- a/erts/emulator/hipe/hipe_x86_bifs.m4
+++ b/erts/emulator/hipe/hipe_x86_bifs.m4
@@ -32,9 +32,9 @@ include(`hipe/hipe_x86_asm.m4')
#endif'
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) movl $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+# define CALL_BIF(F) movl $CSYM(nbif_impl_##F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
#else
-# define CALL_BIF(F) call CSYM(F)
+# define CALL_BIF(F) call CSYM(nbif_impl_##F)
#endif'
define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx /* `TEST_GOT_MBUF' */
@@ -666,13 +666,9 @@ noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu)
#endif /* NO_FPE_SIGNALS */
/*
- * Implement gc_bif_interface_0 as nofail_primop_interface_0.
- */
-define(gc_bif_interface_0,`nofail_primop_interface_0($1, $2)')
-
-/*
- * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2,3).
+ * Implement gc_bif_interface_N as standard_bif_interface_N.
*/
+define(gc_bif_interface_0,`standard_bif_interface_0($1, $2)')
define(gc_bif_interface_1,`standard_bif_interface_1($1, $2)')
define(gc_bif_interface_2,`standard_bif_interface_2($1, $2)')
define(gc_bif_interface_3,`standard_bif_interface_3($1, $2)')
diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h
index 00fe03d8f9..a703e24b8c 100644
--- a/erts/emulator/hipe/hipe_x86_gc.h
+++ b/erts/emulator/hipe/hipe_x86_gc.h
@@ -30,9 +30,9 @@
struct nstack_walk_state {
#ifdef SKIP_YOUNGEST_FRAME
- const struct sdesc *sdesc0; /* .sdesc0 must be a pointer rvalue */
+ const struct hipe_sdesc *sdesc0; /* .sdesc0 must be a pointer rvalue */
#else
- struct sdesc sdesc0[1]; /* .sdesc0 must be a pointer rvalue */
+ struct hipe_sdesc sdesc0[1]; /* .sdesc0 must be a pointer rvalue */
#endif
};
@@ -57,37 +57,36 @@ static inline Eterm *nstack_walk_nsp_begin(const Process *p)
#endif
}
-static inline const struct sdesc*
+static inline const struct hipe_sdesc*
nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state)
{
#ifdef SKIP_YOUNGEST_FRAME
- const struct sdesc *sdesc = hipe_find_sdesc(p->hipe.nsp[0]);
+ const struct hipe_sdesc *sdesc = hipe_find_sdesc(p->hipe.nsp[0]);
state->sdesc0 = sdesc;
return sdesc;
#else
- unsigned int nstkarity = p->hipe.narity - NR_ARG_REGS;
- if ((int)nstkarity < 0)
- nstkarity = 0;
- state->sdesc0[0].summary = (0 << 9) | (0 << 8) | nstkarity;
+ state->sdesc0[0].bucket.hvalue = 0; /* for nstack_any_cps_in_segment */
+ state->sdesc0[0].fsize = 0;
+ state->sdesc0[0].has_exnra = 0;
+ state->sdesc0[0].stk_nargs = (p->hipe.narity < NR_ARG_REGS ? 0 :
+ p->hipe.narity - NR_ARG_REGS);
state->sdesc0[0].livebits[0] = 0;
-# ifdef DEBUG
- state->sdesc0[0].dbg_M = 0;
- state->sdesc0[0].dbg_F = am_undefined;
- state->sdesc0[0].dbg_A = 0;
-# endif
+ state->sdesc0[0].m_aix = 0;
+ state->sdesc0[0].f_aix = atom_val(am_undefined);
+ state->sdesc0[0].a = 0;
/* XXX: this appears to prevent a gcc-4.1.1 bug on x86 */
__asm__ __volatile__("" : : "m"(*state) : "memory");
return &state->sdesc0[0];
#endif
}
-static inline const struct sdesc*
+static inline const struct hipe_sdesc*
nstack_walk_init_sdesc_ignore_trap(const Process *p,
struct nstack_walk_state *state)
{
#ifdef SKIP_YOUNGEST_FRAME
unsigned long ra = p->hipe.nsp[0];
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
if (ra == (unsigned long)nbif_stack_trap_ra)
ra = (unsigned long)p->hipe.ngra;
sdesc = hipe_find_sdesc(ra);
@@ -98,7 +97,7 @@ nstack_walk_init_sdesc_ignore_trap(const Process *p,
#endif
}
-static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0)
+static inline void nstack_walk_update_trap(Process *p, const struct hipe_sdesc *sdesc0)
{
#ifdef SKIP_YOUNGEST_FRAME
Eterm *nsp = p->hipe.nsp;
@@ -137,7 +136,7 @@ static inline int nstack_walk_nsp_reached_end(const Eterm *nsp, const Eterm *nsp
return nsp >= nsp_end;
}
-static inline unsigned int nstack_walk_frame_size(const struct sdesc *sdesc)
+static inline unsigned int nstack_walk_frame_size(const struct hipe_sdesc *sdesc)
{
return sdesc_fsize(sdesc) + 1 + sdesc_arity(sdesc);
}
@@ -148,7 +147,7 @@ static inline Eterm *nstack_walk_frame_index(Eterm *nsp, unsigned int i)
}
static inline unsigned long
-nstack_walk_frame_ra(const Eterm *nsp, const struct sdesc *sdesc)
+nstack_walk_frame_ra(const Eterm *nsp, const struct hipe_sdesc *sdesc)
{
return nsp[sdesc_fsize(sdesc)];
}
diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h
index 818d7444e2..de2b061706 100644
--- a/erts/emulator/hipe/hipe_x86_glue.h
+++ b/erts/emulator/hipe/hipe_x86_glue.h
@@ -58,16 +58,17 @@ static __inline__ unsigned int max(unsigned int x, unsigned int y)
static __inline__ void hipe_arch_glue_init(void)
{
- static struct sdesc_with_exnra nbif_return_sdesc = {
- .exnra = (unsigned long)nbif_fail,
- .sdesc = {
- .bucket = { .hvalue = (unsigned long)nbif_return },
- .summary = (1<<8),
- #ifdef DEBUG
- .dbg_F = am_return,
- #endif
- },
- };
+ static struct hipe_sdesc_with_exnra nbif_return_sdesc;
+
+ nbif_return_sdesc.exnra = (unsigned long)nbif_fail;
+ nbif_return_sdesc.sdesc.bucket.hvalue = (unsigned long)nbif_return;
+ nbif_return_sdesc.sdesc.fsize = 0;
+ nbif_return_sdesc.sdesc.has_exnra = 1;
+ nbif_return_sdesc.sdesc.stk_nargs = 0;
+ nbif_return_sdesc.sdesc.m_aix = atom_val(am_Empty);
+ nbif_return_sdesc.sdesc.f_aix = atom_val(am_return);
+ nbif_return_sdesc.sdesc.a = 0;
+
hipe_init_sdesc_table(&nbif_return_sdesc.sdesc);
}
diff --git a/erts/emulator/hipe/hipe_x86_signal.c b/erts/emulator/hipe/hipe_x86_signal.c
index 50d08b96d3..1a34ce786c 100644
--- a/erts/emulator/hipe/hipe_x86_signal.c
+++ b/erts/emulator/hipe/hipe_x86_signal.c
@@ -51,7 +51,7 @@
#endif
#include "hipe_signal.h"
-#if __GLIBC__ == 2 && (__GLIBC_MINOR__ >= 3)
+#if defined(__GLIBC__) && __GLIBC__ == 2 && (__GLIBC_MINOR__ >= 3)
/*
* __libc_sigaction() is the core routine.
* Without libpthread, sigaction() and __sigaction() are both aliases
diff --git a/erts/emulator/hipe/hipe_x86_stack.c b/erts/emulator/hipe/hipe_x86_stack.c
index f1559b1451..31582b3a2e 100644
--- a/erts/emulator/hipe/hipe_x86_stack.c
+++ b/erts/emulator/hipe/hipe_x86_stack.c
@@ -52,15 +52,14 @@ void hipe_print_nstack(Process *p)
{
Eterm *nsp;
Eterm *nsp_end;
- struct sdesc sdesc0;
- const struct sdesc *sdesc1;
- const struct sdesc *sdesc;
+ struct hipe_sdesc sdesc0;
+ const struct hipe_sdesc *sdesc1;
+ const struct hipe_sdesc *sdesc;
unsigned long ra;
unsigned long exnra;
unsigned int mask;
unsigned int sdesc_size;
unsigned int i;
- unsigned int nstkarity;
static const char dashes[2*sizeof(long)+5] = {
[0 ... 2*sizeof(long)+3] = '-'
};
@@ -68,10 +67,10 @@ void hipe_print_nstack(Process *p)
nsp = p->hipe.nsp;
nsp_end = p->hipe.nstend;
- nstkarity = p->hipe.narity - NR_ARG_REGS;
- if ((int)nstkarity < 0)
- nstkarity = 0;
- sdesc0.summary = nstkarity;
+ sdesc0.fsize = 0;
+ sdesc0.has_exnra = 0;
+ sdesc0.stk_nargs = (p->hipe.narity < NR_ARG_REGS ? 0 :
+ p->hipe.narity - NR_ARG_REGS);
sdesc0.livebits[0] = ~1;
sdesc = &sdesc0;
@@ -158,7 +157,7 @@ void hipe_print_nstack(Process *p)
#define MINSTACK 128
#define NSKIPFRAMES 4
-void hipe_update_stack_trap(Process *p, const struct sdesc *sdesc)
+void hipe_update_stack_trap(Process *p, const struct hipe_sdesc *sdesc)
{
Eterm *nsp;
Eterm *nsp_end;
@@ -199,7 +198,7 @@ void hipe_update_stack_trap(Process *p, const struct sdesc *sdesc)
void (*hipe_handle_stack_trap(Process *p))(void)
{
void (*ngra)(void) = p->hipe.ngra;
- const struct sdesc *sdesc = hipe_find_sdesc((unsigned long)ngra);
+ const struct hipe_sdesc *sdesc = hipe_find_sdesc((unsigned long)ngra);
hipe_update_stack_trap(p, sdesc);
return ngra;
}
@@ -220,7 +219,7 @@ void hipe_find_handler(Process *p)
unsigned long ra;
unsigned long exnra;
unsigned int arity;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
unsigned int nstkarity;
nsp = p->hipe.nsp;
@@ -262,7 +261,7 @@ int hipe_fill_stacktrace(Process *p, int depth, Eterm **trace)
Eterm *nsp_end;
unsigned long ra, prev_ra;
unsigned int arity;
- const struct sdesc *sdesc;
+ const struct hipe_sdesc *sdesc;
unsigned int nstkarity;
int i;
diff --git a/erts/emulator/internal_doc/DelayedDealloc.md b/erts/emulator/internal_doc/DelayedDealloc.md
index b7d87b839f..4b7c774141 100644
--- a/erts/emulator/internal_doc/DelayedDealloc.md
+++ b/erts/emulator/internal_doc/DelayedDealloc.md
@@ -19,7 +19,7 @@ the Erlang VM where memory allocation/deallocation is frequent and
references to memory also are passed around between threads this
solution will also scale poorly due to lock contention.
-Functionality Used to Adress This problem
+Functionality Used to Address This problem
-----------------------------------------
In order to reduce contention due to locking of allocator instances we
@@ -44,12 +44,12 @@ deallocation.
The "message box" is implemented using a lock free single linked list
through the memory blocks to deallocate. The order of the elements in
this list is not important. Insertion of new free blocks will be made
-somewhere near the end of this list. Requirering that the new blocks
+somewhere near the end of this list. Requiring that the new blocks
need to be inserted at the end would cause unnecessary contention when
large amount of memory blocks are inserted simultaneous by multiple
threads.
-The data structure refering to this single linked list cover two cache
+The data structure referring to this single linked list cover two cache
lines. One cache line containing information about the head of the
list, and one cache line containing information about the tail of the
list. This in order to reduce cache line ping ponging of this data
@@ -65,21 +65,21 @@ list. In the uncontended case it will point to the end of the list,
but when simultaneous insert operations are performed it will point to
something near the end of the list.
-When insterting an element one will try to write a pointer to the new
+When inserting an element one will try to write a pointer to the new
element in the next pointer of the element pointed to by the last
pointer. This is done using an atomic compare and swap that expects
-the next pointer to be `NULL`. If this succeds the thread performing
+the next pointer to be `NULL`. If this succeeds the thread performing
this operation moves the last pointer to point to the newly inserted
element.
If the atomic compare and swap described above failed, the last
pointer didn't point to the last element. In this case we need to
-insert the new element somewhere inbetween the element that the last
+insert the new element somewhere between the element that the last
pointer pointed to and the actual last element. If we do it this way
the last pointer will eventually end up at the last element when
threads stop adding new elements. When trying to insert somewhere near
the end and failing to do so, the inserting thread sometimes moves to
-the next element and somtimes tries with the same element again. This
+the next element and sometimes tries with the same element again. This
in order to spread the inserted elements during heavy contention. That
is, we try to spread the modifications of memory to different
locations instead of letting all threads continue to try to modify the
@@ -87,7 +87,7 @@ same location in memory.
### Head ###
-The head contains pointers to begining of the list (`head.first`), and
+The head contains pointers to beginning of the list (`head.first`), and
to the first block which other threads may refer to
(`head.unref_end`). Blocks between these pointers are only referred to
by the head part of the data structure which is only used by the
@@ -142,7 +142,7 @@ contains this "marker" element.
### Contention ###
-When elements are continously inserted by threads not owning the
+When elements are continuously inserted by threads not owning the
allocator instance, the thread owning the allocator instance will be
able to work more or less undisturbed by other threads at the head end
of the list. At the tail end large amounts of simultaneous inserts may
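
As a hedged illustration of the insertion step this document describes, the near-tail insert can be sketched with C11 atomics as below. It is simplified: it always walks toward the true end of the chain, whereas the real allocator may splice in mid-chain and deliberately spreads retries between elements under contention, and it uses its own atomics API rather than <stdatomic.h>.

    #include <stdatomic.h>
    #include <stddef.h>

    struct block {
        _Atomic(struct block *) next;
        /* payload ... */
    };

    static void insert_near_end(_Atomic(struct block *) *last, struct block *new_blk)
    {
        struct block *tail = atomic_load(last);

        atomic_store(&new_blk->next, NULL);
        for (;;) {
            struct block *expected = NULL;
            /* try to link the new block after the element "last" points to */
            if (atomic_compare_exchange_strong(&tail->next, &expected, new_blk)) {
                /* linked in; best effort to advance the last pointer */
                atomic_compare_exchange_strong(last, &tail, new_blk);
                return;
            }
            /* tail was not the last element; step towards the real end */
            tail = expected;
        }
    }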
diff --git a/erts/emulator/internal_doc/PortSignals.md b/erts/emulator/internal_doc/PortSignals.md
index b1afb7c5cb..8782ae4e17 100644
--- a/erts/emulator/internal_doc/PortSignals.md
+++ b/erts/emulator/internal_doc/PortSignals.md
@@ -204,7 +204,7 @@ high limit is 8 KB and the low limit is 4 KB.
Previously all operations sending signals to ports began by acquiring
the port lock, then performed preparations for sending the signal, and
-then finaly sent the signal. The preparations typically included
+then finally sent the signal. The preparations typically included
inspecting the state of the port, and preparing the data to pass along
with the signal. The preparation of data is frequently quite time
consuming, and did not really depend on the port. That is we would
diff --git a/erts/emulator/internal_doc/SuperCarrier.md b/erts/emulator/internal_doc/SuperCarrier.md
index 0ad6af41de..acf722ea37 100644
--- a/erts/emulator/internal_doc/SuperCarrier.md
+++ b/erts/emulator/internal_doc/SuperCarrier.md
@@ -151,7 +151,7 @@ To find the smallest free segment that will satisfy a carrier allocation
size (`stree`). We search in this tree at allocation. If no free segment of
sufficient size was found, the area (`sa` or `sua`) is instead expanded.
If two or more free segments with equal size exist, the one at lowest
-address is choosen for `sa` and highest address for `sua`.
+address is chosen for `sa` and highest address for `sua`.
At carrier deallocation, we want to coalesce with any adjacent free
segments, to form one large free segment. To do that, all free
diff --git a/erts/emulator/internal_doc/ThreadProgress.md b/erts/emulator/internal_doc/ThreadProgress.md
index 6118bcf0f6..03a802f904 100644
--- a/erts/emulator/internal_doc/ThreadProgress.md
+++ b/erts/emulator/internal_doc/ThreadProgress.md
@@ -60,7 +60,7 @@ threads are managed threads.
### Thread Progress Events ###
Any thread in the system may use the thread progress functionality in
-order to determine when the following events have occured at least
+order to determine when the following events have occurred at least
once in all managed threads:
1. The thread has returned from other code to a known state in the
@@ -160,7 +160,7 @@ calling the following functions:
* `int erts_thr_progress_leader_update(ErtsSchedulerData *esdp)` -
Leader update thread progress.
-Unmanaged threads can delay thread progress beeing made:
+Unmanaged threads can delay thread progress being made:
* `ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay(void)` -
Delay thread progress.
@@ -251,7 +251,7 @@ doing so. If not zero, the leader isn't allowed to increment the
global counter, and needs to wait before it can do this. When it is
zero, it swaps the `waiting` and `current` counters before increasing
the global counter. From now on the new `waiting` counter will
-decrease, so that it eventualy will reach zero, making it possible to
+decrease, so that it eventually will reach zero, making it possible to
increment the global counter the next time. If we only used one
reference counter it would potentially be held above zero for ever by
different unmanaged threads.
@@ -261,7 +261,7 @@ prevent the next increment of the global counter, but instead the
increment after that. This is sufficient since the global counter
needs to be incremented two times before thread progress has been
made. It is also desirable not to prevent the first increment, since
-the likelyhood increases that the delay is withdrawn before any
+the likelihood increases that the delay is withdrawn before any
increment of the global counter is delayed. That is, the operation
will cause as little disruption as possible.
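
A hedged sketch of the two delay counters described above, with C11 atomics and invented names (the real implementation lives in erl_thr_progress.c and handles races this sketch glosses over): unmanaged threads bump the current counter to delay progress, and the leader swaps the counter roles and bumps the global counter only once the waiting counter has drained.

    #include <stdatomic.h>

    static atomic_uint delay_cnt[2];        /* "current" and "waiting" roles rotate */
    static atomic_uint current_ix;          /* index new delays are added to */
    static atomic_ulong global_counter;

    /* unmanaged thread: delay progress; returns a handle for undelay */
    static unsigned delay(void)
    {
        unsigned ix = atomic_load(&current_ix);
        atomic_fetch_add(&delay_cnt[ix], 1);
        return ix;
    }

    static void undelay(unsigned ix)
    {
        atomic_fetch_sub(&delay_cnt[ix], 1);
    }

    /* leader: may only tick once the waiting counter has reached zero */
    static int leader_try_increment(void)
    {
        unsigned cur = atomic_load(&current_ix);
        unsigned waiting = cur ^ 1;

        if (atomic_load(&delay_cnt[waiting]) != 0)
            return 0;                        /* still delayed; retry later */
        atomic_store(&current_ix, waiting);  /* swap waiting and current */
        atomic_fetch_add(&global_counter, 1);
        return 1;
    }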
diff --git a/erts/emulator/internal_doc/Tracing.md b/erts/emulator/internal_doc/Tracing.md
index 728f315263..7f97f64765 100644
--- a/erts/emulator/internal_doc/Tracing.md
+++ b/erts/emulator/internal_doc/Tracing.md
@@ -51,7 +51,7 @@ the new instrumented code. Normally loaded code can only be reached
through external functions calls. Trace settings must be activated
instantaneously without the need of external function calls.
-The choosen solution is instead for tracing to use the technique of
+The chosen solution is instead for tracing to use the technique of
replication applied on the data structures for breakpoints. Two
generations of breakpoints are kept and identified by index of 0 and
1. The global atomic variables `erts_active_bp_index` will determine
diff --git a/erts/emulator/pcre/pcre_exec.c b/erts/emulator/pcre/pcre_exec.c
index 1cab78cdd8..07e662ff07 100644
--- a/erts/emulator/pcre/pcre_exec.c
+++ b/erts/emulator/pcre/pcre_exec.c
@@ -1134,7 +1134,7 @@ for (;;)
the result of a recursive call to match() whatever happened so it was
possible to reduce stack usage by turning this into a tail recursion,
except in the case of a possibly empty group. However, now that there is
- the possiblity of (*THEN) occurring in the final alternative, this
+ the possibility of (*THEN) occurring in the final alternative, this
optimization is no longer always possible.
We can optimize if we know there are no (*THEN)s in the pattern; at present
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 44a77f3ea5..1ef9fa2a05 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -38,6 +38,7 @@
#include "erl_port.h"
#include "erl_check_io.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
#include "dtrace-wrapper.h"
#include "lttng-wrapper.h"
#define ERTS_WANT_TIMER_WHEEL_API
@@ -53,6 +54,8 @@ typedef char EventStateType;
#define ERTS_EV_TYPE_DRV_SEL ((EventStateType) 1) /* driver_select */
#define ERTS_EV_TYPE_DRV_EV ((EventStateType) 2) /* driver_event */
#define ERTS_EV_TYPE_STOP_USE ((EventStateType) 3) /* pending stop_select */
+#define ERTS_EV_TYPE_NIF ((EventStateType) 4) /* enif_select */
+#define ERTS_EV_TYPE_STOP_NIF ((EventStateType) 5) /* pending nif stop */
typedef char EventStateFlags;
#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */
@@ -122,7 +125,11 @@ typedef struct {
#if ERTS_CIO_HAVE_DRV_EVENT
ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */
#endif
- erts_driver_t* drv_ptr; /* ERTS_EV_TYPE_STOP_USE */
+ ErtsNifSelectDataState *nif; /* ERTS_EV_TYPE_NIF */
+ union {
+ erts_driver_t* drv_ptr; /* ERTS_EV_TYPE_STOP_USE */
+ ErtsResource* resource; /* ERTS_EV_TYPE_STOP_NIF */
+ }stop;
} driver;
ErtsPollEvents events;
unsigned short remove_cnt; /* number of removed_fd's referring to this fd */
@@ -194,7 +201,8 @@ static ERTS_INLINE ErtsDrvEventState* hash_new_drv_ev_state(ErtsSysFdType fd)
#if ERTS_CIO_HAVE_DRV_EVENT
tmpl.driver.event = NULL;
#endif
- tmpl.driver.drv_ptr = NULL;
+ tmpl.driver.nif = NULL;
+ tmpl.driver.stop.drv_ptr = NULL;
tmpl.events = 0;
tmpl.remove_cnt = 0;
tmpl.type = ERTS_EV_TYPE_NONE;
@@ -211,24 +219,35 @@ static ERTS_INLINE void hash_erase_drv_ev_state(ErtsDrvEventState *state)
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
static void stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode);
-static void select_steal(ErlDrvPort ix, ErtsDrvEventState *state,
- int mode, int on);
-static void print_select_op(erts_dsprintf_buf_t *dsbufp,
- ErlDrvPort ix, ErtsSysFdType fd, int mode, int on);
+static void drv_select_steal(ErlDrvPort ix, ErtsDrvEventState *state,
+ int mode, int on);
+static void nif_select_steal(ErtsDrvEventState *state, int mode,
+ ErtsResource* resource, Eterm ref);
+
+static void print_drv_select_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, int mode, int on);
+static void print_nif_select_op(erts_dsprintf_buf_t*, ErtsSysFdType,
+ int mode, ErtsResource*, Eterm ref);
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
-static void select_large_fd_error(ErlDrvPort, ErtsSysFdType, int, int);
+static void drv_select_large_fd_error(ErlDrvPort, ErtsSysFdType, int, int);
+static void nif_select_large_fd_error(ErtsSysFdType, int, ErtsResource*,Eterm ref);
#endif
#if ERTS_CIO_HAVE_DRV_EVENT
-static void event_steal(ErlDrvPort ix, ErtsDrvEventState *state,
+static void drv_event_steal(ErlDrvPort ix, ErtsDrvEventState *state,
ErlDrvEventData event_data);
-static void print_event_op(erts_dsprintf_buf_t *dsbufp,
- ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
+static void print_drv_event_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
static void event_large_fd_error(ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
#endif
#endif
-static void steal_pending_stop_select(erts_dsprintf_buf_t*, ErlDrvPort,
- ErtsDrvEventState*, int mode, int on);
+static void
+steal_pending_stop_use(erts_dsprintf_buf_t*, ErlDrvPort, ErtsDrvEventState*,
+ int mode, int on);
+static void
+steal_pending_stop_nif(erts_dsprintf_buf_t *dsbufp, ErtsResource*,
+ ErtsDrvEventState *state, int mode, int on);
#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
@@ -263,6 +282,18 @@ alloc_drv_select_data(void)
return dsp;
}
+static ERTS_INLINE ErtsNifSelectDataState *
+alloc_nif_select_data(void)
+{
+ ErtsNifSelectDataState *dsp = erts_alloc(ERTS_ALC_T_NIF_SEL_D_STATE,
+ sizeof(ErtsNifSelectDataState));
+ dsp->in.pid = NIL;
+ dsp->out.pid = NIL;
+ dsp->in.ddeselect_cnt = 0;
+ dsp->out.ddeselect_cnt = 0;
+ return dsp;
+}
+
static ERTS_INLINE void
free_drv_select_data(ErtsDrvSelectDataState *dsp)
{
@@ -271,6 +302,12 @@ free_drv_select_data(ErtsDrvSelectDataState *dsp)
erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, dsp);
}
+static ERTS_INLINE void
+free_nif_select_data(ErtsNifSelectDataState *dsp)
+{
+ erts_free(ERTS_ALC_T_NIF_SEL_D_STATE, dsp);
+}
+
#if ERTS_CIO_HAVE_DRV_EVENT
static ERTS_INLINE ErtsDrvEventDataState *
@@ -352,6 +389,7 @@ forget_removed(struct pollset_info* psi)
erts_smp_spin_unlock(&psi->removed_list_lock);
while (fdlp) {
+ ErtsResource* resource = NULL;
erts_driver_t* drv_ptr = NULL;
erts_smp_mtx_t* mtx;
ErtsSysFdType fd;
@@ -372,15 +410,25 @@ forget_removed(struct pollset_info* psi)
ASSERT(state->remove_cnt > 0);
if (--state->remove_cnt == 0) {
switch (state->type) {
+ case ERTS_EV_TYPE_STOP_NIF:
+ /* Now we can call stop */
+ resource = state->driver.stop.resource;
+ state->driver.stop.resource = NULL;
+ ASSERT(resource);
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags &= ~ERTS_EV_FLAG_USED;
+ goto case_ERTS_EV_TYPE_NONE;
+
case ERTS_EV_TYPE_STOP_USE:
/* Now we can call stop_select */
- drv_ptr = state->driver.drv_ptr;
+ drv_ptr = state->driver.stop.drv_ptr;
ASSERT(drv_ptr);
state->type = ERTS_EV_TYPE_NONE;
state->flags &= ~ERTS_EV_FLAG_USED;
- state->driver.drv_ptr = NULL;
+ state->driver.stop.drv_ptr = NULL;
/* Fall through */
- case ERTS_EV_TYPE_NONE:
+ case ERTS_EV_TYPE_NONE:
+ case_ERTS_EV_TYPE_NONE:
#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
hash_erase_drv_ev_state(state);
#endif
@@ -403,6 +451,11 @@ forget_removed(struct pollset_info* psi)
erts_ddll_dereference_driver(drv_ptr->handle);
}
}
+ if (resource) {
+ erts_resource_stop(resource, (ErlNifEvent)fd, 0);
+ enif_release_resource(resource->data);
+ }
+
tofree = fdlp;
fdlp = fdlp->next;
removed_fd_free(tofree);
@@ -440,7 +493,8 @@ grow_drv_ev_state(int min_ix)
#if ERTS_CIO_HAVE_DRV_EVENT
drv_ev_state[i].driver.event = NULL;
#endif
- drv_ev_state[i].driver.drv_ptr = NULL;
+ drv_ev_state[i].driver.stop.drv_ptr = NULL;
+ drv_ev_state[i].driver.nif = NULL;
drv_ev_state[i].events = 0;
drv_ev_state[i].remove_cnt = 0;
drv_ev_state[i].type = ERTS_EV_TYPE_NONE;
@@ -480,6 +534,7 @@ abort_tasks(ErtsDrvEventState *state, int mode)
ERTS_EV_TYPE_DRV_EV);
return;
#endif
+ case ERTS_EV_TYPE_NIF:
case ERTS_EV_TYPE_NONE:
return;
default:
@@ -534,6 +589,14 @@ deselect(ErtsDrvEventState *state, int mode)
if (!(state->events)) {
switch (state->type) {
+ case ERTS_EV_TYPE_NIF:
+ state->driver.nif->in.pid = NIL;
+ state->driver.nif->out.pid = NIL;
+ state->driver.nif->in.ddeselect_cnt = 0;
+ state->driver.nif->out.ddeselect_cnt = 0;
+ enif_release_resource(state->driver.stop.resource);
+ state->driver.stop.resource = NULL;
+ break;
case ERTS_EV_TYPE_DRV_SEL:
state->driver.select->inport = NIL;
state->driver.select->outport = NIL;
@@ -569,7 +632,8 @@ check_fd_cleanup(ErtsDrvEventState *state,
#if ERTS_CIO_HAVE_DRV_EVENT
ErtsDrvEventDataState **free_event,
#endif
- ErtsDrvSelectDataState **free_select)
+ ErtsDrvSelectDataState **free_select,
+ ErtsNifSelectDataState **free_nif)
{
erts_aint_t current_cio_time;
@@ -586,6 +650,12 @@ check_fd_cleanup(ErtsDrvEventState *state,
state->driver.select = NULL;
}
+ *free_nif = NULL;
+ if (state->driver.nif && (state->type != ERTS_EV_TYPE_NIF)) {
+ *free_nif = state->driver.nif;
+ state->driver.nif = NULL;
+ }
+
#if ERTS_CIO_HAVE_DRV_EVENT
*free_event = NULL;
if (state->driver.event
@@ -617,12 +687,14 @@ check_cleanup_active_fd(ErtsSysFdType fd,
ErtsPollControlEntry *pce,
int *pce_ix,
#endif
- erts_aint_t current_cio_time)
+ erts_aint_t current_cio_time,
+ int may_sleep)
{
ErtsDrvEventState *state;
int active = 0;
erts_smp_mtx_t *mtx = fd_mtx(fd);
void *free_select = NULL;
+ void *free_nif = NULL;
#if ERTS_CIO_HAVE_DRV_EVENT
void *free_event = NULL;
#endif
@@ -682,6 +754,39 @@ check_cleanup_active_fd(ErtsSysFdType fd,
}
}
+ if (state->driver.nif) {
+ ErtsPollEvents rm_events = 0;
+ if (state->driver.nif->in.ddeselect_cnt) {
+ ASSERT(state->type == ERTS_EV_TYPE_NIF);
+ ASSERT(state->events & ERTS_POLL_EV_IN);
+ ASSERT(is_nil(state->driver.nif->in.pid));
+ if (may_sleep || state->driver.nif->in.ddeselect_cnt == 1) {
+ rm_events = ERTS_POLL_EV_IN;
+ state->driver.nif->in.ddeselect_cnt = 0;
+ }
+ }
+ if (state->driver.nif->out.ddeselect_cnt) {
+ ASSERT(state->type == ERTS_EV_TYPE_NIF);
+ ASSERT(state->events & ERTS_POLL_EV_OUT);
+ ASSERT(is_nil(state->driver.nif->out.pid));
+ if (may_sleep || state->driver.nif->out.ddeselect_cnt == 1) {
+ rm_events |= ERTS_POLL_EV_OUT;
+ state->driver.nif->out.ddeselect_cnt = 0;
+ }
+ }
+ if (rm_events) {
+ int do_wake = 0;
+ state->events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd,
+ rm_events, 0, &do_wake);
+ }
+ if (state->events)
+ active = 1;
+ else if (state->type != ERTS_EV_TYPE_NIF) {
+ free_nif = state->driver.nif;
+ state->driver.nif = NULL;
+ }
+ }
+
#if ERTS_CIO_HAVE_DRV_EVENT
if (state->driver.event) {
if (is_iotask_active(&state->driver.event->iotask, current_cio_time)) {
@@ -722,6 +827,8 @@ check_cleanup_active_fd(ErtsSysFdType fd,
if (free_select)
free_drv_select_data(free_select);
+ if (free_nif)
+ free_nif_select_data(free_nif);
#if ERTS_CIO_HAVE_DRV_EVENT
if (free_event)
free_drv_event_data(free_event);
@@ -746,7 +853,7 @@ check_cleanup_active_fd(ErtsSysFdType fd,
}
static void
-check_cleanup_active_fds(erts_aint_t current_cio_time)
+check_cleanup_active_fds(erts_aint_t current_cio_time, int may_sleep)
{
int six = pollset.active_fd.six;
int eix = pollset.active_fd.eix;
@@ -773,7 +880,8 @@ check_cleanup_active_fds(erts_aint_t current_cio_time)
pctrl_entries,
&pctrl_ix,
#endif
- current_cio_time)) {
+ current_cio_time,
+ may_sleep)) {
no--;
if (ix == six) {
#ifdef DEBUG
@@ -807,13 +915,30 @@ check_cleanup_active_fds(erts_aint_t current_cio_time)
erts_smp_atomic32_set_relb(&pollset.active_fd.no, no);
}
+static void grow_active_fds(void)
+{
+ ASSERT(pollset.active_fd.six == pollset.active_fd.eix);
+ pollset.active_fd.six = 0;
+ pollset.active_fd.eix = pollset.active_fd.size;
+ pollset.active_fd.size += ERTS_ACTIVE_FD_INC;
+ pollset.active_fd.array = erts_realloc(ERTS_ALC_T_ACTIVE_FD_ARR,
+ pollset.active_fd.array,
+ pollset.active_fd.size*sizeof(ErtsSysFdType));
+#ifdef DEBUG
+ {
+ int i;
+ for (i = pollset.active_fd.eix + 1; i < pollset.active_fd.size; i++)
+ pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID;
+ }
+#endif
+}
+
static ERTS_INLINE void
add_active_fd(ErtsSysFdType fd)
{
int eix = pollset.active_fd.eix;
int size = pollset.active_fd.size;
-
pollset.active_fd.array[eix] = fd;
erts_smp_atomic32_set_relb(&pollset.active_fd.no,
@@ -823,25 +948,11 @@ add_active_fd(ErtsSysFdType fd)
eix++;
if (eix >= size)
eix = 0;
- if (pollset.active_fd.six == eix) {
- pollset.active_fd.six = 0;
- eix = size;
- size += ERTS_ACTIVE_FD_INC;
- pollset.active_fd.array = erts_realloc(ERTS_ALC_T_ACTIVE_FD_ARR,
- pollset.active_fd.array,
- sizeof(ErtsSysFdType)*size);
- pollset.active_fd.size = size;
-#ifdef DEBUG
- {
- int i;
- for (i = eix + 1; i < size; i++)
- pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID;
- }
-#endif
+ pollset.active_fd.eix = eix;
+ if (pollset.active_fd.six == eix) {
+ grow_active_fds();
}
-
- pollset.active_fd.eix = eix;
}
int
@@ -863,6 +974,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ErtsDrvEventDataState *free_event = NULL;
#endif
ErtsDrvSelectDataState *free_select = NULL;
+ ErtsNifSelectDataState *free_nif = NULL;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(name, 64);
#endif
@@ -878,7 +990,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
return -1;
}
if (fd >= max_fds) {
- select_large_fd_error(ix, fd, mode, on);
+ drv_select_large_fd_error(ix, fd, mode, on);
return -1;
}
grow_drv_ev_state(fd);
@@ -916,26 +1028,38 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
}
#endif
+ switch (state->type) {
#if ERTS_CIO_HAVE_DRV_EVENT
- if (state->type == ERTS_EV_TYPE_DRV_EV)
- select_steal(ix, state, mode, on);
+ case ERTS_EV_TYPE_DRV_EV:
#endif
- if (state->type == ERTS_EV_TYPE_STOP_USE) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_select_op(dsbufp, ix, state->fd, mode, on);
- steal_pending_stop_select(dsbufp, ix, state, mode, on);
- if (state->type == ERTS_EV_TYPE_STOP_USE) {
- ret = 0;
- goto done; /* stop_select still pending */
- }
- ASSERT(state->type == ERTS_EV_TYPE_NONE);
- }
+ case ERTS_EV_TYPE_NIF:
+ drv_select_steal(ix, state, mode, on);
+ break;
+ case ERTS_EV_TYPE_STOP_USE: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_drv_select_op(dsbufp, ix, state->fd, mode, on);
+ steal_pending_stop_use(dsbufp, ix, state, mode, on);
+ if (state->type == ERTS_EV_TYPE_STOP_USE) {
+ ret = 0;
+ goto done; /* stop_select still pending */
+ }
+ ASSERT(state->type == ERTS_EV_TYPE_NONE);
+ break;
+ }
+ case ERTS_EV_TYPE_STOP_NIF: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_drv_select_op(dsbufp, ix, state->fd, mode, on);
+ steal_pending_stop_nif(dsbufp, NULL, state, mode, on);
+ ASSERT(state->type == ERTS_EV_TYPE_NONE);
+ break;
+
+ }}
if (mode & ERL_DRV_READ) {
if (state->type == ERTS_EV_TYPE_DRV_SEL) {
Eterm owner = state->driver.select->inport;
if (owner != id && is_not_nil(owner))
- select_steal(ix, state, mode, on);
+ drv_select_steal(ix, state, mode, on);
}
ctl_events |= ERTS_POLL_EV_IN;
}
@@ -943,7 +1067,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (state->type == ERTS_EV_TYPE_DRV_SEL) {
Eterm owner = state->driver.select->outport;
if (owner != id && is_not_nil(owner))
- select_steal(ix, state, mode, on);
+ drv_select_steal(ix, state, mode, on);
}
ctl_events |= ERTS_POLL_EV_OUT;
}
@@ -1033,7 +1157,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
else {
/* Not safe to close fd, postpone stop_select callback. */
state->type = ERTS_EV_TYPE_STOP_USE;
- state->driver.drv_ptr = drv_ptr;
+ state->driver.stop.drv_ptr = drv_ptr;
if (drv_ptr->handle) {
erts_ddll_reference_referenced_driver(drv_ptr->handle);
}
@@ -1050,7 +1174,8 @@ done:
#if ERTS_CIO_HAVE_DRV_EVENT
&free_event,
#endif
- &free_select);
+ &free_select,
+ &free_nif);
done_unknown:
erts_smp_mtx_unlock(fd_mtx(fd));
@@ -1063,6 +1188,9 @@ done_unknown:
}
if (free_select)
free_drv_select_data(free_select);
+ if (free_nif)
+ free_nif_select_data(free_nif);
+
#if ERTS_CIO_HAVE_DRV_EVENT
if (free_event)
free_drv_event_data(free_event);
@@ -1071,6 +1199,261 @@ done_unknown:
}
int
+ERTS_CIO_EXPORT(enif_select)(ErlNifEnv* env,
+ ErlNifEvent e,
+ enum ErlNifSelectFlags mode,
+ void* obj,
+ const ErlNifPid* pid,
+ Eterm ref)
+{
+ int on;
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsSysFdType fd = (ErtsSysFdType) e;
+ ErtsPollEvents ctl_events = (ErtsPollEvents) 0;
+ ErtsPollEvents new_events, old_events;
+ ErtsDrvEventState *state;
+ int wake_poller;
+ int ret;
+ enum { NO_STOP=0, CALL_STOP, CALL_STOP_AND_RELEASE } call_stop = NO_STOP;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ ErtsDrvEventDataState *free_event = NULL;
+#endif
+ ErtsDrvSelectDataState *free_select = NULL;
+ ErtsNifSelectDataState *free_nif = NULL;
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(name, 64);
+#endif
+
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
+ if (fd < 0) {
+ return INT_MIN | ERL_NIF_SELECT_INVALID_EVENT;
+ }
+ if (fd >= max_fds) {
+ nif_select_large_fd_error(fd, mode, resource, ref);
+ return INT_MIN | ERL_NIF_SELECT_INVALID_EVENT;
+ }
+ grow_drv_ev_state(fd);
+ }
+#endif
+
+ erts_smp_mtx_lock(fd_mtx(fd));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ state = &drv_ev_state[(int) fd];
+#else
+ state = hash_get_drv_ev_state(fd); /* may be NULL! */
+#endif
+
+ if (mode & ERL_NIF_SELECT_STOP) {
+ ASSERT(resource->type->stop);
+ if (IS_FD_UNKNOWN(state)) {
+ /* fast track to stop callback */
+ call_stop = CALL_STOP;
+ ret = ERL_NIF_SELECT_STOP_CALLED;
+ goto done_unknown;
+ }
+ on = 0;
+ mode = ERL_DRV_READ | ERL_DRV_WRITE | ERL_DRV_USE;
+ wake_poller = 1; /* to eject fd from pollset (if needed) */
+ ctl_events = ERTS_POLL_EV_IN | ERTS_POLL_EV_OUT;
+ }
+ else {
+ on = 1;
+ ASSERT(mode);
+ wake_poller = 0;
+ if (mode & ERL_DRV_READ) {
+ ctl_events |= ERTS_POLL_EV_IN;
+ }
+ if (mode & ERL_DRV_WRITE) {
+ ctl_events |= ERTS_POLL_EV_OUT;
+ }
+ }
+
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (state == NULL) {
+ state = hash_new_drv_ev_state(fd);
+ }
+#endif
+
+ switch (state->type) {
+ case ERTS_EV_TYPE_NIF:
+ /*
+ * Changing resource is considered stealing.
+ * Changing process and/or ref is ok (I think?).
+ */
+ if (state->driver.stop.resource != resource)
+ nif_select_steal(state, ERL_DRV_READ | ERL_DRV_WRITE, resource, ref);
+ break;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV:
+#endif
+ case ERTS_EV_TYPE_DRV_SEL:
+ nif_select_steal(state, mode, resource, ref);
+ break;
+ case ERTS_EV_TYPE_STOP_USE: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_nif_select_op(dsbufp, fd, mode, resource, ref);
+ steal_pending_stop_use(dsbufp, ERTS_INVALID_ERL_DRV_PORT, state, mode, on);
+ ASSERT(state->type == ERTS_EV_TYPE_NONE);
+ break;
+ }
+ case ERTS_EV_TYPE_STOP_NIF: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_nif_select_op(dsbufp, fd, mode, resource, ref);
+ steal_pending_stop_nif(dsbufp, resource, state, mode, on);
+ if (state->type == ERTS_EV_TYPE_STOP_NIF) {
+ ret = ERL_NIF_SELECT_STOP_SCHEDULED; /* ?? */
+ goto done;
+ }
+ ASSERT(state->type == ERTS_EV_TYPE_NONE);
+ break;
+ }}
+
+ ASSERT((state->type == ERTS_EV_TYPE_NIF) ||
+ (state->type == ERTS_EV_TYPE_NONE && !state->events));
+
+ new_events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, ctl_events, on, &wake_poller);
+
+ if (new_events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
+ if (state->type == ERTS_EV_TYPE_NIF && !state->events) {
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags &= ~ERTS_EV_FLAG_USED;
+ state->driver.nif->in.pid = NIL;
+ state->driver.nif->out.pid = NIL;
+ state->driver.nif->in.ddeselect_cnt = 0;
+ state->driver.nif->out.ddeselect_cnt = 0;
+ state->driver.stop.resource = NULL;
+ }
+ ret = INT_MIN | ERL_NIF_SELECT_FAILED;
+ goto done;
+ }
+
+ old_events = state->events;
+
+ ASSERT(on
+ ? (new_events == (state->events | ctl_events))
+ : (new_events == (state->events & ~ctl_events)));
+
+ ASSERT(state->type == ERTS_EV_TYPE_NIF
+ || state->type == ERTS_EV_TYPE_NONE);
+
+ state->events = new_events;
+ if (on) {
+ const Eterm recipient = pid ? pid->pid : env->proc->common.id;
+ Uint32* refn;
+ if (!state->driver.nif)
+ state->driver.nif = alloc_nif_select_data();
+ if (state->type == ERTS_EV_TYPE_NONE) {
+ state->type = ERTS_EV_TYPE_NIF;
+ state->driver.stop.resource = resource;
+ enif_keep_resource(resource->data);
+ }
+ ASSERT(state->type == ERTS_EV_TYPE_NIF);
+ ASSERT(state->driver.stop.resource == resource);
+ if (ctl_events & ERTS_POLL_EV_IN) {
+ state->driver.nif->in.pid = recipient;
+ if (is_immed(ref)) {
+ state->driver.nif->in.immed = ref;
+ } else {
+ ASSERT(is_internal_ref(ref));
+ refn = internal_ref_numbers(ref);
+ state->driver.nif->in.immed = THE_NON_VALUE;
+ state->driver.nif->in.refn[0] = refn[0];
+ state->driver.nif->in.refn[1] = refn[1];
+ state->driver.nif->in.refn[2] = refn[2];
+ }
+ state->driver.nif->in.ddeselect_cnt = 0;
+ }
+ if (ctl_events & ERTS_POLL_EV_OUT) {
+ state->driver.nif->out.pid = recipient;
+ if (is_immed(ref)) {
+ state->driver.nif->out.immed = ref;
+ } else {
+ ASSERT(is_internal_ref(ref));
+ refn = internal_ref_numbers(ref);
+ state->driver.nif->out.immed = THE_NON_VALUE;
+ state->driver.nif->out.refn[0] = refn[0];
+ state->driver.nif->out.refn[1] = refn[1];
+ state->driver.nif->out.refn[2] = refn[2];
+ }
+ state->driver.nif->out.ddeselect_cnt = 0;
+ }
+ ret = 0;
+ }
+ else { /* off */
+ if (state->type == ERTS_EV_TYPE_NIF) {
+ state->driver.nif->in.pid = NIL;
+ state->driver.nif->out.pid = NIL;
+ state->driver.nif->in.ddeselect_cnt = 0;
+ state->driver.nif->out.ddeselect_cnt = 0;
+ if (old_events != 0) {
+ remember_removed(state, &pollset);
+ }
+ }
+ ASSERT(new_events==0);
+ if (state->remove_cnt == 0 || !wake_poller) {
+ /*
+ * Safe to close fd now as it is not in pollset
+ * or there was no need to eject fd (kernel poll)
+ */
+ if (state->type == ERTS_EV_TYPE_NIF) {
+ ASSERT(state->driver.stop.resource == resource);
+ call_stop = CALL_STOP_AND_RELEASE;
+ state->driver.stop.resource = NULL;
+ }
+ else {
+ ASSERT(!state->driver.stop.resource);
+ call_stop = CALL_STOP;
+ }
+ state->type = ERTS_EV_TYPE_NONE;
+ ret = ERL_NIF_SELECT_STOP_CALLED;
+ }
+ else {
+ /* Not safe to close fd, postpone stop_select callback. */
+ if (state->type == ERTS_EV_TYPE_NONE) {
+ ASSERT(!state->driver.stop.resource);
+ state->driver.stop.resource = resource;
+ enif_keep_resource(resource);
+ }
+ state->type = ERTS_EV_TYPE_STOP_NIF;
+ ret = ERL_NIF_SELECT_STOP_SCHEDULED;
+ }
+ }
+
+done:
+
+ check_fd_cleanup(state,
+#if ERTS_CIO_HAVE_DRV_EVENT
+ &free_event,
+#endif
+ &free_select,
+ &free_nif);
+
+done_unknown:
+ erts_smp_mtx_unlock(fd_mtx(fd));
+ if (call_stop) {
+ erts_resource_stop(resource, (ErlNifEvent)fd, 1);
+ if (call_stop == CALL_STOP_AND_RELEASE) {
+ enif_release_resource(resource->data);
+ }
+ }
+ if (free_select)
+ free_drv_select_data(free_select);
+ if (free_nif)
+ free_nif_select_data(free_nif);
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (free_event)
+ free_drv_event_data(free_event);
+#endif
+ return ret;
+}
+
+
+int
ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErlDrvEvent e,
ErlDrvEventData event_data)
@@ -1090,6 +1473,7 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErtsDrvEventDataState *free_event;
#endif
ErtsDrvSelectDataState *free_select;
+ ErtsNifSelectDataState *free_nif;
Port *prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
@@ -1126,12 +1510,12 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
if (state->driver.event->port == id) break;
/*fall through*/
case ERTS_EV_TYPE_DRV_SEL:
- event_steal(ix, state, event_data);
+ drv_event_steal(ix, state, event_data);
break;
case ERTS_EV_TYPE_STOP_USE: {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_event_op(dsbufp, ix, fd, event_data);
- steal_pending_stop_select(dsbufp, ix, state, 0, 1);
+ print_drv_event_op(dsbufp, ix, fd, event_data);
+ steal_pending_stop_use(dsbufp, ix, state, 0, 1);
break;
}
}
@@ -1199,12 +1583,15 @@ done:
#if ERTS_CIO_HAVE_DRV_EVENT
&free_event,
#endif
- &free_select);
+ &free_select,
+ &free_nif);
erts_smp_mtx_unlock(fd_mtx(fd));
if (free_select)
free_drv_select_data(free_select);
+ if (free_nif)
+ free_nif_select_data(free_nif);
#if ERTS_CIO_HAVE_DRV_EVENT
if (free_event)
free_drv_event_data(free_event);
@@ -1240,13 +1627,19 @@ need2steal(ErtsDrvEventState *state, int mode)
state,
ERL_DRV_WRITE);
break;
+ case ERTS_EV_TYPE_NIF:
+ ASSERT(state->driver.stop.resource);
+ do_steal = 1;
+ break;
+
#if ERTS_CIO_HAVE_DRV_EVENT
case ERTS_EV_TYPE_DRV_EV:
do_steal |= chk_stale(state->driver.event->port, state, 0);
break;
#endif
case ERTS_EV_TYPE_STOP_USE:
- ASSERT(0);
+ case ERTS_EV_TYPE_STOP_NIF:
+ ASSERT(0);
break;
default:
break;
@@ -1307,6 +1700,25 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
erts_dsprintf(dsbufp, "\n");
break;
}
+ case ERTS_EV_TYPE_NIF: {
+ Eterm iid = state->driver.nif->in.pid;
+ Eterm oid = state->driver.nif->out.pid;
+ const char* with = "with";
+ ErlNifResourceType* rt = state->driver.stop.resource->type;
+
+ erts_dsprintf(dsbufp, "resource %T:%T", rt->module, rt->name);
+
+ if (is_not_nil(iid)) {
+ erts_dsprintf(dsbufp, " %s in-pid %T", with, iid);
+ with = "and";
+ }
+ if (is_not_nil(oid)) {
+ erts_dsprintf(dsbufp, " %s out-pid %T", with, oid);
+ }
+ deselect(state, 0);
+ erts_dsprintf(dsbufp, "\n");
+ break;
+ }
#if ERTS_CIO_HAVE_DRV_EVENT
case ERTS_EV_TYPE_DRV_EV: {
Eterm eid = state->driver.event->port;
@@ -1324,7 +1736,8 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
break;
}
#endif
- case ERTS_EV_TYPE_STOP_USE: {
+ case ERTS_EV_TYPE_STOP_USE:
+ case ERTS_EV_TYPE_STOP_NIF: {
ASSERT(0);
break;
}
@@ -1335,8 +1748,8 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
}
static void
-print_select_op(erts_dsprintf_buf_t *dsbufp,
- ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
+print_drv_select_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
{
Port *pp = erts_drvport2port(ix);
erts_dsprintf(dsbufp,
@@ -1354,11 +1767,40 @@ print_select_op(erts_dsprintf_buf_t *dsbufp,
}
static void
-select_steal(ErlDrvPort ix, ErtsDrvEventState *state, int mode, int on)
+print_nif_select_op(erts_dsprintf_buf_t *dsbufp,
+ ErtsSysFdType fd, int mode,
+ ErtsResource* resource, Eterm ref)
+{
+ erts_dsprintf(dsbufp,
+ "enif_select(_, %d,%s%s%s, %T:%T, %T) ",
+ (int) GET_FD(fd),
+ mode & ERL_NIF_SELECT_READ ? " READ" : "",
+ mode & ERL_NIF_SELECT_WRITE ? " WRITE" : "",
+ mode & ERL_NIF_SELECT_STOP ? " STOP" : "",
+ resource->type->module,
+ resource->type->name,
+ ref);
+}
+
+
+static void
+drv_select_steal(ErlDrvPort ix, ErtsDrvEventState *state, int mode, int on)
{
if (need2steal(state, mode)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_select_op(dsbufp, ix, state->fd, mode, on);
+ print_drv_select_op(dsbufp, ix, state->fd, mode, on);
+ steal(dsbufp, state, mode);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+}
+
+static void
+nif_select_steal(ErtsDrvEventState *state, int mode,
+ ErtsResource* resource, Eterm ref)
+{
+ if (need2steal(state, mode)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_nif_select_op(dsbufp, state->fd, mode, resource, ref);
steal(dsbufp, state, mode);
erts_send_error_to_logger_nogl(dsbufp);
}
@@ -1374,10 +1816,20 @@ large_fd_error_common(erts_dsprintf_buf_t *dsbufp, ErtsSysFdType fd)
}
static void
-select_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
+drv_select_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
{
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_select_op(dsbufp, ix, fd, mode, on);
+ print_drv_select_op(dsbufp, ix, fd, mode, on);
+ erts_dsprintf(dsbufp, "failed: ");
+ large_fd_error_common(dsbufp, fd);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+static void
+nif_select_large_fd_error(ErtsSysFdType fd, int mode,
+ ErtsResource* resource, Eterm ref)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_nif_select_op(dsbufp, fd, mode, resource, ref);
erts_dsprintf(dsbufp, "failed: ");
large_fd_error_common(dsbufp, fd);
erts_send_error_to_logger_nogl(dsbufp);
@@ -1387,41 +1839,81 @@ select_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
static void
-steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
- ErtsDrvEventState *state, int mode, int on)
+steal_pending_stop_use(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
+ ErtsDrvEventState *state, int mode, int on)
{
+ int cancel = 0;
ASSERT(state->type == ERTS_EV_TYPE_STOP_USE);
- erts_dsprintf(dsbufp, "failed: fd=%d (re)selected before stop_select "
- "was called for driver %s\n",
- (int) GET_FD(state->fd), state->driver.drv_ptr->name);
- erts_send_error_to_logger_nogl(dsbufp);
if (on) {
/* Either fd-owner changed its mind about closing
* or closed fd before stop_select callback and fd is now reused.
* In either case stop_select should not be called.
- */
- state->type = ERTS_EV_TYPE_NONE;
- state->flags &= ~ERTS_EV_FLAG_USED;
- if (state->driver.drv_ptr->handle) {
- erts_ddll_dereference_driver(state->driver.drv_ptr->handle);
- }
- state->driver.drv_ptr = NULL;
+ */
+ cancel = 1;
}
else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
Port *prt = erts_drvport2port(ix);
- erts_driver_t* drv_ptr = prt != ERTS_INVALID_ERL_DRV_PORT ? prt->drv_ptr : NULL;
- if (drv_ptr && drv_ptr != state->driver.drv_ptr) {
- /* Some other driver wants the stop_select callback */
- if (state->driver.drv_ptr->handle) {
- erts_ddll_dereference_driver(state->driver.drv_ptr->handle);
- }
- if (drv_ptr->handle) {
- erts_ddll_reference_referenced_driver(drv_ptr->handle);
- }
- state->driver.drv_ptr = drv_ptr;
- }
+ if (prt == ERTS_INVALID_ERL_DRV_PORT
+ || prt->drv_ptr != state->driver.stop.drv_ptr) {
+ /* Some other driver or nif wants the stop_select callback */
+ cancel = 1;
+ }
+ }
+
+ if (cancel) {
+ erts_dsprintf(dsbufp, "called before stop_select was called for driver '%s'\n",
+ state->driver.stop.drv_ptr->name);
+ if (state->driver.stop.drv_ptr->handle) {
+ erts_ddll_dereference_driver(state->driver.stop.drv_ptr->handle);
+ }
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags &= ~ERTS_EV_FLAG_USED;
+ state->driver.stop.drv_ptr = NULL;
}
+ else {
+ erts_dsprintf(dsbufp, "ignored repeated call\n");
+ }
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+
+static void
+steal_pending_stop_nif(erts_dsprintf_buf_t *dsbufp, ErtsResource* resource,
+ ErtsDrvEventState *state, int mode, int on)
+{
+ int cancel = 0;
+
+ ASSERT(state->type == ERTS_EV_TYPE_STOP_NIF);
+ ASSERT(state->driver.stop.resource);
+
+ if (on) {
+ ASSERT(mode & (ERL_NIF_SELECT_READ | ERL_NIF_SELECT_WRITE));
+ /* Either fd-owner changed its mind about closing
+ * or closed fd before stop callback and fd is now reused.
+ * In either case, stop should not be called.
+ */
+ cancel = 1;
+ }
+ else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE
+ && resource != state->driver.stop.resource) {
+ /* Some driver or other resource wants the stop callback */
+ cancel = 1;
+ }
+
+ if (cancel) {
+ ErlNifResourceType* rt = state->driver.stop.resource->type;
+ erts_dsprintf(dsbufp, "called before stop was called for NIF resource %T:%T\n",
+ rt->module, rt->name);
+
+ enif_release_resource(state->driver.stop.resource);
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags &= ~ERTS_EV_FLAG_USED;
+ state->driver.stop.resource = NULL;
+ }
+ else {
+ erts_dsprintf(dsbufp, "ignored repeated call\n");
+ }
+ erts_send_error_to_logger_nogl(dsbufp);
}
@@ -1429,8 +1921,8 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
#if ERTS_CIO_HAVE_DRV_EVENT
static void
-print_event_op(erts_dsprintf_buf_t *dsbufp,
- ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
+print_drv_event_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
{
Port *pp = erts_drvport2port(ix);
erts_dsprintf(dsbufp, "driver_event(%p, %d, ", ix, (int) fd);
@@ -1447,11 +1939,11 @@ print_event_op(erts_dsprintf_buf_t *dsbufp,
}
static void
-event_steal(ErlDrvPort ix, ErtsDrvEventState *state, ErlDrvEventData event_data)
+drv_event_steal(ErlDrvPort ix, ErtsDrvEventState *state, ErlDrvEventData event_data)
{
if (need2steal(state, ERL_DRV_READ|ERL_DRV_WRITE)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_event_op(dsbufp, ix, state->fd, event_data);
+ print_drv_event_op(dsbufp, ix, state->fd, event_data);
steal(dsbufp, state, ERL_DRV_READ|ERL_DRV_WRITE);
erts_send_error_to_logger_nogl(dsbufp);
}
@@ -1466,7 +1958,7 @@ static void
event_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
{
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- print_event_op(dsbufp, ix, fd, event_data);
+ print_drv_event_op(dsbufp, ix, fd, event_data);
erts_dsprintf(dsbufp, "failed: ");
large_fd_error_common(dsbufp, fd);
erts_send_error_to_logger_nogl(dsbufp);
@@ -1553,6 +2045,55 @@ oready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time)
}
}
+static ERTS_INLINE void
+send_event_tuple(struct erts_nif_select_event* e, ErtsResource* resource,
+ Eterm event_atom)
+{
+ Process* rp = erts_proc_lookup(e->pid);
+ ErtsProcLocks rp_locks = 0;
+ ErtsMessage* mp;
+ ErlOffHeap* ohp;
+ ErtsBinary* bin;
+ Eterm* hp;
+ Uint hsz;
+ Eterm resource_term, ref_term, tuple;
+
+ if (!rp) {
+ return;
+ }
+
+ bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+
+ /* {select, Resource, Ref, EventAtom} */
+ if (is_value(e->immed)) {
+ hsz = 5 + ERTS_MAGIC_REF_THING_SIZE;
+ }
+ else {
+ hsz = 5 + ERTS_MAGIC_REF_THING_SIZE + ERTS_REF_THING_SIZE;
+ }
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
+
+ resource_term = erts_mk_magic_ref(&hp, ohp, &bin->binary);
+ if (is_value(e->immed)) {
+ ASSERT(is_immed(e->immed));
+ ref_term = e->immed;
+ }
+ else {
+ write_ref_thing(hp, e->refn[0], e->refn[1], e->refn[2]);
+ ref_term = make_internal_ref(hp);
+ hp += ERTS_REF_THING_SIZE;
+ }
+ tuple = TUPLE4(hp, am_select, resource_term, ref_term, event_atom);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, rp_locks, mp, tuple, am_system);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+}
+
+
#if ERTS_CIO_HAVE_DRV_EVENT
static ERTS_INLINE void
eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data,
@@ -1575,7 +2116,7 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data,
}
#endif
-static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+static void bad_fd_in_pollset(ErtsDrvEventState *, Eterm inport, Eterm outport);
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
void
@@ -1598,6 +2139,17 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, timeout_time);
}
+#if !ERTS_CIO_DEFER_ACTIVE_EVENTS
+/*
+ * Number of ignored events, for a lingering fd added by enif_select(),
+ * until we deselect the fd-event from the pollset.
+ */
+# define ERTS_NIF_DELAYED_DESELECT 20
+#else
+/* Disable delayed deselect as pollset cannot handle active events */
+# define ERTS_NIF_DELAYED_DESELECT 1
+#endif
+
void
ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
{
@@ -1631,7 +2183,8 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
current_cio_time++;
erts_smp_atomic_set_relb(&erts_check_io_time, current_cio_time);
- check_cleanup_active_fds(current_cio_time);
+ check_cleanup_active_fds(current_cio_time,
+ timeout_time != ERTS_POLL_NO_TIMEOUT);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
@@ -1699,31 +2252,22 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
switch (state->type) {
case ERTS_EV_TYPE_DRV_SEL: { /* Requested via driver_select()... */
- ErtsPollEvents revents;
- ErtsPollEvents revent_mask;
-
- revent_mask = ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT);
- revent_mask |= state->events;
- revents = pollres[i].events & revent_mask;
-
- if (revents & ERTS_POLL_EV_ERR) {
- /*
- * Let the driver handle the error condition. Only input,
- * only output, or nothing might have been selected.
- * We *do not* want to call a callback that corresponds
- * to an event not selected. revents might give us a clue
- * on which one to call.
- */
- if ((revents & ERTS_POLL_EV_IN)
- || (!(revents & ERTS_POLL_EV_OUT)
- && state->events & ERTS_POLL_EV_IN)) {
- iready(state->driver.select->inport, state, current_cio_time);
- }
- else if (state->events & ERTS_POLL_EV_OUT) {
- oready(state->driver.select->outport, state, current_cio_time);
- }
- }
- else if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ ErtsPollEvents revents = pollres[i].events;
+
+ if (revents & ERTS_POLL_EV_ERR) {
+ /*
+ * Handle error events by triggering all in/out events
+ * that the driver has selected.
+ * We *do not* want to call a callback that corresponds
+ * to an event not selected.
+ */
+ revents = state->events;
+ }
+ else {
+ revents &= (state->events | ERTS_POLL_EV_NVAL);
+ }
+
+ if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
if (revents & ERTS_POLL_EV_OUT) {
oready(state->driver.select->outport, state, current_cio_time);
}
@@ -1731,21 +2275,84 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
was read (true also on the non-smp emulator since
oready() may have been called); therefore, update
revents... */
- revents &= ~(~state->events & ERTS_POLL_EV_IN);
+ revents &= state->events;
if (revents & ERTS_POLL_EV_IN) {
iready(state->driver.select->inport, state, current_cio_time);
}
}
else if (revents & ERTS_POLL_EV_NVAL) {
bad_fd_in_pollset(state,
- state->driver.select->inport,
- state->driver.select->outport,
- state->events);
+ state->driver.select->inport,
+ state->driver.select->outport);
add_active_fd(state->fd);
}
break;
}
+ case ERTS_EV_TYPE_NIF: { /* Requested via enif_select()... */
+ struct erts_nif_select_event in = {NIL};
+ struct erts_nif_select_event out = {NIL};
+ ErtsResource* resource;
+ ErtsPollEvents revents = pollres[i].events;
+
+ if (revents & ERTS_POLL_EV_ERR) {
+ /*
+ * Handle error events by triggering all in/out events
+ * that the NIF has selected.
+ * We *do not* want to send a message that corresponds
+ * to an event not selected.
+ */
+ revents = state->events;
+ }
+ else {
+ revents &= (state->events | ERTS_POLL_EV_NVAL);
+ }
+
+ if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ if (revents & ERTS_POLL_EV_OUT) {
+ if (is_not_nil(state->driver.nif->out.pid)) {
+ out = state->driver.nif->out;
+ resource = state->driver.stop.resource;
+ state->driver.nif->out.ddeselect_cnt = ERTS_NIF_DELAYED_DESELECT;
+ state->driver.nif->out.pid = NIL;
+ add_active_fd(state->fd);
+ }
+ else {
+ ASSERT(state->driver.nif->out.ddeselect_cnt >= 2);
+ state->driver.nif->out.ddeselect_cnt--;
+ }
+ }
+ if (revents & ERTS_POLL_EV_IN) {
+ if (is_not_nil(state->driver.nif->in.pid)) {
+ in = state->driver.nif->in;
+ resource = state->driver.stop.resource;
+ state->driver.nif->in.ddeselect_cnt = ERTS_NIF_DELAYED_DESELECT;
+ state->driver.nif->in.pid = NIL;
+ add_active_fd(state->fd);
+ }
+ else {
+ ASSERT(state->driver.nif->in.ddeselect_cnt >= 2);
+ state->driver.nif->in.ddeselect_cnt--;
+ }
+ }
+ }
+ else if (revents & ERTS_POLL_EV_NVAL) {
+ bad_fd_in_pollset(state, NIL, NIL);
+ add_active_fd(state->fd);
+ }
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(fd_mtx(fd));
+#endif
+ if (is_not_nil(in.pid)) {
+ send_event_tuple(&in, resource, am_ready_input);
+ }
+ if (is_not_nil(out.pid)) {
+ send_event_tuple(&out, resource, am_ready_output);
+ }
+ goto next_pollres_unlocked;
+ }
+
#if ERTS_CIO_HAVE_DRV_EVENT
case ERTS_EV_TYPE_DRV_EV: { /* Requested via driver_event()... */
ErlDrvEventData event_data;
@@ -1786,6 +2393,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#ifdef ERTS_SMP
erts_smp_mtx_unlock(fd_mtx(fd));
#endif
+ next_pollres_unlocked:;
}
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
@@ -1794,9 +2402,9 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
}
static void
-bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport,
- Eterm outport, ErtsPollEvents events)
+bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport, Eterm outport)
{
+ ErtsPollEvents events = state->events;
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
if (events & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
@@ -1820,27 +2428,36 @@ bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport,
erts_dsprintf(dsbufp,
"Bad %s fd in erts_poll()! fd=%d, ",
io_str, (int) state->fd);
- if (is_nil(port)) {
- ErtsPortNames *ipnp = erts_get_port_names(inport, ERTS_INVALID_ERL_DRV_PORT);
- ErtsPortNames *opnp = erts_get_port_names(outport, ERTS_INVALID_ERL_DRV_PORT);
- erts_dsprintf(dsbufp, "ports=%T/%T, drivers=%s/%s, names=%s/%s\n",
- is_nil(inport) ? am_undefined : inport,
- is_nil(outport) ? am_undefined : outport,
- ipnp->driver_name ? ipnp->driver_name : "<unknown>",
- opnp->driver_name ? opnp->driver_name : "<unknown>",
- ipnp->name ? ipnp->name : "<unknown>",
- opnp->name ? opnp->name : "<unknown>");
- erts_free_port_names(ipnp);
- erts_free_port_names(opnp);
- }
- else {
- ErtsPortNames *pnp = erts_get_port_names(port, ERTS_INVALID_ERL_DRV_PORT);
- erts_dsprintf(dsbufp, "port=%T, driver=%s, name=%s\n",
- is_nil(port) ? am_undefined : port,
- pnp->driver_name ? pnp->driver_name : "<unknown>",
- pnp->name ? pnp->name : "<unknown>");
- erts_free_port_names(pnp);
- }
+ if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ if (is_nil(port)) {
+ ErtsPortNames *ipnp = erts_get_port_names(inport, ERTS_INVALID_ERL_DRV_PORT);
+ ErtsPortNames *opnp = erts_get_port_names(outport, ERTS_INVALID_ERL_DRV_PORT);
+ erts_dsprintf(dsbufp, "ports=%T/%T, drivers=%s/%s, names=%s/%s\n",
+ is_nil(inport) ? am_undefined : inport,
+ is_nil(outport) ? am_undefined : outport,
+ ipnp->driver_name ? ipnp->driver_name : "<unknown>",
+ opnp->driver_name ? opnp->driver_name : "<unknown>",
+ ipnp->name ? ipnp->name : "<unknown>",
+ opnp->name ? opnp->name : "<unknown>");
+ erts_free_port_names(ipnp);
+ erts_free_port_names(opnp);
+ }
+ else {
+ ErtsPortNames *pnp = erts_get_port_names(port, ERTS_INVALID_ERL_DRV_PORT);
+ erts_dsprintf(dsbufp, "port=%T, driver=%s, name=%s\n",
+ is_nil(port) ? am_undefined : port,
+ pnp->driver_name ? pnp->driver_name : "<unknown>",
+ pnp->name ? pnp->name : "<unknown>");
+ erts_free_port_names(pnp);
+ }
+ }
+ else {
+ ErlNifResourceType* rt;
+ ASSERT(state->type == ERTS_EV_TYPE_NIF);
+ ASSERT(state->driver.stop.resource);
+ rt = state->driver.stop.resource->type;
+ erts_dsprintf(dsbufp, "resource={%T,%T}\n", rt->module, rt->name);
+ }
}
else {
erts_dsprintf(dsbufp, "Bad fd in erts_poll()! fd=%d\n", (int) state->fd);
@@ -1905,6 +2522,10 @@ static void drv_ev_state_free(void *des)
void
ERTS_CIO_EXPORT(erts_init_check_io)(void)
{
+ ERTS_CT_ASSERT((INT_MIN & (ERL_NIF_SELECT_STOP_CALLED |
+ ERL_NIF_SELECT_STOP_SCHEDULED |
+ ERL_NIF_SELECT_INVALID_EVENT |
+ ERL_NIF_SELECT_FAILED)) == 0);
erts_smp_atomic_init_nob(&erts_check_io_time, 0);
erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0);
@@ -2311,6 +2932,39 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
}
}
}
+ else if (state->type == ERTS_EV_TYPE_NIF) {
+ ErtsResource* r;
+ erts_printf("enif_select ");
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (internal) {
+ erts_printf("internal ");
+ err = 1;
+ }
+
+ if (cio_events == ep_events) {
+ erts_printf("ev=");
+ if (print_events(cio_events) != 0)
+ err = 1;
+ }
+ else {
+ err = 1;
+ erts_printf("cio_ev=");
+ print_events(cio_events);
+ erts_printf(" ep_ev=");
+ print_events(ep_events);
+ }
+#else
+ if (print_events(cio_events) != 0)
+ err = 1;
+#endif
+ erts_printf(" inpid=%T dd_cnt=%b32d", state->driver.nif->in.pid,
+ state->driver.nif->in.ddeselect_cnt);
+ erts_printf(" outpid=%T dd_cnt=%b32d", state->driver.nif->out.pid,
+ state->driver.nif->out.ddeselect_cnt);
+ r = state->driver.stop.resource;
+ erts_printf(" resource=%p(%T:%T)", r, r->type->module, r->type->name);
+ }
#if ERTS_CIO_HAVE_DRV_EVENT
else if (state->type == ERTS_EV_TYPE_DRV_EV) {
Eterm id;
@@ -2367,11 +3021,12 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
erts_printf("control_type=%d ", (int)state->type);
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if (cio_events == ep_events) {
- erts_printf("ev=0x%b32x", (Uint32) cio_events);
+ erts_printf("ev=");
+ print_events(cio_events);
}
else {
- erts_printf("cio_ev=0x%b32x", (Uint32) cio_events);
- erts_printf(" ep_ev=0x%b32x", (Uint32) ep_events);
+ erts_printf("cio_ev="); print_events(cio_events);
+ erts_printf(" ep_ev="); print_events(ep_events);
}
#else
erts_printf("ev=0x%b32x", (Uint32) cio_events);
@@ -2400,7 +3055,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip)
#if ERTS_CIO_HAVE_DRV_EVENT
null_des.driver.event = NULL;
#endif
- null_des.driver.drv_ptr = NULL;
+ null_des.driver.stop.drv_ptr = NULL;
null_des.events = 0;
null_des.remove_cnt = 0;
null_des.type = ERTS_EV_TYPE_NONE;
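For reference, a minimal NIF-side sketch of how the enif_select() interface implemented above might be used. This is an illustration only, not part of the patch: MySockRsrc, my_sock_type and the fd member are hypothetical, and only enif_select() as declared here plus standard erl_nif calls (enif_get_resource, enif_self, enif_make_ref) are assumed.

/* Hypothetical NIF: ask the VM to send {select, Resource, Ref, ready_input}
 * to the calling process when the wrapped fd becomes readable. */
static ERL_NIF_TERM select_read_nif(ErlNifEnv* env, int argc,
                                    const ERL_NIF_TERM argv[])
{
    MySockRsrc* s;                 /* assumed resource struct with an 'int fd' */
    ErlNifPid self;
    ERL_NIF_TERM ref = enif_make_ref(env);
    int rv;

    if (argc != 1 || !enif_get_resource(env, argv[0], my_sock_type, (void**)&s))
        return enif_make_badarg(env);

    enif_self(env, &self);
    rv = enif_select(env, (ErlNifEvent)s->fd, ERL_NIF_SELECT_READ,
                     s, &self, ref);
    if (rv < 0)                    /* INT_MIN | ERL_NIF_SELECT_FAILED etc. */
        return enif_make_atom(env, "error");
    return enif_make_tuple2(env, enif_make_atom(env, "ok"), ref);
}

A later call with ERL_NIF_SELECT_STOP requests the resource's stop callback once the fd has left the pollset; the return value says whether it ran directly (ERL_NIF_SELECT_STOP_CALLED) or was deferred (ERL_NIF_SELECT_STOP_SCHEDULED).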
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 14f1ea3f43..f02d6c1f62 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -34,6 +34,8 @@
int driver_select_kp(ErlDrvPort, ErlDrvEvent, int, int);
int driver_select_nkp(ErlDrvPort, ErlDrvEvent, int, int);
+int enif_select_kp(ErlNifEnv*, ErlNifEvent, enum ErlNifSelectFlags, void*, const ErlNifPid*, Eterm);
+int enif_select_nkp(ErlNifEnv*, ErlNifEvent, enum ErlNifSelectFlags, void*, const ErlNifPid*, Eterm);
int driver_event_kp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
int driver_event_nkp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
Uint erts_check_io_size_kp(void);
@@ -136,4 +138,20 @@ typedef struct {
ErtsIoTask iniotask;
ErtsIoTask outiotask;
} ErtsDrvSelectDataState;
+
+struct erts_nif_select_event {
+ Eterm pid;
+ Eterm immed;
+ Uint32 refn[ERTS_REF_NUMBERS];
+ Sint32 ddeselect_cnt; /* 0: No delayed deselect in progress
+ * 1: Do deselect before next poll
+ * >1: Countdown of ignored events
+ */
+};
+
+typedef struct {
+ struct erts_nif_select_event in;
+ struct erts_nif_select_event out;
+} ErtsNifSelectDataState;
+
#endif /* #ifndef ERL_CHECK_IO_INTERNAL__ */
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
index a5714f8325..bb930ff03b 100644
--- a/erts/emulator/sys/common/erl_mmap.c
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -21,6 +21,7 @@
# include "config.h"
#endif
+#define ERTS_WANT_MEM_MAPPERS
#include "sys.h"
#include "erl_process.h"
#include "erl_smp.h"
@@ -358,12 +359,11 @@ char* erts_literals_start;
UWord erts_literals_size;
#endif
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
ErtsMemMapper erts_exec_mmapper;
#endif
-
#define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \
do { \
mm->size.supercarrier.used.total += (SZ); \
@@ -1878,7 +1878,7 @@ erts_mremap(ErtsMemMapper* mm,
return NULL;
}
-#if ERTS_HAVE_OS_MREMAP || ERTS_HAVE_GENUINE_OS_MMAP
+#if defined(ERTS_HAVE_OS_MREMAP) || defined(ERTS_HAVE_GENUINE_OS_MMAP)
superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags);
if (superaligned) {
@@ -1898,7 +1898,7 @@ erts_mremap(ErtsMemMapper* mm,
}
}
-#if ERTS_HAVE_GENUINE_OS_MMAP
+#ifdef ERTS_HAVE_GENUINE_OS_MMAP
if (asize < old_size
&& (!superaligned
|| ERTS_IS_SUPERALIGNED(ptr))) {
@@ -1913,7 +1913,7 @@ erts_mremap(ErtsMemMapper* mm,
return ptr;
}
#endif
-#if ERTS_HAVE_OS_MREMAP
+#ifdef ERTS_HAVE_OS_MREMAP
if (superaligned)
return remap_move(mm, flags, new_ptr, old_size, sizep);
else {
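A side note on the preprocessor changes in this file: the tests switch from '#if MACRO' to '#ifdef MACRO' / 'defined(MACRO)', presumably so that definedness is tested directly instead of evaluating macros that may be undefined (silently 0) or defined without a value (a preprocessor error). A minimal illustration with a made-up FEATURE_X macro:

/* Illustration only; FEATURE_X is hypothetical. */
#define FEATURE_X                  /* defined, but expands to nothing */

#ifdef FEATURE_X                   /* true: only definedness is checked */
int have_feature_x = 1;
#endif

#if defined(FEATURE_X)             /* same test, composes with || and && */
int have_feature_x_too = 1;
#endif

/* '#if FEATURE_X' would fail here: the expression is empty after expansion,
 * and with FEATURE_X undefined it would silently evaluate to 0. */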
diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h
index 623b5188ac..2a07d93c8c 100644
--- a/erts/emulator/sys/common/erl_mmap.h
+++ b/erts/emulator/sys/common/erl_mmap.h
@@ -158,12 +158,23 @@ Eterm erts_mmap_info_options(ErtsMemMapper*,
# include "erl_alloc_types.h"
extern ErtsMemMapper erts_dflt_mmapper;
-# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+
+# if defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+
+# if defined(ARCH_64)
extern ErtsMemMapper erts_literal_mmapper;
# endif
-# ifdef ERTS_ALC_A_EXEC
+
+# if defined(ERTS_ALC_A_EXEC) && defined(__x86_64__)
+ /*
+ * On x86_64, exec_alloc employs its own super carrier 'erts_exec_mmapper'
+ * to ensure low memory for HiPE AMD64 small code model.
+ */
+# define ERTS_HAVE_EXEC_MMAPPER
extern ErtsMemMapper erts_exec_mmapper;
# endif
+
+# endif /* ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
#endif /* ERTS_WANT_MEM_MAPPERS */
/*#define HARD_DEBUG_MSEG*/
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 882c93a83c..1e05fd3490 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -378,7 +378,7 @@ static ERTS_INLINE int cache_bless_segment(ErtsMsegAllctr_t *ma, void *seg, UWor
ASSERT(!MSEG_FLG_IS_2POW(flags) || (MSEG_FLG_IS_2POW(flags) && MAP_IS_ALIGNED(seg) && IS_2POW(size)));
- /* The idea is that sbc caching is prefered over mbc caching.
+ /* The idea is that sbc caching is preferred over mbc caching.
* Blocks are normally allocated in mb carriers and thus cached there.
* Large blocks has no such cache and it is up to mseg to cache them to speed things up.
*/
@@ -1414,7 +1414,7 @@ erts_mseg_init(ErtsMsegInit_t *init)
erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
/* Initialize erts_exec_mmapper *FIRST*, to increase probability
* of getting low memory for HiPE AMD64's small code model.
*/
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index b8a28bcc18..5e7ae8953a 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -51,7 +51,6 @@
#ifndef WANT_NONBLOCKING
# define WANT_NONBLOCKING
#endif
-#define ERTS_WANT_GOT_SIGUSR1
#include "erl_poll.h"
#if ERTS_POLL_USE_KQUEUE
diff --git a/erts/emulator/sys/unix/erl_child_setup.h b/erts/emulator/sys/unix/erl_child_setup.h
index a28b136bfc..b61e557ddf 100644
--- a/erts/emulator/sys/unix/erl_child_setup.h
+++ b/erts/emulator/sys/unix/erl_child_setup.h
@@ -17,7 +17,7 @@
*
* %CopyrightEnd%
*
- * This file defines the interface inbetween erts and child_setup.
+ * This file defines the interface between erts and child_setup.
*/
#ifndef _ERL_UNIX_FORKER_H
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index b64b0d87f6..bd554238b7 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -46,10 +46,10 @@
#include <signal.h>
#include <setjmp.h>
-#if HAVE_SYS_SOCKETIO_H
+#ifdef HAVE_SYS_SOCKETIO_H
# include <sys/socketio.h>
#endif
-#if HAVE_SYS_SOCKIO_H
+#ifdef HAVE_SYS_SOCKIO_H
# include <sys/sockio.h>
#endif
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index e135dbff99..0d1ed17449 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -50,7 +50,6 @@
#endif
#define ERTS_WANT_BREAK_HANDLING
-#define ERTS_WANT_GOT_SIGUSR1
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
#include "erl_thr_progress.h"
@@ -96,18 +95,10 @@ static int debug_log = 0;
#endif
#ifdef ERTS_SMP
-erts_smp_atomic32_t erts_got_sigusr1;
-#define ERTS_SET_GOT_SIGUSR1 \
- erts_smp_atomic32_set_mb(&erts_got_sigusr1, 1)
-#define ERTS_UNSET_GOT_SIGUSR1 \
- erts_smp_atomic32_set_mb(&erts_got_sigusr1, 0)
static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
#else
-volatile int erts_got_sigusr1;
-#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
-#define ERTS_UNSET_GOT_SIGUSR1 (erts_got_sigusr1 = 0)
static volatile int have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
(have_prepared_crash_dump++)
@@ -116,7 +107,7 @@ static volatile int have_prepared_crash_dump;
erts_smp_atomic_t sys_misc_mem_sz;
#if defined(ERTS_SMP)
-static void smp_sig_notify(char c);
+static void smp_sig_notify(int signum);
static int sig_notify_fds[2] = {-1, -1};
#ifdef ERTS_SYS_SUSPEND_SIGNAL
@@ -152,7 +143,7 @@ volatile int erts_break_requested = 0;
static struct termios initial_tty_mode;
static int replace_intr = 0;
/* assume yes initially, ttsl_init will clear it */
-int using_oldshell = 1;
+int using_oldshell = 1;
#ifdef ERTS_ENABLE_KERNEL_POLL
@@ -160,6 +151,7 @@ int erts_use_kernel_poll = 0;
struct {
int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
+ int (*enif_select)(ErlNifEnv*, ErlNifEvent, enum ErlNifSelectFlags, void*, const ErlNifPid*, Eterm);
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
void (*check_io_as_interrupt)(void);
void (*check_io_interrupt)(int);
@@ -183,6 +175,13 @@ driver_event(ErlDrvPort port, ErlDrvEvent event, ErlDrvEventData event_data)
return (*io_func.event)(port, event, event_data);
}
+int enif_select(ErlNifEnv* env, ErlNifEvent event,
+ enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, Eterm ref)
+{
+ return (*io_func.enif_select)(env, event, flags, obj, pid, ref);
+}
+
+
Eterm erts_check_io_info(void *p)
{
return (*io_func.info)(p);
@@ -200,6 +199,7 @@ init_check_io(void)
{
if (erts_use_kernel_poll) {
io_func.select = driver_select_kp;
+ io_func.enif_select = enif_select_kp;
io_func.event = driver_event_kp;
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
@@ -215,6 +215,7 @@ init_check_io(void)
}
else {
io_func.select = driver_select_nkp;
+ io_func.enif_select = enif_select_nkp;
io_func.event = driver_event_nkp;
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
@@ -403,13 +404,6 @@ erts_sys_pre_init(void)
/* After creation in parent */
eid.thread_create_parent_func = thr_create_cleanup,
-#ifdef ERTS_THR_HAVE_SIG_FUNCS
- sigemptyset(&thr_create_sigmask);
- sigaddset(&thr_create_sigmask, SIGINT); /* block interrupt */
- sigaddset(&thr_create_sigmask, SIGTERM); /* block terminate signal */
- sigaddset(&thr_create_sigmask, SIGUSR1); /* block user defined signal */
-#endif
-
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -428,11 +422,9 @@ erts_sys_pre_init(void)
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_break_requested, 0);
- erts_smp_atomic32_init_nob(&erts_got_sigusr1, 0);
erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
#else
erts_break_requested = 0;
- erts_got_sigusr1 = 0;
have_prepared_crash_dump = 0;
#endif
@@ -624,6 +616,28 @@ int erts_sys_prepare_crash_dump(int secs)
return prepare_crash_dump(secs);
}
+static void signal_notify_requested(Eterm type) {
+ Process* p = NULL;
+ Eterm msg, *hp;
+ ErtsProcLocks locks = 0;
+ ErlOffHeap *ohp;
+
+ Eterm id = erts_whereis_name_to_id(NULL, am_erl_signal_server);
+
+ if ((p = (erts_pid2proc_opt(NULL, 0, id, 0, ERTS_P2P_FLG_INC_REFC))) != NULL) {
+ ErtsMessage *msgp = erts_alloc_message_heap(p, &locks, 3, &hp, &ohp);
+
+ /* erl_signal_server ! {notify, sighup} */
+ msg = TUPLE2(hp, am_notify, type);
+ erts_queue_message(p, locks, msgp, msg, am_system);
+
+ if (locks)
+ erts_smp_proc_unlock(p, locks);
+ erts_proc_dec_refc(p);
+ }
+}
+
+
static ERTS_INLINE void
break_requested(void)
{
@@ -641,76 +655,15 @@ break_requested(void)
ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
}
-/* set up signal handlers for break and quit */
-#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE request_break(void)
-#else
static RETSIGTYPE request_break(int signum)
-#endif
{
#ifdef ERTS_SMP
- smp_sig_notify('I');
+ smp_sig_notify(signum);
#else
break_requested();
#endif
}
-static void stop_requested(void) {
- Process* p = NULL;
- Eterm msg, *hp;
- ErtsProcLocks locks = 0;
- ErlOffHeap *ohp;
- Eterm id = erts_whereis_name_to_id(NULL, am_init);
-
- if ((p = (erts_pid2proc_opt(NULL, 0, id, 0, ERTS_P2P_FLG_INC_REFC))) != NULL) {
- ErtsMessage *msgp = erts_alloc_message_heap(p, &locks, 3, &hp, &ohp);
-
- /* init ! {stop,stop} */
- msg = TUPLE2(hp, am_stop, am_stop);
- erts_queue_message(p, locks, msgp, msg, am_system);
-
- if (locks)
- erts_smp_proc_unlock(p, locks);
- erts_proc_dec_refc(p);
- }
-}
-
-#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE request_stop(void)
-#else
-static RETSIGTYPE request_stop(int signum)
-#endif
-{
-#ifdef ERTS_SMP
- smp_sig_notify('S');
-#else
- stop_requested();
-#endif
-}
-
-
-static ERTS_INLINE void
-sigusr1_exit(void)
-{
- char env[21]; /* enough to hold any 64-bit integer */
- size_t envsz;
- int i, secs = -1;
-
- /* We do this at interrupt level, since the main reason for
- * wanting to generate a crash dump in this way is that the emulator
- * is hung somewhere, so it won't be able to poll any flag we set here.
- */
- ERTS_SET_GOT_SIGUSR1;
-
- envsz = sizeof(env);
- if ((i = erts_sys_getenv_raw("ERL_CRASH_DUMP_SECONDS", env, &envsz)) >= 0) {
- secs = i != 0 ? 0 : atoi(env);
- }
-
- prepare_crash_dump(secs);
- erts_exit(ERTS_DUMP_EXIT, "Received SIGUSR1\n");
-}
-
#ifdef ETHR_UNUSABLE_SIGUSRX
#warning "Unusable SIGUSR1 & SIGUSR2. Disabling use of these signals"
@@ -731,19 +684,6 @@ sys_thr_resume(erts_tid_t tid) {
}
#endif
-#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE user_signal1(void)
-#else
-static RETSIGTYPE user_signal1(int signum)
-#endif
-{
-#ifdef ERTS_SMP
- smp_sig_notify('1');
-#else
- sigusr1_exit();
-#endif
-}
-
#ifdef ERTS_SYS_SUSPEND_SIGNAL
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE suspend_signal(void)
@@ -763,29 +703,101 @@ static RETSIGTYPE suspend_signal(int signum)
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
-static void
-quit_requested(void)
-{
- erts_exit(ERTS_INTR_EXIT, "");
+/*
+ Signal Action Comment
+ ─────────────────────────────────────────────────────────────
+ SIGHUP Term Hangup detected on controlling terminal or death of controlling process
+ !SIGINT Term Interrupt from keyboard
+ SIGQUIT Core Quit from keyboard
+ !SIGILL Core Illegal Instruction
+ SIGABRT Core Abort signal from abort(3)
+ !SIGFPE Core Floating point exception
+ !SIGKILL Term Kill signal
+ !SIGSEGV Core Invalid memory reference
+ !SIGPIPE Term Broken pipe: write to pipe with no readers
+ SIGALRM Term Timer signal from alarm(2)
+ SIGTERM Term Termination signal
+ SIGUSR1 Term User-defined signal 1
+ SIGUSR2 Term User-defined signal 2
+ !SIGCHLD Ign Child stopped or terminated
+ !SIGCONT Cont Continue if stopped
+ SIGSTOP Stop Stop process
+ SIGTSTP Stop Stop typed at terminal
+ !SIGTTIN Stop Terminal input for background process
+ !SIGTTOU Stop Terminal output for background process
+*/
+
+
+static ERTS_INLINE int
+signalterm_to_signum(Eterm signal)
+{
+ switch (signal) {
+ case am_sighup: return SIGHUP;
+ /* case am_sigint: return SIGINT; */
+ case am_sigquit: return SIGQUIT;
+ /* case am_sigill: return SIGILL; */
+ case am_sigabrt: return SIGABRT;
+ /* case am_sigsegv: return SIGSEGV; */
+ case am_sigalrm: return SIGALRM;
+ case am_sigterm: return SIGTERM;
+ case am_sigusr1: return SIGUSR1;
+ case am_sigusr2: return SIGUSR2;
+ case am_sigchld: return SIGCHLD;
+ case am_sigstop: return SIGSTOP;
+ case am_sigtstp: return SIGTSTP;
+ default: return 0;
+ }
}
-#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE do_quit(void)
-#else
-static RETSIGTYPE do_quit(int signum)
-#endif
+static ERTS_INLINE Eterm
+signum_to_signalterm(int signum)
+{
+ switch (signum) {
+ case SIGHUP: return am_sighup;
+ /* case SIGINT: return am_sigint; */ /* ^c */
+ case SIGQUIT: return am_sigquit; /* ^\ */
+ /* case SIGILL: return am_sigill; */
+ case SIGABRT: return am_sigabrt;
+ /* case SIGSEGV: return am_sigsegv; */
+ case SIGALRM: return am_sigalrm;
+ case SIGTERM: return am_sigterm;
+ case SIGUSR1: return am_sigusr1;
+ case SIGUSR2: return am_sigusr2;
+ case SIGCHLD: return am_sigchld;
+ case SIGSTOP: return am_sigstop;
+ case SIGTSTP: return am_sigtstp; /* ^z */
+ default: return am_error;
+ }
+}
+
+static RETSIGTYPE generic_signal_handler(int signum)
{
#ifdef ERTS_SMP
- smp_sig_notify('Q');
+ smp_sig_notify(signum);
#else
- quit_requested();
+ Eterm signal = signum_to_signalterm(signum);
+ signal_notify_requested(signal);
#endif
}
+int erts_set_signal(Eterm signal, Eterm type) {
+ int signum;
+ if ((signum = signalterm_to_signum(signal)) > 0) {
+ if (type == am_ignore) {
+ sys_signal(signum, SIG_IGN);
+ } else if (type == am_default) {
+ sys_signal(signum, SIG_DFL);
+ } else {
+ sys_signal(signum, generic_signal_handler);
+ }
+ return 1;
+ }
+ return 0;
+}
+
/* Disable break */
void erts_set_ignore_break(void) {
sys_signal(SIGINT, SIG_IGN);
- sys_signal(SIGTERM, SIG_IGN);
sys_signal(SIGQUIT, SIG_IGN);
sys_signal(SIGTSTP, SIG_IGN);
}
@@ -797,11 +809,11 @@ void erts_replace_intr(void) {
if (isatty(0)) {
tcgetattr(0, &mode);
-
+
/* here's an example of how to replace ctrl-c with ctrl-u */
/* mode.c_cc[VKILL] = 0;
mode.c_cc[VINTR] = CKILL; */
-
+
mode.c_cc[VINTR] = 0; /* disable ctrl-c */
tcsetattr(0, TCSANOW, &mode);
replace_intr = 1;
@@ -810,12 +822,13 @@ void erts_replace_intr(void) {
void init_break_handler(void)
{
- sys_signal(SIGINT, request_break);
- sys_signal(SIGTERM, request_stop);
+ sys_signal(SIGINT, request_break);
+ sys_signal(SIGTERM, generic_signal_handler);
+ sys_signal(SIGHUP, generic_signal_handler);
#ifndef ETHR_UNUSABLE_SIGUSRX
- sys_signal(SIGUSR1, user_signal1);
+ sys_signal(SIGUSR1, generic_signal_handler);
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
- sys_signal(SIGQUIT, do_quit);
+ sys_signal(SIGQUIT, generic_signal_handler);
}
void sys_init_suspend_handler(void)
@@ -855,10 +868,7 @@ get_number(char **str_ptr)
}
}
-void
-os_flavor(char* namebuf, /* Where to return the name. */
- unsigned size) /* Size of name buffer. */
-{
+void os_flavor(char* namebuf, unsigned size) {
struct utsname uts; /* Information about the system. */
char* s;
@@ -871,22 +881,16 @@ os_flavor(char* namebuf, /* Where to return the name. */
strcpy(namebuf, uts.sysname);
}
-void
-os_version(pMajor, pMinor, pBuild)
-int* pMajor; /* Pointer to major version. */
-int* pMinor; /* Pointer to minor version. */
-int* pBuild; /* Pointer to build number. */
-{
+void os_version(int *pMajor, int *pMinor, int *pBuild) {
struct utsname uts; /* Information about the system. */
char* release; /* Pointer to the release string:
- * X.Y or X.Y.Z.
- */
+ * X.Y or X.Y.Z. */
(void) uname(&uts);
release = uts.release;
- *pMajor = get_number(&release);
- *pMinor = get_number(&release);
- *pBuild = get_number(&release);
+ *pMajor = get_number(&release); /* Pointer to major version. */
+ *pMinor = get_number(&release); /* Pointer to minor version. */
+ *pBuild = get_number(&release); /* Pointer to build number. */
}
void init_getenv_state(GETENV_STATE *state)
@@ -921,7 +925,7 @@ void erts_do_break_handling(void)
{
struct termios temp_mode;
int saved = 0;
-
+
/*
* Most functions that do_break() calls are intentionally not thread safe;
* therefore, make sure that all threads but this one are blocked before
@@ -939,14 +943,14 @@ void erts_do_break_handling(void)
tcsetattr(0,TCSANOW,&initial_tty_mode);
saved = 1;
}
-
+
/* call the break handling function, reset the flag */
do_break();
ERTS_UNSET_BREAK_REQUESTED;
fflush(stdout);
-
+
/* after break we go back to saved settings */
if (using_oldshell && !replace_intr) {
SET_NONBLOCKING(1);
@@ -1054,37 +1058,12 @@ erts_sys_unsetenv(char *key)
return res;
}
-void
-sys_init_io(void)
-{
-}
-
-#if (0) /* unused? */
-static int write_fill(fd, buf, len)
-int fd, len;
-char *buf;
-{
- int i, done = 0;
-
- do {
- if ((i = write(fd, buf+done, len-done)) < 0) {
- if (errno != EINTR)
- return (i);
- i = 0;
- }
- done += i;
- } while (done < len);
- return (len);
-}
-#endif
+void sys_init_io(void) { }
+void erts_sys_alloc_init(void) { }
extern const char pre_loaded_code[];
extern Preload pre_loaded[];
-void erts_sys_alloc_init(void)
-{
-}
-
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
void *erts_sys_aligned_alloc(UWord alignment, UWord size)
{
@@ -1185,9 +1164,7 @@ void sys_preload_end(Preload* p)
Here we assume that all schedulers are stopped so that erl_poll
does not interfere with the select below.
*/
-int sys_get_key(fd)
-int fd;
-{
+int sys_get_key(int fd) {
int c, ret;
unsigned char rbuf[64];
fd_set fds;
@@ -1206,15 +1183,14 @@ int fd;
if (c <= 0)
return c;
}
-
- return rbuf[0];
+ return rbuf[0];
}
extern int erts_initialized;
void
erl_assert_error(const char* expr, const char* func, const char* file, int line)
-{
+{
fflush(stdout);
fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n",
file, line, func, expr);
@@ -1239,7 +1215,7 @@ erl_debug(char* fmt, ...)
{
char sbuf[1024]; /* Temporary buffer. */
va_list va;
-
+
if (debug_log) {
va_start(va, fmt);
vsprintf(sbuf, fmt, va);
@@ -1268,14 +1244,14 @@ erl_sys_schedule(int runnable)
static erts_smp_tid_t sig_dispatcher_tid;
static void
-smp_sig_notify(char c)
+smp_sig_notify(int signum)
{
int res;
do {
/* write() is async-signal safe (according to posix) */
- res = write(sig_notify_fds[1], &c, 1);
+ res = write(sig_notify_fds[1], &signum, sizeof(int));
} while (res < 0 && errno == EINTR);
- if (res != 1) {
+ if (res != sizeof(int)) {
char msg[] =
"smp_sig_notify(): Failed to notify signal-dispatcher thread "
"about received signal";
@@ -1291,60 +1267,55 @@ signal_dispatcher_thread_func(void *unused)
erts_lc_set_thread_name("signal_dispatcher");
#endif
while (1) {
- char buf[32];
- int res, i;
+ union {int signum; char buf[4];} sb;
+ Eterm signal;
+ int res, i = 0;
/* Block on read() waiting for a signal notification to arrive... */
- res = read(sig_notify_fds[0], (void *) &buf[0], 32);
+
+ do {
+ res = read(sig_notify_fds[0], (void *) &sb.buf[i], sizeof(int) - i);
+ i += res;
+ } while ((i != sizeof(int) && res >= 0) || (res < 0 && errno == EINTR));
+
if (res < 0) {
- if (errno == EINTR)
- continue;
erts_exit(ERTS_ABORT_EXIT,
"signal-dispatcher thread got unexpected error: %s (%d)\n",
erl_errno_id(errno),
errno);
}
- for (i = 0; i < res; i++) {
- /*
- * NOTE 1: The signal dispatcher thread should not do work
- * that takes a substantial amount of time (except
- * perhaps in test and debug builds). It needs to
- * be responsive, i.e, it should only dispatch work
- * to other threads.
- *
- * NOTE 2: The signal dispatcher thread is not a blockable
- * thread (i.e., not a thread managed by the
- * erl_thr_progress module). This is intentional.
- * We want to be able to interrupt writing of a crash
- * dump by hitting C-c twice. Since it isn't a
- * blockable thread it is important that it doesn't
- * change the state of any data that a blocking thread
- * expects to have exclusive access to (unless the
- * signal dispatcher itself explicitly is blocking all
- * blockable threads).
- */
- switch (buf[i]) {
- case 0: /* Emulator initialized */
+ /*
+ * NOTE 1: The signal dispatcher thread should not do work
+ * that takes a substantial amount of time (except
+ * perhaps in test and debug builds). It needs to
+ * be responsive, i.e., it should only dispatch work
+ * to other threads.
+ *
+ * NOTE 2: The signal dispatcher thread is not a blockable
+ * thread (i.e., not a thread managed by the
+ * erl_thr_progress module). This is intentional.
+ * We want to be able to interrupt writing of a crash
+ * dump by hitting C-c twice. Since it isn't a
+ * blockable thread it is important that it doesn't
+ * change the state of any data that a blocking thread
+ * expects to have exclusive access to (unless the
+ * signal dispatcher itself explicitly is blocking all
+ * blockable threads).
+ */
+ switch (sb.signum) {
+ case 0: continue;
+ case SIGINT:
+ break_requested();
break;
- case 'S': /* SIGTERM */
- stop_requested();
- break;
- case 'I': /* SIGINT */
- break_requested();
- break;
- case 'Q': /* SIGQUIT */
- quit_requested();
- break;
- case '1': /* SIGUSR1 */
- sigusr1_exit();
- break;
- default:
- erts_exit(ERTS_ABORT_EXIT,
- "signal-dispatcher thread received unknown "
- "signal notification: '%c'\n",
- buf[i]);
- }
- }
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ default:
+ if ((signal = signum_to_signalterm(sb.signum)) == am_error) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "signal-dispatcher thread received unknown "
+ "signal notification: '%d'\n",
+ sb.signum);
+ }
+ signal_notify_requested(signal);
+ }
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
return NULL;
}
@@ -1387,9 +1358,9 @@ init_smp_sig_suspend(void) {
int erts_darwin_main_thread_pipe[2];
int erts_darwin_main_thread_result_pipe[2];
-static void initialize_darwin_main_thread_pipes(void)
+static void initialize_darwin_main_thread_pipes(void)
{
- if (pipe(erts_darwin_main_thread_pipe) < 0 ||
+ if (pipe(erts_darwin_main_thread_pipe) < 0 ||
pipe(erts_darwin_main_thread_result_pipe) < 0) {
erts_exit(ERTS_ERROR_EXIT,"Fatal error initializing Darwin main thread stealing");
}
@@ -1555,5 +1526,4 @@ erl_sys_args(int* argc, char** argv)
argv[j++] = argv[i];
}
*argc = j;
-
}
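The rework above replaces the old one-character notification codes ('I', 'S', 'Q', '1') with the raw signal number written to the notification pipe, and the dispatcher loop now reads a full int per notification. A standalone sketch of that self-pipe pattern, independent of the VM (pipe_fds and the registration of forward_signal as a handler are illustrative):

#include <signal.h>
#include <unistd.h>
#include <errno.h>

static int pipe_fds[2];            /* created with pipe() during startup */

/* async-signal-safe side: forward the signal number as a raw int */
static void forward_signal(int signum)
{
    ssize_t res;
    do {
        res = write(pipe_fds[1], &signum, sizeof(int));
    } while (res < 0 && errno == EINTR);
}

/* dispatcher side: read exactly sizeof(int) bytes, tolerating short reads */
static int read_signal(void)
{
    int signum;
    char *p = (char *) &signum;
    size_t got = 0;
    while (got < sizeof(int)) {
        ssize_t res = read(pipe_fds[0], p + got, sizeof(int) - got);
        if (res < 0) {
            if (errno == EINTR)
                continue;
            return -1;             /* unexpected error */
        }
        got += (size_t) res;
    }
    return signum;
}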
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index f23c7ab03d..93013a412b 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -842,9 +842,9 @@ event_happened:
ASSERT(WAIT_OBJECT_0 < i && i < WAIT_OBJECT_0+w->active_events);
notify_io_ready(ps);
- /*
- * The main thread wont start working on our arrays untill we're
- * stopped, so we can work in peace although the main thread runs
+ /*
+ * The main thread won't start working on our arrays until we're
+ * stopped, so we can work in peace although the main thread runs
*/
ASSERT(i >= WAIT_OBJECT_0+1);
i -= WAIT_OBJECT_0;
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index f3881e0736..40e5595533 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -294,6 +294,10 @@ int erts_sys_prepare_crash_dump(int secs)
return 0;
}
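+/* Stub: customizing OS signal handlers from Erlang is not implemented on Windows. */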
+int erts_set_signal(Eterm signal, Eterm type) {
+ return 0;
+}
+
static void
init_console(void)
{
@@ -492,7 +496,7 @@ struct driver_data {
int outBufSize; /* Size of output buffer. */
byte *outbuf; /* Buffer to use for overlapped write. */
ErlDrvPort port_num; /* The port handle. */
- int packet_bytes; /* 0: continous stream, 1, 2, or 4: the number
+ int packet_bytes; /* 0: continuous stream, 1, 2, or 4: the number
* of bytes in the packet header.
*/
HANDLE port_pid; /* PID of the port process. */
@@ -1423,7 +1427,7 @@ int parse_command(wchar_t* cmd){
*
* If new == NULL we just calculate the length.
*
- * The reason for having to quote all of the is becasue CreateProcessW removes
+ * The reason for having to quote all of them is because CreateProcessW removes
* one level of escaping since it takes a single long command line rather
* than the argument chunks that unix uses.
*/
@@ -2482,7 +2486,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
* event object has been signaled, indicating that there is
* something to read on the corresponding file handle.
*
- * If the port is working in the continous stream mode (packet_bytes == 0),
+ * If the port is working in the continuous stream mode (packet_bytes == 0),
* whatever data read will be sent straight to Erlang.
*
* Results:
@@ -2523,7 +2527,7 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
#endif
if (error == NO_ERROR) {
- if (pb == 0) { /* Continous stream. */
+ if (pb == 0) { /* Continuous stream. */
#ifdef DEBUG
DEBUGF(("ready_input: %d: ", bytesRead));
erl_bin_write(dp->inbuf, 16, bytesRead);
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 453f819d1b..5478932b13 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -53,6 +53,7 @@ MODULES= \
crypto_SUITE \
ddll_SUITE \
decode_packet_SUITE \
+ dirty_bif_SUITE \
dirty_nif_SUITE \
distribution_SUITE \
driver_SUITE \
@@ -85,6 +86,7 @@ MODULES= \
num_bif_SUITE \
message_queue_data_SUITE \
op_SUITE \
+ os_signal_SUITE \
port_SUITE \
port_bif_SUITE \
prim_eval_SUITE \
diff --git a/erts/emulator/test/alloc_SUITE_data/testcase_driver.h b/erts/emulator/test/alloc_SUITE_data/testcase_driver.h
index f0ca91bd06..2b742dd7e3 100644
--- a/erts/emulator/test/alloc_SUITE_data/testcase_driver.h
+++ b/erts/emulator/test/alloc_SUITE_data/testcase_driver.h
@@ -20,7 +20,7 @@
#ifndef TESTCASE_DRIVER_H__
#define TESTCASE_DRIVER_H__
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdlib.h>
typedef struct {
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index f70fb0e501..339c827602 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -26,7 +26,7 @@
-export([all/0, suite/0,
display/1, display_huge/0,
erl_bif_types/1,guard_bifs_in_erl_bif_types/1,
- shadow_comments/1,
+ shadow_comments/1,list_to_utf8_atom/1,
specs/1,improper_bif_stubs/1,auto_imports/1,
t_list_to_existing_atom/1,os_env/1,otp_7526/1,
binary_to_atom/1,binary_to_existing_atom/1,
@@ -43,7 +43,7 @@ all() ->
[erl_bif_types, guard_bifs_in_erl_bif_types, shadow_comments,
specs, improper_bif_stubs, auto_imports,
t_list_to_existing_atom, os_env, otp_7526,
- display,
+ display, list_to_utf8_atom,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
erl_crash_dump_bytes, min_max, erlang_halt, is_builtin,
error_stacktrace, error_stacktrace_during_call_trace].
@@ -339,6 +339,38 @@ check_stub({_,F,A}, B) ->
ct:fail(invalid_body)
end.
+list_to_utf8_atom(Config) when is_list(Config) ->
+ 'hello' = atom_roundtrip("hello"),
+ 'こんにちは' = atom_roundtrip("こんにちは"),
+
+ %% Test all edge cases.
+ _ = atom_roundtrip([16#80]),
+ _ = atom_roundtrip([16#7F]),
+ _ = atom_roundtrip([16#FF]),
+ _ = atom_roundtrip([16#100]),
+ _ = atom_roundtrip([16#7FF]),
+ _ = atom_roundtrip([16#800]),
+ _ = atom_roundtrip([16#D7FF]),
+ atom_badarg([16#D800]),
+ atom_badarg([16#DFFF]),
+ _ = atom_roundtrip([16#E000]),
+ _ = atom_roundtrip([16#FFFF]),
+    _ = atom_roundtrip([16#10000]),
+ _ = atom_roundtrip([16#10FFFF]),
+ atom_badarg([16#110000]),
+ ok.
+
+atom_roundtrip(String) ->
+ Atom = list_to_atom(String),
+ Atom = list_to_existing_atom(String),
+ String = atom_to_list(Atom),
+ Atom.
+
+atom_badarg(String) ->
+ {'EXIT',{badarg,_}} = (catch list_to_atom(String)),
+ {'EXIT',{badarg,_}} = (catch list_to_existing_atom(String)),
+ ok.
+
t_list_to_existing_atom(Config) when is_list(Config) ->
all = list_to_existing_atom("all"),
?MODULE = list_to_existing_atom(?MODULE_STRING),
@@ -429,6 +461,8 @@ binary_to_atom(Config) when is_list(Config) ->
Long = lists:seq(0, 254),
LongAtom = list_to_atom(Long),
LongBin = list_to_binary(Long),
+ UnicodeLongAtom = list_to_atom([$é || _ <- lists:seq(0, 254)]),
+ UnicodeLongBin = << <<"é"/utf8>> || _ <- lists:seq(0, 254)>>,
%% latin1
'' = test_binary_to_atom(<<>>, latin1),
@@ -440,12 +474,17 @@ binary_to_atom(Config) when is_list(Config) ->
'' = test_binary_to_atom(<<>>, utf8),
HalfLongAtom = test_binary_to_atom(HalfLongBin, utf8),
HalfLongAtom = test_binary_to_atom(HalfLongBin, unicode),
+ UnicodeLongAtom = test_binary_to_atom(UnicodeLongBin, utf8),
+ UnicodeLongAtom = test_binary_to_atom(UnicodeLongBin, unicode),
[] = [C || C <- lists:seq(128, 255),
begin
list_to_atom([C]) =/=
test_binary_to_atom(<<C/utf8>>, utf8)
end],
+ <<"こんにちは"/utf8>> =
+ atom_to_binary(test_binary_to_atom(<<"こんにちは"/utf8>>, utf8), utf8),
+
%% badarg failures.
fail_binary_to_atom(atom),
fail_binary_to_atom(42),
@@ -464,10 +503,6 @@ binary_to_atom(Config) when is_list(Config) ->
?BADARG(binary_to_atom(id(<<255>>), utf8)),
?BADARG(binary_to_atom(id(<<255,0>>), utf8)),
?BADARG(binary_to_atom(id(<<16#C0,16#80>>), utf8)), %Overlong 0.
- [?BADARG(binary_to_atom(<<C/utf8>>, utf8)) || C <- lists:seq(256, 16#D7FF)],
- [?BADARG(binary_to_atom(<<C/utf8>>, utf8)) || C <- lists:seq(16#E000, 16#FFFD)],
- [?BADARG(binary_to_atom(<<C/utf8>>, utf8)) || C <- lists:seq(16#10000, 16#8FFFF)],
- [?BADARG(binary_to_atom(<<C/utf8>>, utf8)) || C <- lists:seq(16#90000, 16#10FFFF)],
%% system_limit failures.
?SYS_LIMIT(binary_to_atom(id(<<0:512/unit:8,255>>), utf8)),
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 1c7d278bb0..324730d562 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -19,7 +19,6 @@
%%
-module(binary_SUITE).
--compile({nowarn_deprecated_function, {erlang,hash,2}}).
%% Tests binaries and the BIFs:
%% list_to_binary/1
@@ -392,7 +391,6 @@ test_hash(List) ->
Bin = list_to_binary(List),
Sbin = make_sub_binary(List),
Unaligned = make_unaligned_sub_binary(Sbin),
- test_hash_1(Bin, Sbin, Unaligned, fun erlang:hash/2),
test_hash_1(Bin, Sbin, Unaligned, fun erlang:phash/2),
test_hash_1(Bin, Sbin, Unaligned, fun erlang:phash2/2).
@@ -1010,7 +1008,7 @@ ordering(Config) when is_list(Config) ->
ok.
-%% Test that comparisions between binaries with different alignment work.
+%% Test that comparisons between binaries with different alignment work.
unaligned_order(Config) when is_list(Config) ->
L = lists:seq(0, 7),
[test_unaligned_order(I, J) || I <- L, J <- L],
diff --git a/erts/emulator/test/bs_match_int_SUITE.erl b/erts/emulator/test/bs_match_int_SUITE.erl
index a7bd4b8ac3..32bfa2f1fa 100644
--- a/erts/emulator/test/bs_match_int_SUITE.erl
+++ b/erts/emulator/test/bs_match_int_SUITE.erl
@@ -247,7 +247,7 @@ match_huge_int(Config) when is_list(Config) ->
8 ->
%% An attempt will be made to allocate heap space for
%% the bignum (which will probably fail); only if the
- %% allocation succeds will the matching fail because
+ %% allocation succeeds will the matching fail because
%% the binary is too small.
ok
end,
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index f7ff04430a..e74631e916 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -45,7 +45,7 @@ suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap, {seconds, 30}}].
-all() ->
+all() ->
Common = [errors, on_load],
NotHipe = [process_specs, basic, flags, pam, change_pam,
upgrade,
@@ -233,7 +233,7 @@ basic() ->
trace_func({'_','_','_'}, false),
[b,a] = lists:reverse([a,b]),
- %% Read out the remaing trace messages.
+ %% Read out the remaining trace messages.
?MODULE:expect({trace,Self,call,{lists,seq,[1,10]}}),
?MODULE:expect({trace,Self,call,{erlang,list_to_integer,["777"]}}),
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 3ee14f2d1c..d07166ed98 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -22,10 +22,9 @@
-export([all/0, suite/0, init_per_suite/1, end_per_suite/1,
versions/1,new_binary_types/1, call_purged_fun_code_gone/1,
call_purged_fun_code_reload/1, call_purged_fun_code_there/1,
- t_check_process_code/1,t_check_old_code/1,
- t_check_process_code_ets/1,
- external_fun/1,get_chunk/1,module_md5/1,make_stub/1,
- make_stub_many_funs/1,constant_pools/1,constant_refc_binaries/1,
+ multi_proc_purge/1, t_check_old_code/1,
+ external_fun/1,get_chunk/1,module_md5/1,
+ constant_pools/1,constant_refc_binaries/1,
false_dependency/1,coverage/1,fun_confusion/1,
t_copy_literals/1, t_copy_literals_frags/1]).
@@ -36,9 +35,9 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[versions, new_binary_types, call_purged_fun_code_gone,
- call_purged_fun_code_reload, call_purged_fun_code_there, t_check_process_code,
- t_check_process_code_ets, t_check_old_code, external_fun, get_chunk,
- module_md5, make_stub, make_stub_many_funs,
+ call_purged_fun_code_reload, call_purged_fun_code_there,
+ multi_proc_purge, t_check_old_code, external_fun, get_chunk,
+ module_md5,
constant_pools, constant_refc_binaries, false_dependency,
coverage, fun_confusion, t_copy_literals, t_copy_literals_frags].
@@ -154,218 +153,80 @@ call_purged_fun_code_there(Config) when is_list(Config) ->
ok.
call_purged_fun_test(Priv, Data, Type) ->
- File = filename:join(Data, "my_code_test2"),
- Code = filename:join(Priv, "my_code_test2"),
-
- catch erlang:purge_module(my_code_test2),
- catch erlang:delete_module(my_code_test2),
- catch erlang:purge_module(my_code_test2),
-
- {ok,my_code_test2} = c:c(File, [{outdir,Priv}]),
-
- T = ets:new(my_code_test2_fun_table, []),
- ets:insert(T, {my_fun,my_code_test2:make_fun(4711)}),
- ets:insert(T, {my_fun2,my_code_test2:make_fun2()}),
-
- spawn(fun () ->
- [{my_fun2,F2}] = ets:lookup(T, my_fun2),
- F2(fun () ->
- receive after infinity -> ok end
- end,
- fun () -> ok end),
- exit(completed)
- end),
-
- PurgeType = case Type of
- code_gone ->
- ok = file:delete(Code++".beam"),
- true;
- code_reload ->
- true;
- code_there ->
- false
- end,
-
- true = erlang:delete_module(my_code_test2),
-
- Purge = start_purge(my_code_test2, PurgeType),
-
- {P0, M0} = spawn_monitor(fun () ->
- [{my_fun,F}] = ets:lookup(T, my_fun),
- 4712 = F(1),
- exit(completed)
- end),
-
- wait_until(fun () ->
- {status, suspended}
- == process_info(P0, status)
- end),
-
- ok = continue_purge(Purge),
-
- {P1, M1} = spawn_monitor(fun () ->
- [{my_fun,F}] = ets:lookup(T, my_fun),
- 4713 = F(2),
- exit(completed)
- end),
- {P2, M2} = spawn_monitor(fun () ->
- [{my_fun,F}] = ets:lookup(T, my_fun),
- 4714 = F(3),
- exit(completed)
- end),
-
- wait_until(fun () ->
- {status, suspended}
- == process_info(P1, status)
- end),
- wait_until(fun () ->
- {status, suspended}
- == process_info(P2, status)
- end),
-
- {current_function,
- {erts_code_purger,
- pending_purge_lambda,
- 3}} = process_info(P0, current_function),
- {current_function,
- {erts_code_purger,
- pending_purge_lambda,
- 3}} = process_info(P1, current_function),
- {current_function,
- {erts_code_purger,
- pending_purge_lambda,
- 3}} = process_info(P2, current_function),
-
- case Type of
- code_there ->
- false = complete_purge(Purge);
- _ ->
- {true, true} = complete_purge(Purge)
- end,
-
- case Type of
- code_gone ->
- receive
- {'DOWN', M0, process, P0, Reason0} ->
- {undef, _} = Reason0
- end,
- receive
- {'DOWN', M1, process, P1, Reason1} ->
- {undef, _} = Reason1
- end,
- receive
- {'DOWN', M2, process, P2, Reason2} ->
- {undef, _} = Reason2
- end;
- _ ->
- receive
- {'DOWN', M0, process, P0, Reason0} ->
- completed = Reason0
- end,
- receive
- {'DOWN', M1, process, P1, Reason1} ->
- completed = Reason1
- end,
- receive
- {'DOWN', M2, process, P2, Reason2} ->
- completed = Reason2
- end,
- catch erlang:purge_module(my_code_test2),
- catch erlang:delete_module(my_code_test2),
- catch erlang:purge_module(my_code_test2)
- end,
- ok.
-
-t_check_process_code(Config) when is_list(Config) ->
- case check_process_code_handle(indirect_references) of
- false -> {skipped, "check_process_code() ignores funs"};
- true -> t_check_process_code_test(Config)
- end.
-
-t_check_process_code_test(Config) ->
- Priv = proplists:get_value(priv_dir, Config),
- Data = proplists:get_value(data_dir, Config),
- File = filename:join(Data, "my_code_test"),
- Code = filename:join(Priv, "my_code_test"),
+ OptsList = case erlang:system_info(hipe_architecture) of
+ undefined -> [[]];
+ _ -> [[], [native,{d,hipe}]]
+ end,
+ [call_purged_fun_test_do(Priv, Data, Type, CO, FO)
+ || CO <- OptsList, FO <- OptsList].
- catch erlang:purge_module(my_code_test),
- catch erlang:delete_module(my_code_test),
- catch erlang:purge_module(my_code_test),
-
- {ok,my_code_test} = c:c(File, [{outdir,Priv}]),
-
- MyFun = fun(X, Y) -> X + Y end, %Confuse things.
- F = my_code_test:make_fun(42),
- 2 = fun_refc(F),
- MyFun2 = fun(X, Y) -> X * Y end, %Confuse things.
- 44 = F(2),
- %% Delete the module and call the fun again.
- true = erlang:delete_module(my_code_test),
- 2 = fun_refc(F),
- 45 = F(3),
- {'EXIT',{undef,_}} = (catch my_code_test:make_fun(33)),
-
- %% The fun should still be there, preventing purge.
- true = erlang:check_process_code(self(), my_code_test),
- gc(),
- gc(), %Place funs on the old heap.
- true = erlang:check_process_code(self(), my_code_test),
-
- %% Using the funs here guarantees that they will not be prematurely garbed.
- 48 = F(6),
- 3 = MyFun(1, 2),
- 12 = MyFun2(3, 4),
-
- %% Kill all funs.
- t_check_process_code1(Code, []).
-
-%% The real fun was killed, but we have some fakes which look similar.
-
-t_check_process_code1(Code, Fakes) ->
- MyFun = fun(X, Y) -> X + Y + 1 end, %Confuse things.
- false = erlang:check_process_code(self(), my_code_test),
- 4 = MyFun(1, 2),
- t_check_process_code2(Code, Fakes).
-
-t_check_process_code2(Code, _) ->
- false = erlang:check_process_code(self(), my_code_test),
- true = erlang:purge_module(my_code_test),
-
- %% In the next test we will load the same module twice.
- {module,my_code_test} = code:load_abs(Code),
- F = my_code_test:make_fun(37),
- 2 = fun_refc(F),
- false = erlang:check_process_code(self(), my_code_test),
- {module,my_code_test} = code:load_abs(Code),
- 2 = fun_refc(F),
+call_purged_fun_test_do(Priv, Data, Type, CallerOpts, FunOpts) ->
+ io:format("Compile caller as ~p and funs as ~p\n", [CallerOpts, FunOpts]),
+ SrcFile = filename:join(Data, "call_purged_fun_tester.erl"),
+ ObjFile = filename:join(Priv, "call_purged_fun_tester.beam"),
+ {ok,Mod,Code} = compile:file(SrcFile, [binary, report | CallerOpts]),
+ {module,Mod} = code:load_binary(Mod, ObjFile, Code),
- %% Still false because the fun with the same identify is found
- %% in the current code.
- false = erlang:check_process_code(self(), my_code_test),
+ call_purged_fun_tester:do(Priv, Data, Type, FunOpts).
- %% Some fake funs in the same module should not do any difference.
- false = erlang:check_process_code(self(), my_code_test),
- 38 = F(1),
- t_check_process_code3(Code, F, []).
+multi_proc_purge(Config) when is_list(Config) ->
+ %%
+    %% Make sure purge requests aren't lost while
+    %% the purger process is busy.
+ %%
+ Priv = proplists:get_value(priv_dir, Config),
+ Data = proplists:get_value(data_dir, Config),
+ File1 = filename:join(Data, "my_code_test"),
+ File2 = filename:join(Data, "my_code_test2"),
+
+ {ok,my_code_test} = c:c(File1, [{outdir,Priv}]),
+ {ok,my_code_test2} = c:c(File2, [{outdir,Priv}]),
+ erlang:delete_module(my_code_test),
+ erlang:delete_module(my_code_test2),
-t_check_process_code3(Code, F, Fakes) ->
- Pid = spawn_link(fun() -> body(F, Fakes) end),
- true = erlang:purge_module(my_code_test),
- false = erlang:check_process_code(self(), my_code_test),
- false = erlang:check_process_code(Pid, my_code_test),
+ Self = self(),
- true = erlang:delete_module(my_code_test),
- true = erlang:check_process_code(self(), my_code_test),
- true = erlang:check_process_code(Pid, my_code_test),
- 39 = F(2),
- t_check_process_code4(Code, Pid).
-
-t_check_process_code4(_Code, Pid) ->
- Pid ! drop_funs,
- receive after 1 -> ok end,
- false = erlang:check_process_code(Pid, my_code_test),
+ Fun1 = fun () ->
+ erts_code_purger:purge(my_code_test),
+ Self ! {self(), done}
+ end,
+ Fun2 = fun () ->
+ erts_code_purger:soft_purge(my_code_test2),
+ Self ! {self(), done}
+ end,
+ Fun3 = fun () ->
+ erts_code_purger:purge('__nonexisting_module__'),
+ Self ! {self(), done}
+ end,
+ Fun4 = fun () ->
+ erts_code_purger:soft_purge('__another_nonexisting_module__'),
+ Self ! {self(), done}
+ end,
+
+ Pid1 = spawn_link(Fun1),
+ Pid2 = spawn_link(Fun2),
+ Pid3 = spawn_link(Fun3),
+ Pid4 = spawn_link(Fun4),
+ Pid5 = spawn_link(Fun1),
+ Pid6 = spawn_link(Fun2),
+ Pid7 = spawn_link(Fun3),
+ receive after 50 -> ok end,
+ Pid8 = spawn_link(Fun4),
+ Pid9 = spawn_link(Fun1),
+ Pid10 = spawn_link(Fun2),
+ Pid11 = spawn_link(Fun3),
+ Pid12 = spawn_link(Fun4),
+ Pid13 = spawn_link(Fun1),
+ receive after 50 -> ok end,
+ Pid14 = spawn_link(Fun2),
+ Pid15 = spawn_link(Fun3),
+ Pid16 = spawn_link(Fun4),
+
+ lists:foreach(fun (P) -> receive {P, done} -> ok end end,
+ [Pid1, Pid2, Pid3, Pid4, Pid5, Pid6, Pid7, Pid8,
+ Pid9, Pid10, Pid11, Pid12, Pid13, Pid14, Pid15, Pid16]),
ok.
body(F, Fakes) ->
@@ -388,72 +249,6 @@ gc() ->
gc1().
gc1() -> ok.
-%% Test check_process_code/2 in combination with a fun obtained from an ets table.
-t_check_process_code_ets(Config) when is_list(Config) ->
- case check_process_code_handle(indirect_references) of
- false ->
- {skipped, "check_process_code() ignores funs"};
- true ->
- case test_server:is_native(?MODULE) of
- true ->
- {skip,"Native code"};
- false ->
- do_check_process_code_ets(Config)
- end
- end.
-
-do_check_process_code_ets(Config) ->
- Priv = proplists:get_value(priv_dir, Config),
- Data = proplists:get_value(data_dir, Config),
- File = filename:join(Data, "my_code_test"),
-
- catch erlang:purge_module(my_code_test),
- catch erlang:delete_module(my_code_test),
- catch erlang:purge_module(my_code_test),
- {ok,my_code_test} = c:c(File, [{outdir,Priv}]),
-
- T = ets:new(my_code_test, []),
- ets:insert(T, {7,my_code_test:make_fun(107)}),
- ets:insert(T, {8,my_code_test:make_fun(108)}),
- erlang:delete_module(my_code_test),
- false = erlang:check_process_code(self(), my_code_test),
- Body = fun() ->
- [{7,F1}] = ets:lookup(T, 7),
- [{8,F2}] = ets:lookup(T, 8),
- IdleLoop = fun() -> receive _X -> ok end end,
- RecLoop = fun(Again) ->
- receive
- call -> 110 = F1(3),
- 100 = F2(-8),
- Again(Again);
- {drop_funs,To} ->
- To ! funs_dropped,
- IdleLoop()
- end
- end,
- true = erlang:check_process_code(self(), my_code_test),
- RecLoop(RecLoop)
- end,
- Pid = spawn_link(Body),
- receive after 1 -> ok end,
- true = erlang:check_process_code(Pid, my_code_test),
- Pid ! call,
- Pid ! {drop_funs,self()},
-
- receive
- funs_dropped -> ok;
- Other -> ct:fail({unexpected,Other})
- after 10000 ->
- ct:fail(no_funs_dropped_answer)
- end,
-
- false = erlang:check_process_code(Pid, my_code_test),
- ok.
-
-fun_refc(F) ->
- {refc,Count} = erlang:fun_info(F, refc),
- Count.
-
%% Test the erlang:check_old_code/1 BIF.
t_check_old_code(Config) when is_list(Config) ->
@@ -501,16 +296,16 @@ get_chunk(Config) when is_list(Config) ->
{ok,my_code_test,Code} = compile:file(File, [binary]),
%% Should work.
- Chunk = get_chunk_ok("Atom", Code),
- Chunk = get_chunk_ok("Atom", make_sub_binary(Code)),
- Chunk = get_chunk_ok("Atom", make_unaligned_sub_binary(Code)),
+ Chunk = get_chunk_ok("AtU8", Code),
+ Chunk = get_chunk_ok("AtU8", make_sub_binary(Code)),
+ Chunk = get_chunk_ok("AtU8", make_unaligned_sub_binary(Code)),
%% Should fail.
- {'EXIT',{badarg,_}} = (catch code:get_chunk(bit_sized_binary(Code), "Atom")),
+ {'EXIT',{badarg,_}} = (catch code:get_chunk(bit_sized_binary(Code), "AtU8")),
{'EXIT',{badarg,_}} = (catch code:get_chunk(Code, "bad chunk id")),
%% Invalid beam code or missing chunk should return 'undefined'.
- undefined = code:get_chunk(<<"not a beam module">>, "Atom"),
+ undefined = code:get_chunk(<<"not a beam module">>, "AtU8"),
undefined = code:get_chunk(Code, "XXXX"),
ok.
@@ -543,67 +338,6 @@ module_md5_ok(Code) ->
end.
-make_stub(Config) when is_list(Config) ->
- catch erlang:purge_module(my_code_test),
- MD5 = erlang:md5(<<>>),
-
- Data = proplists:get_value(data_dir, Config),
- File = filename:join(Data, "my_code_test"),
- {ok,my_code_test,Code} = compile:file(File, [binary]),
-
- my_code_test = code:make_stub_module(my_code_test, Code, {[],[],MD5}),
- true = erlang:delete_module(my_code_test),
- true = erlang:purge_module(my_code_test),
-
- my_code_test = code:make_stub_module(my_code_test,
- make_unaligned_sub_binary(Code),
- {[],[],MD5}),
- true = erlang:delete_module(my_code_test),
- true = erlang:purge_module(my_code_test),
-
- my_code_test = code:make_stub_module(my_code_test, zlib:gzip(Code),
- {[],[],MD5}),
- true = erlang:delete_module(my_code_test),
- true = erlang:purge_module(my_code_test),
-
- %% Should fail.
- {'EXIT',{badarg,_}} =
- (catch code:make_stub_module(my_code_test, <<"bad">>, {[],[],MD5})),
- {'EXIT',{badarg,_}} =
- (catch code:make_stub_module(my_code_test,
- bit_sized_binary(Code),
- {[],[],MD5})),
- {'EXIT',{badarg,_}} =
- (catch code:make_stub_module(my_code_test_with_wrong_name,
- Code, {[],[],MD5})),
- ok.
-
-make_stub_many_funs(Config) when is_list(Config) ->
- catch erlang:purge_module(many_funs),
- MD5 = erlang:md5(<<>>),
-
- Data = proplists:get_value(data_dir, Config),
- File = filename:join(Data, "many_funs"),
- {ok,many_funs,Code} = compile:file(File, [binary]),
-
- many_funs = code:make_stub_module(many_funs, Code, {[],[],MD5}),
- true = erlang:delete_module(many_funs),
- true = erlang:purge_module(many_funs),
- many_funs = code:make_stub_module(many_funs,
- make_unaligned_sub_binary(Code),
- {[],[],MD5}),
- true = erlang:delete_module(many_funs),
- true = erlang:purge_module(many_funs),
-
- %% Should fail.
- {'EXIT',{badarg,_}} =
- (catch code:make_stub_module(many_funs, <<"bad">>, {[],[],MD5})),
- {'EXIT',{badarg,_}} =
- (catch code:make_stub_module(many_funs,
- bit_sized_binary(Code),
- {[],[],MD5})),
- ok.
-
constant_pools(Config) when is_list(Config) ->
Data = proplists:get_value(data_dir, Config),
File = filename:join(Data, "literals"),
@@ -1137,38 +871,3 @@ flush() ->
id(I) -> I.
-check_process_code_handle(What) ->
- lists:member(What, erlang:system_info(check_process_code)).
-
-wait_until(Fun) ->
- case Fun() of
- true ->
- ok;
- false ->
- receive after 100 -> ok end,
- wait_until(Fun)
- end.
-
-start_purge(Mod, Type) when is_atom(Mod)
- andalso ((Type == true)
- orelse (Type == false)) ->
- Ref = make_ref(),
- erts_code_purger ! {test_purge, Mod, self(), Type, Ref},
- receive
- {started, Ref} ->
- Ref
- end.
-
-continue_purge(Ref) when is_reference(Ref) ->
- erts_code_purger ! {continue, Ref},
- receive
- {continued, Ref} ->
- ok
- end.
-
-complete_purge(Ref) when is_reference(Ref) ->
- erts_code_purger ! {complete, Ref},
- receive
- {test_purge, Res, Ref} ->
- Res
- end.
diff --git a/erts/emulator/test/code_SUITE_data/call_purged_fun_tester.erl b/erts/emulator/test/code_SUITE_data/call_purged_fun_tester.erl
new file mode 100644
index 0000000000..699f0c1161
--- /dev/null
+++ b/erts/emulator/test/code_SUITE_data/call_purged_fun_tester.erl
@@ -0,0 +1,186 @@
+-module(call_purged_fun_tester).
+
+-export([do/4]).
+
+%% Resurrect line macro when hipe compiled
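+%% (When hipe-compiled, ?line expands to "put(the_line, ?LINE)," so every
+%% annotated expression records its source line in the process dictionary;
+%% do/4 reports that line if do_it/4 raises an exception.)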
+-ifdef(hipe).
+-define(line, put(the_line,?LINE),).
+do(Priv, Data, Type, Opts) ->
+ try do_it(Priv, Data, Type, Opts)
+ catch
+ C:E ->
+ ST = erlang:get_stacktrace(),
+ io:format("Caught exception from line ~p:\n~p\n",
+ [get(the_line), ST]),
+ io:format("Message queue: ~p\n", [process_info(self(), messages)]),
+ erlang:raise(C, E, ST)
+ end.
+-else.
+-define(line,).
+do(P,D,T,O) ->
+ do_it(P,D,T,O).
+-endif.
+
+
+do_it(Priv, Data, Type, Opts) ->
+ File = filename:join(Data, "my_code_test2"),
+ Code = filename:join(Priv, "my_code_test2"),
+
+ catch erlang:purge_module(my_code_test2),
+ catch erlang:delete_module(my_code_test2),
+ catch erlang:purge_module(my_code_test2),
+
+ ?line {ok,my_code_test2} = c:c(File, [{outdir,Priv} | Opts]),
+
+ ?line IsNative = lists:member(native,Opts),
+ ?line IsNative = code:is_module_native(my_code_test2),
+
+ ?line T = ets:new(my_code_test2_fun_table, []),
+ ets:insert(T, {my_fun,my_code_test2:make_fun(4711)}),
+ ets:insert(T, {my_fun2,my_code_test2:make_fun2()}),
+
+ Papa = self(),
+ {P0,M0} = spawn_monitor(fun () ->
+ [{my_fun2,F2}] = ets:lookup(T, my_fun2),
+ F2(fun () ->
+ Papa ! {self(),"going to sleep"},
+ receive {Papa,"wake up"} -> ok end
+ end,
+ fun () -> ok end),
+ exit(completed)
+ end),
+
+ ?line PurgeType = case Type of
+ code_gone ->
+ ok = file:delete(Code++".beam"),
+ true;
+ code_reload ->
+ true;
+ code_there ->
+ false
+ end,
+
+ ?line true = erlang:delete_module(my_code_test2),
+
+ ?line ok = receive {P0, "going to sleep"} -> ok
+ after 1000 -> timeout
+ end,
+
+ ?line Purge = start_purge(my_code_test2, PurgeType),
+
+ ?line {P1, M1} = spawn_monitor(fun () ->
+ ?line [{my_fun,F}] = ets:lookup(T, my_fun),
+ ?line 4712 = F(1),
+ exit(completed)
+ end),
+
+ ?line ok = wait_until(fun () ->
+ {status, suspended}
+ == process_info(P1, status)
+ end),
+
+ ?line ok = continue_purge(Purge),
+
+ ?line {P2, M2} = spawn_monitor(fun () ->
+ ?line [{my_fun,F}] = ets:lookup(T, my_fun),
+ ?line 4713 = F(2),
+ exit(completed)
+ end),
+ ?line {P3, M3} = spawn_monitor(fun () ->
+ ?line [{my_fun,F}] = ets:lookup(T, my_fun),
+ ?line 4714 = F(3),
+ exit(completed)
+ end),
+
+ ?line ok = wait_until(fun () ->
+ {status, suspended}
+ == process_info(P2, status)
+ end),
+ ?line ok = wait_until(fun () ->
+ {status, suspended}
+ == process_info(P3, status)
+ end),
+
+ ?line {current_function,
+ {erts_code_purger,
+ pending_purge_lambda,
+ 3}} = process_info(P1, current_function),
+ ?line {current_function,
+ {erts_code_purger,
+ pending_purge_lambda,
+ 3}} = process_info(P2, current_function),
+ ?line {current_function,
+ {erts_code_purger,
+ pending_purge_lambda,
+ 3}} = process_info(P3, current_function),
+
+ case Type of
+ code_there ->
+ ?line false = complete_purge(Purge),
+ P0 ! {self(), "wake up"},
+ ?line completed = wait_for_down(P0,M0);
+ _ ->
+ ?line {true, true} = complete_purge(Purge),
+ ?line killed = wait_for_down(P0,M0)
+ end,
+
+ case Type of
+ code_gone ->
+ ?line {undef, _} = wait_for_down(P1,M1),
+ ?line {undef, _} = wait_for_down(P2,M2),
+ ?line {undef, _} = wait_for_down(P3,M3);
+ _ ->
+ ?line completed = wait_for_down(P1,M1),
+ ?line completed = wait_for_down(P2,M2),
+ ?line completed = wait_for_down(P3,M3),
+ catch erlang:purge_module(my_code_test2),
+ catch erlang:delete_module(my_code_test2),
+ catch erlang:purge_module(my_code_test2)
+ end,
+ ok.
+
+wait_for_down(P,M) ->
+ receive
+ {'DOWN', M, process, P, Reason} ->
+ Reason
+ after 1000 ->
+ timeout
+ end.
+
+wait_until(Fun) ->
+ wait_until(Fun, 20).
+
+wait_until(Fun, N) ->
+ case {Fun(),N} of
+ {true, _} ->
+ ok;
+ {false, 0} ->
+ timeout;
+ {false, _} ->
+ receive after 100 -> ok end,
+ wait_until(Fun, N-1)
+ end.
+
+start_purge(Mod, Type) when is_atom(Mod)
+ andalso ((Type == true)
+ orelse (Type == false)) ->
+ Ref = make_ref(),
+ erts_code_purger ! {test_purge, Mod, self(), Type, Ref},
+ receive
+ {started, Ref} ->
+ Ref
+ end.
+
+continue_purge(Ref) when is_reference(Ref) ->
+ erts_code_purger ! {continue, Ref},
+ receive
+ {continued, Ref} ->
+ ok
+ end.
+
+complete_purge(Ref) when is_reference(Ref) ->
+ erts_code_purger ! {complete, Ref},
+ receive
+ {test_purge, Res, Ref} ->
+ Res
+ end.
diff --git a/erts/emulator/test/ddll_SUITE.erl b/erts/emulator/test/ddll_SUITE.erl
index 93b6f2d956..e7e518f82b 100644
--- a/erts/emulator/test/ddll_SUITE.erl
+++ b/erts/emulator/test/ddll_SUITE.erl
@@ -805,7 +805,7 @@ reference_count(Config) when is_list(Config) ->
Pid1 ! {self(), die},
test_server:sleep(200), % Give time to unload.
- % Verify that the driver was automaticly unloaded when the
+ % Verify that the driver was automatically unloaded when the
% process died.
{error, not_loaded}=erl_ddll:unload_driver(echo_drv),
ok.
diff --git a/erts/emulator/test/dirty_bif_SUITE.erl b/erts/emulator/test/dirty_bif_SUITE.erl
new file mode 100644
index 0000000000..308323594d
--- /dev/null
+++ b/erts/emulator/test/dirty_bif_SUITE.erl
@@ -0,0 +1,583 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(dirty_bif_SUITE).
+
+%%-define(line_trace,true).
+-define(CHECK(Exp,Got), check(Exp,Got,?LINE)).
+%%-define(CHECK(Exp,Got), Exp = Got).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0, suite/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_testcase/2, end_per_testcase/2,
+ dirty_bif/1, dirty_bif_exception/1,
+ dirty_bif_multischedule/1,
+ dirty_bif_multischedule_exception/1,
+ dirty_scheduler_exit/1,
+ dirty_call_while_terminated/1,
+ dirty_heap_access/1,
+ dirty_process_info/1,
+ dirty_process_register/1,
+ dirty_process_trace/1,
+ code_purge/1]).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+%%
+%% All these tests utilize the debug BIFs:
+%% - erts_debug:dirty_cpu/2 - Statically determined
+%% to (begin to) execute on a dirty CPU scheduler.
+%% - erts_debug:dirty_io/2 - Statically determined
+%% to (begin to) execute on a dirty IO scheduler.
+%% - erts_debug:dirty/3
+%% Their implementations are located in
+%% $ERL_TOP/erts/emulator/beam/beam_debug.c
+%%
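+%% As a rough illustration, the call shapes exercised by dirty_bif/1 and
+%% dirty_bif_multischedule/1 below look like this (sketch only):
+%%
+%%   dirty_cpu = erts_debug:dirty_cpu(scheduler, type),
+%%   dirty_io  = erts_debug:dirty_io(scheduler, type),
+%%   ok        = erts_debug:dirty(normal, reschedule, 1000).
+%%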
+
+all() ->
+ [dirty_bif,
+ dirty_bif_multischedule,
+ dirty_bif_exception,
+ dirty_bif_multischedule_exception,
+ dirty_scheduler_exit,
+ dirty_call_while_terminated,
+ dirty_heap_access,
+ dirty_process_info,
+ dirty_process_register,
+ dirty_process_trace,
+ code_purge].
+
+init_per_suite(Config) ->
+ case erlang:system_info(dirty_cpu_schedulers) of
+ N when N > 0 ->
+ Config;
+ _ ->
+ {skipped, "No dirty scheduler support"}
+ end.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Case, Config) ->
+ [{testcase, Case} | Config].
+
+end_per_testcase(_Case, _Config) ->
+ ok.
+
+dirty_bif(Config) when is_list(Config) ->
+ dirty_cpu = erts_debug:dirty_cpu(scheduler,type),
+ dirty_io = erts_debug:dirty_io(scheduler,type),
+ normal = erts_debug:dirty(normal,scheduler,type),
+ dirty_cpu = erts_debug:dirty(dirty_cpu,scheduler,type),
+ dirty_io = erts_debug:dirty(dirty_io,scheduler,type),
+ ok.
+
+dirty_bif_multischedule(Config) when is_list(Config) ->
+ ok = erts_debug:dirty_cpu(reschedule,1000),
+ ok = erts_debug:dirty_io(reschedule,1000),
+ ok = erts_debug:dirty(normal,reschedule,1000),
+ ok.
+
+
+dirty_bif_exception(Config) when is_list(Config) ->
+ lists:foreach(fun (Error) ->
+ ErrorType = case Error of
+ _ when is_atom(Error) -> Error;
+ _ -> badarg
+ end,
+ try
+ erts_debug:dirty_cpu(error, Error),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty_cpu,[error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ apply(erts_debug,dirty_cpu,[error, Error]),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty_cpu,[error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty_io(error, Error),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty_io,[error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ apply(erts_debug,dirty_io,[error, Error]),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty_io,[error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty(normal, error, Error),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[normal, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ apply(erts_debug,dirty,[normal, error, Error]),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[normal, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty(dirty_cpu, error, Error),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[dirty_cpu, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ apply(erts_debug,dirty,[dirty_cpu, error, Error]),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[dirty_cpu, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty(dirty_io, error, Error),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[dirty_io, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ apply(erts_debug,dirty,[dirty_io, error, Error]),
+ ct:fail(expected_exception)
+ catch
+ error:ErrorType ->
+ [{erts_debug,dirty,[dirty_io, error, Error],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end
+ end,
+ [badarg, undef, badarith, system_limit, noproc,
+ make_ref(), {another, "heap", term_to_binary("term")}]),
+ ok.
+
+
+dirty_bif_multischedule_exception(Config) when is_list(Config) ->
+ try
+ erts_debug:dirty_cpu(reschedule,1001)
+ catch
+ error:badarg ->
+ [{erts_debug,dirty_cpu,[reschedule, 1001],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty_io(reschedule,1001)
+ catch
+ error:badarg ->
+ [{erts_debug,dirty_io,[reschedule, 1001],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ erts_debug:dirty(normal,reschedule,1001)
+ catch
+ error:badarg ->
+ [{erts_debug,dirty,[normal,reschedule,1001],_}|_]
+ = erlang:get_stacktrace(),
+ ok
+ end.
+
+dirty_scheduler_exit(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config, "+SDio 1"),
+ [ok] = mcall(Node,
+ [fun() ->
+ Start = erlang:monotonic_time(millisecond),
+ ok = test_dirty_scheduler_exit(),
+ End = erlang:monotonic_time(millisecond),
+ io:format("Time=~p ms~n", [End-Start]),
+ ok
+ end]),
+ stop_node(Node),
+ ok.
+
+test_dirty_scheduler_exit() ->
+ process_flag(trap_exit,true),
+ test_dse(10,[]).
+test_dse(0,Pids) ->
+ timer:sleep(100),
+ kill_dse(Pids,[]);
+test_dse(N,Pids) ->
+ Pid = spawn_link(fun () -> erts_debug:dirty_io(wait, 5000) end),
+ test_dse(N-1,[Pid|Pids]).
+
+kill_dse([],Killed) ->
+ wait_dse(Killed);
+kill_dse([Pid|Pids],AlreadyKilled) ->
+ exit(Pid,kill),
+ kill_dse(Pids,[Pid|AlreadyKilled]).
+
+wait_dse([]) ->
+ ok;
+wait_dse([Pid|Pids]) ->
+ receive
+ {'EXIT',Pid,Reason} ->
+ killed = Reason
+ end,
+ wait_dse(Pids).
+
+dirty_call_while_terminated(Config) when is_list(Config) ->
+ Me = self(),
+ Bin = list_to_binary(lists:duplicate(4711, $r)),
+ {value, {BinAddr, 4711, 1}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ {Dirty, DM} = spawn_opt(fun () ->
+ erts_debug:dirty_cpu(alive_waitexiting, Me),
+ blipp:blupp(Bin)
+ end,
+ [monitor,link]),
+ receive {alive, Dirty} -> ok end,
+ {value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ Reason = die_dirty_process,
+ OT = process_flag(trap_exit, true),
+ exit(Dirty, Reason),
+ receive
+ {'DOWN', DM, process, Dirty, R0} ->
+ R0 = Reason
+ end,
+ receive
+ {'EXIT', Dirty, R1} ->
+ R1 = Reason
+ end,
+ undefined = process_info(Dirty),
+ undefined = process_info(Dirty, status),
+ false = erlang:is_process_alive(Dirty),
+ false = lists:member(Dirty, processes()),
+    %% The binary is still referred to by the Dirty process and has not yet
+    %% been cleaned up, since the dirty bif has not yet returned...
+ {value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ receive after 2000 -> ok end,
+ receive
+ Msg ->
+ ct:fail({unexpected_message, Msg})
+ after
+ 0 ->
+ ok
+ end,
+ {value, {BinAddr, 4711, 1}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ process_flag(trap_exit, OT),
+ try
+ blipp:blupp(Bin)
+ catch
+ _ : _ -> ok
+ end.
+
+dirty_heap_access(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config),
+ Me = self(),
+ RGL = rpc:call(Node,erlang,whereis,[init]),
+ Ref = rpc:call(Node,erlang,make_ref,[]),
+ Dirty = spawn_link(fun () ->
+ Res = erts_debug:dirty_cpu(copy, Ref),
+ garbage_collect(),
+ Me ! {self(), Res},
+ receive after infinity -> ok end
+ end),
+ {N, R} = access_dirty_heap(Dirty, RGL, 0, 0),
+ receive
+ {_Pid, Res} ->
+ 1000 = length(Res),
+ lists:foreach(fun (X) -> Ref = X end, Res)
+ end,
+ unlink(Dirty),
+ exit(Dirty, kill),
+ stop_node(Node),
+ {comment, integer_to_list(N) ++ " GL change loops; "
+ ++ integer_to_list(R) ++ " while running dirty"}.
+
+access_dirty_heap(Dirty, RGL, N, R) ->
+ case process_info(Dirty, status) of
+ {status, waiting} ->
+ {N, R};
+ {status, Status} ->
+ {group_leader, GL} = process_info(Dirty, group_leader),
+ true = group_leader(RGL, Dirty),
+ {group_leader, RGL} = process_info(Dirty, group_leader),
+ true = group_leader(GL, Dirty),
+ {group_leader, GL} = process_info(Dirty, group_leader),
+ access_dirty_heap(Dirty, RGL, N+1, case Status of
+ running ->
+ R+1;
+ _ ->
+ R
+ end)
+ end.
+
+%% These tests verify that a process accessing another process that is
+%% executing a dirty BIF does not get blocked, even when the dirty process'
+%% main lock is needed for the access. Each test passes its pid to
+%% erts_debug:dirty_io(ready_wait6_done, Pid), which sends back a 'ready'
+%% message once it is running on a dirty scheduler, just before it starts a
+%% 6 second sleep. When the tester receives that message, it verifies that
+%% access to the dirty process behaves as expected. After the dirty process
+%% finishes its 6 second sleep, but before it returns from the dirty
+%% scheduler, it sends a 'done' message. If the tester had already received
+%% that message, the test would fail, since it would mean that accessing the
+%% dirty process waited for it to return to a regular scheduler. The tester
+%% therefore verifies that the 'done' message has not arrived, and that the
+%% dirty process is still alive immediately after accessing it.
+dirty_process_info(Config) when is_list(Config) ->
+ access_dirty_process(
+ Config,
+ fun() -> ok end,
+ fun(BifPid) ->
+ PI = process_info(BifPid),
+ {current_function,{erts_debug,dirty_io,2}} =
+ lists:keyfind(current_function, 1, PI),
+ ok
+ end,
+ fun(_) -> ok end).
+
+dirty_process_register(Config) when is_list(Config) ->
+ access_dirty_process(
+ Config,
+ fun() -> ok end,
+ fun(BifPid) ->
+ register(test_dirty_process_register, BifPid),
+ BifPid = whereis(test_dirty_process_register),
+ unregister(test_dirty_process_register),
+ false = lists:member(test_dirty_process_register,
+ registered()),
+ ok
+ end,
+ fun(_) -> ok end).
+
+dirty_process_trace(Config) when is_list(Config) ->
+ access_dirty_process(
+ Config,
+ fun() ->
+ erlang:trace_pattern({erts_debug,dirty_io,2},
+ [{'_',[],[{return_trace}]}],
+ [local,meta]),
+ ok
+ end,
+ fun(BifPid) ->
+ erlang:trace(BifPid, true, [call,timestamp]),
+ ok
+ end,
+ fun(BifPid) ->
+ receive
+ {done, BifPid} ->
+ receive
+ {trace_ts,BifPid,call,{erts_debug,dirty_io,_},_} ->
+ ok
+ after
+ 0 ->
+ error(missing_trace_call_message)
+ end %%,
+ %% receive
+ %% {trace_ts,BifPid,return_from,{erts_debug,dirty_io,2},
+ %% ok,_} ->
+ %% ok
+ %% after
+ %% 100 ->
+ %% error(missing_trace_return_message)
+ %% end
+ after
+ 6500 ->
+ error(missing_done_message)
+ end,
+ ok
+ end).
+
+dirty_code_test_code() ->
+ "
+-module(dirty_code_test).
+
+-export([func/1]).
+
+func(Fun) ->
+ Fun(),
+ blipp:blapp().
+
+".
+
+code_purge(Config) when is_list(Config) ->
+ Path = ?config(data_dir, Config),
+ File = filename:join(Path, "dirty_code_test.erl"),
+ ok = file:write_file(File, dirty_code_test_code()),
+ {ok, dirty_code_test, Bin} = compile:file(File, [binary]),
+ {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin),
+ Start = erlang:monotonic_time(),
+ {Pid1, Mon1} = spawn_monitor(fun () ->
+ dirty_code_test:func(fun () ->
+ %% Sleep for 6 seconds
+ %% in dirty bif...
+ erts_debug:dirty_io(wait,6000)
+ end)
+ end),
+ {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin),
+ {Pid2, Mon2} = spawn_monitor(fun () ->
+ dirty_code_test:func(fun () ->
+ %% Sleep for 6 seconds
+ %% in dirty bif...
+ erts_debug:dirty_io(wait,6000)
+ end)
+ end),
+ receive
+ {'DOWN', Mon1, process, Pid1, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:purge_module(dirty_code_test),
+ receive
+ {'DOWN', Mon1, process, Pid1, Reason1} ->
+ killed = Reason1
+ end,
+ receive
+ {'DOWN', Mon2, process, Pid2, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:delete_module(dirty_code_test),
+ receive
+ {'DOWN', Mon2, process, Pid2, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:purge_module(dirty_code_test),
+ receive
+ {'DOWN', Mon2, process, Pid2, Reason2} ->
+ killed = Reason2
+ end,
+ End = erlang:monotonic_time(),
+ Time = erlang:convert_time_unit(End-Start, native, milli_seconds),
+ io:format("Time=~p~n", [Time]),
+ true = Time =< 1000,
+ ok.
+
+%%
+%% Internal...
+%%
+
+access_dirty_process(Config, Start, Test, Finish) ->
+ {ok, Node} = start_node(Config, ""),
+ [ok] = mcall(Node,
+ [fun() ->
+ ok = test_dirty_process_access(Start, Test, Finish)
+ end]),
+ stop_node(Node),
+ ok.
+
+test_dirty_process_access(Start, Test, Finish) ->
+ ok = Start(),
+ Self = self(),
+ BifPid = spawn_link(fun() ->
+ ok = erts_debug:dirty_io(ready_wait6_done, Self)
+ end),
+ ok = receive
+ {ready, BifPid} ->
+ ok = Test(BifPid),
+ receive
+ {done, BifPid} ->
+ error(dirty_process_info_blocked)
+ after
+ 0 ->
+ true = erlang:is_process_alive(BifPid),
+ ok
+ end
+ after
+ 3000 ->
+ error(timeout)
+ end,
+ ok = Finish(BifPid).
+
+receive_any() ->
+ receive M -> M end.
+
+start_node(Config) ->
+ start_node(Config, "").
+
+start_node(Config, Args) when is_list(Config) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(proplists:get_value(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(erlang:system_time(second))
+ ++ "-"
+ ++ integer_to_list(erlang:unique_integer([positive]))),
+ test_server:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
+
+stop_node(Node) ->
+ test_server:stop_node(Node).
+
+mcall(Node, Funs) ->
+ Parent = self(),
+ Refs = lists:map(fun (Fun) ->
+ Ref = make_ref(),
+ spawn_link(Node,
+ fun () ->
+ Res = Fun(),
+ unlink(Parent),
+ Parent ! {Ref, Res}
+ end),
+ Ref
+ end, Funs),
+ lists:map(fun (Ref) ->
+ receive
+ {Ref, Res} ->
+ Res
+ end
+ end, Refs).
diff --git a/erts/emulator/test/dirty_bif_SUITE_data/.gitignore b/erts/emulator/test/dirty_bif_SUITE_data/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/erts/emulator/test/dirty_bif_SUITE_data/.gitignore
diff --git a/erts/emulator/test/dirty_nif_SUITE.erl b/erts/emulator/test/dirty_nif_SUITE.erl
index a61fd92a18..5ba0d85ff3 100644
--- a/erts/emulator/test/dirty_nif_SUITE.erl
+++ b/erts/emulator/test/dirty_nif_SUITE.erl
@@ -54,8 +54,8 @@ all() ->
dirty_nif_send_traced].
init_per_suite(Config) ->
- try erlang:system_info(dirty_cpu_schedulers) of
- N when is_integer(N), N > 0 ->
+ case erlang:system_info(dirty_cpu_schedulers) of
+ N when N > 0 ->
case lib_loaded() of
false ->
ok = erlang:load_nif(
@@ -64,8 +64,8 @@ init_per_suite(Config) ->
true ->
ok
end,
- Config
- catch _:_ ->
+ Config;
+ _ ->
{skipped, "No dirty scheduler support"}
end.
@@ -214,7 +214,7 @@ dirty_call_while_terminated(Config) when is_list(Config) ->
undefined = process_info(Dirty, status),
false = erlang:is_process_alive(Dirty),
false = lists:member(Dirty, processes()),
- %% Binary still refered by Dirty process not yet cleaned up
+    %% Binary still referred to by the Dirty process, not yet cleaned up
%% since the dirty nif has not yet returned...
{value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2,
element(2,
diff --git a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c
index 08efa23c81..caf99c952f 100644
--- a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c
+++ b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c
@@ -17,7 +17,7 @@
*
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <assert.h>
#ifdef __WIN32__
#include <windows.h>
diff --git a/erts/emulator/test/driver_SUITE_data/chkio_drv.c b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
index 614b68e865..8e5e81665c 100644
--- a/erts/emulator/test/driver_SUITE_data/chkio_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
@@ -1397,10 +1397,18 @@ static void assert_print(char* str, int line)
static void assert_failed(ErlDrvPort port, char* str, int line)
{
char buf[30];
+ size_t bufsz = sizeof(buf);
+
assert_print(str,line);
- snprintf(buf,sizeof(buf),"failed_at_line_%d",line);
- driver_failure_atom(port,buf);
- /*abort();*/
+
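+    /* If ERL_ABORT_ON_FAILURE is set to "true" or "yes" in the environment,
+     * abort the emulator (e.g. to get a core dump); otherwise report the
+     * failure to the port as an error atom, as before. */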
+ if (erl_drv_getenv("ERL_ABORT_ON_FAILURE", buf, &bufsz) == 0
+ && (strcmp("true", buf) == 0 || strcmp("yes", buf) == 0)) {
+ abort();
+ }
+ else {
+ snprintf(buf,sizeof(buf),"failed_at_line_%d",line);
+ driver_failure_atom(port,buf);
+ }
}
#define my_driver_select(PORT,FD,MODE,ON) \
diff --git a/erts/emulator/test/erl_link_SUITE.erl b/erts/emulator/test/erl_link_SUITE.erl
index 89e1aefb50..9258897764 100644
--- a/erts/emulator/test/erl_link_SUITE.erl
+++ b/erts/emulator/test/erl_link_SUITE.erl
@@ -60,7 +60,7 @@
% These are to be kept in sync with erl_monitors.h
-define(MON_ORIGIN, 1).
--define(MON_TARGET, 3).
+-define(MON_TARGET, 2).
-record(erl_link, {type = ?LINK_UNDEF,
@@ -69,7 +69,7 @@
% This is to be kept in sync with erl_bif_info.c (make_monitor_list)
--record(erl_monitor, {type, % MON_ORIGIN or MON_TARGET (1 or 3)
+-record(erl_monitor, {type, % MON_ORIGIN or MON_TARGET
ref,
pid, % Process or nodename
name = []}). % registered name or []
diff --git a/erts/emulator/test/estone_SUITE.erl b/erts/emulator/test/estone_SUITE.erl
index 3ce849b88e..35f695ffe5 100644
--- a/erts/emulator/test/estone_SUITE.erl
+++ b/erts/emulator/test/estone_SUITE.erl
@@ -708,7 +708,7 @@ alloc(I) ->
%% Time to call bif's
%% Lot's of element stuff which reflects the record code which
-%% is becomming more and more common
+%% is becoming more and more common
bif_dispatch(0) ->
0;
bif_dispatch(I) ->
diff --git a/erts/emulator/test/float_SUITE.erl b/erts/emulator/test/float_SUITE.erl
index 36b1f9179f..ad33ad705b 100644
--- a/erts/emulator/test/float_SUITE.erl
+++ b/erts/emulator/test/float_SUITE.erl
@@ -208,7 +208,7 @@ span_cmp(Axis, Incr, Length) ->
%% for both negative and positive numbers.
%%
%% Axis: The number around which to do the tests eg. (1 bsl 58) - 1.0
-%% Incr: How much to increment the test numbers inbetween each test.
+%% Incr: How much to increment the test numbers in-between each test.
%% Length: Length/2 is the number of Incr away from Axis to test on the
%% negative and positive plane.
%% Diff: How much the float and int should differ when comparing
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index 26fa955e3c..e4640909aa 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -19,12 +19,11 @@
%%
-module(fun_SUITE).
--compile({nowarn_deprecated_function, {erlang,hash,2}}).
-export([all/0, suite/0,
bad_apply/1,bad_fun_call/1,badarity/1,ext_badarity/1,
equality/1,ordering/1,
- fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1,
+ fun_to_port/1,t_phash/1,t_phash2/1,md5/1,
refc/1,refc_ets/1,refc_dist/1,
const_propagation/1,t_arity/1,t_is_function2/1,
t_fun_info/1,t_fun_info_mfa/1]).
@@ -38,9 +37,9 @@ suite() ->
{timetrap, {minutes, 1}}].
-all() ->
+all() ->
[bad_apply, bad_fun_call, badarity, ext_badarity,
- equality, ordering, fun_to_port, t_hash, t_phash,
+ equality, ordering, fun_to_port, t_phash,
t_phash2, md5, refc, refc_ets, refc_dist,
const_propagation, t_arity, t_is_function2, t_fun_info,
t_fun_info_mfa].
@@ -412,33 +411,6 @@ build_io_list(N) ->
1 -> [7,L|L]
end.
-%% Test the hash/2 BIF on funs.
-t_hash(Config) when is_list(Config) ->
- F1 = fun(_X) -> 1 end,
- F2 = fun(_X) -> 2 end,
- true = hash(F1) /= hash(F2),
-
- G1 = make_fun(1, 2, 3),
- G2 = make_fun(1, 2, 3),
- G3 = make_fun(1, 2, 4),
- true = hash(G1) == hash(G2),
- true = hash(G2) /= hash(G3),
-
- FF0 = fun erlang:abs/1,
- FF1 = fun erlang:exit/1,
- FF2 = fun erlang:exit/2,
- FF3 = fun blurf:exit/2,
- true = hash(FF0) =/= hash(FF1),
- true = hash(FF0) =/= hash(FF2),
- true = hash(FF0) =/= hash(FF3),
- true = hash(FF1) =/= hash(FF2),
- true = hash(FF1) =/= hash(FF3),
- true = hash(FF2) =/= hash(FF3),
- ok.
-
-hash(Term) ->
- erlang:hash(Term, 16#7ffffff).
-
%% Test the phash/2 BIF on funs.
t_phash(Config) when is_list(Config) ->
F1 = fun(_X) -> 1 end,
@@ -461,7 +433,6 @@ t_phash(Config) when is_list(Config) ->
true = phash(FF1) =/= phash(FF2),
true = phash(FF1) =/= phash(FF3),
true = phash(FF2) =/= phash(FF3),
-
ok.
phash(Term) ->
diff --git a/erts/emulator/test/gc_SUITE.erl b/erts/emulator/test/gc_SUITE.erl
index 8a600b7d9f..35dd147550 100644
--- a/erts/emulator/test/gc_SUITE.erl
+++ b/erts/emulator/test/gc_SUITE.erl
@@ -23,15 +23,26 @@
-module(gc_SUITE).
-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
-export([all/0, suite/0]).
--export([grow_heap/1, grow_stack/1, grow_stack_heap/1, max_heap_size/1]).
+-export([
+ grow_heap/1,
+ grow_stack/1,
+ grow_stack_heap/1,
+ max_heap_size/1,
+ minor_major_gc_option_async/1,
+ minor_major_gc_option_self/1
+]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
all() ->
- [grow_heap, grow_stack, grow_stack_heap, max_heap_size].
+ [grow_heap, grow_stack, grow_stack_heap, max_heap_size,
+ minor_major_gc_option_self,
+ minor_major_gc_option_async].
%% Produce a growing list of elements,
@@ -190,3 +201,91 @@ long_receive() ->
after 10000 ->
ok
end.
+
+minor_major_gc_option_self(_Config) ->
+ Endless = fun Endless() ->
+ receive
+ {gc, Type} -> erlang:garbage_collect(self(), [{type, Type}])
+ after 100 -> ok end,
+ Endless()
+ end,
+
+ %% Try as major, a test process will self-trigger GC
+ P1 = spawn(Endless),
+ erlang:garbage_collect(P1, []),
+ erlang:trace(P1, true, [garbage_collection]),
+ P1 ! {gc, major},
+ expect_trace_messages(P1, [gc_major_start, gc_major_end]),
+ erlang:trace(P1, false, [garbage_collection]),
+ erlang:exit(P1, kill),
+
+ %% Try as minor, a test process will self-trigger GC
+ P2 = spawn(Endless),
+ erlang:garbage_collect(P2, []),
+ erlang:trace(P2, true, [garbage_collection]),
+ P2 ! {gc, minor},
+ expect_trace_messages(P2, [gc_minor_start, gc_minor_end]),
+ erlang:trace(P2, false, [garbage_collection]),
+ erlang:exit(P2, kill).
+
+minor_major_gc_option_async(_Config) ->
+ Endless = fun Endless() ->
+ receive after 100 -> ok end,
+ Endless()
+ end,
+
+ %% Try with default option, must be major gc
+ P1 = spawn(Endless),
+ erlang:garbage_collect(P1, []),
+ erlang:trace(P1, true, [garbage_collection]),
+ erlang:garbage_collect(P1, []),
+ expect_trace_messages(P1, [gc_major_start, gc_major_end]),
+ erlang:trace(P1, false, [garbage_collection]),
+ erlang:exit(P1, kill),
+
+ %% Try with the 'major' type
+ P2 = spawn(Endless),
+ erlang:garbage_collect(P2, []),
+ erlang:trace(P2, true, [garbage_collection]),
+ erlang:garbage_collect(P2, [{type, major}]),
+ expect_trace_messages(P2, [gc_major_start, gc_major_end]),
+ erlang:trace(P2, false, [garbage_collection]),
+ erlang:exit(P2, kill),
+
+ %% Try with 'minor' option, once
+ P3 = spawn(Endless),
+ erlang:garbage_collect(P3, []),
+ erlang:trace(P3, true, [garbage_collection]),
+ erlang:garbage_collect(P3, [{type, minor}]),
+ expect_trace_messages(P3, [gc_minor_start, gc_minor_end]),
+ erlang:trace(P3, false, [garbage_collection]),
+ erlang:exit(P3, kill),
+
+ %% Try with 'minor' option, once, async
+ P4 = spawn(Endless),
+ Ref = erlang:make_ref(),
+ erlang:garbage_collect(P4, []),
+ erlang:trace(P4, true, [garbage_collection]),
+ ?assertEqual(async,
+ erlang:garbage_collect(P4, [{type, minor}, {async, Ref}])),
+ expect_trace_messages(P4, [gc_minor_start, gc_minor_end]),
+ erlang:trace(P4, false, [garbage_collection]),
+ receive {garbage_collect, Ref, true} -> ok;
+ Other4 -> ct:pal("Unexpected message: ~p~n"
+ ++ "while waiting for async gc result", [Other4])
+ after 2000 -> ?assert(false)
+ end,
+ erlang:exit(P4, kill).
+
+%% Given a list of trace tags (atoms), receive messages and check that each is
+%% a trace event with the expected tag; otherwise crash, failing the test.
+expect_trace_messages(_Pid, []) -> ok;
+expect_trace_messages(Pid, [Tag | TraceTags]) ->
+ receive
+ {trace, Pid, Tag, _Data} -> ok;
+ AnythingElse ->
+        ct:pal("Unexpected message: ~p~nwhile expecting {trace, _, ~p, _}",
+ [AnythingElse, Tag]),
+ ?assert(false)
+ end,
+ expect_trace_messages(Pid, TraceTags).
diff --git a/erts/emulator/test/hash_SUITE.erl b/erts/emulator/test/hash_SUITE.erl
index a39d101b0d..3cbb3c7d5f 100644
--- a/erts/emulator/test/hash_SUITE.erl
+++ b/erts/emulator/test/hash_SUITE.erl
@@ -34,7 +34,6 @@
-export([basic_test/0,cmp_test/1,range_test/0,spread_test/1,
phash2_test/0, otp_5292_test/0,
otp_7127_test/0]).
--compile({nowarn_deprecated_function, {erlang,hash,2}}).
%%
%% Define to run outside of test server
@@ -130,24 +129,12 @@ test_hash_zero(Config) when is_list(Config) ->
%%
basic_test() ->
685556714 = erlang:phash({a,b,c},16#FFFFFFFF),
- 14468079 = erlang:hash({a,b,c},16#7FFFFFF),
37442646 = erlang:phash([a,b,c,{1,2,3},c:pid(0,2,3),
16#77777777777777],16#FFFFFFFF),
- Comment = case erlang:hash([a,b,c,{1,2,3},c:pid(0,2,3),
- 16#77777777777777],16#7FFFFFF) of
- 102727602 ->
- big = erlang:system_info(endian),
- "Big endian machine";
- 105818829 ->
- little = erlang:system_info(endian),
- "Little endian machine"
- end,
ExternalReference = <<131,114,0,3,100,0,13,110,111,110,111,100,101,64,
110,111,104,111,115,116,0,0,0,0,122,0,0,0,0,0,0,0,0>>,
1113403635 = erlang:phash(binary_to_term(ExternalReference),
16#FFFFFFFF),
- 123 = erlang:hash(binary_to_term(ExternalReference),
- 16#7FFFFFF),
ExternalFun = <<131,117,0,0,0,3,103,100,0,13,110,111,110,111,100,101,64,
110,111,104,111,115,116,0,0,0,38,0,0,0,0,0,100,0,8,101,
114,108,95,101,118,97,108,97,20,98,5,182,139,98,108,0,0,
@@ -166,11 +153,9 @@ basic_test() ->
64,110,111,104,111,115,116,0,0,0,22,0,0,0,0,0,106>>,
170987488 = erlang:phash(binary_to_term(ExternalFun),
16#FFFFFFFF),
- 124460689 = erlang:hash(binary_to_term(ExternalFun),
- 16#7FFFFFF),
case (catch erlang:phash(1,0)) of
{'EXIT',{badarg, _}} ->
- {comment, Comment};
+ ok;
_ ->
exit(phash_accepted_zero_as_range)
end.
@@ -193,7 +178,6 @@ range_test() ->
end,
F(1,16#100000000,F).
-
spread_test(N) ->
test_fun(N,{erlang,phash},16#50000000000,fun(X) ->
@@ -419,7 +403,7 @@ phash2_test() ->
{"abc"++[1009], 290369864},
{"abc"++[1009]++"de", 4134369195},
{"1234567890123456", 963649519},
-
+
%% tuple
{{}, 221703996},
{{{}}, 2165044361},
@@ -452,30 +436,15 @@ f3(X, Y) ->
-endif.
otp_5292_test() ->
- H = fun(E) -> [erlang:hash(E, 16#7FFFFFF),
- erlang:hash(-E, 16#7FFFFFF)]
- end,
- S1 = md5([md5(hash_int(S, E, H)) || {Start, N, Sz} <- d(),
- {S, E} <- int(Start, N, Sz)]),
PH = fun(E) -> [erlang:phash(E, 1 bsl 32),
erlang:phash(-E, 1 bsl 32),
erlang:phash2(E, 1 bsl 32),
erlang:phash2(-E, 1 bsl 32)]
end,
- S2 = md5([md5(hash_int(S, E, PH)) || {Start, N, Sz} <- d(),
+ S2 = md5([md5(hash_int(S, E, PH)) || {Start, N, Sz} <- d(),
{S, E} <- int(Start, N, Sz)]),
- Comment = case S1 of
- <<4,248,208,156,200,131,7,1,173,13,239,173,112,81,16,174>> ->
- big = erlang:system_info(endian),
- "Big endian machine";
- <<180,28,33,231,239,184,71,125,76,47,227,241,78,184,176,233>> ->
- little = erlang:system_info(endian),
- "Little endian machine"
- end,
<<124,81,198,121,174,233,19,137,10,83,33,80,226,111,238,99>> = S2,
- 2 = erlang:hash(1, (1 bsl 27) -1),
- {'EXIT', _} = (catch erlang:hash(1, (1 bsl 27))),
- {comment, Comment}.
+ ok.
d() ->
[%% Start, NumOfIntervals, SizeOfInterval
@@ -496,8 +465,6 @@ md5(T) ->
bit_level_binaries_do() ->
[3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] =
- bit_level_all_different(fun erlang:hash/2),
- [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] =
bit_level_all_different(fun erlang:phash/2),
[102233154,19716,102133857,4532024,123369135,24565730,109558721|_] =
bit_level_all_different(fun erlang:phash2/2),
@@ -535,9 +502,7 @@ bit_level_all_different(Hash) ->
Hashes1.
test_hash_phash(Bitstr, Rem) ->
- Hash = erlang:hash(Bitstr, Rem),
Hash = erlang:phash(Bitstr, Rem),
- Hash = erlang:hash(unaligned_sub_bitstr(Bitstr), Rem),
Hash = erlang:phash(unaligned_sub_bitstr(Bitstr), Rem).
test_phash2(Bitstr, Rem) ->
@@ -555,7 +520,6 @@ hash_zero_test() ->
binary_to_term(<<131,70,128,0,0,0,0,0,0,0>>)], %% -0.0
ok = hash_zero_test(Zs,fun(T) -> erlang:phash2(T, 1 bsl 32) end),
ok = hash_zero_test(Zs,fun(T) -> erlang:phash(T, 1 bsl 32) end),
- ok = hash_zero_test(Zs,fun(T) -> erlang:hash(T, (1 bsl 27) - 1) end),
ok.
hash_zero_test([Z|Zs],F) ->
diff --git a/erts/emulator/test/hibernate_SUITE.erl b/erts/emulator/test/hibernate_SUITE.erl
index 6f8ce02266..90693595a7 100644
--- a/erts/emulator/test/hibernate_SUITE.erl
+++ b/erts/emulator/test/hibernate_SUITE.erl
@@ -349,7 +349,7 @@ clean_dict() ->
lists:foreach(fun ({Key, _}) -> erase(Key) end, Dict).
%%
-%% Wake up and then immediatly bif trap with a lengthy computation.
+%% Wake up and then immediately bif trap with a lengthy computation.
%%
wake_up_and_bif_trap(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/hipe_SUITE.erl b/erts/emulator/test/hipe_SUITE.erl
index a556b4ddc0..0b44dd7fb7 100644
--- a/erts/emulator/test/hipe_SUITE.erl
+++ b/erts/emulator/test/hipe_SUITE.erl
@@ -19,12 +19,17 @@
%%
-module(hipe_SUITE).
--export([all/0, t_copy_literals/1]).
+-export([all/0
+ ,t_copy_literals/1
+ ,t_purge/1
+ ]).
all() ->
case erlang:system_info(hipe_architecture) of
undefined -> {skip, "HiPE is disabled"};
- _ -> [t_copy_literals]
+ _ -> [t_copy_literals
+ ,t_purge
+ ]
end.
t_copy_literals(doc) ->
@@ -65,3 +70,51 @@ t_copy_literals(Config) when is_list(Config) ->
true = erlang:delete_module(ref_cell),
true = erlang:purge_module(ref_cell),
ok.
+
+t_purge(doc) -> "Checks that native code is properly found and purged";
+t_purge(Config) when is_list(Config) ->
+ Data = proplists:get_value(data_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
+ SrcFile = filename:join(Data, "ref_cell"),
+ BeamFile = filename:join(Priv, "ref_cell"),
+ {ok,ref_cell} = c:c(SrcFile, [{outdir,Priv},native]),
+ true = code:is_module_native(ref_cell),
+
+ PA = ref_cell:start_link(),
+
+ %% Unload, PA should still be running
+ true = erlang:delete_module(ref_cell),
+ %% Can't use ref_cell:call/2, it's in old code!
+ call(PA, {put_res_of, fun()-> hej end}),
+ hej = call(PA, get),
+
+ %% Load same module again
+ code:load_abs(BeamFile),
+ true = code:is_module_native(ref_cell),
+ PB = ref_cell:start_link(),
+
+ %% Purge old code, PA should be killed, PB should survive
+ unlink(PA),
+ ARef = monitor(process, PA),
+ true = erlang:purge_module(ref_cell),
+ receive {'DOWN', ARef, process, PA, killed} -> ok
+ after 1 -> ct:fail("PA was not killed")
+ end,
+
+ %% Unload, PB should still be running
+ true = erlang:delete_module(ref_cell),
+ call(PB, {put_res_of, fun()-> svejs end}),
+ svejs = call(PB, get),
+
+ unlink(PB),
+ BRef = monitor(process, PB),
+ true = erlang:purge_module(ref_cell),
+ receive {'DOWN', BRef, process, PB, killed} -> ok
+ after 1 -> ct:fail("PB was not killed")
+ end,
+
+ ok.
+
+call(Pid, Call) ->
+ Pid ! {Call, self()},
+ receive {Pid, Res} -> Res end.
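
The t_purge case relies on standard purge semantics: a process still executing old code is killed by erlang:purge_module/1. A minimal sketch of that interaction, with a hypothetical module mod exporting a long-lived start/0:

    purge_demo() ->
        Pid = mod:start(),                  %% hypothetical long-lived process
        true = erlang:delete_module(mod),   %% current code becomes old code
        Ref = monitor(process, Pid),
        true = erlang:purge_module(mod),    %% old code is removed; Pid is killed
        receive {'DOWN', Ref, process, Pid, killed} -> ok end.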
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index 5af676c409..02f3c89318 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -18,7 +18,6 @@
%%
-module(map_SUITE).
-export([all/0, suite/0]).
--compile({nowarn_deprecated_function, {erlang,hash,2}}).
-export([t_build_and_match_literals/1, t_build_and_match_literals_large/1,
t_update_literals/1, t_update_literals_large/1,
@@ -53,6 +52,7 @@
t_bif_map_values/1,
t_bif_map_to_list/1,
t_bif_map_from_list/1,
+ t_bif_erts_internal_maps_to_list/1,
%% erlang
t_erlang_hash/1,
@@ -119,6 +119,7 @@ all() -> [t_build_and_match_literals, t_build_and_match_literals_large,
t_bif_map_update,
t_bif_map_values,
t_bif_map_to_list, t_bif_map_from_list,
+ t_bif_erts_internal_maps_to_list,
%% erlang
t_erlang_hash, t_map_encode_decode,
@@ -2130,8 +2131,6 @@ t_erlang_hash(Config) when is_list(Config) ->
ok = t_bif_erlang_phash2(),
ok = t_bif_erlang_phash(),
- ok = t_bif_erlang_hash(),
-
ok.
t_bif_erlang_phash2() ->
@@ -2174,27 +2173,6 @@ t_bif_erlang_phash() ->
2620391445 = erlang:phash(M2,Sz), % 3590546636
ok.
-t_bif_erlang_hash() ->
- Sz = 1 bsl 27 - 1,
- 39684169 = erlang:hash(#{},Sz), % 5158
- 33673142 = erlang:hash(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 },Sz), % 71555838
- 95337869 = erlang:hash(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} },Sz), % 5497225
- 108959561 = erlang:hash(#{ 1 => a },Sz), % 126071654
- 59623150 = erlang:hash(#{ a => 1 },Sz), % 126426236
-
- 42775386 = erlang:hash(#{{} => <<>>},Sz), % 101655720
- 71692856 = erlang:hash(#{<<>> => {}},Sz), % 101655720
-
- M0 = #{ a => 1, "key" => <<"value">> },
- M1 = maps:remove("key",M0),
- M2 = M1#{ "key" => <<"value">> },
-
- 70254632 = erlang:hash(M0,Sz), % 38260486
- 59623150 = erlang:hash(M1,Sz), % 126426236
- 70254632 = erlang:hash(M2,Sz), % 38260486
- ok.
-
-
t_map_encode_decode(Config) when is_list(Config) ->
<<131,116,0,0,0,0>> = erlang:term_to_binary(#{}),
Pairs = [
@@ -2386,23 +2364,55 @@ t_bif_map_from_list(Config) when is_list(Config) ->
{'EXIT', {badarg,_}} = (catch maps:from_list(id(42))),
ok.
-t_bif_build_and_check(Config) when is_list(Config) ->
- ok = check_build_and_remove(750,[
- fun(K) -> [K,K] end,
- fun(K) -> [float(K),K] end,
- fun(K) -> K end,
- fun(K) -> {1,K} end,
- fun(K) -> {K} end,
- fun(K) -> [K|K] end,
- fun(K) -> [K,1,2,3,4] end,
- fun(K) -> {K,atom} end,
- fun(K) -> float(K) end,
- fun(K) -> integer_to_list(K) end,
- fun(K) -> list_to_atom(integer_to_list(K)) end,
- fun(K) -> [K,{K,[K,{K,[K]}]}] end,
- fun(K) -> <<K:32>> end
- ]),
+t_bif_erts_internal_maps_to_list(Config) when is_list(Config) ->
+ %% small maps
+ [] = erts_internal:maps_to_list(#{},-1),
+ [] = erts_internal:maps_to_list(#{},-2),
+ [] = erts_internal:maps_to_list(#{},10),
+ [{a,1},{b,2}] = lists:sort(erts_internal:maps_to_list(#{a=>1,b=>2}, 2)),
+ [{a,1},{b,2}] = lists:sort(erts_internal:maps_to_list(#{a=>1,b=>2}, -1)),
+ [{_,_}] = erts_internal:maps_to_list(#{a=>1,b=>2}, 1),
+ [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},-2)),
+ [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},3)),
+ [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},5)),
+ [{_,_},{_,_}] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},2),
+ [{_,_}] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},1),
+ [] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},0),
+
+ %% big maps
+ M = maps:from_list([{I,ok}||I <- lists:seq(1,500)]),
+ [] = erts_internal:maps_to_list(M,0),
+ [{_,_}] = erts_internal:maps_to_list(M,1),
+ [{_,_},{_,_}] = erts_internal:maps_to_list(M,2),
+ Ls1 = erts_internal:maps_to_list(M,10),
+ 10 = length(Ls1),
+ Ls2 = erts_internal:maps_to_list(M,20),
+ 20 = length(Ls2),
+ Ls3 = erts_internal:maps_to_list(M,120),
+ 120 = length(Ls3),
+ Ls4 = erts_internal:maps_to_list(M,-1),
+ 500 = length(Ls4),
+ %% error cases
+ {'EXIT', {{badmap,[{a,b},b]},_}} = (catch erts_internal:maps_to_list(id([{a,b},b]),id(1))),
+ {'EXIT', {badarg,_}} = (catch erts_internal:maps_to_list(id(#{}),id(a))),
+ {'EXIT', {badarg,_}} = (catch erts_internal:maps_to_list(id(#{1=>2}),id(<<>>))),
+ ok.
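
As exercised above, erts_internal:maps_to_list(Map, N) returns at most N key/value pairs, or all pairs when N is negative. A minimal sketch of a bounded peek built on it; note that erts_internal is not a stable API and the pair order is unspecified:

    peek_map(Map, N) when is_map(Map), is_integer(N) ->
        Pairs = erts_internal:maps_to_list(Map, N),
        Expected = if N < 0 -> map_size(Map);
                      true  -> min(N, map_size(Map))
                   end,
        Expected = length(Pairs),
        Pairs.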
+
+t_bif_build_and_check(Config) when is_list(Config) ->
+ ok = check_build_and_remove(750,[fun(K) -> [K,K] end,
+ fun(K) -> [float(K),K] end,
+ fun(K) -> K end,
+ fun(K) -> {1,K} end,
+ fun(K) -> {K} end,
+ fun(K) -> [K|K] end,
+ fun(K) -> [K,1,2,3,4] end,
+ fun(K) -> {K,atom} end,
+ fun(K) -> float(K) end,
+ fun(K) -> integer_to_list(K) end,
+ fun(K) -> list_to_atom(integer_to_list(K)) end,
+ fun(K) -> [K,{K,[K,{K,[K]}]}] end,
+ fun(K) -> <<K:32>> end]),
ok.
check_build_and_remove(_,[]) -> ok;
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 6733237b20..971a047309 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -427,13 +427,13 @@ silent_no_ms(Config) when is_list(Config) ->
%%
[{trace,Tracee,call,{?MODULE,f1,[start]}},
{trace,Tracee,return_to,
- {?MODULE,'-silent_no_ms/1-fun-2-',0}},
+ {?MODULE,'-silent_no_ms/1-fun-3-',0}},
{trace,Tracee,call,{?MODULE,f2,[f,g]}},
{trace,Tracee,return_to,
- {?MODULE,'-silent_no_ms/1-fun-2-',0}},
+ {?MODULE,'-silent_no_ms/1-fun-3-',0}},
{trace,Tracee,call,{erlang,integer_to_list,[2]}},
{trace,Tracee,return_to,
- {?MODULE,'-silent_no_ms/1-fun-2-',0}},
+ {?MODULE,'-silent_no_ms/1-fun-3-',0}},
{trace,Tracee,call,{?MODULE,f2,[h,i]}},
{trace,Tracee,return_to,{?MODULE,f3,2}}]
end).
@@ -484,7 +484,7 @@ ms_trace2(Config) when is_list(Config) ->
%%
%% Expected: (no return_to for global call trace)
%%
- Origin = {match_spec_SUITE,'-ms_trace2/1-fun-0-',1},
+ Origin = {match_spec_SUITE,'-ms_trace2/1-fun-1-',1},
[{trace_ts,Tracee,call,
{?MODULE,fn,
[[all],[call,return_to,{tracer,Tracer}]]},
@@ -574,7 +574,7 @@ ms_trace3(Config) when is_list(Config) ->
%%
%% Expected: (no return_to for global call trace)
%%
- Origin = {match_spec_SUITE,'-ms_trace3/1-fun-1-',2},
+ Origin = {match_spec_SUITE,'-ms_trace3/1-fun-2-',2},
[{trace_ts,Controller,call,
{?MODULE,fn,[TraceeName,[all],
[call,return_to,send,'receive',
@@ -646,7 +646,7 @@ destructive_in_test_bif(Config) when is_list(Config) ->
([],[{'_',[],[{message,{get_tcw}}]}],trace),
ok.
-%% Test that the comparision between boxed and small does not crash emulator
+%% Test that the comparison between boxed and small does not crash emulator
boxed_and_small(Config) when is_list(Config) ->
{ok, Node} = start_node(match_spec_suite_other),
ok = rpc:call(Node,?MODULE,do_boxed_and_small,[]),
diff --git a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
index e011aadce9..46ee8b5540 100644
--- a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
+++ b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
@@ -24,7 +24,7 @@
* Author: Rickard Green
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#ifdef __WIN32__
# ifndef WIN32_LEAN_AND_MEAN
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 9c1694fa8a..693db42e58 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -21,15 +21,24 @@
-module(nif_SUITE).
%%-define(line_trace,true).
--define(CHECK(Exp,Got), check(Exp,Got,?LINE)).
+-define(CHECK(Exp,Got), Exp = check(Exp,Got,?LINE)).
%%-define(CHECK(Exp,Got), Exp = Got).
-include_lib("common_test/include/ct.hrl").
--export([all/0, suite/0,
+-export([all/0, suite/0, groups/0,
+ init_per_group/2, end_per_group/2,
init_per_testcase/2, end_per_testcase/2,
- basic/1, reload/1, upgrade/1, heap_frag/1,
+ basic/1, reload_error/1, upgrade/1, heap_frag/1,
t_on_load/1,
+ select/1,
+ monitor_process_a/1,
+ monitor_process_b/1,
+ monitor_process_c/1,
+ monitor_process_d/1,
+ demonitor_process/1,
+ monitor_frenzy/1,
+ hipe/1,
types/1, many_args/1, binaries/1, get_string/1, get_atom/1,
maps/1,
api_macros/1,
@@ -52,27 +61,25 @@
-export([many_args_100/100]).
-
-%% -export([lib_version/0,call_history/0,hold_nif_mod_priv_data/1,nif_mod_call_history/0,
-%% list_seq/1,type_test/0,tuple_2_list/1,is_identical/2,compare/2,
-%% clone_bin/1,make_sub_bin/3,string_to_bin/2,atom_to_bin/2,macros/1,
-%% tuple_2_list_and_tuple/1,iolist_2_bin/1,get_resource_type/1,alloc_resource/2,
-%% make_resource/1,get_resource/2,release_resource/1,last_resource_dtor_call/0, suite/0,
-%% make_new_resource/2,make_new_resource_binary/1,send_list_seq/2,send_new_blob/2,
-%% alloc_msgenv/0,clear_msgenv/1,grow_blob/2,send_blob/2,send_blob_thread/3,
-%% join_send_thread/1]).
-
-
-define(nif_stub,nif_stub_error(?LINE)).
+-define(is_resource, is_reference).
+
suite() -> [{ct_hooks,[ts_install_cth]}].
-all() ->
- [basic, reload, upgrade, heap_frag, types, many_args,
- t_on_load,
+all() ->
+ [basic]
+ ++
+ [{group, G} || G <- api_groups()]
+ ++
+ [reload_error, heap_frag, types, many_args,
+ select,
+ {group, monitor},
+ monitor_frenzy,
+ hipe,
binaries, get_string, get_atom, maps, api_macros, from_array,
iolist_as_binary, resource, resource_binary,
- resource_takeover, threading, send, send2, send3,
+ threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
make_string,reverse_list_test,
otp_9828,
@@ -85,9 +92,49 @@ all() ->
nif_port_command,
nif_snprintf].
+groups() ->
+ [{G, [], api_repeaters()} || G <- api_groups()]
+ ++
+ [{monitor, [], [monitor_process_a,
+ monitor_process_b,
+ monitor_process_c,
+ monitor_process_d,
+ demonitor_process]}].
+
+
+api_groups() -> [api_latest, api_2_4, api_2_0].
+
+api_repeaters() -> [upgrade, resource_takeover, t_on_load].
+
+init_per_group(api_2_4, Config) ->
+ [{nif_api_version, ".2_4"} | Config];
+init_per_group(api_2_0, Config) ->
+ case {os:type(),erlang:system_info({wordsize, internal})} of
+ {{win32,_}, 8} ->
+ %% ERL_NIF_TERM was declared as 32-bit 'long' until 2.3
+ {skip, "API 2.0 buggy on Windows 64-bit"};
+ _ ->
+ [{nif_api_version, ".2_0"} | Config]
+ end;
+init_per_group(_, Config) -> Config.
+
+end_per_group(_,_) -> ok.
+
init_per_testcase(t_on_load, Config) ->
ets:new(nif_SUITE, [named_table]),
Config;
+init_per_testcase(hipe, Config) ->
+ case erlang:system_info(hipe_architecture) of
+ undefined -> {skip, "HiPE is disabled"};
+ _ -> Config
+ end;
+init_per_testcase(select, Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip, "Test not yet implemented for Windows"};
+ _ ->
+ Config
+ end;
init_per_testcase(_Case, Config) ->
Config.
@@ -112,8 +159,8 @@ basic(Config) when is_list(Config) ->
true = lists:member(?MODULE, erlang:system_info(taints)),
ok.
-%% Test reload callback in nif lib
-reload(Config) when is_list(Config) ->
+%% Test that the old reload feature now always fails
+reload_error(Config) when is_list(Config) ->
TmpMem = tmpmem(),
ensure_lib_loaded(Config),
@@ -127,20 +174,20 @@ reload(Config) when is_list(Config) ->
hold_nif_mod_priv_data(nif_mod:get_priv_data_ptr()),
[{load,1,1,101},{get_priv_data_ptr,1,2,102}] = nif_mod_call_history(),
- ok = nif_mod:load_nif_lib(Config, 2),
- 2 = nif_mod:lib_version(),
- [{reload,2,1,201},{lib_version,2,2,202}] = nif_mod_call_history(),
+ {error, {reload, _}} = nif_mod:load_nif_lib(Config, 2),
+ 1 = nif_mod:lib_version(),
+ [{lib_version,1,3,103}] = nif_mod_call_history(),
- ok = nif_mod:load_nif_lib(Config, 1),
+ {error, {reload, _}} = nif_mod:load_nif_lib(Config, 1),
1 = nif_mod:lib_version(),
- [{reload,1,1,101},{lib_version,1,2,102}] = nif_mod_call_history(),
+ [{lib_version,1,4,104}] = nif_mod_call_history(),
true = erlang:delete_module(nif_mod),
[] = nif_mod_call_history(),
%%false= check_process_code(Pid, nif_mod),
true = erlang:purge_module(nif_mod),
- [{unload,1,3,103}] = nif_mod_call_history(),
+ [{unload,1,5,105}] = nif_mod_call_history(),
true = lists:member(?MODULE, erlang:system_info(taints)),
true = lists:member(nif_mod, erlang:system_info(taints)),
@@ -148,7 +195,7 @@ reload(Config) when is_list(Config) ->
ok.
%% Test upgrade callback in nif lib
-upgrade(Config) when is_list(Config) ->
+upgrade(Config) when is_list(Config) ->
TmpMem = tmpmem(),
ensure_lib_loaded(Config),
@@ -299,6 +346,8 @@ t_on_load(Config) when is_list(Config) ->
%% Use ETS to tell nif_mod:on_load what to do
ets:insert(nif_SUITE, {data_dir, Data}),
ets:insert(nif_SUITE, {lib_version, 1}),
+ API = proplists:get_value(nif_api_version, Config, ""),
+ ets:insert(nif_SUITE, {nif_api_version, API}),
{module,nif_mod} = code:load_binary(nif_mod,File,Bin),
hold_nif_mod_priv_data(nif_mod:get_priv_data_ptr()),
[{load,1,1,101},{get_priv_data_ptr,1,2,102}] = nif_mod_call_history(),
@@ -411,6 +460,375 @@ t_on_load(Config) when is_list(Config) ->
verify_tmpmem(TmpMem),
ok.
+-define(ERL_NIF_SELECT_READ, (1 bsl 0)).
+-define(ERL_NIF_SELECT_WRITE, (1 bsl 1)).
+-define(ERL_NIF_SELECT_STOP, (1 bsl 2)).
+
+-define(ERL_NIF_SELECT_STOP_CALLED, (1 bsl 0)).
+-define(ERL_NIF_SELECT_STOP_SCHEDULED, (1 bsl 1)).
+-define(ERL_NIF_SELECT_INVALID_EVENT, (1 bsl 2)).
+-define(ERL_NIF_SELECT_FAILED, (1 bsl 3)).
+
+
+select(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+
+ Ref = make_ref(),
+ Ref2 = make_ref(),
+ {{R, R_ptr}, {W, W_ptr}} = pipe_nif(),
+ ok = write_nif(W, <<"hej">>),
+ <<"hej">> = read_nif(R, 3),
+
+ %% Wait for read
+ eagain = read_nif(R, 3),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref),
+ [] = flush(),
+ ok = write_nif(W, <<"hej">>),
+ [{select, R, Ref, ready_input}] = flush(),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,self(),Ref2),
+ [{select, R, Ref2, ready_input}] = flush(),
+ Papa = self(),
+ Pid = spawn_link(fun() ->
+ [{select, R, Ref, ready_input}] = flush(),
+ Papa ! {self(), done}
+ end),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,Pid,Ref),
+ {Pid, done} = receive_any(1000),
+ <<"hej">> = read_nif(R, 3),
+
+ %% Wait for write
+ Written = write_full(W, $a),
+ 0 = select_nif(W,?ERL_NIF_SELECT_WRITE,W,self(),Ref),
+ [] = flush(),
+ Written = read_nif(R,byte_size(Written)),
+ [{select, W, Ref, ready_output}] = flush(),
+
+ %% Close write and wait for EOF
+ eagain = read_nif(R, 1),
+ check_stop_ret(select_nif(W,?ERL_NIF_SELECT_STOP,W,null,Ref)),
+ [{fd_resource_stop, W_ptr, _}] = flush(),
+ {1, {W_ptr,_}} = last_fd_stop_call(),
+ true = is_closed_nif(W),
+ [] = flush(),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,self(),Ref),
+ [{select, R, Ref, ready_input}] = flush(),
+ eof = read_nif(R,1),
+
+ check_stop_ret(select_nif(R,?ERL_NIF_SELECT_STOP,R,null,Ref)),
+ [{fd_resource_stop, R_ptr, _}] = flush(),
+ {1, {R_ptr,_}} = last_fd_stop_call(),
+ true = is_closed_nif(R),
+
+ select_2(Config).
+
+select_2(Config) ->
+ erlang:garbage_collect(),
+ {_,_,2} = last_resource_dtor_call(),
+
+ Ref1 = make_ref(),
+ Ref2 = make_ref(),
+ {{R, R_ptr}, {W, W_ptr}} = pipe_nif(),
+
+ %% Change ref
+ eagain = read_nif(R, 1),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref1),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,self(),Ref2),
+
+ [] = flush(),
+ ok = write_nif(W, <<"hej">>),
+ [{select, R, Ref2, ready_input}] = flush(),
+ <<"hej">> = read_nif(R, 3),
+
+ %% Change pid
+ eagain = read_nif(R, 1),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref1),
+ Papa = self(),
+ spawn_link(fun() ->
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref1),
+ [] = flush(),
+ Papa ! sync,
+ [{select, R, Ref1, ready_input}] = flush(),
+ <<"hej">> = read_nif(R, 3),
+ Papa ! done
+ end),
+ sync = receive_any(),
+ ok = write_nif(W, <<"hej">>),
+ done = receive_any(),
+ [] = flush(),
+
+ check_stop_ret(select_nif(R,?ERL_NIF_SELECT_STOP,R,null,Ref1)),
+ [{fd_resource_stop, R_ptr, _}] = flush(),
+ {1, {R_ptr,_}} = last_fd_stop_call(),
+ true = is_closed_nif(R),
+
+ %% Stop without previous read/write select
+ ?ERL_NIF_SELECT_STOP_CALLED = select_nif(W,?ERL_NIF_SELECT_STOP,W,null,Ref1),
+ [{fd_resource_stop, W_ptr, 1}] = flush(),
+ {1, {W_ptr,1}} = last_fd_stop_call(),
+ true = is_closed_nif(W),
+
+ select_3(Config).
+
+select_3(_Config) ->
+ erlang:garbage_collect(),
+ {_,_,2} = last_resource_dtor_call(),
+ ok.
+
+check_stop_ret(?ERL_NIF_SELECT_STOP_CALLED) -> ok;
+check_stop_ret(?ERL_NIF_SELECT_STOP_SCHEDULED) -> ok.
+
+write_full(W, C) ->
+ write_full(W, C, <<>>).
+write_full(W, C, Acc) ->
+ case write_nif(W, <<C>>) of
+ ok ->
+ write_full(W, (C+1) band 255, <<Acc/binary, C>>);
+ {eagain,0} ->
+ Acc
+ end.
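
The select cases above follow the enif_select notification protocol: after read interest is registered, the chosen process receives {select, Resource, Ref, ready_input} (ready_output for write readiness). A minimal sketch of a blocking read built on the suite's own select_nif/5 and read_nif/2 wrappers:

    read_when_ready(R, Ref, Count) ->
        0 = select_nif(R, ?ERL_NIF_SELECT_READ, R, self(), Ref),
        receive
            {select, R, Ref, ready_input} -> read_nif(R, Count)
        after 5000 -> timeout
        end.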
+
+%% Basic monitoring of one process that terminates
+monitor_process_a(Config) ->
+ ensure_lib_loaded(Config),
+
+ F = fun(Terminator, UseMsgEnv) ->
+ Pid = spawn(fun() ->
+ receive
+ {exit, Arg} -> exit(Arg);
+ return -> ok;
+ BadMatch -> goodmatch = BadMatch
+ end
+ end),
+ R_ptr = alloc_monitor_resource_nif(),
+ {0, Mon1} = monitor_process_nif(R_ptr, Pid, UseMsgEnv, self()),
+ [R_ptr] = monitored_by(Pid),
+ Terminator(Pid),
+ [{monitor_resource_down, R_ptr, Pid, Mon2}] = flush(),
+ 0 = compare_monitors_nif(Mon1, Mon2),
+ [] = last_resource_dtor_call(),
+ ok = release_resource(R_ptr),
+ {R_ptr, _, 1} = last_resource_dtor_call()
+ end,
+
+ T1 = fun(Pid) -> Pid ! {exit, 17} end,
+ T2 = fun(Pid) -> Pid ! return end,
+ T3 = fun(Pid) -> Pid ! badmatch end,
+ T4 = fun(Pid) -> exit(Pid, 18) end,
+
+ [F(T, UME) || T <- [T1,T2,T3,T4], UME <- [true, false]],
+
+ ok.
+
+%% Test auto-demonitoring at resource destruction
+monitor_process_b(Config) ->
+ ensure_lib_loaded(Config),
+
+ Pid = spawn_link(fun() ->
+ receive
+ return -> ok
+ end
+ end),
+ R_ptr = alloc_monitor_resource_nif(),
+ {0,_} = monitor_process_nif(R_ptr, Pid, true, self()),
+ [R_ptr] = monitored_by(Pid),
+ ok = release_resource(R_ptr),
+ [] = flush(),
+ {R_ptr, _, 1} = last_resource_dtor_call(),
+ [] = monitored_by(Pid),
+ Pid ! return,
+ ok.
+
+%% Test termination of monitored process holding last resource ref
+monitor_process_c(Config) ->
+ ensure_lib_loaded(Config),
+
+ Papa = self(),
+ Pid = spawn_link(fun() ->
+ R_ptr = alloc_monitor_resource_nif(),
+ {0,Mon} = monitor_process_nif(R_ptr, self(), true, Papa),
+ [R_ptr] = monitored_by(self()),
+ put(store, make_resource(R_ptr)),
+ ok = release_resource(R_ptr),
+ [] = last_resource_dtor_call(),
+ Papa ! {self(), done, R_ptr, Mon},
+ exit
+ end),
+ [{Pid, done, R_ptr, Mon1},
+ {monitor_resource_down, R_ptr, Pid, Mon2}] = flush(),
+ compare_monitors_nif(Mon1, Mon2),
+ {R_ptr, _, 1} = last_resource_dtor_call(),
+ ok.
+
+%% Test race of resource dtor called when monitored process is exiting
+monitor_process_d(Config) ->
+ ensure_lib_loaded(Config),
+
+ Papa = self(),
+ {Target,TRef} = spawn_monitor(fun() ->
+ nothing = receive_any()
+ end),
+
+ R_ptr = alloc_monitor_resource_nif(),
+ {0,_} = monitor_process_nif(R_ptr, Target, true, self()),
+ [Papa, R_ptr] = monitored_by(Target),
+
+ exit(Target, die),
+ ok = release_resource(R_ptr),
+
+ [{'DOWN', TRef, process, Target, die}] = flush(), %% no monitor_resource_down
+ {R_ptr, _, 1} = last_resource_dtor_call(),
+
+ ok.
+
+%% Test basic demonitoring
+demonitor_process(Config) ->
+ ensure_lib_loaded(Config),
+
+ Pid = spawn_link(fun() ->
+ receive
+ return -> ok
+ end
+ end),
+ R_ptr = alloc_monitor_resource_nif(),
+ {0,MonBin1} = monitor_process_nif(R_ptr, Pid, true, self()),
+ [R_ptr] = monitored_by(Pid),
+ {0,MonBin2} = monitor_process_nif(R_ptr, Pid, true, self()),
+ [R_ptr, R_ptr] = monitored_by(Pid),
+ 0 = demonitor_process_nif(R_ptr, MonBin1),
+ [R_ptr] = monitored_by(Pid),
+ 1 = demonitor_process_nif(R_ptr, MonBin1),
+ 0 = demonitor_process_nif(R_ptr, MonBin2),
+ [] = monitored_by(Pid),
+ 1 = demonitor_process_nif(R_ptr, MonBin2),
+
+ ok = release_resource(R_ptr),
+ [] = flush(),
+ {R_ptr, _, 1} = last_resource_dtor_call(),
+ [] = monitored_by(Pid),
+ Pid ! return,
+ ok.
+
+
+monitored_by(Pid) ->
+ {monitored_by, List0} = process_info(Pid, monitored_by),
+ List1 = lists:map(fun(E) when ?is_resource(E) ->
+ {Ptr, _} = get_resource(monitor_resource_type, E),
+ Ptr;
+ (E) -> E
+ end,
+ List0),
+ erlang:garbage_collect(),
+ lists:sort(List1).
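
The monitor_process_* cases all exercise the same round trip: monitor_process_nif/4 returns {0, Monitor} on success, and when the monitored process dies the resource down callback sends {monitor_resource_down, ResourcePtr, Pid, Monitor} to the configured receiver. A minimal sketch using the suite's wrappers:

    monitor_round_trip(Pid) ->
        R = alloc_monitor_resource_nif(),
        {0, Mon1} = monitor_process_nif(R, Pid, true, self()),
        exit(Pid, kill),
        [{monitor_resource_down, R, Pid, Mon2}] = flush(),
        0 = compare_monitors_nif(Mon1, Mon2),
        ok = release_resource(R).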
+
+-define(FRENZY_RAND_BITS, 25).
+
+%% Exercise monitoring from NIF resources by randomly
+%% creating/destroying processes, resources and monitors.
+monitor_frenzy(Config) ->
+ ensure_lib_loaded(Config),
+
+ Procs1 = processes(),
+ io:format("~p processes before: ~p\n", [length(Procs1), Procs1]),
+
+ %% Spawn first worker process
+ Master = self(),
+ spawn_link(fun() ->
+ SelfPix = monitor_frenzy_nif(init, ?FRENZY_RAND_BITS, 0, 0),
+ unlink(Master),
+ frenzy(SelfPix, {undefined, []})
+ end),
+ receive after 5*1000 -> ok end,
+
+ io:format("stats = ~p\n", [monitor_frenzy_nif(stats, 0, 0, 0)]),
+
+ Pids = monitor_frenzy_nif(stop, 0, 0, 0),
+ io:format("stats = ~p\n", [monitor_frenzy_nif(stats, 0, 0, 0)]),
+
+ lists:foreach(fun(P) ->
+ MRef = monitor(process, P),
+ exit(P, stop),
+ {'DOWN', MRef, process, P, _} = receive_any()
+ end,
+ Pids),
+
+ io:format("stats = ~p\n", [monitor_frenzy_nif(stats, 0, 0, 0)]),
+
+ Procs2 = processes(),
+ io:format("~p processes after: ~p\n", [length(Procs2), Procs2]),
+ ok.
+
+
+frenzy(_SelfPix, done) ->
+ ok;
+frenzy(SelfPix, State0) ->
+ Rnd = rand:uniform(1 bsl (?FRENZY_RAND_BITS+2)) - 1,
+ Op = Rnd band 3,
+ State1 = frenzy_do_op(SelfPix, Op, (Rnd bsr 2), State0),
+ frenzy(SelfPix, State1).
+
+frenzy_do_op(SelfPix, Op, Rnd, {Pid0,RBins}=State0) ->
+ case Op of
+ 0 -> % add/remove process
+ Papa = self(),
+ NewPid = case Pid0 of
+ undefined -> % Prepare new process to be added
+ spawn(fun() ->
+ MRef = monitor(process, Papa),
+ case receive_any() of
+ {go, MyPix, MyState} ->
+ demonitor(MRef, [flush]),
+ frenzy(MyPix, MyState);
+ {'DOWN', MRef, process, Papa, _} ->
+ ok
+ end
+ end);
+ _ ->
+ Pid0
+ end,
+ case monitor_frenzy_nif(Op, Rnd, SelfPix, NewPid) of
+ NewPix when is_integer(NewPix) ->
+ NewPid ! {go, NewPix, {undefined, []}},
+ {undefined, RBins};
+ ExitPid when is_pid(ExitPid) ->
+ false = (ExitPid =:= self()),
+ exit(ExitPid,die),
+ {NewPid, RBins};
+ done ->
+ done
+ end;
+
+ 3 ->
+ %% Try to provoke a revival race of the resource from its magic-ref external format
+ _ = [binary_to_term(B) || B <- RBins],
+ {Pid0, []};
+ _ ->
+ case monitor_frenzy_nif(Op, Rnd, SelfPix, undefined) of
+ Rsrc when ?is_resource(Rsrc) ->
+ %% Store resource in ext format only, for later revival
+ State1 = {Pid0, [term_to_binary(Rsrc) | RBins]},
+ gc_and_return(State1);
+ ok -> State0;
+ 0 -> State0;
+ 1 -> State0;
+ done -> done
+ end
+ end.
+
+gc_and_return(RetVal) ->
+ erlang:garbage_collect(),
+ RetVal.
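
The frenzy loop packs its randomness into a single integer and peels bits off it: the low two bits pick the operation and the rest are handed to the NIF, mirroring the C-side rand_bits() consumer. A minimal sketch of the slicing (25 matches ?FRENZY_RAND_BITS):

    next_op() ->
        Bits = 25,
        Rnd = rand:uniform(1 bsl (Bits + 2)) - 1,  %% Bits+2 random bits
        {Rnd band 3, Rnd bsr 2}.                   %% {Op, remaining bits}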
+
+hipe(Config) when is_list(Config) ->
+ Data = proplists:get_value(data_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
+ Src = filename:join(Data, "hipe_compiled"),
+ {ok,hipe_compiled} = c:c(Src, [{outdir,Priv},native]),
+ true = code:is_module_native(hipe_compiled),
+ {error, {notsup,_}} = hipe_compiled:try_load_nif(),
+ true = code:delete(hipe_compiled),
+ false = code:purge(hipe_compiled),
+ ok.
+
%% Test NIF building heap fragments
heap_frag(Config) when is_list(Config) ->
@@ -655,6 +1073,7 @@ maps(Config) when is_list(Config) ->
{1, M2} = make_map_remove_nif(M2, "key3"),
{0, undefined} = make_map_remove_nif(self(), key),
+ verify_tmpmem(TmpMem),
ok.
%% Test macros enif_make_list<N> and enif_make_tuple<N>
@@ -714,7 +1133,7 @@ resource_hugo_do(Type) ->
HugoBin = <<"Hugo Hacker">>,
HugoPtr = alloc_resource(Type, HugoBin),
Hugo = make_resource(HugoPtr),
- <<>> = Hugo,
+ true = is_reference(Hugo),
release_resource(HugoPtr),
erlang:garbage_collect(),
{HugoPtr,HugoBin} = get_resource(Type,Hugo),
@@ -748,7 +1167,7 @@ resource_otto_do(Type) ->
OttoBin = <<"Otto Ordonnans">>,
OttoPtr = alloc_resource(Type, OttoBin),
Otto = make_resource(OttoPtr),
- <<>> = Otto,
+ true = is_reference(Otto),
%% forget resource term but keep referenced by NIF
{OttoPtr, OttoBin}.
@@ -771,8 +1190,9 @@ resource_new_do2(Type) ->
BinB = <<"NewB">>,
ResA = make_new_resource(Type, BinA),
ResB = make_new_resource(Type, BinB),
- <<>> = ResA,
- <<>> = ResB,
+ true = is_reference(ResA),
+ true = is_reference(ResB),
+ true = (ResA /= ResB),
{PtrA,BinA} = get_resource(Type, ResA),
{PtrB,BinB} = get_resource(Type, ResB),
true = (PtrA =/= PtrB),
@@ -828,7 +1248,7 @@ resource_binary_do() ->
-define(RT_CREATE,1).
-define(RT_TAKEOVER,2).
-%% Test resource takeover by module reload and upgrade
+%% Test resource takeover by module upgrade
resource_takeover(Config) when is_list(Config) ->
TmpMem = tmpmem(),
ensure_lib_loaded(Config),
@@ -893,6 +1313,7 @@ resource_takeover(Config) when is_list(Config) ->
ok = forget_resource(NGX1),
?CHECK([], nif_mod_call_history()), % no dtor
+ {module,nif_mod} = erlang:load_module(nif_mod,ModBin),
ok = nif_mod:load_nif_lib(Config, 2,
[{resource_type, 0, ?RT_TAKEOVER, "resource_type_A",resource_dtor_A,
?RT_TAKEOVER},
@@ -911,7 +1332,9 @@ resource_takeover(Config) when is_list(Config) ->
{resource_type, 4, ?RT_CREATE, "resource_type_null_goneY",null,
?RT_CREATE}
]),
- ?CHECK([{reload,2,1,201}], nif_mod_call_history()),
+ ?CHECK([{upgrade,2,1,201}], nif_mod_call_history()),
+ true = erlang:purge_module(nif_mod),
+ ?CHECK([], nif_mod_call_history()), % BGX2 keeping lib loaded
BinA2 = read_resource(0,A2),
ok = forget_resource(A2),
@@ -924,8 +1347,8 @@ resource_takeover(Config) when is_list(Config) ->
?CHECK([], nif_mod_call_history()), % no dtor
ok = forget_resource(BGX2), % calling dtor in orphan library v1 still loaded
- ?CHECK([{{resource_dtor_B_v1,BinBGX2},1,6,106}], nif_mod_call_history()),
- % How to test that lib v1 is closed here?
+ ?CHECK([{{resource_dtor_B_v1,BinBGX2},1,6,106}, {unload,1,7,107}],
+ nif_mod_call_history()),
ok = forget_resource(NGX2),
?CHECK([], nif_mod_call_history()), % no dtor
@@ -1140,7 +1563,7 @@ resource_takeover(Config) when is_list(Config) ->
[{load,1,1,101},{get_priv_data_ptr,1,2,102}] = nif_mod_call_history(),
{NA7,BinNA7} = make_resource(0, Holder, "NA7"),
- {AN7,BinAN7} = make_resource(1, Holder, "AN7"),
+ {AN7,_BinAN7} = make_resource(1, Holder, "AN7"),
ok = forget_resource(NA7),
[{{resource_dtor_A_v1,BinNA7},1,_,_}] = nif_mod_call_history(),
@@ -1148,6 +1571,9 @@ resource_takeover(Config) when is_list(Config) ->
ok = forget_resource(AN7),
[] = nif_mod_call_history(),
+ true = erlang:delete_module(nif_mod),
+ true = erlang:purge_module(nif_mod),
+
true = lists:member(?MODULE, erlang:system_info(taints)),
true = lists:member(nif_mod, erlang:system_info(taints)),
verify_tmpmem(TmpMem),
@@ -1221,11 +1647,19 @@ threading_do(Config) ->
ok = tester:load_nif_lib(Config, "basic"),
ok = tester:run(),
+ erlang:load_module(tester,ModBin),
+ erlang:purge_module(tester),
ok = tester:load_nif_lib(Config, "rwlock"),
ok = tester:run(),
+ erlang:load_module(tester,ModBin),
+ erlang:purge_module(tester),
ok = tester:load_nif_lib(Config, "tsd"),
- ok = tester:run().
+ ok = tester:run(),
+
+ erlang:delete_module(tester),
+ erlang:purge_module(tester).
+
%% Test NIF message sending
send(Config) when is_list(Config) ->
@@ -1513,13 +1947,13 @@ send3_new_state(State, Blob) ->
neg(Config) when is_list(Config) ->
TmpMem = tmpmem(),
{'EXIT',{badarg,_}} = (catch erlang:load_nif(badarg, 0)),
- {error,{load_failed,_}} = erlang:load_nif("pink_unicorn", 0),
Data = proplists:get_value(data_dir, Config),
File = filename:join(Data, "nif_mod"),
{ok,nif_mod,Bin} = compile:file(File, [binary,return_errors]),
{module,nif_mod} = erlang:load_module(nif_mod,Bin),
+ {error,{load_failed,_}} = nif_mod:load_nif_lib(Config, 0),
{error,{bad_lib,_}} = nif_mod:load_nif_lib(Config, no_init),
verify_tmpmem(TmpMem),
ok.
@@ -1620,7 +2054,7 @@ otp_9828(Config) ->
ensure_lib_loaded(Config, 1),
otp_9828_loop(<<"I'm alive!">>, 1000).
-otp_9828_loop(Bin, 0) ->
+otp_9828_loop(_Bin, 0) ->
ok;
otp_9828_loop(Bin, Val) ->
WrtBin = <<Bin/binary, Val:32>>,
@@ -1640,7 +2074,8 @@ consume_timeslice(Config) when is_list(Config) ->
end.
consume_timeslice_test(Config) when is_list(Config) ->
- CONTEXT_REDS = 2000,
+ ensure_lib_loaded(Config),
+ CONTEXT_REDS = 4000,
Me = self(),
Go = make_ref(),
RedDiff = make_ref(),
@@ -1716,7 +2151,7 @@ consume_timeslice_test(Config) when is_list(Config) ->
io:format("Reductions = ~p~n", [Reductions]),
ok;
{RedDiff, Reductions} ->
- ct:fail({unexpected_reduction_count, Reductions})
+ ct:fail({unexpected_reduction_count, Reductions, ExpReds})
end,
none = next_msg(P),
@@ -1861,6 +2296,19 @@ call(Pid,Cmd) ->
receive_any() ->
receive M -> M end.
+receive_any(Timeout) ->
+ receive M -> M
+ after Timeout -> timeout end.
+
+flush() ->
+ flush(10).
+flush(Timeout) ->
+ receive M ->
+ [M | flush(Timeout)]
+ after Timeout ->
+ []
+ end.
+
repeat(0, _, Arg) ->
Arg;
repeat(N, Fun, Arg0) ->
@@ -1870,7 +2318,8 @@ check(Exp,Got,Line) ->
case Got of
Exp -> Exp;
_ ->
- io:format("CHECK at ~p: Expected ~p but got ~p\n",[Line,Exp,Got]),
+ io:format("CHECK at line ~p\nExpected: ~p\nGot : ~p\n",
+ [Line,Exp,Got]),
Got
end.
@@ -1891,7 +2340,7 @@ nif_raise_exceptions(NifFunc) ->
-define(ERL_NIF_TIME_ERROR, -9223372036854775808).
-define(TIME_UNITS, [second, millisecond, microsecond, nanosecond]).
-nif_monotonic_time(Config) ->
+nif_monotonic_time(_Config) ->
?ERL_NIF_TIME_ERROR = monotonic_time(invalid_time_unit),
mtime_loop(1000000).
@@ -1916,7 +2365,7 @@ chk_mtime([TU|TUs]) ->
end,
chk_mtime(TUs).
-nif_time_offset(Config) ->
+nif_time_offset(_Config) ->
?ERL_NIF_TIME_ERROR = time_offset(invalid_time_unit),
toffs_loop(1000000).
@@ -1954,7 +2403,7 @@ chk_toffs([TU|TUs]) ->
end,
chk_toffs(TUs).
-nif_convert_time_unit(Config) ->
+nif_convert_time_unit(_Config) ->
?ERL_NIF_TIME_ERROR = convert_time_unit(0, second, invalid_time_unit),
?ERL_NIF_TIME_ERROR = convert_time_unit(0, invalid_time_unit, second),
?ERL_NIF_TIME_ERROR = convert_time_unit(0, invalid_time_unit, invalid_time_unit),
@@ -2223,6 +2672,17 @@ term_to_binary_nif(_, _) -> ?nif_stub.
binary_to_term_nif(_, _, _) -> ?nif_stub.
port_command_nif(_, _) -> ?nif_stub.
format_term_nif(_,_) -> ?nif_stub.
+select_nif(_,_,_,_,_) -> ?nif_stub.
+pipe_nif() -> ?nif_stub.
+write_nif(_,_) -> ?nif_stub.
+read_nif(_,_) -> ?nif_stub.
+is_closed_nif(_) -> ?nif_stub.
+last_fd_stop_call() -> ?nif_stub.
+alloc_monitor_resource_nif() -> ?nif_stub.
+monitor_process_nif(_,_,_,_) -> ?nif_stub.
+demonitor_process_nif(_,_) -> ?nif_stub.
+compare_monitors_nif(_,_) -> ?nif_stub.
+monitor_frenzy_nif(_,_,_,_) -> ?nif_stub.
%% maps
is_map_nif(_) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/Makefile.src b/erts/emulator/test/nif_SUITE_data/Makefile.src
index fbb8978771..de06026780 100644
--- a/erts/emulator/test/nif_SUITE_data/Makefile.src
+++ b/erts/emulator/test/nif_SUITE_data/Makefile.src
@@ -2,7 +2,13 @@
NIF_LIBS = nif_SUITE.1@dll@ \
nif_mod.1@dll@ \
nif_mod.2@dll@ \
- nif_mod.3@dll@
+ nif_mod.3@dll@ \
+ nif_mod.1.2_0@dll@ \
+ nif_mod.2.2_0@dll@ \
+ nif_mod.3.2_0@dll@ \
+ nif_mod.1.2_4@dll@ \
+ nif_mod.2.2_4@dll@ \
+ nif_mod.3.2_4@dll@
all: $(NIF_LIBS) basic@dll@ rwlock@dll@ tsd@dll@ echo_drv@dll@
diff --git a/erts/emulator/test/nif_SUITE_data/hipe_compiled.erl b/erts/emulator/test/nif_SUITE_data/hipe_compiled.erl
new file mode 100644
index 0000000000..84ddbc8d63
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/hipe_compiled.erl
@@ -0,0 +1,6 @@
+-module(hipe_compiled).
+
+-export([try_load_nif/0]).
+
+try_load_nif() ->
+ erlang:load_nif("doesn't matter", 0).
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index f2b1ef9d24..8fe5ee809a 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -17,18 +17,41 @@
*
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <limits.h>
+#include <errno.h>
#ifndef __WIN32__
#include <unistd.h>
+#include <fcntl.h>
#endif
#include "nif_mod.h"
+#if 0
+static ErlNifMutex* dbg_trace_lock;
+#define DBG_TRACE_INIT dbg_trace_lock = enif_mutex_create("nif_SUITE.DBG_TRACE")
+#define DBG_TRACE_FINI enif_mutex_destroy(dbg_trace_lock)
+#define DBG_TRACE_LOCK enif_mutex_lock(dbg_trace_lock)
+#define DBG_TRACE_UNLOCK enif_mutex_unlock(dbg_trace_lock)
+#define DBG_TRACE0(FMT) do {DBG_TRACE_LOCK; enif_fprintf(stderr, FMT); DBG_TRACE_UNLOCK; }while(0)
+#define DBG_TRACE1(FMT, A) do {DBG_TRACE_LOCK; enif_fprintf(stderr, FMT, A); DBG_TRACE_UNLOCK; }while(0)
+#define DBG_TRACE2(FMT, A, B) do {DBG_TRACE_LOCK; enif_fprintf(stderr, FMT, A, B); DBG_TRACE_UNLOCK; }while(0)
+#define DBG_TRACE3(FMT, A, B, C) do {DBG_TRACE_LOCK; enif_fprintf(stderr, FMT, A, B, C); DBG_TRACE_UNLOCK; }while(0)
+#define DBG_TRACE4(FMT, A, B, C, D) do {DBG_TRACE_LOCK; enif_fprintf(stderr, FMT, A, B, C, D); DBG_TRACE_UNLOCK; }while(0)
+#else
+#define DBG_TRACE_INIT
+#define DBG_TRACE_FINI
+#define DBG_TRACE0(FMT)
+#define DBG_TRACE1(FMT, A)
+#define DBG_TRACE2(FMT, A, B)
+#define DBG_TRACE3(FMT, A, B, C)
+#define DBG_TRACE4(FMT, A, B, C, D)
+#endif
+
static int static_cntA; /* zero by default */
static int static_cntB = NIF_SUITE_LIB_VER * 100;
@@ -42,7 +65,17 @@ static ERL_NIF_TERM atom_second;
static ERL_NIF_TERM atom_millisecond;
static ERL_NIF_TERM atom_microsecond;
static ERL_NIF_TERM atom_nanosecond;
-
+static ERL_NIF_TERM atom_eagain;
+static ERL_NIF_TERM atom_eof;
+static ERL_NIF_TERM atom_error;
+static ERL_NIF_TERM atom_fd_resource_stop;
+static ERL_NIF_TERM atom_monitor_resource_type;
+static ERL_NIF_TERM atom_monitor_resource_down;
+static ERL_NIF_TERM atom_init;
+static ERL_NIF_TERM atom_stats;
+static ERL_NIF_TERM atom_done;
+static ERL_NIF_TERM atom_stop;
+static ERL_NIF_TERM atom_null;
typedef struct
{
@@ -102,23 +135,60 @@ struct binary_resource {
unsigned size;
};
+static ErlNifResourceType* fd_resource_type;
+static void fd_resource_dtor(ErlNifEnv* env, void* obj);
+static void fd_resource_stop(ErlNifEnv* env, void* obj, ErlNifEvent, int);
+static ErlNifResourceTypeInit fd_rt_init = {
+ fd_resource_dtor,
+ fd_resource_stop
+};
+struct fd_resource {
+ ErlNifEvent fd;
+ int was_selected;
+ ErlNifPid pid;
+};
+
+static ErlNifResourceType* monitor_resource_type;
+static void monitor_resource_dtor(ErlNifEnv* env, void* obj);
+static void monitor_resource_down(ErlNifEnv*, void* obj, ErlNifPid*, ErlNifMonitor*);
+static ErlNifResourceTypeInit monitor_rt_init = {
+ monitor_resource_dtor,
+ NULL,
+ monitor_resource_down
+};
+struct monitor_resource {
+ ErlNifPid receiver;
+ int use_msgenv;
+};
+
+static ErlNifResourceType* frenzy_resource_type;
+static void frenzy_resource_dtor(ErlNifEnv* env, void* obj);
+static void frenzy_resource_down(ErlNifEnv*, void* obj, ErlNifPid*, ErlNifMonitor*);
+static ErlNifResourceTypeInit frenzy_rt_init = {
+ frenzy_resource_dtor,
+ NULL,
+ frenzy_resource_down
+};
+
static int get_pointer(ErlNifEnv* env, ERL_NIF_TERM term, void** pp)
{
- ErlNifUInt64 i64;
- int r = enif_get_uint64(env, term, &i64);
+ ErlNifBinary bin;
+ int r = enif_inspect_binary(env, term, &bin);
if (r) {
- *pp = (void*)i64;
+ *pp = *(void**)bin.data;
}
return r;
}
static ERL_NIF_TERM make_pointer(ErlNifEnv* env, void* p)
{
- ErlNifUInt64 i64 = (ErlNifUInt64) p;
- return enif_make_uint64(env, i64);
+ void** bin_data;
+ ERL_NIF_TERM res;
+ bin_data = (void**)enif_make_new_binary(env, sizeof(void*), &res);
+ *bin_data = p;
+ return res;
}
-
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
PrivData* data = enif_alloc(sizeof(PrivData));
@@ -127,6 +197,8 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
data->call_history = NULL;
data->nif_mod = NULL;
+ DBG_TRACE_INIT;
+
add_call(env, data, "load");
data->rt_arr[0].t = enif_open_resource_type(env,NULL,"Gold",resource_dtor,
@@ -141,6 +213,16 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
msgenv_resource_type = enif_open_resource_type(env,NULL,"nif_SUITE.msgenv",
msgenv_dtor,
ERL_NIF_RT_CREATE, NULL);
+ fd_resource_type = enif_open_resource_type_x(env, "nif_SUITE.fd",
+ &fd_rt_init,
+ ERL_NIF_RT_CREATE, NULL);
+ monitor_resource_type = enif_open_resource_type_x(env, "nif_SUITE.monitor",
+ &monitor_rt_init,
+ ERL_NIF_RT_CREATE, NULL);
+ frenzy_resource_type = enif_open_resource_type_x(env, "nif_SUITE.monitor_frenzy",
+ &frenzy_rt_init,
+ ERL_NIF_RT_CREATE, NULL);
+
atom_false = enif_make_atom(env,"false");
atom_true = enif_make_atom(env,"true");
atom_self = enif_make_atom(env,"self");
@@ -151,6 +233,17 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
atom_millisecond = enif_make_atom(env,"millisecond");
atom_microsecond = enif_make_atom(env,"microsecond");
atom_nanosecond = enif_make_atom(env,"nanosecond");
+ atom_eagain = enif_make_atom(env, "eagain");
+ atom_eof = enif_make_atom(env, "eof");
+ atom_error = enif_make_atom(env, "error");
+ atom_fd_resource_stop = enif_make_atom(env, "fd_resource_stop");
+ atom_monitor_resource_type = enif_make_atom(env, "monitor_resource_type");
+ atom_monitor_resource_down = enif_make_atom(env, "monitor_resource_down");
+ atom_init = enif_make_atom(env,"init");
+ atom_stats = enif_make_atom(env,"stats");
+ atom_done = enif_make_atom(env,"done");
+ atom_stop = enif_make_atom(env,"stop");
+ atom_null = enif_make_atom(env,"null");
*priv_data = data;
return 0;
@@ -184,14 +277,6 @@ static void resource_takeover(ErlNifEnv* env, PrivData* priv)
msgenv_resource_type = rt;
}
-static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
-{
- PrivData* priv = (PrivData*) *priv_data;
- add_call(env, priv, "reload");
- resource_takeover(env,priv);
- return 0;
-}
-
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
PrivData* priv = (PrivData*) *old_priv_data;
@@ -212,6 +297,7 @@ static void unload(ErlNifEnv* env, void* priv_data)
}
enif_free(priv_data);
}
+ DBG_TRACE_FINI;
}
static ERL_NIF_TERM lib_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -394,8 +480,7 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
ErlNifSInt64 sint64;
ErlNifUInt64 uint64;
double d;
- ERL_NIF_TERM atom, ref1, ref2, term;
- size_t len;
+ ERL_NIF_TERM atom, ref1, ref2;
sint = INT_MIN;
do {
@@ -836,6 +921,9 @@ static ERL_NIF_TERM get_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
if (enif_is_identical(argv[0], atom_binary_resource_type)) {
type.t = binary_resource_type;
}
+ else if (enif_is_identical(argv[0], atom_monitor_resource_type)) {
+ type.t = monitor_resource_type;
+ }
else {
get_pointer(env, argv[0], &type.vp);
}
@@ -1029,10 +1117,12 @@ struct make_term_info
{
ErlNifEnv* caller_env;
ErlNifEnv* dst_env;
+ int dst_env_valid;
ERL_NIF_TERM reuse[MAKE_TERM_REUSE_LEN];
unsigned reuse_push;
unsigned reuse_pull;
ErlNifResourceType* resource_type;
+ void *resource;
ERL_NIF_TERM other_term;
ERL_NIF_TERM blob;
ErlNifPid to_pid;
@@ -1058,6 +1148,7 @@ static ERL_NIF_TERM pull_term(struct make_term_info* mti)
mti->reuse_push < MAKE_TERM_REUSE_LEN) {
mti->reuse_pull = 0;
if (mti->reuse_push == 0) {
+ assert(mti->dst_env_valid);
mti->reuse[0] = enif_make_list(mti->dst_env, 0);
}
}
@@ -1111,10 +1202,6 @@ static ERL_NIF_TERM make_term_string(struct make_term_info* mti, int n)
{
return enif_make_string(mti->dst_env, "Hello!", ERL_NIF_LATIN1);
}
-static ERL_NIF_TERM make_term_ref(struct make_term_info* mti, int n)
-{
- return enif_make_ref(mti->dst_env);
-}
static ERL_NIF_TERM make_term_sub_binary(struct make_term_info* mti, int n)
{
ERL_NIF_TERM orig;
@@ -1144,12 +1231,7 @@ static ERL_NIF_TERM make_term_list0(struct make_term_info* mti, int n)
}
static ERL_NIF_TERM make_term_resource(struct make_term_info* mti, int n)
{
- void* resource = enif_alloc_resource(mti->resource_type, 10);
- ERL_NIF_TERM term;
- fill(resource, 10, n);
- term = enif_make_resource(mti->dst_env, resource);
- enif_release_resource(resource);
- return term;
+ return enif_make_resource(mti->dst_env, mti->resource);
}
static ERL_NIF_TERM make_term_new_binary(struct make_term_info* mti, int n)
{
@@ -1222,7 +1304,6 @@ static Make_term_Func* make_funcs[] = {
make_term_atom,
make_term_existing_atom,
make_term_string,
- //make_term_ref,
make_term_sub_binary,
make_term_uint,
make_term_long,
@@ -1246,6 +1327,7 @@ static unsigned num_of_make_funcs()
static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res)
{
if (n < num_of_make_funcs()) {
+ assert(mti->dst_env_valid);
*res = make_funcs[n](mti, n);
push_term(mti, *res);
return 1;
@@ -1253,22 +1335,39 @@ static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res)
return 0;
}
-static ERL_NIF_TERM make_blob(ErlNifEnv* caller_env, ErlNifEnv* dst_env,
- ERL_NIF_TERM other_term)
+
+static void
+init_make_blob(struct make_term_info *mti,
+ ErlNifEnv* caller_env,
+ ERL_NIF_TERM other_term)
{
PrivData* priv = (PrivData*) enif_priv_data(caller_env);
+ mti->caller_env = caller_env;
+ mti->resource_type = priv->rt_arr[0].t;
+ mti->resource = enif_alloc_resource(mti->resource_type, 10);
+ fill(mti->resource, 10, 17);
+ mti->other_term = other_term;
+}
+
+static void
+fini_make_blob(struct make_term_info *mti)
+{
+ enif_release_resource(mti->resource);
+}
+
+static ERL_NIF_TERM make_blob(struct make_term_info *mti,
+ ErlNifEnv* dst_env)
+{
ERL_NIF_TERM term, list;
int n = 0;
- struct make_term_info mti;
- mti.caller_env = caller_env;
- mti.dst_env = dst_env;
- mti.reuse_push = 0;
- mti.reuse_pull = 0;
- mti.resource_type = priv->rt_arr[0].t;
- mti.other_term = other_term;
+
+ mti->reuse_push = 0;
+ mti->reuse_pull = 0;
+ mti->dst_env = dst_env;
+ mti->dst_env_valid = 1;
list = enif_make_list(dst_env, 0);
- while (make_term_n(&mti, n++, &term)) {
+ while (make_term_n(mti, n++, &term)) {
list = enif_make_list_cell(dst_env, term, list);
}
return list;
@@ -1280,13 +1379,16 @@ static ERL_NIF_TERM send_new_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM a
ERL_NIF_TERM msg, copy;
ErlNifEnv* msg_env;
int res;
+ struct make_term_info mti;
if (!enif_get_local_pid(env, argv[0], &to)) {
return enif_make_badarg(env);
}
msg_env = enif_alloc_env();
- msg = make_blob(env,msg_env, argv[1]);
- copy = make_blob(env,env, argv[1]);
+ init_make_blob(&mti, env, argv[1]);
+ msg = make_blob(&mti,msg_env);
+ copy = make_blob(&mti,env);
+ fini_make_blob(&mti);
res = enif_send(env, &to, msg_env, msg);
enif_free_env(msg_env);
return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
@@ -1302,9 +1404,12 @@ static ERL_NIF_TERM alloc_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
sizeof(*mti));
mti->caller_env = NULL;
mti->dst_env = enif_alloc_env();
+ mti->dst_env_valid = 1;
mti->reuse_push = 0;
mti->reuse_pull = 0;
mti->resource_type = priv->rt_arr[0].t;
+ mti->resource = enif_alloc_resource(mti->resource_type, 10);
+ fill(mti->resource, 10, 17);
mti->other_term = enif_make_list(mti->dst_env, 0);
mti->blob = enif_make_list(mti->dst_env, 0);
mti->mtx = enif_mutex_create("nif_SUITE:mtx");
@@ -1322,6 +1427,7 @@ static void msgenv_dtor(ErlNifEnv* env, void* obj)
if (mti->dst_env != NULL) {
enif_free_env(mti->dst_env);
}
+ enif_release_resource(mti->resource);
enif_mutex_destroy(mti->mtx);
enif_cond_destroy(mti->cond);
}
@@ -1333,6 +1439,7 @@ static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return enif_make_badarg(env);
}
enif_clear_env(mti.p->dst_env);
+ mti.p->dst_env_valid = 1;
mti.p->reuse_pull = 0;
mti.p->reuse_push = 0;
mti.p->blob = enif_make_list(mti.p->dst_env, 0);
@@ -1367,6 +1474,8 @@ static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
}
copy = enif_make_copy(env, mti.p->blob);
res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ if (res)
+ mti.p->dst_env_valid = 0;
return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
}
@@ -1374,7 +1483,6 @@ static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv
{
mti_t mti;
ErlNifPid to;
- ERL_NIF_TERM copy;
int res;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
|| !enif_get_local_pid(env, argv[1], &to)) {
@@ -1384,6 +1492,8 @@ static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv
enif_make_copy(mti.p->dst_env, argv[2]),
mti.p->blob);
res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ if (res)
+ mti.p->dst_env_valid = 0;
return enif_make_int(env,res);
}
@@ -1400,6 +1510,8 @@ void* threaded_sender(void *arg)
mti.p->send_it = 0;
enif_mutex_unlock(mti.p->mtx);
mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env, mti.p->blob);
+ if (mti.p->send_res)
+ mti.p->dst_env_valid = 0;
return NULL;
}
@@ -1471,7 +1583,6 @@ static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
static ERL_NIF_TERM send_copy_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- ErlNifEnv* menv;
ErlNifPid pid;
int ret;
if (!enif_get_local_pid(env, argv[0], &pid)) {
@@ -2015,6 +2126,733 @@ static ERL_NIF_TERM format_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg
}
+static int get_fd(ErlNifEnv* env, ERL_NIF_TERM term, struct fd_resource** rsrc)
+{
+ if (!enif_get_resource(env, term, fd_resource_type, (void**)rsrc)) {
+ return 0;
+ }
+ return 1;
+}
+
+static ERL_NIF_TERM select_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct fd_resource* fdr;
+ enum ErlNifSelectFlags mode;
+ void* obj;
+ ErlNifPid nifpid, *pid = NULL;
+ ERL_NIF_TERM ref;
+ int retval;
+
+ if (!get_fd(env, argv[0], &fdr)
+ || !enif_get_uint(env, argv[1], (unsigned int*)&mode)
+ || !enif_get_resource(env, argv[2], fd_resource_type, &obj))
+ {
+ return enif_make_badarg(env);
+ }
+
+ if (argv[3] != atom_null) {
+ if (!enif_get_local_pid(env, argv[3], &nifpid))
+ return enif_make_badarg(env);
+ pid = &nifpid;
+ }
+ ref = argv[4];
+
+ fdr->was_selected = 1;
+ enif_self(env, &fdr->pid);
+ retval = enif_select(env, fdr->fd, mode, obj, pid, ref);
+
+ return enif_make_int(env, retval);
+}
+
+#ifndef __WIN32__
+static ERL_NIF_TERM pipe_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct fd_resource* read_rsrc;
+ struct fd_resource* write_rsrc;
+ ERL_NIF_TERM read_fd, write_fd;
+ int fds[2], flags;
+
+ if (pipe(fds) < 0)
+ return enif_make_string(env, "pipe failed", ERL_NIF_LATIN1);
+
+ if ((flags = fcntl(fds[0], F_GETFL, 0)) < 0
+ || fcntl(fds[0], F_SETFL, flags|O_NONBLOCK) < 0
+ || (flags = fcntl(fds[1], F_GETFL, 0)) < 0
+ || fcntl(fds[1], F_SETFL, flags|O_NONBLOCK) < 0) {
+ close(fds[0]);
+ close(fds[1]);
+ return enif_make_string(env, "fcntl failed on pipe", ERL_NIF_LATIN1);
+ }
+
+ read_rsrc = enif_alloc_resource(fd_resource_type, sizeof(struct fd_resource));
+ write_rsrc = enif_alloc_resource(fd_resource_type, sizeof(struct fd_resource));
+ read_rsrc->fd = fds[0];
+ read_rsrc->was_selected = 0;
+ write_rsrc->fd = fds[1];
+ write_rsrc->was_selected = 0;
+ read_fd = enif_make_resource(env, read_rsrc);
+ write_fd = enif_make_resource(env, write_rsrc);
+ enif_release_resource(read_rsrc);
+ enif_release_resource(write_rsrc);
+
+ return enif_make_tuple2(env,
+ enif_make_tuple2(env, read_fd, make_pointer(env, read_rsrc)),
+ enif_make_tuple2(env, write_fd, make_pointer(env, write_rsrc)));
+}
+
+static ERL_NIF_TERM write_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct fd_resource* fdr;
+ ErlNifBinary bin;
+ int n, written = 0;
+
+ if (!get_fd(env, argv[0], &fdr)
+ || !enif_inspect_binary(env, argv[1], &bin))
+ return enif_make_badarg(env);
+
+ for (;;) {
+ n = write(fdr->fd, bin.data + written, bin.size - written);
+ if (n >= 0) {
+ written += n;
+ if (written == bin.size) {
+ return atom_ok;
+ }
+ }
+ else if (errno == EAGAIN) {
+ return enif_make_tuple2(env, atom_eagain, enif_make_int(env, written));
+ }
+ else if (errno == EINTR) {
+ continue;
+ }
+ else {
+ return enif_make_tuple2(env, atom_error, enif_make_int(env, errno));
+ }
+ }
+}
+
+static ERL_NIF_TERM read_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct fd_resource* fdr;
+ unsigned char* buf;
+ int n, count;
+ ERL_NIF_TERM res;
+
+ if (!get_fd(env, argv[0], &fdr)
+ || !enif_get_int(env, argv[1], &count) || count < 1)
+ return enif_make_badarg(env);
+
+ buf = enif_make_new_binary(env, count, &res);
+
+ for (;;) {
+ n = read(fdr->fd, buf, count);
+ if (n > 0) {
+ if (n < count) {
+ res = enif_make_sub_binary(env, res, 0, n);
+ }
+ return res;
+ }
+ else if (n == 0) {
+ return atom_eof;
+ }
+ else if (errno == EAGAIN) {
+ return atom_eagain;
+ }
+ else if (errno == EINTR) {
+ continue;
+ }
+ else {
+ return enif_make_tuple2(env, atom_error, enif_make_int(env, errno));
+ }
+ }
+}
+
+static ERL_NIF_TERM is_closed_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct fd_resource* fdr;
+
+ if (!get_fd(env, argv[0], &fdr))
+ return enif_make_badarg(env);
+
+ return fdr->fd < 0 ? atom_true : atom_false;
+}
+#endif /* !__WIN32__ */
+
+
+static void fd_resource_dtor(ErlNifEnv* env, void* obj)
+{
+ struct fd_resource* fdr = (struct fd_resource*)obj;
+ resource_dtor(env, obj);
+#ifdef __WIN32__
+ abort();
+#else
+ if (fdr->fd >= 0) {
+ assert(!fdr->was_selected);
+ close(fdr->fd);
+ }
+#endif
+}
+
+static struct {
+ void* obj;
+ int was_direct_call;
+}last_fd_stop;
+int fd_stop_cnt = 0;
+
+static void fd_resource_stop(ErlNifEnv* env, void* obj, ErlNifEvent fd,
+ int is_direct_call)
+{
+ struct fd_resource* fdr = (struct fd_resource*)obj;
+ assert(fd == fdr->fd);
+ assert(fd >= 0);
+
+ last_fd_stop.obj = obj;
+ last_fd_stop.was_direct_call = is_direct_call;
+ fd_stop_cnt++;
+
+ close(fd);
+ fdr->fd = -1; /* thread safety ? */
+ fdr->was_selected = 0;
+
+ {
+ ErlNifEnv* msg_env = enif_alloc_env();
+ ERL_NIF_TERM msg;
+ msg = enif_make_tuple3(msg_env,
+ atom_fd_resource_stop,
+ make_pointer(msg_env, obj),
+ enif_make_int(msg_env, is_direct_call));
+
+ enif_send(env, &fdr->pid, msg_env, msg);
+ enif_free_env(msg_env);
+ }
+}
+
+static ERL_NIF_TERM last_fd_stop_call(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM last, ret;
+ last = enif_make_tuple2(env, make_pointer(env, last_fd_stop.obj),
+ enif_make_int(env, last_fd_stop.was_direct_call));
+ ret = enif_make_tuple2(env, enif_make_int(env, fd_stop_cnt), last);
+ fd_stop_cnt = 0;
+ return ret;
+}
+
+
+static void monitor_resource_dtor(ErlNifEnv* env, void* obj)
+{
+ resource_dtor(env, obj);
+}
+
+static ERL_NIF_TERM make_monitor(ErlNifEnv* env, const ErlNifMonitor* mon)
+{
+ ERL_NIF_TERM mon_bin;
+ memcpy(enif_make_new_binary(env, sizeof(ErlNifMonitor), &mon_bin),
+ mon, sizeof(ErlNifMonitor));
+ return mon_bin;
+}
+
+static int get_monitor(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifMonitor* mon)
+{
+ ErlNifBinary bin;
+ if (!enif_inspect_binary(env, term, &bin)
+ || bin.size != sizeof(ErlNifMonitor))
+ return 0;
+ memcpy(mon, bin.data, bin.size);
+ return 1;
+}
+
+static void monitor_resource_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
+ ErlNifMonitor* mon)
+{
+ struct monitor_resource* rsrc = (struct monitor_resource*)obj;
+ ErlNifEnv* build_env;
+ ErlNifEnv* msg_env;
+ ERL_NIF_TERM msg;
+
+ if (rsrc->use_msgenv) {
+ msg_env = enif_alloc_env();
+ build_env = msg_env;
+ }
+ else {
+ msg_env = NULL;
+ build_env = env;
+ }
+
+ msg = enif_make_tuple4(build_env,
+ atom_monitor_resource_down,
+ make_pointer(build_env, obj),
+ enif_make_pid(build_env, pid),
+ make_monitor(build_env, mon));
+
+ enif_send(env, &rsrc->receiver, msg_env, msg);
+ if (msg_env)
+ enif_free_env(msg_env);
+}
+
+static ERL_NIF_TERM alloc_monitor_resource_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct monitor_resource* rsrc;
+
+ rsrc = enif_alloc_resource(monitor_resource_type, sizeof(struct monitor_resource));
+
+ return make_pointer(env,rsrc);
+}
+
+static ERL_NIF_TERM monitor_process_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct monitor_resource* rsrc;
+ ErlNifPid target;
+ ErlNifMonitor mon;
+ int res;
+
+ if (!get_pointer(env, argv[0], (void**)&rsrc)
+ || !enif_get_local_pid(env, argv[1], &target)
+ || !enif_get_local_pid(env, argv[3], &rsrc->receiver)) {
+ return enif_make_badarg(env);
+ }
+
+ rsrc->use_msgenv = (argv[2] == atom_true);
+ res = enif_monitor_process(env, rsrc, &target, &mon);
+
+ return enif_make_tuple2(env, enif_make_int(env, res), make_monitor(env, &mon));
+}
+
+static ERL_NIF_TERM demonitor_process_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct monitor_resource* rsrc;
+ ErlNifMonitor mon;
+ int res;
+
+ if (!get_pointer(env, argv[0], (void**)&rsrc)
+ || !get_monitor(env, argv[1], &mon)) {
+ return enif_make_badarg(env);
+ }
+
+ res = enif_demonitor_process(env, rsrc, &mon);
+
+ return enif_make_int(env, res);
+}
+
+static ERL_NIF_TERM compare_monitors_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifMonitor m1, m2;
+ if (!get_monitor(env, argv[0], &m1)
+ || !get_monitor(env, argv[1], &m2)) {
+ return enif_make_badarg(env);
+ }
+
+ return enif_make_int(env, enif_compare_monitors(&m1, &m2));
+}
+
+
+/*********** monitor_frenzy ************/
+
+struct frenzy_rand_bits
+{
+ unsigned int source;
+ unsigned int bits_consumed;
+};
+
+static unsigned int frenzy_rand_bits_max;
+
+unsigned rand_bits(struct frenzy_rand_bits* rnd, unsigned int nbits)
+{
+ unsigned int res;
+
+ rnd->bits_consumed += nbits;
+ assert(rnd->bits_consumed <= frenzy_rand_bits_max);
+ res = rnd->source & ((1 << nbits)-1);
+ rnd->source >>= nbits;
+ return res;
+}
+
+#define FRENZY_PROCS_MAX_BITS 4
+#define FRENZY_PROCS_MAX (1 << FRENZY_PROCS_MAX_BITS)
+
+#define FRENZY_RESOURCES_MAX_BITS 4
+#define FRENZY_RESOURCES_MAX (1 << FRENZY_RESOURCES_MAX_BITS)
+
+#define FRENZY_MONITORS_MAX_BITS 4
+#define FRENZY_MONITORS_MAX (1 << FRENZY_MONITORS_MAX_BITS)
+
+struct frenzy_monitor {
+ ErlNifMutex* lock;
+ enum {
+ MON_FREE, MON_FREE_DOWN, MON_FREE_DEMONITOR,
+ MON_TRYING, MON_ACTIVE, MON_PENDING
+ } state;
+ ErlNifMonitor mon;
+ ErlNifPid pid;
+ unsigned int use_cnt;
+};
+
+struct frenzy_resource {
+ unsigned int rix;
+ struct frenzy_monitor monv[FRENZY_MONITORS_MAX];
+};
+struct frenzy_reslot {
+ ErlNifMutex* lock;
+ int stopped;
+ struct frenzy_resource* obj;
+ unsigned long alloc_cnt;
+ unsigned long release_cnt;
+ unsigned long dtor_cnt;
+};
+static struct frenzy_reslot resv[FRENZY_RESOURCES_MAX];
+
+static ERL_NIF_TERM monitor_frenzy_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct frenzy_proc {
+ ErlNifPid pid;
+ int is_free;
+ };
+ static struct frenzy_proc procs[FRENZY_PROCS_MAX];
+ static struct frenzy_proc* proc_refs[FRENZY_PROCS_MAX];
+ static unsigned int nprocs, old_nprocs;
+ static ErlNifMutex* procs_lock;
+ static unsigned long spawn_cnt = 0;
+ static unsigned long kill_cnt = 0;
+ static unsigned long proc_histogram[FRENZY_PROCS_MAX];
+
+ static const unsigned int primes[] = {7, 13, 17, 19};
+
+ struct frenzy_resource* r;
+ struct frenzy_rand_bits rnd;
+ unsigned int op, inc, my_nprocs;
+ unsigned int mix; /* r->monv[] index */
+ unsigned int rix; /* resv[] index */
+ unsigned int pix; /* procs[] index */
+ unsigned int ref_ix; /* proc_refs[] index */
+ int self_pix, rv;
+ ERL_NIF_TERM retval = atom_error;
+ const ERL_NIF_TERM Op = argv[0];
+ const ERL_NIF_TERM Rnd = argv[1];
+ const ERL_NIF_TERM SelfPix = argv[2];
+ const ERL_NIF_TERM NewPid = argv[3];
+
+ if (enif_is_atom(env, Op)) {
+ if (Op == atom_init) {
+ if (procs_lock || !enif_get_uint(env, Rnd, &frenzy_rand_bits_max))
+ return enif_make_badarg(env);
+
+ procs_lock = enif_mutex_create("nif_SUITE:monitor_frenzy.procs");
+ nprocs = 0;
+ old_nprocs = 0;
+ for (pix = 0; pix < FRENZY_PROCS_MAX; pix++) {
+ proc_refs[pix] = &procs[pix];
+ procs[pix].is_free = 1;
+ proc_histogram[pix] = 0;
+ }
+ for (rix = 0; rix < FRENZY_RESOURCES_MAX; rix++) {
+ resv[rix].lock = enif_mutex_create("nif_SUITE:monitor_frenzy.resv.lock");
+ resv[rix].obj = NULL;
+ resv[rix].stopped = 0;
+ resv[rix].alloc_cnt = 0;
+ resv[rix].release_cnt = 0;
+ resv[rix].dtor_cnt = 0;
+ }
+
+ /* Add self as first process */
+ enif_self(env, &procs[0].pid);
+ procs[0].is_free = 0;
+ old_nprocs = ++nprocs;
+
+ spawn_cnt = 1;
+ kill_cnt = 0;
+ return enif_make_uint(env, 0); /* SelfPix */
+ }
+ else if (Op == atom_stats) {
+ ERL_NIF_TERM hist[FRENZY_PROCS_MAX];
+ unsigned long res_alloc_cnt = 0;
+ unsigned long res_release_cnt = 0;
+ unsigned long res_dtor_cnt = 0;
+ for (ref_ix = 0; ref_ix < FRENZY_PROCS_MAX; ref_ix++) {
+ hist[ref_ix] = enif_make_ulong(env, proc_histogram[ref_ix]);
+ }
+ for (rix = 0; rix < FRENZY_RESOURCES_MAX; rix++) {
+ res_alloc_cnt += resv[rix].alloc_cnt;
+ res_release_cnt += resv[rix].release_cnt;
+ res_dtor_cnt += resv[rix].dtor_cnt;
+ }
+
+ return
+ enif_make_list4(env,
+ enif_make_tuple2(env, enif_make_string(env, "proc_histogram", ERL_NIF_LATIN1),
+ enif_make_list_from_array(env, hist, FRENZY_PROCS_MAX)),
+ enif_make_tuple2(env, enif_make_string(env, "spawn_cnt", ERL_NIF_LATIN1),
+ enif_make_ulong(env, spawn_cnt)),
+ enif_make_tuple2(env, enif_make_string(env, "kill_cnt", ERL_NIF_LATIN1),
+ enif_make_ulong(env, kill_cnt)),
+ enif_make_tuple4(env, enif_make_string(env, "resource_alloc", ERL_NIF_LATIN1),
+ enif_make_ulong(env, res_alloc_cnt),
+ enif_make_ulong(env, res_release_cnt),
+ enif_make_ulong(env, res_dtor_cnt)));
+
+ }
+ else if (Op == atom_stop && procs_lock) { /* stop all */
+
+ /* Release all resources */
+ for (rix = 0; rix < FRENZY_RESOURCES_MAX; rix++) {
+ enif_mutex_lock(resv[rix].lock);
+ r = resv[rix].obj;
+ if (r) {
+ resv[rix].obj = NULL;
+ resv[rix].release_cnt++;
+ }
+ resv[rix].stopped = 1;
+ enif_mutex_unlock(resv[rix].lock);
+ if (r)
+ enif_release_resource(r);
+ }
+
+ /* Remove and return all pids */
+ retval = enif_make_list(env, 0);
+ enif_mutex_lock(procs_lock);
+ for (ref_ix = 0; ref_ix < nprocs; ref_ix++) {
+ assert(!proc_refs[ref_ix]->is_free);
+ retval = enif_make_list_cell(env, enif_make_pid(env, &proc_refs[ref_ix]->pid),
+ retval);
+ proc_refs[ref_ix]->is_free = 1;
+ }
+ kill_cnt += nprocs;
+ nprocs = 0;
+ old_nprocs = 0;
+ enif_mutex_unlock(procs_lock);
+
+ return retval;
+ }
+ return enif_make_badarg(env);
+ }
+
+ if (!enif_get_int(env, SelfPix, &self_pix) ||
+ !enif_get_uint(env, Op, &op) ||
+ !enif_get_uint(env, Rnd, &rnd.source))
+ return enif_make_badarg(env);
+
+ rnd.bits_consumed = 0;
+ switch (op) {
+ case 0: { /* add/remove process */
+ ErlNifPid self;
+ enif_self(env, &self);
+
+ ref_ix = rand_bits(&rnd, FRENZY_PROCS_MAX_BITS) % FRENZY_PROCS_MAX;
+ enif_mutex_lock(procs_lock);
+ if (procs[self_pix].is_free || procs[self_pix].pid.pid != self.pid) {
+ /* Someone already removed me */
+ enif_mutex_unlock(procs_lock);
+ return atom_done;
+ }
+ if (ref_ix >= nprocs || nprocs < 2) { /* add process */
+ ref_ix = nprocs++;
+ pix = proc_refs[ref_ix] - procs;
+ assert(procs[pix].is_free);
+ if (!enif_get_local_pid(env, NewPid, &procs[pix].pid))
+ abort();
+ procs[pix].is_free = 0;
+ spawn_cnt++;
+ proc_histogram[ref_ix]++;
+ old_nprocs = nprocs;
+ enif_mutex_unlock(procs_lock);
+ DBG_TRACE2("Add pid %T, nprocs = %u\n", NewPid, nprocs);
+ retval = enif_make_uint(env, pix);
+ }
+ else { /* remove process */
+ pix = proc_refs[ref_ix] - procs;
+ if (pix == self_pix) {
+ ref_ix = (ref_ix + 1) % nprocs;
+ pix = proc_refs[ref_ix] - procs;
+ }
+ assert(procs[pix].pid.pid != self.pid);
+ assert(!procs[pix].is_free);
+ retval = enif_make_pid(env, &procs[pix].pid);
+ --nprocs;
+ assert(!proc_refs[nprocs]->is_free);
+ if (ref_ix != nprocs) {
+ struct frenzy_proc* tmp = proc_refs[ref_ix];
+ proc_refs[ref_ix] = proc_refs[nprocs];
+ proc_refs[nprocs] = tmp;
+ }
+ procs[pix].is_free = 1;
+ proc_histogram[nprocs]++;
+ kill_cnt++;
+ enif_mutex_unlock(procs_lock);
+ DBG_TRACE2("Removed pid %T, nprocs = %u\n", retval, nprocs);
+ }
+ break;
+ }
+ case 1:
+ case 2: /* create/delete/lookup resource */
+ rix = rand_bits(&rnd, FRENZY_RESOURCES_MAX_BITS) % FRENZY_RESOURCES_MAX;
+ inc = primes[rand_bits(&rnd, 2)];
+ while (enif_mutex_trylock(resv[rix].lock) == EBUSY) {
+ rix = (rix + inc) % FRENZY_RESOURCES_MAX;
+ }
+ if (resv[rix].stopped) {
+ retval = atom_done;
+ enif_mutex_unlock(resv[rix].lock);
+ break;
+ }
+ else if (resv[rix].obj == NULL) {
+ r = enif_alloc_resource(frenzy_resource_type,
+ sizeof(struct frenzy_resource));
+ resv[rix].obj = r;
+ resv[rix].alloc_cnt++;
+ r->rix = rix;
+ for (mix = 0; mix < FRENZY_MONITORS_MAX; mix++) {
+ r->monv[mix].lock = enif_mutex_create("nif_SUITE:monitor_frenzy.monv.lock");
+ r->monv[mix].state = MON_FREE;
+ r->monv[mix].use_cnt = 0;
+ r->monv[mix].pid.pid = 0; /* null-pid */
+ }
+ DBG_TRACE2("New resource at r=%p rix=%u\n", r, rix);
+ }
+ else {
+ unsigned int resource_op = rand_bits(&rnd, 3);
+ r = resv[rix].obj;
+ if (resource_op == 0) { /* delete resource */
+ resv[rix].obj = NULL;
+ resv[rix].release_cnt++;
+ enif_mutex_unlock(resv[rix].lock);
+ DBG_TRACE2("Delete resource at r=%p rix=%u\n", r, rix);
+ enif_release_resource(r);
+ retval = atom_ok;
+ break;
+ }
+ else if (resource_op == 1) { /* return resource */
+ retval = enif_make_resource(env, r);
+ enif_mutex_unlock(resv[rix].lock);
+ break;
+ }
+ }
+ enif_keep_resource(r);
+ enif_mutex_unlock(resv[rix].lock);
+
+ /* monitor/demonitor */
+
+ mix = rand_bits(&rnd, FRENZY_MONITORS_MAX_BITS) % FRENZY_MONITORS_MAX;
+ inc = primes[rand_bits(&rnd, 2)];
+ while (enif_mutex_trylock(r->monv[mix].lock) == EBUSY) {
+ mix = (mix + inc) % FRENZY_MONITORS_MAX;
+ }
+ switch (r->monv[mix].state) {
+ case MON_FREE:
+ case MON_FREE_DOWN:
+ case MON_FREE_DEMONITOR: { /* do monitor */
+ /*
+ * Use an old possibly larger value of 'nprocs', to increase
+ * probability of monitoring an already terminated process
+ */
+ my_nprocs = old_nprocs;
+ if (my_nprocs > 0) {
+ int save_state = r->monv[mix].state;
+ ref_ix = rand_bits(&rnd, FRENZY_PROCS_MAX_BITS) % my_nprocs;
+ pix = proc_refs[ref_ix] - procs;
+ r->monv[mix].pid.pid = procs[pix].pid.pid; /* "atomic" */
+ r->monv[mix].state = MON_TRYING;
+ rv = enif_monitor_process(env, r, &r->monv[mix].pid, &r->monv[mix].mon);
+ if (rv == 0) {
+ r->monv[mix].state = MON_ACTIVE;
+ r->monv[mix].use_cnt++;
+ DBG_TRACE3("Monitor from r=%p rix=%u to %T\n",
+ r, r->rix, r->monv[mix].pid.pid);
+ }
+ else {
+ r->monv[mix].state = save_state;
+ DBG_TRACE4("Monitor from r=%p rix=%u to %T FAILED with %d\n",
+ r, r->rix, r->monv[mix].pid.pid, rv);
+ }
+ retval = enif_make_int(env,rv);
+ }
+ else {
+ DBG_TRACE0("No pids to monitor\n");
+ retval = atom_ok;
+ }
+ break;
+ }
+ case MON_ACTIVE: /* do demonitor */
+ rv = enif_demonitor_process(env, r, &r->monv[mix].mon);
+ if (rv == 0) {
+ DBG_TRACE3("Demonitor from r=%p rix=%u to %T\n",
+ r, r->rix, r->monv[mix].pid.pid);
+ r->monv[mix].state = MON_FREE_DEMONITOR;
+ }
+ else {
+ DBG_TRACE4("Demonitor from r=%p rix=%u to %T FAILED with %d\n",
+ r, r->rix, r->monv[mix].pid.pid, rv);
+ r->monv[mix].state = MON_PENDING;
+ }
+ retval = enif_make_int(env,rv);
+ break;
+
+ case MON_PENDING: /* waiting for 'down' callback, do nothing */
+ retval = atom_ok;
+ break;
+ default:
+ abort();
+ break;
+ }
+ enif_mutex_unlock(r->monv[mix].lock);
+ enif_release_resource(r);
+ break;
+
+ case 3: /* no-op */
+ retval = atom_ok;
+ break;
+ }
+
+ {
+ int percent = (rand_bits(&rnd, 6) + 1) * 2; /* 2 to 128 */
+ if (percent <= 100)
+ enif_consume_timeslice(env, percent);
+ }
+
+ return retval;
+}
+
+static void frenzy_resource_dtor(ErlNifEnv* env, void* obj)
+{
+ struct frenzy_resource* r = (struct frenzy_resource*) obj;
+ unsigned int mix;
+
+ DBG_TRACE2("DTOR r=%p rix=%u\n", r, r->rix);
+
+ enif_mutex_lock(resv[r->rix].lock);
+ resv[r->rix].dtor_cnt++;
+ enif_mutex_unlock(resv[r->rix].lock);
+
+ for (mix = 0; mix < FRENZY_MONITORS_MAX; mix++) {
+ assert(r->monv[mix].state != MON_PENDING);
+ enif_mutex_destroy(r->monv[mix].lock);
+ r->monv[mix].lock = NULL;
+ }
+
+}
+
+static void frenzy_resource_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
+ ErlNifMonitor* mon)
+{
+ struct frenzy_resource* r = (struct frenzy_resource*) obj;
+ unsigned int mix;
+
+ DBG_TRACE3("DOWN pid=%T, r=%p rix=%u\n", pid->pid, r, r->rix);
+
+ for (mix = 0; mix < FRENZY_MONITORS_MAX; mix++) {
+ if (r->monv[mix].pid.pid == pid->pid && r->monv[mix].state >= MON_TRYING) {
+ enif_mutex_lock(r->monv[mix].lock);
+ if (enif_compare_monitors(mon, &r->monv[mix].mon) == 0) {
+ assert(r->monv[mix].state >= MON_ACTIVE);
+ r->monv[mix].state = MON_FREE_DOWN;
+ enif_mutex_unlock(r->monv[mix].lock);
+ return;
+ }
+ enif_mutex_unlock(r->monv[mix].lock);
+ }
+ }
+ enif_fprintf(stderr, "DOWN called for unknown monitor\n");
+ abort();
+}
+
+
+
static ErlNifFunc nif_funcs[] =
{
{"lib_version", 0, lib_version},
@@ -2091,7 +2929,20 @@ static ErlNifFunc nif_funcs[] =
{"term_to_binary_nif", 2, term_to_binary},
{"binary_to_term_nif", 3, binary_to_term},
{"port_command_nif", 2, port_command},
- {"format_term_nif", 2, format_term}
+ {"format_term_nif", 2, format_term},
+ {"select_nif", 5, select_nif},
+#ifndef __WIN32__
+ {"pipe_nif", 0, pipe_nif},
+ {"write_nif", 2, write_nif},
+ {"read_nif", 2, read_nif},
+ {"is_closed_nif", 1, is_closed_nif},
+#endif
+ {"last_fd_stop_call", 0, last_fd_stop_call},
+ {"alloc_monitor_resource_nif", 0, alloc_monitor_resource_nif},
+ {"monitor_process_nif", 4, monitor_process_nif},
+ {"demonitor_process_nif", 2, demonitor_process_nif},
+ {"compare_monitors_nif", 2, compare_monitors_nif},
+ {"monitor_frenzy_nif", 4, monitor_frenzy_nif}
};
-ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
+ERL_NIF_INIT(nif_SUITE,nif_funcs,load,NULL,upgrade,unload)
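Note (illustrative, not part of the patch): the monitor_* NIFs above rely on monitor_resource_type (and frenzy_resource_type) having been opened with a 'down' callback; the suite's load() is outside this excerpt. Assuming the extended resource-type API (ErlNifResourceTypeInit / enif_open_resource_type_x) that accompanies process monitoring from NIFs, the wiring could look roughly like this, reusing the callbacks defined in the hunk above:

/* Sketch only: open a resource type whose 'down' callback fires when a
 * process monitored via enif_monitor_process terminates. 'example_load'
 * is a made-up name; the real suite registers its types in its own load(). */
static int example_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ErlNifResourceTypeInit init;
    init.dtor = monitor_resource_dtor;   /* destructor, as defined above */
    init.stop = NULL;                    /* only needed for enif_select resources */
    init.down = monitor_resource_down;   /* process-down callback, as defined above */

    monitor_resource_type =
        enif_open_resource_type_x(env, "monitor_resource", &init,
                                  ERL_NIF_RT_CREATE, NULL);
    return monitor_resource_type ? 0 : -1;
}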
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_0/README b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/README
new file mode 100644
index 0000000000..a6ed36f634
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/README
@@ -0,0 +1,5 @@
+These are old genuine header files
+checked out from tag OTP_R14A c1e94fa9a6fe4ae717d35.
+
+I chose this API version (2.0) to test as it predates
+the addition of vm_variant in ErlNifEntry.
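As a point of reference (not part of this commit), a minimal NIF module written against this 2.0 API could look as follows; the module and function names are invented for illustration and simply show the entry layout the README refers to:

#include "erl_nif.h"

static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "world");
}

/* ErlNifFunc in 2.0 is just {name, arity, fptr} -- no 'flags' field yet */
static ErlNifFunc funcs[] = {
    {"hello", 0, hello}
};

/* Produces an ErlNifEntry without the vm_variant member mentioned above */
ERL_NIF_INIT(api_2_0_example, funcs, NULL, NULL, NULL, NULL)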
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_drv_nif.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_drv_nif.h
new file mode 100644
index 0000000000..ea013a49a3
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_drv_nif.h
@@ -0,0 +1,48 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Common structures for both erl_driver.h and erl_nif.h
+ */
+
+#ifndef __ERL_DRV_NIF_H__
+#define __ERL_DRV_NIF_H__
+
+typedef struct {
+ int driver_major_version;
+ int driver_minor_version;
+ char *erts_version;
+ char *otp_release;
+ int thread_support;
+ int smp_support;
+ int async_threads;
+ int scheduler_threads;
+ int nif_major_version;
+ int nif_minor_version;
+} ErlDrvSysInfo;
+
+typedef struct {
+ int suggested_stack_size;
+} ErlDrvThreadOpts;
+
+#endif /* __ERL_DRV_NIF_H__ */
+
+
+
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif.h
new file mode 100644
index 0000000000..936f03bce1
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif.h
@@ -0,0 +1,206 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Include file for writers of Native Implemented Functions.
+*/
+
+#ifndef __ERL_NIF_H__
+#define __ERL_NIF_H__
+
+
+#include "erl_drv_nif.h"
+
+/* Version history:
+** 0.1: R13B03
+** 1.0: R13B04
+** 2.0: R14A
+*/
+#define ERL_NIF_MAJOR_VERSION 2
+#define ERL_NIF_MINOR_VERSION 0
+
+#include <stdlib.h>
+
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
+# undef HALFWORD_HEAP_EMULATOR
+#endif
+#include "erl_int_sizes_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HALFWORD_HEAP_EMULATOR
+typedef unsigned int ERL_NIF_TERM;
+#else
+typedef unsigned long ERL_NIF_TERM;
+#endif
+
+struct enif_environment_t;
+typedef struct enif_environment_t ErlNifEnv;
+
+typedef struct
+{
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+}ErlNifFunc;
+
+typedef struct enif_entry_t
+{
+ int major;
+ int minor;
+ const char* name;
+ int num_of_funcs;
+ ErlNifFunc* funcs;
+ int (*load) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+ void (*unload) (ErlNifEnv*, void* priv_data);
+}ErlNifEntry;
+
+
+
+typedef struct
+{
+ size_t size;
+ unsigned char* data;
+
+ /* Internals (avert your eyes) */
+ ERL_NIF_TERM bin_term;
+ void* ref_bin;
+}ErlNifBinary;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef enum
+{
+ ERL_NIF_RT_CREATE = 1,
+ ERL_NIF_RT_TAKEOVER = 2
+}ErlNifResourceFlags;
+
+typedef enum
+{
+ ERL_NIF_LATIN1 = 1
+}ErlNifCharEncoding;
+
+typedef struct
+{
+ ERL_NIF_TERM pid; /* internal, may change */
+}ErlNifPid;
+
+typedef ErlDrvSysInfo ErlNifSysInfo;
+
+typedef struct ErlDrvTid_ *ErlNifTid;
+typedef struct ErlDrvMutex_ ErlNifMutex;
+typedef struct ErlDrvCond_ ErlNifCond;
+typedef struct ErlDrvRWLock_ ErlNifRWLock;
+typedef int ErlNifTSDKey;
+
+typedef ErlDrvThreadOpts ErlNifThreadOpts;
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
+typedef struct {
+# include "erl_nif_api_funcs.h"
+} TWinDynNifCallbacks;
+extern TWinDynNifCallbacks WinDynNifCallbacks;
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER)
+# define ERL_NIF_API_FUNC_MACRO(NAME) (WinDynNifCallbacks.NAME)
+# include "erl_nif_api_funcs.h"
+/* note that we have to keep ERL_NIF_API_FUNC_MACRO defined */
+
+#else /* non windows or included from emulator itself */
+
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) extern RET_TYPE NAME ARGS
+# include "erl_nif_api_funcs.h"
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
+# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+#else
+# define ERL_NIF_INIT_GLOB
+# define ERL_NIF_INIT_BODY
+# if defined(VXWORKS)
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void)
+# else
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+# endif
+#endif
+
+
+#ifdef __cplusplus
+}
+# define ERL_NIF_INIT_PROLOGUE extern "C" {
+# define ERL_NIF_INIT_EPILOGUE }
+#else
+# define ERL_NIF_INIT_PROLOGUE
+# define ERL_NIF_INIT_EPILOGUE
+#endif
+
+
+#define ERL_NIF_INIT(NAME, FUNCS, LOAD, RELOAD, UPGRADE, UNLOAD) \
+ERL_NIF_INIT_PROLOGUE \
+ERL_NIF_INIT_GLOB \
+ERL_NIF_INIT_DECL(NAME) \
+{ \
+ static ErlNifEntry entry = \
+ { \
+ ERL_NIF_MAJOR_VERSION, \
+ ERL_NIF_MINOR_VERSION, \
+ #NAME, \
+ sizeof(FUNCS) / sizeof(*FUNCS), \
+ FUNCS, \
+ LOAD, RELOAD, UPGRADE, UNLOAD \
+ }; \
+ ERL_NIF_INIT_BODY; \
+ return &entry; \
+} \
+ERL_NIF_INIT_EPILOGUE
+
+
+#endif /* __ERL_NIF_H__ */
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif_api_funcs.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif_api_funcs.h
new file mode 100644
index 0000000000..ef4e9580b0
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_0/erl_nif_api_funcs.h
@@ -0,0 +1,257 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#if !defined(ERL_NIF_API_FUNC_DECL) && !defined(ERL_NIF_API_FUNC_MACRO)
+# error This file should not be included directly
+#endif
+
+#ifdef ERL_NIF_API_FUNC_DECL
+ERL_NIF_API_FUNC_DECL(void*,enif_priv_data,(ErlNifEnv*));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_free,(void* ptr));
+ERL_NIF_API_FUNC_DECL(int,enif_is_atom,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_binary,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_ref,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(size_t size, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifBinary* bin, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_get_int,(ErlNifEnv*, ERL_NIF_TERM term, int* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_ulong,(ErlNifEnv*, ERL_NIF_TERM term, unsigned long* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_double,(ErlNifEnv*, ERL_NIF_TERM term, double* dp));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_cell,(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM* head, ERL_NIF_TERM* tail));
+ERL_NIF_API_FUNC_DECL(int,enif_get_tuple,(ErlNifEnv* env, ERL_NIF_TERM tpl, int* arity, const ERL_NIF_TERM** array));
+ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_compare,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_binary,(ErlNifEnv* env, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_badarg,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int,(ErlNifEnv* env, int i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ulong,(ErlNifEnv* env, unsigned long i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_double,(ErlNifEnv* env, double d));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_atom,(ErlNifEnv* env, const char* name));
+ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_cell,(ErlNifEnv* env, ERL_NIF_TERM car, ERL_NIF_TERM cdr));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string,(ErlNifEnv* env, const char* string, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ref,(ErlNifEnv* env));
+
+ERL_NIF_API_FUNC_DECL(ErlNifMutex*,enif_mutex_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_destroy,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(int,enif_mutex_trylock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_lock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_unlock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(ErlNifCond*,enif_cond_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_destroy,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_signal,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_broadcast,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_wait,(ErlNifCond *cnd, ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(ErlNifRWLock*,enif_rwlock_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_destroy,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_rwlock_tryrlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_runlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_rwlock_tryrwlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rwlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rwunlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_tsd_key_create,(char *name, ErlNifTSDKey *key));
+ERL_NIF_API_FUNC_DECL(void,enif_tsd_key_destroy,(ErlNifTSDKey key));
+ERL_NIF_API_FUNC_DECL(void,enif_tsd_set,(ErlNifTSDKey key, void *data));
+ERL_NIF_API_FUNC_DECL(void*,enif_tsd_get,(ErlNifTSDKey key));
+ERL_NIF_API_FUNC_DECL(ErlNifThreadOpts*,enif_thread_opts_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_thread_opts_destroy,(ErlNifThreadOpts *opts));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_create,(char *name,ErlNifTid *tid,void * (*func)(void *),void *args,ErlNifThreadOpts *opts));
+ERL_NIF_API_FUNC_DECL(ErlNifTid,enif_thread_self,(void));
+ERL_NIF_API_FUNC_DECL(int,enif_equal_tids,(ErlNifTid tid1, ErlNifTid tid2));
+ERL_NIF_API_FUNC_DECL(void,enif_thread_exit,(void *resp));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_join,(ErlNifTid, void **respp));
+
+ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(void* ptr, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_system_info,(ErlNifSysInfo *sip, size_t si_size));
+ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(void/* FILE* */ *filep, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_iolist_as_binary,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, size_t pos, size_t size));
+ERL_NIF_API_FUNC_DECL(int,enif_get_string,(ErlNifEnv*, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_is_fun,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_pid,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_port,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_get_uint,(ErlNifEnv*, ERL_NIF_TERM term, unsigned* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_long,(ErlNifEnv*, ERL_NIF_TERM term, long* ip));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint,(ErlNifEnv*, unsigned i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_long,(ErlNifEnv*, long i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
+ERL_NIF_API_FUNC_DECL(int,enif_is_empty_list,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* module_str, const char* name_str, void (*dtor)(ErlNifEnv*,void *), ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifResourceType* type, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource,(ErlNifEnv*, void* obj));
+ERL_NIF_API_FUNC_DECL(int,enif_get_resource,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp));
+ERL_NIF_API_FUNC_DECL(size_t,enif_sizeof_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(unsigned char*,enif_make_new_binary,(ErlNifEnv*,size_t size,ERL_NIF_TERM* termp));
+ERL_NIF_API_FUNC_DECL(int,enif_is_list,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_tuple,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom_length,(ErlNifEnv*, ERL_NIF_TERM atom, unsigned* len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_length,(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_atom_len,(ErlNifEnv* env, const char* name, size_t len));
+ERL_NIF_API_FUNC_DECL(int, enif_make_existing_atom_len,(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ErlNifEnv*,enif_alloc_env,(void));
+ERL_NIF_API_FUNC_DECL(void,enif_free_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(void,enif_clear_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(int,enif_send,(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_copy,(ErlNifEnv* dst_env, ERL_NIF_TERM src_term));
+ERL_NIF_API_FUNC_DECL(ErlNifPid*,enif_self,(ErlNifEnv* caller_env, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(int,enif_get_local_pid,(ErlNifEnv* env, ERL_NIF_TERM, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(void,enif_keep_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* obj,const void* data, size_t size));
+
+/*
+** Add last to keep compatibility on Windows!!!
+*/
+#endif
+
+#ifdef ERL_NIF_API_FUNC_MACRO
+# define enif_priv_data ERL_NIF_API_FUNC_MACRO(enif_priv_data)
+# define enif_alloc ERL_NIF_API_FUNC_MACRO(enif_alloc)
+# define enif_free ERL_NIF_API_FUNC_MACRO(enif_free)
+# define enif_is_atom ERL_NIF_API_FUNC_MACRO(enif_is_atom)
+# define enif_is_binary ERL_NIF_API_FUNC_MACRO(enif_is_binary)
+# define enif_is_ref ERL_NIF_API_FUNC_MACRO(enif_is_ref)
+# define enif_inspect_binary ERL_NIF_API_FUNC_MACRO(enif_inspect_binary)
+# define enif_alloc_binary ERL_NIF_API_FUNC_MACRO(enif_alloc_binary)
+# define enif_realloc_binary ERL_NIF_API_FUNC_MACRO(enif_realloc_binary)
+# define enif_release_binary ERL_NIF_API_FUNC_MACRO(enif_release_binary)
+# define enif_get_int ERL_NIF_API_FUNC_MACRO(enif_get_int)
+# define enif_get_ulong ERL_NIF_API_FUNC_MACRO(enif_get_ulong)
+# define enif_get_double ERL_NIF_API_FUNC_MACRO(enif_get_double)
+# define enif_get_tuple ERL_NIF_API_FUNC_MACRO(enif_get_tuple)
+# define enif_get_list_cell ERL_NIF_API_FUNC_MACRO(enif_get_list_cell)
+# define enif_is_identical ERL_NIF_API_FUNC_MACRO(enif_is_identical)
+# define enif_compare ERL_NIF_API_FUNC_MACRO(enif_compare)
+
+# define enif_make_binary ERL_NIF_API_FUNC_MACRO(enif_make_binary)
+# define enif_make_badarg ERL_NIF_API_FUNC_MACRO(enif_make_badarg)
+# define enif_make_int ERL_NIF_API_FUNC_MACRO(enif_make_int)
+# define enif_make_ulong ERL_NIF_API_FUNC_MACRO(enif_make_ulong)
+# define enif_make_double ERL_NIF_API_FUNC_MACRO(enif_make_double)
+# define enif_make_atom ERL_NIF_API_FUNC_MACRO(enif_make_atom)
+# define enif_make_existing_atom ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom)
+# define enif_make_tuple ERL_NIF_API_FUNC_MACRO(enif_make_tuple)
+# define enif_make_list ERL_NIF_API_FUNC_MACRO(enif_make_list)
+# define enif_make_list_cell ERL_NIF_API_FUNC_MACRO(enif_make_list_cell)
+# define enif_make_string ERL_NIF_API_FUNC_MACRO(enif_make_string)
+# define enif_make_ref ERL_NIF_API_FUNC_MACRO(enif_make_ref)
+
+# define enif_mutex_create ERL_NIF_API_FUNC_MACRO(enif_mutex_create)
+# define enif_mutex_destroy ERL_NIF_API_FUNC_MACRO(enif_mutex_destroy)
+# define enif_mutex_trylock ERL_NIF_API_FUNC_MACRO(enif_mutex_trylock)
+# define enif_mutex_lock ERL_NIF_API_FUNC_MACRO(enif_mutex_lock)
+# define enif_mutex_unlock ERL_NIF_API_FUNC_MACRO(enif_mutex_unlock)
+# define enif_cond_create ERL_NIF_API_FUNC_MACRO(enif_cond_create)
+# define enif_cond_destroy ERL_NIF_API_FUNC_MACRO(enif_cond_destroy)
+# define enif_cond_signal ERL_NIF_API_FUNC_MACRO(enif_cond_signal)
+# define enif_cond_broadcast ERL_NIF_API_FUNC_MACRO(enif_cond_broadcast)
+# define enif_cond_wait ERL_NIF_API_FUNC_MACRO(enif_cond_wait)
+# define enif_rwlock_create ERL_NIF_API_FUNC_MACRO(enif_rwlock_create)
+# define enif_rwlock_destroy ERL_NIF_API_FUNC_MACRO(enif_rwlock_destroy)
+# define enif_rwlock_tryrlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_tryrlock)
+# define enif_rwlock_rlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rlock)
+# define enif_rwlock_runlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_runlock)
+# define enif_rwlock_tryrwlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_tryrwlock)
+# define enif_rwlock_rwlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rwlock)
+# define enif_rwlock_rwunlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rwunlock)
+# define enif_tsd_key_create ERL_NIF_API_FUNC_MACRO(enif_tsd_key_create)
+# define enif_tsd_key_destroy ERL_NIF_API_FUNC_MACRO(enif_tsd_key_destroy)
+# define enif_tsd_set ERL_NIF_API_FUNC_MACRO(enif_tsd_set)
+# define enif_tsd_get ERL_NIF_API_FUNC_MACRO(enif_tsd_get)
+# define enif_thread_opts_create ERL_NIF_API_FUNC_MACRO(enif_thread_opts_create)
+# define enif_thread_opts_destroy ERL_NIF_API_FUNC_MACRO(enif_thread_opts_destroy)
+# define enif_thread_create ERL_NIF_API_FUNC_MACRO(enif_thread_create)
+# define enif_thread_self ERL_NIF_API_FUNC_MACRO(enif_thread_self)
+# define enif_equal_tids ERL_NIF_API_FUNC_MACRO(enif_equal_tids)
+# define enif_thread_exit ERL_NIF_API_FUNC_MACRO(enif_thread_exit)
+# define enif_thread_join ERL_NIF_API_FUNC_MACRO(enif_thread_join)
+
+# define enif_realloc ERL_NIF_API_FUNC_MACRO(enif_realloc)
+# define enif_system_info ERL_NIF_API_FUNC_MACRO(enif_system_info)
+# define enif_fprintf ERL_NIF_API_FUNC_MACRO(enif_fprintf)
+# define enif_inspect_iolist_as_binary ERL_NIF_API_FUNC_MACRO(enif_inspect_iolist_as_binary)
+# define enif_make_sub_binary ERL_NIF_API_FUNC_MACRO(enif_make_sub_binary)
+# define enif_get_string ERL_NIF_API_FUNC_MACRO(enif_get_string)
+# define enif_get_atom ERL_NIF_API_FUNC_MACRO(enif_get_atom)
+# define enif_is_fun ERL_NIF_API_FUNC_MACRO(enif_is_fun)
+# define enif_is_pid ERL_NIF_API_FUNC_MACRO(enif_is_pid)
+# define enif_is_port ERL_NIF_API_FUNC_MACRO(enif_is_port)
+# define enif_get_uint ERL_NIF_API_FUNC_MACRO(enif_get_uint)
+# define enif_get_long ERL_NIF_API_FUNC_MACRO(enif_get_long)
+# define enif_make_uint ERL_NIF_API_FUNC_MACRO(enif_make_uint)
+# define enif_make_long ERL_NIF_API_FUNC_MACRO(enif_make_long)
+# define enif_make_tuple_from_array ERL_NIF_API_FUNC_MACRO(enif_make_tuple_from_array)
+# define enif_make_list_from_array ERL_NIF_API_FUNC_MACRO(enif_make_list_from_array)
+# define enif_is_empty_list ERL_NIF_API_FUNC_MACRO(enif_is_empty_list)
+# define enif_open_resource_type ERL_NIF_API_FUNC_MACRO(enif_open_resource_type)
+# define enif_alloc_resource ERL_NIF_API_FUNC_MACRO(enif_alloc_resource)
+# define enif_release_resource ERL_NIF_API_FUNC_MACRO(enif_release_resource)
+# define enif_make_resource ERL_NIF_API_FUNC_MACRO(enif_make_resource)
+# define enif_get_resource ERL_NIF_API_FUNC_MACRO(enif_get_resource)
+# define enif_sizeof_resource ERL_NIF_API_FUNC_MACRO(enif_sizeof_resource)
+# define enif_make_new_binary ERL_NIF_API_FUNC_MACRO(enif_make_new_binary)
+# define enif_is_list ERL_NIF_API_FUNC_MACRO(enif_is_list)
+# define enif_is_tuple ERL_NIF_API_FUNC_MACRO(enif_is_tuple)
+# define enif_get_atom_length ERL_NIF_API_FUNC_MACRO(enif_get_atom_length)
+# define enif_get_list_length ERL_NIF_API_FUNC_MACRO(enif_get_list_length)
+# define enif_make_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_atom_len)
+# define enif_make_existing_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom_len)
+# define enif_make_string_len ERL_NIF_API_FUNC_MACRO(enif_make_string_len)
+# define enif_alloc_env ERL_NIF_API_FUNC_MACRO(enif_alloc_env)
+# define enif_free_env ERL_NIF_API_FUNC_MACRO(enif_free_env)
+# define enif_clear_env ERL_NIF_API_FUNC_MACRO(enif_clear_env)
+# define enif_send ERL_NIF_API_FUNC_MACRO(enif_send)
+# define enif_make_copy ERL_NIF_API_FUNC_MACRO(enif_make_copy)
+# define enif_self ERL_NIF_API_FUNC_MACRO(enif_self)
+# define enif_get_local_pid ERL_NIF_API_FUNC_MACRO(enif_get_local_pid)
+# define enif_keep_resource ERL_NIF_API_FUNC_MACRO(enif_keep_resource)
+# define enif_make_resource_binary ERL_NIF_API_FUNC_MACRO(enif_make_resource_binary)
+#endif
+
+#ifndef enif_make_list1
+# define enif_make_list1(ENV,E1) enif_make_list(ENV,1,E1)
+# define enif_make_list2(ENV,E1,E2) enif_make_list(ENV,2,E1,E2)
+# define enif_make_list3(ENV,E1,E2,E3) enif_make_list(ENV,3,E1,E2,E3)
+# define enif_make_list4(ENV,E1,E2,E3,E4) enif_make_list(ENV,4,E1,E2,E3,E4)
+# define enif_make_list5(ENV,E1,E2,E3,E4,E5) enif_make_list(ENV,5,E1,E2,E3,E4,E5)
+# define enif_make_list6(ENV,E1,E2,E3,E4,E5,E6) enif_make_list(ENV,6,E1,E2,E3,E4,E5,E6)
+# define enif_make_list7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_list(ENV,7,E1,E2,E3,E4,E5,E6,E7)
+# define enif_make_list8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_list(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
+# define enif_make_list9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_list(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+# define enif_make_tuple1(ENV,E1) enif_make_tuple(ENV,1,E1)
+# define enif_make_tuple2(ENV,E1,E2) enif_make_tuple(ENV,2,E1,E2)
+# define enif_make_tuple3(ENV,E1,E2,E3) enif_make_tuple(ENV,3,E1,E2,E3)
+# define enif_make_tuple4(ENV,E1,E2,E3,E4) enif_make_tuple(ENV,4,E1,E2,E3,E4)
+# define enif_make_tuple5(ENV,E1,E2,E3,E4,E5) enif_make_tuple(ENV,5,E1,E2,E3,E4,E5)
+# define enif_make_tuple6(ENV,E1,E2,E3,E4,E5,E6) enif_make_tuple(ENV,6,E1,E2,E3,E4,E5,E6)
+# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
+# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
+# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+
+# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
+#endif
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_4/README b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/README
new file mode 100644
index 0000000000..7abd0319a6
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/README
@@ -0,0 +1,6 @@
+These are old genuine header files
+checked out from tag OTP_R16B 05f11890bdfec4bfc3a78e191
+
+I chose this API version (2.4) to test, as it predates
+the addition of 'options' in ErlNifEntry and 'flags' in ErlNifFunc,
+and does not include the generated erl_native_features_config.h.
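For contrast with what this snapshot predates: later API versions extend ErlNifFunc with a 'flags' member (used, for example, to mark dirty-scheduler NIFs). A rough sketch of that newer layout, for comparison with the 2.4 definition in the header below:

/* Later erl_nif.h versions (sketch; compare with the 2.4 struct below) */
typedef struct {
    const char* name;
    unsigned arity;
    ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
    unsigned flags;   /* e.g. ERL_NIF_DIRTY_JOB_CPU_BOUND; absent in 2.4 */
} ErlNifFunc;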
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_drv_nif.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_drv_nif.h
new file mode 100644
index 0000000000..ea013a49a3
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_drv_nif.h
@@ -0,0 +1,48 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Common structures for both erl_driver.h and erl_nif.h
+ */
+
+#ifndef __ERL_DRV_NIF_H__
+#define __ERL_DRV_NIF_H__
+
+typedef struct {
+ int driver_major_version;
+ int driver_minor_version;
+ char *erts_version;
+ char *otp_release;
+ int thread_support;
+ int smp_support;
+ int async_threads;
+ int scheduler_threads;
+ int nif_major_version;
+ int nif_minor_version;
+} ErlDrvSysInfo;
+
+typedef struct {
+ int suggested_stack_size;
+} ErlDrvThreadOpts;
+
+#endif /* __ERL_DRV_NIF_H__ */
+
+
+
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif.h
new file mode 100644
index 0000000000..8006741a63
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif.h
@@ -0,0 +1,237 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Include file for writers of Native Implemented Functions.
+*/
+
+#ifndef __ERL_NIF_H__
+#define __ERL_NIF_H__
+
+
+#include "erl_drv_nif.h"
+
+/* Version history:
+** 0.1: R13B03
+** 1.0: R13B04
+** 2.0: R14A
+** 2.1: R14B02 "vm_variant"
+** 2.2: R14B03 enif_is_exception
+** 2.3: R15 enif_make_reverse_list, enif_is_number
+** 2.4: R16 enif_consume_timeslice
+*/
+#define ERL_NIF_MAJOR_VERSION 2
+#define ERL_NIF_MINOR_VERSION 4
+
+#include <stdlib.h>
+
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
+# undef HALFWORD_HEAP_EMULATOR
+#endif
+#include "erl_int_sizes_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef unsigned __int64 ErlNifUInt64;
+typedef __int64 ErlNifSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlNifUInt64;
+typedef long ErlNifSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlNifUInt64;
+typedef long long ErlNifSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
+#ifdef HALFWORD_HEAP_EMULATOR
+# define ERL_NIF_VM_VARIANT "beam.halfword"
+typedef unsigned int ERL_NIF_TERM;
+#else
+# define ERL_NIF_VM_VARIANT "beam.vanilla"
+# if SIZEOF_LONG == SIZEOF_VOID_P
+typedef unsigned long ERL_NIF_TERM;
+# elif SIZEOF_LONG_LONG == SIZEOF_VOID_P
+typedef unsigned long long ERL_NIF_TERM;
+# endif
+#endif
+
+struct enif_environment_t;
+typedef struct enif_environment_t ErlNifEnv;
+
+typedef struct
+{
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+}ErlNifFunc;
+
+typedef struct enif_entry_t
+{
+ int major;
+ int minor;
+ const char* name;
+ int num_of_funcs;
+ ErlNifFunc* funcs;
+ int (*load) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+ void (*unload) (ErlNifEnv*, void* priv_data);
+ const char* vm_variant;
+}ErlNifEntry;
+
+
+
+typedef struct
+{
+ size_t size;
+ unsigned char* data;
+
+ /* Internals (avert your eyes) */
+ ERL_NIF_TERM bin_term;
+ void* ref_bin;
+}ErlNifBinary;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef enum
+{
+ ERL_NIF_RT_CREATE = 1,
+ ERL_NIF_RT_TAKEOVER = 2
+}ErlNifResourceFlags;
+
+typedef enum
+{
+ ERL_NIF_LATIN1 = 1
+}ErlNifCharEncoding;
+
+typedef struct
+{
+ ERL_NIF_TERM pid; /* internal, may change */
+}ErlNifPid;
+
+typedef ErlDrvSysInfo ErlNifSysInfo;
+
+typedef struct ErlDrvTid_ *ErlNifTid;
+typedef struct ErlDrvMutex_ ErlNifMutex;
+typedef struct ErlDrvCond_ ErlNifCond;
+typedef struct ErlDrvRWLock_ ErlNifRWLock;
+typedef int ErlNifTSDKey;
+
+typedef ErlDrvThreadOpts ErlNifThreadOpts;
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
+typedef struct {
+# include "erl_nif_api_funcs.h"
+} TWinDynNifCallbacks;
+extern TWinDynNifCallbacks WinDynNifCallbacks;
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER)
+# define ERL_NIF_API_FUNC_MACRO(NAME) (WinDynNifCallbacks.NAME)
+# include "erl_nif_api_funcs.h"
+/* note that we have to keep ERL_NIF_API_FUNC_MACRO defined */
+
+#else /* non windows or included from emulator itself */
+
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) extern RET_TYPE NAME ARGS
+# include "erl_nif_api_funcs.h"
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
+# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+#else
+# define ERL_NIF_INIT_GLOB
+# define ERL_NIF_INIT_BODY
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+#endif
+
+
+#ifdef __cplusplus
+}
+# define ERL_NIF_INIT_PROLOGUE extern "C" {
+# define ERL_NIF_INIT_EPILOGUE }
+#else
+# define ERL_NIF_INIT_PROLOGUE
+# define ERL_NIF_INIT_EPILOGUE
+#endif
+
+
+#define ERL_NIF_INIT(NAME, FUNCS, LOAD, RELOAD, UPGRADE, UNLOAD) \
+ERL_NIF_INIT_PROLOGUE \
+ERL_NIF_INIT_GLOB \
+ERL_NIF_INIT_DECL(NAME); \
+ERL_NIF_INIT_DECL(NAME) \
+{ \
+ static ErlNifEntry entry = \
+ { \
+ ERL_NIF_MAJOR_VERSION, \
+ ERL_NIF_MINOR_VERSION, \
+ #NAME, \
+ sizeof(FUNCS) / sizeof(*FUNCS), \
+ FUNCS, \
+ LOAD, RELOAD, UPGRADE, UNLOAD, \
+ ERL_NIF_VM_VARIANT \
+ }; \
+ ERL_NIF_INIT_BODY; \
+ return &entry; \
+} \
+ERL_NIF_INIT_EPILOGUE
+
+#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
+#define HAVE_USE_DTRACE 1
+#endif
+
+#ifdef HAVE_USE_DTRACE
+ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+#endif
+
+#endif /* __ERL_NIF_H__ */
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif_api_funcs.h b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif_api_funcs.h
new file mode 100644
index 0000000000..2f841645e1
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_api_2_4/erl_nif_api_funcs.h
@@ -0,0 +1,503 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#if !defined(ERL_NIF_API_FUNC_DECL) && !defined(ERL_NIF_API_FUNC_MACRO)
+# error This file should not be included directly
+#endif
+
+/*
+** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** to keep compatibility on Windows!!!
+**
+** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
+** when adding functions to the API.
+*/
+#ifdef ERL_NIF_API_FUNC_DECL
+ERL_NIF_API_FUNC_DECL(void*,enif_priv_data,(ErlNifEnv*));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_free,(void* ptr));
+ERL_NIF_API_FUNC_DECL(int,enif_is_atom,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_binary,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_ref,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(size_t size, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifBinary* bin, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_get_int,(ErlNifEnv*, ERL_NIF_TERM term, int* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_ulong,(ErlNifEnv*, ERL_NIF_TERM term, unsigned long* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_double,(ErlNifEnv*, ERL_NIF_TERM term, double* dp));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_cell,(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM* head, ERL_NIF_TERM* tail));
+ERL_NIF_API_FUNC_DECL(int,enif_get_tuple,(ErlNifEnv* env, ERL_NIF_TERM tpl, int* arity, const ERL_NIF_TERM** array));
+ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_compare,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_binary,(ErlNifEnv* env, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_badarg,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int,(ErlNifEnv* env, int i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ulong,(ErlNifEnv* env, unsigned long i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_double,(ErlNifEnv* env, double d));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_atom,(ErlNifEnv* env, const char* name));
+ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_cell,(ErlNifEnv* env, ERL_NIF_TERM car, ERL_NIF_TERM cdr));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string,(ErlNifEnv* env, const char* string, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ref,(ErlNifEnv* env));
+
+ERL_NIF_API_FUNC_DECL(ErlNifMutex*,enif_mutex_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_destroy,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(int,enif_mutex_trylock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_lock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(void,enif_mutex_unlock,(ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(ErlNifCond*,enif_cond_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_destroy,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_signal,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_broadcast,(ErlNifCond *cnd));
+ERL_NIF_API_FUNC_DECL(void,enif_cond_wait,(ErlNifCond *cnd, ErlNifMutex *mtx));
+ERL_NIF_API_FUNC_DECL(ErlNifRWLock*,enif_rwlock_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_destroy,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_rwlock_tryrlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_runlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_rwlock_tryrwlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rwlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(void,enif_rwlock_rwunlock,(ErlNifRWLock *rwlck));
+ERL_NIF_API_FUNC_DECL(int,enif_tsd_key_create,(char *name, ErlNifTSDKey *key));
+ERL_NIF_API_FUNC_DECL(void,enif_tsd_key_destroy,(ErlNifTSDKey key));
+ERL_NIF_API_FUNC_DECL(void,enif_tsd_set,(ErlNifTSDKey key, void *data));
+ERL_NIF_API_FUNC_DECL(void*,enif_tsd_get,(ErlNifTSDKey key));
+ERL_NIF_API_FUNC_DECL(ErlNifThreadOpts*,enif_thread_opts_create,(char *name));
+ERL_NIF_API_FUNC_DECL(void,enif_thread_opts_destroy,(ErlNifThreadOpts *opts));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_create,(char *name,ErlNifTid *tid,void * (*func)(void *),void *args,ErlNifThreadOpts *opts));
+ERL_NIF_API_FUNC_DECL(ErlNifTid,enif_thread_self,(void));
+ERL_NIF_API_FUNC_DECL(int,enif_equal_tids,(ErlNifTid tid1, ErlNifTid tid2));
+ERL_NIF_API_FUNC_DECL(void,enif_thread_exit,(void *resp));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_join,(ErlNifTid, void **respp));
+
+ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(void* ptr, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_system_info,(ErlNifSysInfo *sip, size_t si_size));
+ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(void/* FILE* */ *filep, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_iolist_as_binary,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, size_t pos, size_t size));
+ERL_NIF_API_FUNC_DECL(int,enif_get_string,(ErlNifEnv*, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_is_fun,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_pid,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_port,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_get_uint,(ErlNifEnv*, ERL_NIF_TERM term, unsigned* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_long,(ErlNifEnv*, ERL_NIF_TERM term, long* ip));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint,(ErlNifEnv*, unsigned i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_long,(ErlNifEnv*, long i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
+ERL_NIF_API_FUNC_DECL(int,enif_is_empty_list,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* module_str, const char* name_str, void (*dtor)(ErlNifEnv*,void *), ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifResourceType* type, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource,(ErlNifEnv*, void* obj));
+ERL_NIF_API_FUNC_DECL(int,enif_get_resource,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp));
+ERL_NIF_API_FUNC_DECL(size_t,enif_sizeof_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(unsigned char*,enif_make_new_binary,(ErlNifEnv*,size_t size,ERL_NIF_TERM* termp));
+ERL_NIF_API_FUNC_DECL(int,enif_is_list,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_tuple,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom_length,(ErlNifEnv*, ERL_NIF_TERM atom, unsigned* len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_length,(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_atom_len,(ErlNifEnv* env, const char* name, size_t len));
+ERL_NIF_API_FUNC_DECL(int, enif_make_existing_atom_len,(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ErlNifEnv*,enif_alloc_env,(void));
+ERL_NIF_API_FUNC_DECL(void,enif_free_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(void,enif_clear_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(int,enif_send,(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_copy,(ErlNifEnv* dst_env, ERL_NIF_TERM src_term));
+ERL_NIF_API_FUNC_DECL(ErlNifPid*,enif_self,(ErlNifEnv* caller_env, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(int,enif_get_local_pid,(ErlNifEnv* env, ERL_NIF_TERM, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(void,enif_keep_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* obj,const void* data, size_t size));
+#if SIZEOF_LONG != 8
+ERL_NIF_API_FUNC_DECL(int,enif_get_int64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifSInt64* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_uint64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifUInt64* ip));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int64,(ErlNifEnv*, ErlNifSInt64));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
+#endif
+ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list));
+ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
+ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
+
+/*
+** Add new entries here to keep compatibility on Windows!!!
+*/
+#endif
+
+/*
+** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
+** as the ERL_NIF_API_FUNC_DECL list above
+*/
+#ifdef ERL_NIF_API_FUNC_MACRO
+# define enif_priv_data ERL_NIF_API_FUNC_MACRO(enif_priv_data)
+# define enif_alloc ERL_NIF_API_FUNC_MACRO(enif_alloc)
+# define enif_free ERL_NIF_API_FUNC_MACRO(enif_free)
+# define enif_is_atom ERL_NIF_API_FUNC_MACRO(enif_is_atom)
+# define enif_is_binary ERL_NIF_API_FUNC_MACRO(enif_is_binary)
+# define enif_is_ref ERL_NIF_API_FUNC_MACRO(enif_is_ref)
+# define enif_inspect_binary ERL_NIF_API_FUNC_MACRO(enif_inspect_binary)
+# define enif_alloc_binary ERL_NIF_API_FUNC_MACRO(enif_alloc_binary)
+# define enif_realloc_binary ERL_NIF_API_FUNC_MACRO(enif_realloc_binary)
+# define enif_release_binary ERL_NIF_API_FUNC_MACRO(enif_release_binary)
+# define enif_get_int ERL_NIF_API_FUNC_MACRO(enif_get_int)
+# define enif_get_ulong ERL_NIF_API_FUNC_MACRO(enif_get_ulong)
+# define enif_get_double ERL_NIF_API_FUNC_MACRO(enif_get_double)
+# define enif_get_tuple ERL_NIF_API_FUNC_MACRO(enif_get_tuple)
+# define enif_get_list_cell ERL_NIF_API_FUNC_MACRO(enif_get_list_cell)
+# define enif_is_identical ERL_NIF_API_FUNC_MACRO(enif_is_identical)
+# define enif_compare ERL_NIF_API_FUNC_MACRO(enif_compare)
+
+# define enif_make_binary ERL_NIF_API_FUNC_MACRO(enif_make_binary)
+# define enif_make_badarg ERL_NIF_API_FUNC_MACRO(enif_make_badarg)
+# define enif_make_int ERL_NIF_API_FUNC_MACRO(enif_make_int)
+# define enif_make_ulong ERL_NIF_API_FUNC_MACRO(enif_make_ulong)
+# define enif_make_double ERL_NIF_API_FUNC_MACRO(enif_make_double)
+# define enif_make_atom ERL_NIF_API_FUNC_MACRO(enif_make_atom)
+# define enif_make_existing_atom ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom)
+# define enif_make_tuple ERL_NIF_API_FUNC_MACRO(enif_make_tuple)
+# define enif_make_list ERL_NIF_API_FUNC_MACRO(enif_make_list)
+# define enif_make_list_cell ERL_NIF_API_FUNC_MACRO(enif_make_list_cell)
+# define enif_make_string ERL_NIF_API_FUNC_MACRO(enif_make_string)
+# define enif_make_ref ERL_NIF_API_FUNC_MACRO(enif_make_ref)
+
+# define enif_mutex_create ERL_NIF_API_FUNC_MACRO(enif_mutex_create)
+# define enif_mutex_destroy ERL_NIF_API_FUNC_MACRO(enif_mutex_destroy)
+# define enif_mutex_trylock ERL_NIF_API_FUNC_MACRO(enif_mutex_trylock)
+# define enif_mutex_lock ERL_NIF_API_FUNC_MACRO(enif_mutex_lock)
+# define enif_mutex_unlock ERL_NIF_API_FUNC_MACRO(enif_mutex_unlock)
+# define enif_cond_create ERL_NIF_API_FUNC_MACRO(enif_cond_create)
+# define enif_cond_destroy ERL_NIF_API_FUNC_MACRO(enif_cond_destroy)
+# define enif_cond_signal ERL_NIF_API_FUNC_MACRO(enif_cond_signal)
+# define enif_cond_broadcast ERL_NIF_API_FUNC_MACRO(enif_cond_broadcast)
+# define enif_cond_wait ERL_NIF_API_FUNC_MACRO(enif_cond_wait)
+# define enif_rwlock_create ERL_NIF_API_FUNC_MACRO(enif_rwlock_create)
+# define enif_rwlock_destroy ERL_NIF_API_FUNC_MACRO(enif_rwlock_destroy)
+# define enif_rwlock_tryrlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_tryrlock)
+# define enif_rwlock_rlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rlock)
+# define enif_rwlock_runlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_runlock)
+# define enif_rwlock_tryrwlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_tryrwlock)
+# define enif_rwlock_rwlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rwlock)
+# define enif_rwlock_rwunlock ERL_NIF_API_FUNC_MACRO(enif_rwlock_rwunlock)
+# define enif_tsd_key_create ERL_NIF_API_FUNC_MACRO(enif_tsd_key_create)
+# define enif_tsd_key_destroy ERL_NIF_API_FUNC_MACRO(enif_tsd_key_destroy)
+# define enif_tsd_set ERL_NIF_API_FUNC_MACRO(enif_tsd_set)
+# define enif_tsd_get ERL_NIF_API_FUNC_MACRO(enif_tsd_get)
+# define enif_thread_opts_create ERL_NIF_API_FUNC_MACRO(enif_thread_opts_create)
+# define enif_thread_opts_destroy ERL_NIF_API_FUNC_MACRO(enif_thread_opts_destroy)
+# define enif_thread_create ERL_NIF_API_FUNC_MACRO(enif_thread_create)
+# define enif_thread_self ERL_NIF_API_FUNC_MACRO(enif_thread_self)
+# define enif_equal_tids ERL_NIF_API_FUNC_MACRO(enif_equal_tids)
+# define enif_thread_exit ERL_NIF_API_FUNC_MACRO(enif_thread_exit)
+# define enif_thread_join ERL_NIF_API_FUNC_MACRO(enif_thread_join)
+
+# define enif_realloc ERL_NIF_API_FUNC_MACRO(enif_realloc)
+# define enif_system_info ERL_NIF_API_FUNC_MACRO(enif_system_info)
+# define enif_fprintf ERL_NIF_API_FUNC_MACRO(enif_fprintf)
+# define enif_inspect_iolist_as_binary ERL_NIF_API_FUNC_MACRO(enif_inspect_iolist_as_binary)
+# define enif_make_sub_binary ERL_NIF_API_FUNC_MACRO(enif_make_sub_binary)
+# define enif_get_string ERL_NIF_API_FUNC_MACRO(enif_get_string)
+# define enif_get_atom ERL_NIF_API_FUNC_MACRO(enif_get_atom)
+# define enif_is_fun ERL_NIF_API_FUNC_MACRO(enif_is_fun)
+# define enif_is_pid ERL_NIF_API_FUNC_MACRO(enif_is_pid)
+# define enif_is_port ERL_NIF_API_FUNC_MACRO(enif_is_port)
+# define enif_get_uint ERL_NIF_API_FUNC_MACRO(enif_get_uint)
+# define enif_get_long ERL_NIF_API_FUNC_MACRO(enif_get_long)
+# define enif_make_uint ERL_NIF_API_FUNC_MACRO(enif_make_uint)
+# define enif_make_long ERL_NIF_API_FUNC_MACRO(enif_make_long)
+# define enif_make_tuple_from_array ERL_NIF_API_FUNC_MACRO(enif_make_tuple_from_array)
+# define enif_make_list_from_array ERL_NIF_API_FUNC_MACRO(enif_make_list_from_array)
+# define enif_is_empty_list ERL_NIF_API_FUNC_MACRO(enif_is_empty_list)
+# define enif_open_resource_type ERL_NIF_API_FUNC_MACRO(enif_open_resource_type)
+# define enif_alloc_resource ERL_NIF_API_FUNC_MACRO(enif_alloc_resource)
+# define enif_release_resource ERL_NIF_API_FUNC_MACRO(enif_release_resource)
+# define enif_make_resource ERL_NIF_API_FUNC_MACRO(enif_make_resource)
+# define enif_get_resource ERL_NIF_API_FUNC_MACRO(enif_get_resource)
+# define enif_sizeof_resource ERL_NIF_API_FUNC_MACRO(enif_sizeof_resource)
+# define enif_make_new_binary ERL_NIF_API_FUNC_MACRO(enif_make_new_binary)
+# define enif_is_list ERL_NIF_API_FUNC_MACRO(enif_is_list)
+# define enif_is_tuple ERL_NIF_API_FUNC_MACRO(enif_is_tuple)
+# define enif_get_atom_length ERL_NIF_API_FUNC_MACRO(enif_get_atom_length)
+# define enif_get_list_length ERL_NIF_API_FUNC_MACRO(enif_get_list_length)
+# define enif_make_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_atom_len)
+# define enif_make_existing_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom_len)
+# define enif_make_string_len ERL_NIF_API_FUNC_MACRO(enif_make_string_len)
+# define enif_alloc_env ERL_NIF_API_FUNC_MACRO(enif_alloc_env)
+# define enif_free_env ERL_NIF_API_FUNC_MACRO(enif_free_env)
+# define enif_clear_env ERL_NIF_API_FUNC_MACRO(enif_clear_env)
+# define enif_send ERL_NIF_API_FUNC_MACRO(enif_send)
+# define enif_make_copy ERL_NIF_API_FUNC_MACRO(enif_make_copy)
+# define enif_self ERL_NIF_API_FUNC_MACRO(enif_self)
+# define enif_get_local_pid ERL_NIF_API_FUNC_MACRO(enif_get_local_pid)
+# define enif_keep_resource ERL_NIF_API_FUNC_MACRO(enif_keep_resource)
+# define enif_make_resource_binary ERL_NIF_API_FUNC_MACRO(enif_make_resource_binary)
+#if SIZEOF_LONG != 8
+# define enif_get_int64 ERL_NIF_API_FUNC_MACRO(enif_get_int64)
+# define enif_get_uint64 ERL_NIF_API_FUNC_MACRO(enif_get_uint64)
+# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
+# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
+#endif
+
+# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
+# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
+# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
+# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
+# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
+# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
+
+/*
+** Add new entries here
+*/
+#endif
+
+
+#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+
+/* Inline functions for compile time type checking of arguments to
+ variadic functions.
+*/
+
+# define ERL_NIF_INLINE __inline__
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_tuple(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_tuple(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_tuple(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_tuple(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_tuple(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_tuple(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_tuple(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_tuple(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_tuple(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_list(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_list(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_list(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_list(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_list(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_list(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_list(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_list(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_list(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+# undef ERL_NIF_INLINE
+
+#else /* fallback with macros */
+
+#ifndef enif_make_list1
+# define enif_make_list1(ENV,E1) enif_make_list(ENV,1,E1)
+# define enif_make_list2(ENV,E1,E2) enif_make_list(ENV,2,E1,E2)
+# define enif_make_list3(ENV,E1,E2,E3) enif_make_list(ENV,3,E1,E2,E3)
+# define enif_make_list4(ENV,E1,E2,E3,E4) enif_make_list(ENV,4,E1,E2,E3,E4)
+# define enif_make_list5(ENV,E1,E2,E3,E4,E5) enif_make_list(ENV,5,E1,E2,E3,E4,E5)
+# define enif_make_list6(ENV,E1,E2,E3,E4,E5,E6) enif_make_list(ENV,6,E1,E2,E3,E4,E5,E6)
+# define enif_make_list7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_list(ENV,7,E1,E2,E3,E4,E5,E6,E7)
+# define enif_make_list8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_list(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
+# define enif_make_list9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_list(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+# define enif_make_tuple1(ENV,E1) enif_make_tuple(ENV,1,E1)
+# define enif_make_tuple2(ENV,E1,E2) enif_make_tuple(ENV,2,E1,E2)
+# define enif_make_tuple3(ENV,E1,E2,E3) enif_make_tuple(ENV,3,E1,E2,E3)
+# define enif_make_tuple4(ENV,E1,E2,E3,E4) enif_make_tuple(ENV,4,E1,E2,E3,E4)
+# define enif_make_tuple5(ENV,E1,E2,E3,E4,E5) enif_make_tuple(ENV,5,E1,E2,E3,E4,E5)
+# define enif_make_tuple6(ENV,E1,E2,E3,E4,E5,E6) enif_make_tuple(ENV,6,E1,E2,E3,E4,E5,E6)
+# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
+# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
+# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+#endif
+
+#endif /* __GNUC__ && !WIN32 */
+
+#ifndef enif_make_pid
+
+# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
+
+#if SIZEOF_LONG == 8
+# define enif_get_int64 enif_get_long
+# define enif_get_uint64 enif_get_ulong
+# define enif_make_int64 enif_make_long
+# define enif_make_uint64 enif_make_ulong
+#endif
+
+#endif
+
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_0.c b/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_0.c
new file mode 100644
index 0000000000..a554cc7f25
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_0.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_0/erl_nif.h"
+
+#define NIF_LIB_VER 1
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_4.c b/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_4.c
new file mode 100644
index 0000000000..6d28dbb8ba
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.1.2_4.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_4/erl_nif.h"
+
+#define NIF_LIB_VER 1
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_0.c b/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_0.c
new file mode 100644
index 0000000000..0731e6b5d0
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_0.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_0/erl_nif.h"
+
+#define NIF_LIB_VER 2
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_4.c b/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_4.c
new file mode 100644
index 0000000000..628fd42b52
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.2.2_4.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_4/erl_nif.h"
+
+#define NIF_LIB_VER 2
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_0.c b/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_0.c
new file mode 100644
index 0000000000..d7e676b668
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_0.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_0/erl_nif.h"
+
+#define NIF_LIB_VER 3
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_4.c b/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_4.c
new file mode 100644
index 0000000000..bdbe8cf381
--- /dev/null
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.3.2_4.c
@@ -0,0 +1,4 @@
+#include "nif_api_2_4/erl_nif.h"
+
+#define NIF_LIB_VER 3
+#include "nif_mod.c"
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.c b/erts/emulator/test/nif_SUITE_data/nif_mod.c
index fd8a0d0595..04699d3327 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.c
@@ -17,7 +17,7 @@
*
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <string.h>
#include <stdio.h>
@@ -176,6 +176,7 @@ static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info, int* retvalp)
CHECK(enif_is_empty_list(env, head));
}
+#if NIF_LIB_VER != 3
static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info)
{
NifModPrivData* data;
@@ -230,6 +231,7 @@ static void unload(ErlNifEnv* env, void* priv)
add_call(env, data, "unload");
NifModPrivData_release(data);
}
+#endif /* NIF_LIB_VER != 3 */
static ERL_NIF_TERM lib_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
@@ -237,10 +239,22 @@ static ERL_NIF_TERM lib_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg
return enif_make_int(env, NIF_LIB_VER);
}
+static ERL_NIF_TERM nif_api_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ /*ADD_CALL("nif_api_version");*/
+ return enif_make_tuple2(env,
+ enif_make_int(env, ERL_NIF_MAJOR_VERSION),
+ enif_make_int(env, ERL_NIF_MINOR_VERSION));
+}
+
static ERL_NIF_TERM get_priv_data_ptr(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
+ NifModPrivData** bin_data;
+ ERL_NIF_TERM res;
ADD_CALL("get_priv_data_ptr");
- return enif_make_uint64(env, (ErlNifUInt64)priv_data(env));
+ bin_data = (NifModPrivData**)enif_make_new_binary(env, sizeof(void*), &res);
+ *bin_data = priv_data(env);
+ return res;
}
static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -279,6 +293,7 @@ static ERL_NIF_TERM get_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
static ErlNifFunc nif_funcs[] =
{
{"lib_version", 0, lib_version},
+ {"nif_api_version", 0, nif_api_version},
{"get_priv_data_ptr", 0, get_priv_data_ptr},
{"make_new_resource", 2, make_new_resource},
{"get_resource", 2, get_resource}
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.erl b/erts/emulator/test/nif_SUITE_data/nif_mod.erl
index 1fcc33faa4..8019cfcf82 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.erl
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.erl
@@ -22,7 +22,7 @@
-include_lib("common_test/include/ct.hrl").
--export([load_nif_lib/2, load_nif_lib/3, start/0, lib_version/0, call_history/0,
+-export([load_nif_lib/2, load_nif_lib/3, start/0, lib_version/0,
get_priv_data_ptr/0, make_new_resource/2, get_resource/2]).
-export([loop/0, upgrade/1]).
@@ -35,21 +35,32 @@
on_load() ->
[{data_dir, Path}] = ets:lookup(nif_SUITE, data_dir),
[{lib_version, Ver}] = ets:lookup(nif_SUITE, lib_version),
- erlang:load_nif(filename:join(Path,libname(Ver)), []).
+ [{nif_api_version, API}] = ets:lookup(nif_SUITE, nif_api_version),
+ R = erlang:load_nif(filename:join(Path,libname(Ver,API)), []),
+ check_api_version(R, API).
-endif.
+check_api_version(Err, _) when Err =/= ok -> Err;
+check_api_version(ok, []) -> ok;
+check_api_version(ok, [$., MajC, $_ | MinS]) ->
+ {Maj, Min} = {list_to_integer([MajC]), list_to_integer(MinS)},
+ {Maj, Min} = nif_api_version(),
+ ok.
+
load_nif_lib(Config, Ver) ->
load_nif_lib(Config, Ver, []).
load_nif_lib(Config, Ver, LoadInfo) ->
Path = proplists:get_value(data_dir, Config),
- erlang:load_nif(filename:join(Path,libname(Ver)), LoadInfo).
-
-libname(no_init) -> libname(3);
-libname(Ver) when is_integer(Ver) ->
- "nif_mod." ++ integer_to_list(Ver).
+ API = proplists:get_value(nif_api_version, Config, ""),
+ R = erlang:load_nif(filename:join(Path,libname(Ver,API)), LoadInfo),
+ check_api_version(R, API).
+libname(no_init,API) -> libname(3,API);
+libname(Ver,API) when is_integer(Ver) ->
+ "nif_mod." ++ integer_to_list(Ver) ++ API.
+
start() ->
spawn_opt(?MODULE,loop,[],
[link, monitor]).
@@ -72,7 +83,9 @@ upgrade(Pid) ->
lib_version() -> % NIF
undefined.
-call_history() -> ?nif_stub.
+nif_api_version() -> %NIF
+ {undefined,undefined}.
+
get_priv_data_ptr() -> ?nif_stub.
make_new_resource(_,_) -> ?nif_stub.
get_resource(_,_) -> ?nif_stub.
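Annotation, not part of the patch: a worked example of the new helpers above. libname/2 appends the API suffix to the library base name (matching the nif_mod.N.MAJ_MIN.c wrappers added earlier in this patch), and check_api_version/2 parses that suffix and compares it with what the loaded library reports. The sketch assumes the library built from nif_mod.2.2_0.c is loaded, so nif_api_version() returns {2,0}; dummy is a placeholder error term.

    libname_example() ->
        "nif_mod.2.2_0" = libname(2, ".2_0"),
        ok = check_api_version(ok, ".2_0"),                           %% {2,0} =:= nif_api_version()
        ok = check_api_version(ok, []),                               %% empty suffix: nothing to verify
        {error, dummy} = check_api_version({error, dummy}, ".2_0"),   %% load errors pass through
        ok.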
diff --git a/erts/emulator/test/nif_SUITE_data/testcase_driver.h b/erts/emulator/test/nif_SUITE_data/testcase_driver.h
index e32e63069a..feb10ecaea 100644
--- a/erts/emulator/test/nif_SUITE_data/testcase_driver.h
+++ b/erts/emulator/test/nif_SUITE_data/testcase_driver.h
@@ -20,7 +20,7 @@
#ifndef TESTCASE_DRIVER_H__
#define TESTCASE_DRIVER_H__
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdlib.h>
#include <stdio.h>
diff --git a/erts/emulator/test/nif_SUITE_data/tester.c b/erts/emulator/test/nif_SUITE_data/tester.c
index 257b116322..ea4afd924d 100644
--- a/erts/emulator/test/nif_SUITE_data/tester.c
+++ b/erts/emulator/test/nif_SUITE_data/tester.c
@@ -1,4 +1,4 @@
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdio.h>
#include <stdarg.h>
@@ -53,7 +53,7 @@ void testcase_free(void *ptr)
void testcase_run(TestCaseState_t *tcs);
-static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
return 0;
}
@@ -70,5 +70,5 @@ static ErlNifFunc nif_funcs[] =
{"run", 0, run}
};
-ERL_NIF_INIT(tester,nif_funcs,NULL,reload,NULL,NULL)
+ERL_NIF_INIT(tester,nif_funcs,NULL,NULL,upgrade,NULL)
diff --git a/erts/emulator/test/node_container_SUITE.erl b/erts/emulator/test/node_container_SUITE.erl
index af18545bff..291d3ee30d 100644
--- a/erts/emulator/test/node_container_SUITE.erl
+++ b/erts/emulator/test/node_container_SUITE.erl
@@ -50,7 +50,8 @@
port_wrap/1,
bad_nc/1,
unique_pid/1,
- iter_max_procs/1]).
+ iter_max_procs/1,
+ magic_ref/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -62,7 +63,7 @@ all() ->
node_table_gc, dist_link_refc, dist_monitor_refc,
node_controller_refc, ets_refc, match_spec_refc,
timer_refc, otp_4715, pid_wrap, port_wrap, bad_nc,
- unique_pid, iter_max_procs].
+ unique_pid, iter_max_procs, magic_ref].
init_per_suite(Config) ->
Config.
@@ -152,7 +153,7 @@ ttbtteq_do_remote(RNode) ->
%%
%% Test case: round_trip_eq
%%
-%% Tests that node containers that are sent beteen nodes stay equal to themselves.
+%% Tests that node containers that are sent between nodes stay equal to themselves.
round_trip_eq(Config) when is_list(Config) ->
ThisNode = {node(), erlang:system_info(creation)},
NodeFirstName = get_nodefirstname(),
@@ -889,10 +890,46 @@ chk_max_proc_line_until(NoMoreTests, Res) ->
chk_max_proc_line_until(NoMoreTests, Res)
end.
+magic_ref(Config) when is_list(Config) ->
+ {MRef0, Addr0} = erts_debug:set_internal_state(make, magic_ref),
+ true = is_reference(MRef0),
+ {Addr0, 1, true} = erts_debug:get_internal_state({magic_ref,MRef0}),
+ MRef1 = binary_to_term(term_to_binary(MRef0)),
+ {Addr0, 2, true} = erts_debug:get_internal_state({magic_ref,MRef1}),
+ MRef0 = MRef1,
+ Me = self(),
+ {Pid, Mon} = spawn_opt(fun () ->
+ receive
+ {Me, MRef} ->
+ Me ! {self(), erts_debug:get_internal_state({magic_ref,MRef})}
+ end
+ end,
+ [link, monitor]),
+ Pid ! {self(), MRef0},
+ receive
+ {Pid, Info} ->
+ {Addr0, 3, true} = Info
+ end,
+ receive
+ {'DOWN', Mon, process, Pid, _} ->
+ ok
+ end,
+ {Addr0, 2, true} = erts_debug:get_internal_state({magic_ref,MRef0}),
+ id(MRef0),
+ id(MRef1),
+ MRefExt = term_to_binary(erts_debug:set_internal_state(make, magic_ref)),
+ garbage_collect(),
+ {MRef2, _Addr2} = binary_to_term(MRefExt),
+ true = is_reference(MRef2),
+ true = erts_debug:get_internal_state({magic_ref,MRef2}),
+ ok.
%%
%% -- Internal utils ---------------------------------------------------------
%%
+id(X) ->
+ X.
+
-define(ND_REFS, erts_debug:get_internal_state(node_and_dist_references)).
node_container_refc_check(Node) when is_atom(Node) ->
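Annotation, not part of the patch: the shapes of the erts_debug calls that magic_ref/1 above relies on. Both calls are undocumented internals and need erts_debug:set_internal_state(available_internal_state, true) beforehand; the tuple layout is an assumption read off the pattern matches in the test.

    magic_ref_shape() ->
        {MRef, Addr} = erts_debug:set_internal_state(make, magic_ref),
        %% {Address, local reference count, is-magic flag}; the count grows with
        %% each local copy of the ref (1, 2 and 3 in the test above)
        {Addr, 1, true} = erts_debug:get_internal_state({magic_ref, MRef}),
        MRef.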
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index bb85738454..f62eb0b430 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -32,6 +32,8 @@
%% list_to_integer/1
%% round/1
%% trunc/1
+%% floor/1
+%% ceil/1
%% integer_to_binary/1
%% integer_to_binary/2
%% binary_to_integer/1
@@ -41,7 +43,7 @@
t_float_to_string/1, t_integer_to_string/1,
t_string_to_integer/1, t_list_to_integer_edge_cases/1,
t_string_to_float_safe/1, t_string_to_float_risky/1,
- t_round/1, t_trunc/1
+ t_round/1, t_trunc_and_friends/1
]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -49,7 +51,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[t_abs, t_float, t_float_to_string, t_integer_to_string,
{group, t_string_to_float}, t_string_to_integer, t_round,
- t_trunc, t_list_to_integer_edge_cases].
+ t_trunc_and_friends, t_list_to_integer_edge_cases].
groups() ->
[{t_string_to_float, [],
@@ -106,7 +108,7 @@ t_float(Config) when is_list(Config) ->
4294967305.0 = float(id(4294967305)),
-4294967305.0 = float(id(-4294967305)),
- %% Extremly big bignums.
+ %% Extremely big bignums.
Big = id(list_to_integer(id(lists:duplicate(2000, $1)))),
{'EXIT', {badarg, _}} = (catch float(Big)),
@@ -298,30 +300,78 @@ t_round(Config) when is_list(Config) ->
-6209607916799025 = round(id(-6209607916799025.0)),
ok.
-t_trunc(Config) when is_list(Config) ->
- 0 = trunc(id(0.0)),
- 5 = trunc(id(5.3333)),
- -10 = trunc(id(-10.978987)),
+%% Test trunc/1, floor/1, ceil/1, and round/1.
+t_trunc_and_friends(_Config) ->
+ MinusZero = 0.0 / (-1.0),
+ 0 = trunc_and_friends(MinusZero),
+ 0 = trunc_and_friends(0.0),
+ 5 = trunc_and_friends(5.3333),
+ -10 = trunc_and_friends(-10.978987),
- % The largest smallnum, converted to float (OTP-3722):
+ %% The largest smallnum, converted to float (OTP-3722):
X = id((1 bsl 27) - 1),
- F = id(X + 0.0),
+ F = X + 0.0,
io:format("X = ~p/~w/~w, F = ~p/~w/~w, trunc(F) = ~p/~w/~w~n",
[X, X, binary_to_list(term_to_binary(X)),
F, F, binary_to_list(term_to_binary(F)),
- trunc(F), trunc(F), binary_to_list(term_to_binary(trunc(F)))]),
- X = trunc(F),
- X = trunc(F+1)-1,
- X = trunc(F-1)+1,
- X = -trunc(-F),
- X = -trunc(-F-1)-1,
- X = -trunc(-F+1)+1,
+ trunc_and_friends(F),
+ trunc_and_friends(F),
+ binary_to_list(term_to_binary(trunc_and_friends(F)))]),
+ X = trunc_and_friends(F),
+ X = trunc_and_friends(F+1)-1,
+ X = trunc_and_friends(F-1)+1,
+ X = -trunc_and_friends(-F),
+ X = -trunc_and_friends(-F-1)-1,
+ X = -trunc_and_friends(-F+1)+1,
%% Bignums.
- 4294967305 = trunc(id(4294967305.7)),
- -4294967305 = trunc(id(-4294967305.7)),
+ 4294967305 = trunc_and_friends(4294967305.7),
+ -4294967305 = trunc_and_friends(-4294967305.7),
+ 18446744073709551616 = trunc_and_friends(float(1 bsl 64)),
+ -18446744073709551616 = trunc_and_friends(-float(1 bsl 64)),
+
+ %% Random.
+ t_trunc_and_friends_rand(100),
ok.
+t_trunc_and_friends_rand(0) ->
+ ok;
+t_trunc_and_friends_rand(N) ->
+ F0 = rand:uniform() * math:pow(10, 50*rand:normal()),
+ F = case rand:uniform() of
+ U when U < 0.5 -> -F0;
+ _ -> F0
+ end,
+ _ = trunc_and_friends(F),
+ t_trunc_and_friends_rand(N-1).
+
+trunc_and_friends(F) ->
+ Trunc = trunc(F),
+ Floor = floor(F),
+ Ceil = ceil(F),
+ Round = round(F),
+
+ Trunc = trunc(Trunc),
+ Floor = floor(Floor),
+ Ceil = ceil(Ceil),
+ Round = round(Round),
+
+ Trunc = trunc(float(Trunc)),
+ Floor = floor(float(Floor)),
+ Ceil = ceil(float(Ceil)),
+ Round = round(float(Round)),
+
+ true = Floor =< Trunc andalso Trunc =< Ceil,
+ true = Ceil - Floor =< 1,
+ true = Round =:= Floor orelse Round =:= Ceil,
+
+ if
+ F < 0 ->
+ Trunc = Ceil;
+ true ->
+ Trunc = Floor
+ end,
+ Trunc.
%% Tests integer_to_binary/1.
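Annotation, not part of the patch: concrete values for the invariants trunc_and_friends/1 checks against the new floor/1 and ceil/1 BIFs (floor =< trunc =< ceil, ceil - floor =< 1, round is floor or ceil, and trunc agrees with ceil for negative input and with floor otherwise).

    trunc_and_friends_example() ->
        5 = trunc(5.3333),       5 = floor(5.3333),
        6 = ceil(5.3333),        5 = round(5.3333),
        -10 = trunc(-10.978987), -11 = floor(-10.978987),
        -10 = ceil(-10.978987),  -11 = round(-10.978987),
        ok.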
diff --git a/erts/emulator/test/old_scheduler_SUITE.erl b/erts/emulator/test/old_scheduler_SUITE.erl
index ffe7d40139..8515a87df8 100644
--- a/erts/emulator/test/old_scheduler_SUITE.erl
+++ b/erts/emulator/test/old_scheduler_SUITE.erl
@@ -64,11 +64,11 @@ all() ->
init_per_testcase(_Case, Config) ->
%% main test process needs max prio
Prio = process_flag(priority, max),
- MS = erlang:system_flag(multi_scheduling, block),
+ MS = erlang:system_flag(multi_scheduling, block_normal),
[{prio,Prio},{multi_scheduling, MS}|Config].
end_per_testcase(_Case, Config) ->
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
Prio=proplists:get_value(prio, Config),
process_flag(priority, Prio),
ok.
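Annotation, not part of the patch: the idiom the block/unblock changes in this and the following suites move to. block_normal suspends only the normal schedulers and leaves dirty schedulers running, which is all these tests need; the helper name below is made up.

    with_normal_schedulers_blocked(Fun) ->
        erlang:system_flag(multi_scheduling, block_normal),
        try Fun() after erlang:system_flag(multi_scheduling, unblock_normal) end.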
diff --git a/erts/emulator/test/os_signal_SUITE.erl b/erts/emulator/test/os_signal_SUITE.erl
new file mode 100644
index 0000000000..9aa49a453e
--- /dev/null
+++ b/erts/emulator/test/os_signal_SUITE.erl
@@ -0,0 +1,354 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% File: os_signal_SUITE.erl
+%% Author: Björn-Egil Dahlberg
+%% Created: 2017-01-13
+%%
+
+-module(os_signal_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-export([all/0, suite/0]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export([init_per_suite/1, end_per_suite/1]).
+
+-export([set_alarm/1, fork/0, get_exit_code/1]).
+
+% Test cases
+-export([set_unset/1,
+ t_sighup/1,
+ t_sigusr1/1,
+ t_sigusr2/1,
+ t_sigterm/1,
+ t_sigalrm/1,
+ t_sigchld/1,
+ t_sigchld_fork/1]).
+
+-define(signal_server, erl_signal_server).
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap, {minutes, 2}}].
+
+all() ->
+ case os:type() of
+ {win32, _} -> [];
+ _ -> [set_unset,
+ t_sighup,
+ t_sigusr1,
+ t_sigusr2,
+ t_sigterm,
+ t_sigalrm,
+ t_sigchld,
+ t_sigchld_fork]
+ end.
+
+init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ Pid = erlang:whereis(?signal_server),
+ true = erlang:unregister(?signal_server),
+ [{signal_server, Pid}|Config].
+
+end_per_testcase(_Func, Config) ->
+ case proplists:get_value(signal_server, Config) of
+ undefined -> ok;
+ Pid ->
+ true = erlang:register(?signal_server, Pid),
+ ok
+ end.
+
+init_per_suite(Config) ->
+ load_nif(Config),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+%% tests
+
+set_unset(_Config) ->
+ Signals = [sighup, %sigint,
+ sigquit, %sigill,
+ sigabrt,
+ sigalrm, sigterm,
+ sigusr1, sigusr2,
+ sigchld,
+ sigstop, sigtstp],
+ F1 = fun(Sig) -> ok = os:set_signal(Sig,handle) end,
+ F2 = fun(Sig) -> ok = os:set_signal(Sig,default) end,
+ F3 = fun(Sig) -> ok = os:set_signal(Sig,ignore) end,
+ %% set handle
+ ok = lists:foreach(F1, Signals),
+ %% set ignore
+ ok = lists:foreach(F2, Signals),
+ %% set default
+ ok = lists:foreach(F3, Signals),
+ ok.
+
+t_sighup(_Config) ->
+ Pid1 = setup_service(),
+ OsPid = os:getpid(),
+ os:set_signal(sighup, handle),
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sighup},
+ {notify,sighup},
+ {notify,sighup}] = Msgs1,
+ %% no proc
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ %% ignore
+ Pid2 = setup_service(),
+ os:set_signal(sighup, ignore),
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ ok = kill("HUP", OsPid),
+ Msgs2 = fetch_msgs(Pid2),
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ [] = Msgs2,
+ %% reset to handle (it's the default)
+ os:set_signal(sighup, handle),
+ ok.
+
+t_sigusr1(_Config) ->
+ Pid1 = setup_service(),
+ OsPid = os:getpid(),
+ os:set_signal(sigusr1, handle),
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sigusr1},
+ {notify,sigusr1},
+ {notify,sigusr1}] = Msgs1,
+ %% no proc
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ %% ignore
+ Pid2 = setup_service(),
+ os:set_signal(sigusr1, ignore),
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ ok = kill("USR1", OsPid),
+ Msgs2 = fetch_msgs(Pid2),
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ [] = Msgs2,
+ %% reset to handle
+ os:set_signal(sigusr1, handle),
+ ok.
+
+t_sigusr2(_Config) ->
+ Pid1 = setup_service(),
+ OsPid = os:getpid(),
+ os:set_signal(sigusr2, handle),
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sigusr2},
+ {notify,sigusr2},
+ {notify,sigusr2}] = Msgs1,
+ %% no proc
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ %% ignore
+ Pid2 = setup_service(),
+ os:set_signal(sigusr2, ignore),
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ ok = kill("USR2", OsPid),
+ Msgs2 = fetch_msgs(Pid2),
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ [] = Msgs2,
+ %% reset to ignore (it's the default)
+ os:set_signal(sigusr2, ignore),
+ ok.
+
+t_sigterm(_Config) ->
+ Pid1 = setup_service(),
+ OsPid = os:getpid(),
+ os:set_signal(sigterm, handle),
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sigterm},
+ {notify,sigterm},
+ {notify,sigterm}] = Msgs1,
+ %% no proc
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ %% ignore
+ Pid2 = setup_service(),
+ os:set_signal(sigterm, ignore),
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ ok = kill("TERM", OsPid),
+ Msgs2 = fetch_msgs(Pid2),
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ [] = Msgs2,
+ %% reset to handle (it's the default)
+ os:set_signal(sigterm, handle),
+ ok.
+
+t_sigchld(_Config) ->
+ Pid1 = setup_service(),
+ OsPid = os:getpid(),
+ os:set_signal(sigchld, handle),
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sigchld},
+ {notify,sigchld},
+ {notify,sigchld}] = Msgs1,
+ %% no proc
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ %% ignore
+ Pid2 = setup_service(),
+ os:set_signal(sigchld, ignore),
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ ok = kill("CHLD", OsPid),
+ Msgs2 = fetch_msgs(Pid2),
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ [] = Msgs2,
+ %% reset to ignore (it's the default)
+ os:set_signal(sigchld, ignore),
+ ok.
+
+
+t_sigalrm(_Config) ->
+ Pid1 = setup_service(),
+ ok = os:set_signal(sigalrm, handle),
+ ok = os_signal_SUITE:set_alarm(1),
+ receive after 3000 -> ok end,
+ Msgs1 = fetch_msgs(Pid1),
+ [{notify,sigalrm}] = Msgs1,
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ os:set_signal(sigalrm, ignore),
+ Pid2 = setup_service(),
+ ok = os_signal_SUITE:set_alarm(1),
+ receive after 3000 -> ok end,
+ Msgs2 = fetch_msgs(Pid2),
+ [] = Msgs2,
+ io:format("Msgs2: ~p~n", [Msgs2]),
+ Pid3 = setup_service(),
+ os:set_signal(sigalrm, handle),
+ ok = os_signal_SUITE:set_alarm(1),
+ receive after 3000 -> ok end,
+ Msgs3 = fetch_msgs(Pid3),
+ [{notify,sigalrm}] = Msgs3,
+ io:format("Msgs3: ~p~n", [Msgs3]),
+ os:set_signal(sigalrm, ignore),
+ ok.
+
+t_sigchld_fork(_Config) ->
+ Pid1 = setup_service(),
+ ok = os:set_signal(sigchld, handle),
+ {ok,OsPid} = os_signal_SUITE:fork(),
+ receive after 3000 -> ok end,
+ Msgs1 = fetch_msgs(Pid1),
+ io:format("Msgs1: ~p~n", [Msgs1]),
+ [{notify,sigchld}] = Msgs1,
+ {ok,Status} = os_signal_SUITE:get_exit_code(OsPid),
+ io:format("exit status from ~w : ~w~n", [OsPid,Status]),
+ 42 = Status,
+ %% reset to ignore (it's the default)
+ os:set_signal(sigchld, ignore),
+ ok.
+
+
+%% nif stubs
+
+set_alarm(_Secs) -> no.
+fork() -> no.
+get_exit_code(_OsPid) -> no.
+
+%% aux
+
+setup_service() ->
+ Pid = spawn_link(fun msgs/0),
+ true = erlang:register(?signal_server, Pid),
+ Pid.
+
+msgs() ->
+ msgs([]).
+msgs(Ms) ->
+ receive
+ {Pid, fetch_msgs} -> Pid ! {self(), lists:reverse(Ms)};
+ Msg ->
+ msgs([Msg|Ms])
+ end.
+
+fetch_msgs(Pid) ->
+ Pid ! {self(), fetch_msgs},
+ receive {Pid, Msgs} -> Msgs end.
+
+kill(Signal, Pid) ->
+ {0,_} = run("kill", ["-s", Signal, Pid]),
+ receive after 200 -> ok end,
+ ok.
+
+load_nif(Config) ->
+ Path = proplists:get_value(data_dir, Config),
+ ok = erlang:load_nif(filename:join(Path,"os_signal_nif"), 0).
+
+run(Program0, Args) -> run(".", Program0, Args).
+run(Cwd, Program0, Args) when is_list(Cwd) ->
+ Program = case os:find_executable(Program0) of
+ Path when is_list(Path) ->
+ Path;
+ false ->
+ exit(no)
+ end,
+ Options = [{args,Args},binary,exit_status,stderr_to_stdout,
+ {line,4096}, {cd, Cwd}],
+ try open_port({spawn_executable,Program}, Options) of
+ Port ->
+ run_loop(Port, [])
+ catch
+ error:_ ->
+ exit(no)
+ end.
+
+run_loop(Port, Output) ->
+ receive
+ {Port,{exit_status,Status}} ->
+ {Status,lists:reverse(Output)};
+ {Port,{data,{eol,Bin}}} ->
+ run_loop(Port, [Bin|Output]);
+ _Msg ->
+ run_loop(Port, Output)
+ end.
diff --git a/erts/emulator/test/os_signal_SUITE_data/Makefile.src b/erts/emulator/test/os_signal_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..a7f5cdbba5
--- /dev/null
+++ b/erts/emulator/test/os_signal_SUITE_data/Makefile.src
@@ -0,0 +1,6 @@
+
+NIF_LIBS = os_signal_nif@dll@
+
+all: $(NIF_LIBS)
+
+@SHLIB_RULES@
diff --git a/erts/emulator/test/os_signal_SUITE_data/os_signal_nif.c b/erts/emulator/test/os_signal_SUITE_data/os_signal_nif.c
new file mode 100644
index 0000000000..78e1348383
--- /dev/null
+++ b/erts/emulator/test/os_signal_SUITE_data/os_signal_nif.c
@@ -0,0 +1,66 @@
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+#include <erl_nif.h>
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ return 0;
+}
+
+static ERL_NIF_TERM set_alarm(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int t;
+ if (!enif_get_int(env, argv[0], &t)) {
+ return enif_make_badarg(env);
+ }
+
+ alarm(t);
+
+ return enif_make_atom(env, "ok");
+}
+
+static ERL_NIF_TERM fork_0(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ pid_t pid;
+
+ pid = fork();
+
+ if (pid == 0) {
+ /* child */
+ exit(42);
+ }
+
+ return enif_make_tuple(env, 2,
+ enif_make_atom(env, "ok"),
+ enif_make_int(env, (int)pid));
+}
+
+static ERL_NIF_TERM get_exit_code(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int x;
+ pid_t pid;
+ if (!enif_get_int(env, argv[0], &x)) {
+ return enif_make_badarg(env);
+ }
+
+ pid = (pid_t) x;
+
+ waitpid(pid, &x, 0);
+
+ return enif_make_tuple(env, 2,
+ enif_make_atom(env, "ok"),
+ enif_make_int(env, WEXITSTATUS(x)));
+}
+
+
+static ErlNifFunc nif_funcs[] =
+{
+ {"set_alarm", 1, set_alarm},
+ {"fork", 0, fork_0},
+ {"get_exit_code", 1, get_exit_code}
+};
+
+ERL_NIF_INIT(os_signal_SUITE,nif_funcs,load,NULL,NULL,NULL)
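Annotation, not part of the patch: the delivery model os_signal_SUITE exercises. Whatever process is registered as erl_signal_server receives a {notify, Signal} message for each signal enabled with os:set_signal(Sig, handle); the suite swaps in its own collector in init_per_testcase. A minimal standalone version of that idea:

    catch_one_hup() ->
        Default = whereis(erl_signal_server),
        true = unregister(erl_signal_server),
        true = register(erl_signal_server, self()),
        ok = os:set_signal(sighup, handle),
        %% running "kill -HUP <os pid>" against this node now produces:
        Result = receive {notify, sighup} -> ok after 5000 -> timeout end,
        true = unregister(erl_signal_server),
        true = register(erl_signal_server, Default),
        Result.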
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 23594aa8c4..c117554f90 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -2066,13 +2066,13 @@ exit_status_msb_test(Config, SleepSecs) when is_list(Config) ->
StartedTime = (erlang:monotonic_time(microsecond) - Start)/1000000,
io:format("StartedTime = ~p~n", [StartedTime]),
true = StartedTime < SleepSecs,
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
lists:foreach(fun (P) -> receive {P, done} -> ok end end, Procs),
DoneTime = (erlang:monotonic_time(microsecond) - Start)/1000000,
io:format("DoneTime = ~p~n", [DoneTime]),
true = DoneTime > SleepSecs,
ok = verify_multi_scheduling_blocked(),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
case {length(lists:usort(lists:flatten(SIds))), NoSchedsOnln} of
{N, N} ->
ok;
@@ -2131,7 +2131,7 @@ ping(Config, Sizes, HSize, CmdLine, Options) ->
%% Sizes = Size of packets to generated.
%% HSize = Header size: 1, 2, or 4
%% CmdLine = Additional command line options.
-%% Options = Addtional port options.
+%% Options = Additional port options.
expect_input(Config, Sizes, HSize, CmdLine, Options) ->
expect_input1(Config, Sizes, {HSize, CmdLine, Options}, [], []).
diff --git a/erts/emulator/test/port_SUITE_data/port_test.c b/erts/emulator/test/port_SUITE_data/port_test.c
index e199a0fc13..fa97b4c9d0 100644
--- a/erts/emulator/test/port_SUITE_data/port_test.c
+++ b/erts/emulator/test/port_SUITE_data/port_test.c
@@ -41,7 +41,7 @@ extern int errno;
typedef struct {
char* progname; /* Name of this program (from argv[0]). */
int header_size; /* Number of bytes in each packet header:
- * 1, 2, or 4, or 0 for a continous byte stream. */
+ * 1, 2, or 4, or 0 for a continuous byte stream. */
int fd_from_erl; /* File descriptor from Erlang. */
int fd_to_erl; /* File descriptor to Erlang. */
unsigned char* io_buf; /* Buffer for file i/o. */
diff --git a/erts/emulator/test/port_bif_SUITE_data/port_test.c b/erts/emulator/test/port_bif_SUITE_data/port_test.c
index 28324a56a6..923ab99ccc 100644
--- a/erts/emulator/test/port_bif_SUITE_data/port_test.c
+++ b/erts/emulator/test/port_bif_SUITE_data/port_test.c
@@ -39,7 +39,7 @@ extern int errno;
typedef struct {
char* progname; /* Name of this program (from argv[0]). */
int header_size; /* Number of bytes in each packet header:
- * 1, 2, or 4, or 0 for a continous byte stream. */
+ * 1, 2, or 4, or 0 for a continuous byte stream. */
int fd_from_erl; /* File descriptor from Erlang. */
int fd_to_erl; /* File descriptor to Erlang. */
unsigned char* io_buf; /* Buffer for file i/o. */
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 0a6eb7ffac..e14185e881 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -1028,9 +1028,9 @@ low_prio(Config) when is_list(Config) ->
1 ->
ok = low_prio_test(Config);
_ ->
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
ok = low_prio_test(Config),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
{comment,
"Test not written for SMP runtime system. "
"Multi scheduling blocked during test."}
@@ -1097,9 +1097,9 @@ yield(Config) when is_list(Config) ->
++ ") is enabled. Testcase gets messed up by modfied "
"timing."};
_ ->
- MS = erlang:system_flag(multi_scheduling, block),
+ MS = erlang:system_flag(multi_scheduling, block_normal),
yield_test(),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
case MS of
blocked ->
{comment,
@@ -1679,7 +1679,7 @@ processes_bif_test() ->
true ->
%% Do it again with a process suspended while
%% in the processes/0 bif.
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
@@ -1692,7 +1692,7 @@ processes_bif_test() ->
end),
receive {suspend_me, Suspendee} -> ok end,
erlang:suspend_process(Suspendee),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
[{status,suspended},{current_function,{erlang,ptab_list_continue,2}}] =
process_info(Suspendee, [status, current_function]),
@@ -1732,10 +1732,10 @@ do_processes_bif_test(WantReds, DieTest, Processes) ->
Splt = NoTestProcs div 10,
{TP1, TP23} = lists:split(Splt, TestProcs),
{TP2, TP3} = lists:split(Splt, TP23),
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
Tester ! DoIt,
receive GetGoing -> ok end,
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
SpawnProcesses(high),
lists:foreach( fun (P) ->
SpawnHangAround(),
@@ -1944,7 +1944,7 @@ processes_gc_trap(Config) when is_list(Config) ->
processes()
end,
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
@@ -1954,7 +1954,7 @@ processes_gc_trap(Config) when is_list(Config) ->
end),
receive {suspend_me, Suspendee} -> ok end,
erlang:suspend_process(Suspendee),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
[{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
@@ -2161,7 +2161,7 @@ processes_term_proc_list_test(MustChk) ->
end)
end,
SpawnSuspendProcessesProc = fun () ->
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
P = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
@@ -2171,7 +2171,7 @@ processes_term_proc_list_test(MustChk) ->
end),
receive {suspend_me, P} -> ok end,
erlang:suspend_process(P),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
[{status,suspended},
{current_function,{erlang,ptab_list_continue,2}}]
= process_info(P, [status, current_function]),
@@ -2232,7 +2232,7 @@ processes_term_proc_list_test(MustChk) ->
S8 = SpawnSuspendProcessesProc(),
?CHK_TERM_PROC_LIST(MustChk, 7),
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
Exit(S8),
?CHK_TERM_PROC_LIST(MustChk, 7),
Exit(S5),
@@ -2241,7 +2241,7 @@ processes_term_proc_list_test(MustChk) ->
?CHK_TERM_PROC_LIST(MustChk, 6),
Exit(S6),
?CHK_TERM_PROC_LIST(MustChk, 0),
- erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(multi_scheduling, unblock_normal),
as_expected.
@@ -2449,7 +2449,7 @@ no_priority_inversion2(Config) when is_list(Config) ->
request_gc(Pid, Prio) ->
Ref = make_ref(),
- erts_internal:request_system_task(Pid, Prio, {garbage_collect, Ref}),
+ erts_internal:request_system_task(Pid, Prio, {garbage_collect, Ref, major}),
Ref.
system_task_blast(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 3aee15a8fc..b178dede5b 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -1095,16 +1095,13 @@ scheduler_threads(Config) when is_list(Config) ->
end.
dirty_scheduler_threads(Config) when is_list(Config) ->
- SmpSupport = erlang:system_info(smp_support),
- try
- erlang:system_info(dirty_cpu_schedulers),
- dirty_scheduler_threads_test(Config, SmpSupport)
- catch
- error:badarg ->
- {skipped, "No dirty scheduler support"}
+ case erlang:system_info(dirty_cpu_schedulers) of
+ 0 -> {skipped, "No dirty scheduler support"};
+ _ -> dirty_scheduler_threads_test(Config)
end.
-dirty_scheduler_threads_test(Config, SmpSupport) ->
+dirty_scheduler_threads_test(Config) ->
+ SmpSupport = erlang:system_info(smp_support),
{Sched, SchedOnln, _} = get_dsstate(Config, ""),
{HalfSched, HalfSchedOnln} = case SmpSupport of
false -> {1,1};
@@ -1374,12 +1371,9 @@ sst2_loop(N) ->
sst2_loop(N-1).
sst3_loop(S, N) ->
- try erlang:system_info(dirty_cpu_schedulers) of
- DS ->
- sst3_loop_with_dirty_schedulers(S, DS, N)
- catch
- error:badarg ->
- sst3_loop_normal_schedulers_only(S, N)
+ case erlang:system_info(dirty_cpu_schedulers) of
+ 0 -> sst3_loop_normal_schedulers_only(S, N);
+ DS -> sst3_loop_with_dirty_schedulers(S, DS, N)
end.
sst3_loop_normal_schedulers_only(_S, 0) ->
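Annotation, not part of the patch: the behavioural change the two hunks above adapt to. erlang:system_info(dirty_cpu_schedulers) apparently no longer raises badarg on emulators without dirty scheduler support but returns 0, so a plain case expression replaces the try/catch. The equivalent feature test becomes:

    has_dirty_schedulers() ->
        erlang:system_info(dirty_cpu_schedulers) > 0.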
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index a1f12ba93c..f51244485b 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -28,6 +28,8 @@
runtime_update/1, runtime_diff/1,
run_queue_one/1,
scheduler_wall_time/1,
+ scheduler_wall_time_all/1,
+ msb_scheduler_wall_time/1,
reductions/1, reductions_big/1, garbage_collection/1, io/1,
badarg/1, run_queues_lengths_active_tasks/1, msacc/1]).
@@ -43,7 +45,9 @@ suite() ->
all() ->
[{group, wall_clock}, {group, runtime}, reductions,
- reductions_big, {group, run_queue}, scheduler_wall_time,
+ reductions_big, {group, run_queue},
+ scheduler_wall_time, scheduler_wall_time_all,
+ msb_scheduler_wall_time,
garbage_collection, io, badarg,
run_queues_lengths_active_tasks,
msacc].
@@ -271,35 +275,64 @@ hog_iter(0, Mon) ->
%% Tests that statistics(scheduler_wall_time) works as intended
scheduler_wall_time(Config) when is_list(Config) ->
+ scheduler_wall_time_test(scheduler_wall_time).
+
+%% Tests that statistics(scheduler_wall_time_all) works as intended
+scheduler_wall_time_all(Config) when is_list(Config) ->
+ scheduler_wall_time_test(scheduler_wall_time_all).
+
+scheduler_wall_time_test(Type) ->
%% Should return undefined if system_flag is not turned on yet
- undefined = statistics(scheduler_wall_time),
+ undefined = statistics(Type),
%% Turn on statistics
false = erlang:system_flag(scheduler_wall_time, true),
try
Schedulers = erlang:system_info(schedulers_online),
+ DirtyCPUSchedulers = erlang:system_info(dirty_cpu_schedulers_online),
+ DirtyIOSchedulers = erlang:system_info(dirty_io_schedulers),
+ TotLoadSchedulers = case Type of
+ scheduler_wall_time_all ->
+ Schedulers + DirtyCPUSchedulers + DirtyIOSchedulers;
+ scheduler_wall_time ->
+ Schedulers + DirtyCPUSchedulers
+ end,
+
%% Let testserver and everyone else finish their work
timer:sleep(1500),
%% Empty load
- EmptyLoad = get_load(),
+ EmptyLoad = get_load(Type),
{false, _} = {lists:any(fun(Load) -> Load > 50 end, EmptyLoad),EmptyLoad},
MeMySelfAndI = self(),
StartHog = fun() ->
- Pid = spawn(?MODULE, hog, [self()]),
+ Pid = spawn_link(?MODULE, hog, [self()]),
receive hog_started -> MeMySelfAndI ! go end,
Pid
end,
+ StartDirtyHog = fun(Func) ->
+ F = fun () ->
+ erts_debug:Func(alive_waitexiting,
+ MeMySelfAndI)
+ end,
+ Pid = spawn_link(F),
+ receive {alive, Pid} -> ok end,
+ Pid
+ end,
P1 = StartHog(),
%% Max on one, the other schedulers empty (hopefully)
%% Be generous the process can jump between schedulers
%% which is ok and we don't want the test to fail for wrong reasons
- _L1 = [S1Load|EmptyScheds1] = get_load(),
+ _L1 = [S1Load|EmptyScheds1] = get_load(Type),
{true,_} = {S1Load > 50,S1Load},
{false,_} = {lists:any(fun(Load) -> Load > 50 end, EmptyScheds1),EmptyScheds1},
{true,_} = {lists:sum(EmptyScheds1) < 60,EmptyScheds1},
%% 50% load
HalfHogs = [StartHog() || _ <- lists:seq(1, (Schedulers-1) div 2)],
- HalfLoad = lists:sum(get_load()) div Schedulers,
+ HalfDirtyCPUHogs = [StartDirtyHog(dirty_cpu)
+ || _ <- lists:seq(1, DirtyCPUSchedulers div 2)],
+ HalfDirtyIOHogs = [StartDirtyHog(dirty_io)
+ || _ <- lists:seq(1, DirtyIOSchedulers div 2)],
+ HalfLoad = lists:sum(get_load(Type)) div TotLoadSchedulers,
if Schedulers < 2, HalfLoad > 80 -> ok; %% Ok only one scheduler online and one hog
%% We want roughly 50% load
HalfLoad > 40, HalfLoad < 60 -> ok;
@@ -308,23 +341,30 @@ scheduler_wall_time(Config) when is_list(Config) ->
%% 100% load
LastHogs = [StartHog() || _ <- lists:seq(1, Schedulers div 2)],
- FullScheds = get_load(),
+ LastDirtyCPUHogs = [StartDirtyHog(dirty_cpu)
+ || _ <- lists:seq(1, DirtyCPUSchedulers div 2)],
+ LastDirtyIOHogs = [StartDirtyHog(dirty_io)
+ || _ <- lists:seq(1, DirtyIOSchedulers div 2)],
+ FullScheds = get_load(Type),
{false,_} = {lists:any(fun(Load) -> Load < 80 end, FullScheds),FullScheds},
- FullLoad = lists:sum(FullScheds) div Schedulers,
+ FullLoad = lists:sum(FullScheds) div TotLoadSchedulers,
if FullLoad > 90 -> ok;
true -> exit({fullload, FullLoad})
end,
KillHog = fun (HP) ->
HPM = erlang:monitor(process, HP),
+ unlink(HP),
exit(HP, kill),
receive
{'DOWN', HPM, process, HP, killed} ->
ok
end
end,
- [KillHog(Pid) || Pid <- [P1|HalfHogs++LastHogs]],
- AfterLoad = get_load(),
+ [KillHog(Pid) || Pid <- [P1|HalfHogs++HalfDirtyCPUHogs++HalfDirtyIOHogs
+ ++LastHogs++LastDirtyCPUHogs++LastDirtyIOHogs]],
+ receive after 2000 -> ok end, %% Give dirty schedulers time to complete...
+ AfterLoad = get_load(Type),
io:format("AfterLoad=~p~n", [AfterLoad]),
{false,_} = {lists:any(fun(Load) -> Load > 25 end, AfterLoad),AfterLoad},
true = erlang:system_flag(scheduler_wall_time, false)
@@ -332,16 +372,81 @@ scheduler_wall_time(Config) when is_list(Config) ->
erlang:system_flag(scheduler_wall_time, false)
end.
-get_load() ->
- Start = erlang:statistics(scheduler_wall_time),
+get_load(Type) ->
+ Start = erlang:statistics(Type),
timer:sleep(1500),
- End = erlang:statistics(scheduler_wall_time),
+ End = erlang:statistics(Type),
lists:reverse(lists:sort(load_percentage(lists:sort(Start),lists:sort(End)))).
load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) ->
[100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)];
load_percentage([], []) -> [].
+count(0) ->
+ ok;
+count(N) ->
+ count(N-1).
+
+msb_swt_hog(true) ->
+ count(1000000),
+ erts_debug:dirty_cpu(wait, 10),
+ erts_debug:dirty_io(wait, 10),
+ msb_swt_hog(true);
+msb_swt_hog(false) ->
+ count(1000000),
+ msb_swt_hog(false).
+
+msb_scheduler_wall_time(Config) ->
+ erlang:system_flag(scheduler_wall_time, true),
+ Dirty = erlang:system_info(dirty_cpu_schedulers) /= 0,
+ Hogs = lists:map(fun (_) ->
+ spawn_opt(fun () ->
+ msb_swt_hog(Dirty)
+ end, [{priority,low}, link, monitor])
+ end, lists:seq(1,10)),
+ erlang:system_flag(multi_scheduling, block),
+ try
+ SWT1 = lists:sort(statistics(scheduler_wall_time_all)),
+ %% io:format("SWT1 = ~p~n", [SWT1]),
+ receive after 4000 -> ok end,
+ SWT2 = lists:sort(statistics(scheduler_wall_time_all)),
+ %% io:format("SWT2 = ~p~n", [SWT2]),
+ SWT = lists:zip(SWT1, SWT2),
+ io:format("SU = ~p~n", [lists:map(fun({{I, A0, T0}, {I, A1, T1}}) ->
+ {I, (A1 - A0)/(T1 - T0)} end,
+ SWT)]),
+ {A, T} = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {Ai,Ti}) ->
+ {Ai + (A1 - A0), Ti + (T1 - T0)}
+ end,
+ {0, 0},
+ SWT),
+ TSU = A/T,
+ WSU = ((TSU * (erlang:system_info(schedulers)
+ + erlang:system_info(dirty_cpu_schedulers)
+ + erlang:system_info(dirty_io_schedulers)))
+ / 1),
+ %% Weighted scheduler utilization should be
+ %% very close to 1.0, i.e., we execute the
+ %% same time as one thread executing all
+ %% the time...
+ io:format("WSU = ~p~n", [WSU]),
+ true = 0.9 < WSU andalso WSU < 1.1,
+ ok
+ after
+ erlang:system_flag(multi_scheduling, unblock),
+ erlang:system_flag(scheduler_wall_time, false),
+ lists:foreach(fun ({HP, _HM}) ->
+ unlink(HP),
+ exit(HP, kill)
+ end, Hogs),
+ lists:foreach(fun ({HP, HM}) ->
+ receive
+ {'DOWN', HM, process, HP, _} ->
+ ok
+ end
+ end, Hogs),
+ ok
+ end.
%% Tests that statistics(garbage_collection) is callable.
%% It is not clear how to test anything more.
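Annotation, not part of the patch: how load_percentage/2 above turns two scheduler_wall_time snapshots into per-scheduler utilisation percentages. The numbers are made up; each element is {SchedulerId, ActiveTime, TotalTime}, and the formula is 100*(WN-WP) div (TN-TP).

    load_percentage_example() ->
        Start = [{1, 100, 1000}, {2, 0, 1000}],
        End   = [{1, 700, 2000}, {2, 50, 2000}],
        [60, 5] = [100 * (WN - WP) div (TN - TP)
                   || {{Id, WP, TP}, {Id, WN, TN}} <- lists:zip(Start, End)],
        ok.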
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index a4aedb31f6..6bd1eb1e1e 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -36,7 +36,8 @@
-export([all/0, suite/0]).
-export([process_count/1, system_version/1, misc_smoke_tests/1,
- heap_size/1, wordsize/1, memory/1, ets_limit/1]).
+ heap_size/1, wordsize/1, memory/1, ets_limit/1, atom_limit/1,
+ atom_count/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -44,7 +45,7 @@ suite() ->
all() ->
[process_count, system_version, misc_smoke_tests,
- heap_size, wordsize, memory, ets_limit].
+ heap_size, wordsize, memory, ets_limit, atom_limit, atom_count].
%%%
%%% The test cases -------------------------------------------------------------
@@ -173,7 +174,7 @@ memory(Config) when is_list(Config) ->
%%
erts_debug:set_internal_state(available_internal_state, true),
- %% Use a large heap size on the controling process in
+ %% Use a large heap size on the controlling process in
%% order to avoid changes in its heap size during
%% comparisons.
MinHeapSize = process_flag(min_heap_size, 1024*1024),
@@ -472,6 +473,17 @@ mapn(_Fun, 0) ->
mapn(Fun, N) ->
[Fun(N) | mapn(Fun, N-1)].
+
+get_node_name(Config) ->
+ list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(proplists:get_value(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(erlang:system_time(second))
+ ++ "-"
+ ++ integer_to_list(erlang:unique_integer([positive]))).
+
+
%% Verify system_info(ets_limit) reflects max ETS table settings.
ets_limit(Config0) when is_list(Config0) ->
Config = [{testcase,ets_limit}|Config0],
@@ -486,7 +498,7 @@ get_ets_limit(Config, EtsMax) ->
0 -> [];
_ -> [{"ERL_MAX_ETS_TABLES", integer_to_list(EtsMax)}]
end,
- {ok, Node} = start_node(Config, Envs),
+ {ok, Node} = start_node_ets(Config, Envs),
Me = self(),
Ref = make_ref(),
spawn_link(Node,
@@ -502,16 +514,50 @@ get_ets_limit(Config, EtsMax) ->
stop_node(Node),
Res.
-start_node(Config, Envs) when is_list(Config) ->
+start_node_ets(Config, Envs) when is_list(Config) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ test_server:start_node(get_node_name(Config), peer,
+ [{args, "-pa "++Pa}, {env, Envs}]).
+
+start_node_atm(Config, AtomsMax) when is_list(Config) ->
Pa = filename:dirname(code:which(?MODULE)),
- Name = list_to_atom(atom_to_list(?MODULE)
- ++ "-"
- ++ atom_to_list(proplists:get_value(testcase, Config))
- ++ "-"
- ++ integer_to_list(erlang:system_time(second))
- ++ "-"
- ++ integer_to_list(erlang:unique_integer([positive]))),
- test_server:start_node(Name, peer, [{args, "-pa "++Pa}, {env, Envs}]).
+ test_server:start_node(get_node_name(Config), peer,
+ [{args, "-pa "++ Pa ++ AtomsMax}]).
stop_node(Node) ->
test_server:stop_node(Node).
+
+
+%% Verify that system_info(atom_limit) reflects the max atoms setting
+%% (using " +t").
+atom_limit(Config0) when is_list(Config0) ->
+ Config = [{testcase,atom_limit}|Config0],
+ 2186042 = get_atom_limit(Config, " +t 2186042 "),
+ ok.
+
+get_atom_limit(Config, AtomsMax) ->
+ {ok, Node} = start_node_atm(Config, AtomsMax),
+ Me = self(),
+ Ref = make_ref(),
+ spawn_link(Node,
+ fun() ->
+ Res = erlang:system_info(atom_limit),
+ unlink(Me),
+ Me ! {Ref, Res}
+ end),
+ receive
+ {Ref, Res} ->
+ Res
+ end,
+ stop_node(Node),
+ Res.
+
+%% Verify that system_info(atom_count) works.
+atom_count(Config) when is_list(Config) ->
+ Limit = erlang:system_info(atom_limit),
+ Count1 = erlang:system_info(atom_count),
+ list_to_atom(integer_to_list(erlang:unique_integer())),
+ Count2 = erlang:system_info(atom_count),
+ true = Limit >= Count2,
+ true = Count2 > Count1,
+ ok.
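
The two new test cases rely on the "+t" emulator flag, which sets the maximum number of atoms, and on the atom_limit/atom_count items of erlang:system_info/1. A short sketch of the relationship the tests check (assuming a node started with "erl +t 2186042"):

    2186042 = erlang:system_info(atom_limit),      %% the limit set by +t
    C0 = erlang:system_info(atom_count),
    _ = list_to_atom(integer_to_list(erlang:unique_integer())),  %% create a fresh atom
    C1 = erlang:system_info(atom_count),
    true = C1 > C0 andalso C1 =< erlang:system_info(atom_limit).
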
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index 9501569814..c13d03bcc4 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -132,7 +132,7 @@ local_to_univ_utc(Config) when is_list(Config) ->
end.
-%% Tests conversion from univeral to local time.
+%% Tests conversion from universal to local time.
univ_to_local(Config) when is_list(Config) ->
test_univ_to_local(test_data()).
diff --git a/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c b/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c
index 33b346aab7..786be35c9c 100644
--- a/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c
+++ b/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c
@@ -1,4 +1,4 @@
-#include "erl_nif.h"
+#include <erl_nif.h>
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
@@ -6,11 +6,6 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
return 0;
}
-static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
-{
- return 0;
-}
-
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
return 0;
@@ -34,4 +29,4 @@ static ErlNifFunc nif_funcs[] =
{"nif_dec", 1, nif_dec_1}
};
-ERL_NIF_INIT(trace_call_time_SUITE,nif_funcs,load,reload,upgrade,unload)
+ERL_NIF_INIT(trace_call_time_SUITE,nif_funcs,load,NULL,upgrade,unload)
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index c297acd78b..5b65889f4a 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -19,7 +19,6 @@
%%
-module(trace_local_SUITE).
--compile({nowarn_deprecated_function, {erlang,hash,2}}).
-export([basic_test/0, bit_syntax_test/0, return_test/0,
on_and_off_test/0, stack_grow_test/0,
@@ -65,7 +64,7 @@
init_per_testcase(_Case, Config) ->
Config.
-end_per_testcase(_Case, Config) ->
+end_per_testcase(_Case, _Config) ->
shutdown(),
%% Reloading the module will clear all trace patterns, and
@@ -78,7 +77,7 @@ suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap, {minutes, 2}}].
-all() ->
+all() ->
case test_server:is_native(trace_local_SUITE) of
true -> [not_run];
false ->
@@ -98,7 +97,7 @@ all() ->
end.
-not_run(Config) when is_list(Config) ->
+not_run(Config) when is_list(Config) ->
{skipped,"Native code"}.
%% Tests basic local call-trace
@@ -300,7 +299,7 @@ basic_test() ->
NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported,[1]),
?CT(?MODULE,local,[1]),
@@ -308,17 +307,17 @@ basic_test() ->
?CT(?MODULE,local_tail,[1]),
erlang:trace_pattern({?MODULE,'_','_'},[],[]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
- [1,1,1,1] = lambda_slave(fun() ->
- exported_wrap(1)
- end),
- ?NM,
+ [1,1,1,997] = lambda_slave(fun() ->
+ exported_wrap(1)
+ end),
+ ?NM,
erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = lambda_slave(fun() ->
- exported_wrap(1)
- end),
+ [1,1,1,997] = lambda_slave(fun() ->
+ exported_wrap(1)
+ end),
?CT(?MODULE,_,_), %% The fun
?CT(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported,[1]),
@@ -379,36 +378,36 @@ return_test() ->
setup([call]),
erlang:trace_pattern({?MODULE,'_','_'},[{'_',[],[{return_trace}]}],
[local]),
- erlang:trace_pattern({erlang,hash,'_'},[{'_',[],[{return_trace}]}],
+ erlang:trace_pattern({erlang,phash2,'_'},[{'_',[],[{return_trace}]}],
[local]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
- ?CT(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
+ ?CT(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported,[1]),
?CT(?MODULE,local,[1]),
?CT(?MODULE,local2,[1]),
?CT(?MODULE,local_tail,[1]),
- ?CT(erlang,hash,[1,1]),
- ?RF(erlang,hash,2,1),
- ?RF(?MODULE,local_tail,1,[1,1]),
- ?RF(?MODULE,local2,1,[1,1]),
- ?RF(?MODULE,local,1,[1,1,1]),
- ?RF(?MODULE,exported,1,[1,1,1,1]),
- ?RF(?MODULE,exported_wrap,1,[1,1,1,1]),
+ ?CT(erlang,phash2,[1,1023]),
+ ?RF(erlang,phash2,2,997),
+ ?RF(?MODULE,local_tail,1,[1,997]),
+ ?RF(?MODULE,local2,1,[1,997]),
+ ?RF(?MODULE,local,1,[1,1,997]),
+ ?RF(?MODULE,exported,1,[1,1,1,997]),
+ ?RF(?MODULE,exported_wrap,1,[1,1,1,997]),
shutdown(),
setup([call,return_to]),
erlang:trace_pattern({?MODULE,'_','_'},[],
[local]),
- erlang:trace_pattern({erlang,hash,'_'},[],
+ erlang:trace_pattern({erlang,phash2,'_'},[],
[local]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
- ?CT(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
+ ?CT(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported,[1]),
?CT(?MODULE,local,[1]),
?CT(?MODULE,local2,[1]),
?CT(?MODULE,local_tail,[1]),
- ?CT(erlang,hash,[1,1]),
+ ?CT(erlang,phash2,[1,1023]),
?RT(?MODULE,local_tail,1),
?RT(?MODULE,local,1),
?RT(?MODULE,exported,1),
@@ -417,25 +416,25 @@ return_test() ->
setup([call,return_to]),
erlang:trace_pattern({?MODULE,'_','_'},[{'_',[],[{return_trace}]}],
[local]),
- erlang:trace_pattern({erlang,hash,'_'},[{'_',[],[{return_trace}]}],
+ erlang:trace_pattern({erlang,phash2,'_'},[{'_',[],[{return_trace}]}],
[local]),
erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
- ?CT(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
+ ?CT(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported,[1]),
?CT(?MODULE,local,[1]),
?CT(?MODULE,local2,[1]),
?CT(?MODULE,local_tail,[1]),
- ?CT(erlang,hash,[1,1]),
- ?RF(erlang,hash,2,1),
+ ?CT(erlang,phash2,[1,1023]),
+ ?RF(erlang,phash2,2,997),
?RT(?MODULE,local_tail,1),
- ?RF(?MODULE,local_tail,1,[1,1]),
- ?RF(?MODULE,local2,1,[1,1]),
+ ?RF(?MODULE,local_tail,1,[1,997]),
+ ?RF(?MODULE,local2,1,[1,997]),
?RT(?MODULE,local,1),
- ?RF(?MODULE,local,1,[1,1,1]),
+ ?RF(?MODULE,local,1,[1,1,997]),
?RT(?MODULE,exported,1),
- ?RF(?MODULE,exported,1,[1,1,1,1]),
- ?RF(?MODULE,exported_wrap,1,[1,1,1,1]),
+ ?RF(?MODULE,exported,1,[1,1,1,997]),
+ ?RF(?MODULE,exported_wrap,1,[1,1,1,997]),
?RT(?MODULE,slave,2),
shutdown(),
?NM,
@@ -446,7 +445,6 @@ return_test() ->
erlang:trace_pattern({'_','_','_'},[],[local]),
apply_slave(erlang,trace,[Pid, false, [all]]),
shutdown(),
-
ok.
on_and_off_test() ->
@@ -456,72 +454,72 @@ on_and_off_test() ->
LocalTail = fun() ->
local_tail(1)
end,
- [1,1] = lambda_slave(LocalTail),
+ [1,997] = lambda_slave(LocalTail),
?CT(?MODULE,local_tail,[1]),
erlang:trace(Pid,true,[return_to]),
- [1,1] = lambda_slave(LocalTail),
+ [1,997] = lambda_slave(LocalTail),
?CT(?MODULE,local_tail,[1]),
?RT(?MODULE,_,_),
0 = erlang:trace_pattern({?MODULE,local_tail,1},[],[global]),
- [1,1] = lambda_slave(LocalTail),
+ [1,997] = lambda_slave(LocalTail),
?NM,
1 = erlang:trace_pattern({?MODULE,exported_wrap,1},[],[global]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
1 = erlang:trace_pattern({?MODULE,exported_wrap,1},[],[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
?RT(?MODULE,slave,2),
- 1 = erlang:trace_pattern({erlang,hash,2},[],[local]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ 1 = erlang:trace_pattern({erlang,phash2,2},[],[local]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
- ?CT(erlang,hash,[1,1]),
+ ?CT(erlang,phash2,[1,1023]),
?RT(?MODULE,local_tail,1),
?RT(?MODULE,slave,2),
erlang:trace(Pid,true,[timestamp]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CTT(?MODULE,exported_wrap,[1]),
- ?CTT(erlang,hash,[1,1]),
+ ?CTT(erlang,phash2,[1,1023]),
?RTT(?MODULE,local_tail,1),
?RTT(?MODULE,slave,2),
erlang:trace(Pid,false,[return_to,timestamp]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
- ?CT(erlang,hash,[1,1]),
+ ?CT(erlang,phash2,[1,1023]),
erlang:trace(Pid,true,[return_to]),
- 1 = erlang:trace_pattern({erlang,hash,2},[],[]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ 1 = erlang:trace_pattern({erlang,phash2,2},[],[]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
- ?CT(erlang,hash,[1,1]),
+ ?CT(erlang,phash2,[1,1023]),
?RT(?MODULE,slave,2),
1 = erlang:trace_pattern({?MODULE,exported_wrap,1},[],[]),
- [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
+ [1,1,1,997] = apply_slave(?MODULE,exported_wrap,[1]),
?CT(?MODULE,exported_wrap,[1]),
- ?CT(erlang,hash,[1,1]),
+ ?CT(erlang,phash2,[1,1023]),
shutdown(),
erlang:trace_pattern({'_','_','_'},false,[local]),
N = erlang:trace_pattern({erlang,'_','_'},true,[local]),
case erlang:trace_pattern({erlang,'_','_'},false,[local]) of
- N ->
+ N ->
ok;
Else ->
exit({number_mismatch, {expected, N}, {got, Else}})
end,
case erlang:trace_pattern({erlang,'_','_'},false,[local]) of
- N ->
+ N ->
ok;
Else2 ->
exit({number_mismatch, {expected, N}, {got, Else2}})
end,
M = erlang:trace_pattern({erlang,'_','_'},true,[]),
case erlang:trace_pattern({erlang,'_','_'},false,[]) of
- M ->
+ M ->
ok;
Else3 ->
exit({number_mismatch, {expected, N}, {got, Else3}})
end,
case erlang:trace_pattern({erlang,'_','_'},false,[]) of
- M ->
+ M ->
ok;
Else4 ->
exit({number_mismatch, {expected, N}, {got, Else4}})
@@ -930,7 +928,7 @@ local2(Val) ->
local_tail(Val). %% Tail recursive call
local_tail(Val) ->
- [Val , erlang:hash(1,1)].
+ [Val , erlang:phash2(1,1023)].
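
The expected values in trace_local_SUITE change from 1 to 997 because the calls to the removed erlang:hash/2 are replaced by erlang:phash2/2, and the suite hard-codes the result of phash2(1, 1023). The assumption behind the updated assertions, spelled out:

    %% phash2(Term, Range) returns an integer in 0..Range-1;
    %% 997 is the value the suite expects for this particular call.
    997 = erlang:phash2(1, 1023),
    [1, 997] = local_tail(1).   %% local_tail/1 as redefined above
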
diff --git a/erts/emulator/test/trace_nif_SUITE.erl b/erts/emulator/test/trace_nif_SUITE.erl
index 8d5bff2a48..7ac6fce234 100644
--- a/erts/emulator/test/trace_nif_SUITE.erl
+++ b/erts/emulator/test/trace_nif_SUITE.erl
@@ -265,10 +265,16 @@ nif_process() ->
nif_process().
load_nif(Config) ->
- Path = proplists:get_value(data_dir, Config),
-
- ok = erlang:load_nif(filename:join(Path,"trace_nif"), 0).
+ case is_nif_loaded() of
+ true ->
+ ok;
+ false ->
+ Path = proplists:get_value(data_dir, Config),
+ ok = erlang:load_nif(filename:join(Path,"trace_nif"), 0)
+ end.
+is_nif_loaded() ->
+ false.
nif() ->
{"Stub0",[]}. %exit("nif/0 stub called").
diff --git a/erts/emulator/test/trace_nif_SUITE_data/trace_nif.c b/erts/emulator/test/trace_nif_SUITE_data/trace_nif.c
index 26f2420b8b..1afb5ee919 100644
--- a/erts/emulator/test/trace_nif_SUITE_data/trace_nif.c
+++ b/erts/emulator/test/trace_nif_SUITE_data/trace_nif.c
@@ -1,4 +1,4 @@
-#include "erl_nif.h"
+#include <erl_nif.h>
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
@@ -6,18 +6,18 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
return 0;
}
-static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
return 0;
}
-static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+static void unload(ErlNifEnv* env, void* priv_data)
{
- return 0;
}
-static void unload(ErlNifEnv* env, void* priv_data)
+static ERL_NIF_TERM is_nif_loaded(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
+ return enif_make_atom(env,"true");
}
static ERL_NIF_TERM nif_0(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -38,9 +38,10 @@ static ERL_NIF_TERM nif_1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ErlNifFunc nif_funcs[] =
{
+ {"is_nif_loaded", 0, is_nif_loaded},
{"nif", 0, nif_0},
{"nif", 1, nif_1}
};
-ERL_NIF_INIT(trace_nif_SUITE,nif_funcs,load,reload,upgrade,unload)
+ERL_NIF_INIT(trace_nif_SUITE,nif_funcs,load,NULL,upgrade,unload)
diff --git a/erts/emulator/test/tracer_SUITE_data/tracer_test.c b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
index d9543b7ab9..1555a95d9a 100644
--- a/erts/emulator/test/tracer_SUITE_data/tracer_test.c
+++ b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
@@ -18,7 +18,7 @@
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdio.h>
#include <string.h>
diff --git a/erts/emulator/test/z_SUITE.erl b/erts/emulator/test/z_SUITE.erl
index ab56018373..a90701c5d2 100644
--- a/erts/emulator/test/z_SUITE.erl
+++ b/erts/emulator/test/z_SUITE.erl
@@ -68,8 +68,8 @@ schedulers_alive(Config) when is_list(Config) ->
enabled ->
io:format("Testing blocking process exit~n"),
BF = fun () ->
- blocked = erlang:system_flag(multi_scheduling,
- block),
+ blocked_normal = erlang:system_flag(multi_scheduling,
+ block_normal),
Master ! {self(), blocking},
receive after infinity -> ok end
end,
@@ -77,21 +77,21 @@ schedulers_alive(Config) when is_list(Config) ->
Mon = erlang:monitor(process, Blocker),
receive {Blocker, blocking} -> ok end,
[Blocker]
- = erlang:system_info(multi_scheduling_blockers),
+ = erlang:system_info(normal_multi_scheduling_blockers),
unlink(Blocker),
exit(Blocker, kill),
receive {'DOWN', Mon, _, _, _} -> ok end,
enabled = erlang:system_info(multi_scheduling),
- [] = erlang:system_info(multi_scheduling_blockers),
+ [] = erlang:system_info(normal_multi_scheduling_blockers),
ok
end,
io:format("Testing blocked~n"),
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
case erlang:system_info(multi_scheduling) of
enabled ->
ct:fail(multi_scheduling_enabled);
- blocked ->
- [Master] = erlang:system_info(multi_scheduling_blockers);
+ blocked_normal ->
+ [Master] = erlang:system_info(normal_multi_scheduling_blockers);
disabled -> ok
end,
Ps = lists:map(
@@ -109,8 +109,8 @@ schedulers_alive(Config) when is_list(Config) ->
unlink(P),
exit(P, bang)
end, Ps),
- case erlang:system_flag(multi_scheduling, unblock) of
- blocked -> ct:fail(multi_scheduling_blocked);
+ case erlang:system_flag(multi_scheduling, unblock_normal) of
+ blocked_normal -> ct:fail(multi_scheduling_blocked);
disabled -> ok;
enabled -> ok
end,
@@ -226,7 +226,7 @@ pollset_size(Config) when is_list(Config) ->
"Pollset size information not available"}
end;
false ->
- %% Somtimes we have fewer descriptors in the
+ %% Sometimes we have fewer descriptors in the
%% pollset at the end than when we started, but
%% that is ok as long as there are at least 2
%% descriptors (dist listen socket and
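
The z_SUITE changes above follow the newer multi_scheduling interface, where block_normal blocks only the normal (non-dirty) schedulers and the blocking processes are reported by system_info(normal_multi_scheduling_blockers). A minimal sketch, assuming an SMP emulator with more than one normal scheduler:

    blocked_normal = erlang:system_flag(multi_scheduling, block_normal),
    true = lists:member(self(), erlang:system_info(normal_multi_scheduling_blockers)),
    %% unblock_normal returns enabled once the last blocker is gone
    %% (or disabled on a single-scheduler emulator).
    erlang:system_flag(multi_scheduling, unblock_normal).
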
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index 4407f7e289..9813142585 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -475,7 +475,7 @@ sub emulator_output {
print '#include "beam_load.h"', "\n";
print "\n";
- print "char tag_to_letter[] = {\n ";
+ print "const char tag_to_letter[] = {\n ";
for ($i = 0; $i < length($genop_types); $i++) {
print "'$tag_type[$i]', ";
}
@@ -489,7 +489,7 @@ sub emulator_output {
# Generate code for specific ops.
#
my($spec_opnum) = 0;
- print "OpEntry opc[] = {\n";
+ print "const OpEntry opc[] = {\n";
foreach $key (sort keys %specific_op) {
$gen_to_spec{$key} = $spec_opnum;
$num_specific{$key} = @{$specific_op{$key}};
@@ -566,13 +566,22 @@ sub emulator_output {
$sep = ",";
}
$init .= "}";
- init_item($print_name, $init, $involves_r, $size, $pack, $sign, 0);
+ init_item($print_name, $init, $involves_r, $size, $pack, $sign);
$op_to_name[$spec_opnum] = $instr;
$spec_opnum++;
}
}
print "};\n\n";
- print "int num_instructions = $spec_opnum;\n\n";
+ print "const int num_instructions = $spec_opnum;\n\n";
+
+ #
+ # Print the array for instruction counts.
+ #
+
+ print "#ifdef ERTS_OPCODE_COUNTER_SUPPORT\n";
+ print "Uint erts_instr_count[$spec_opnum];\n";
+ print "#endif\n";
+ print "\n";
#
# Generate transformations.
@@ -584,7 +593,7 @@ sub emulator_output {
# Print the generic instruction table.
#
- print "GenOpEntry gen_opc[] = {\n";
+ print "const GenOpEntry gen_opc[] = {\n";
for ($i = 0; $i < @gen_opname; $i++) {
if ($i == $num_file_opcodes) {
print "\n/*\n * Internal generic instructions.\n */\n\n";
@@ -678,8 +687,8 @@ sub emulator_output {
print "#define TE_MAX_VARS $te_max_vars\n";
print "\n";
- print "extern char tag_to_letter[];\n";
- print "extern Uint op_transform[];\n";
+ print "extern const char tag_to_letter[];\n";
+ print "extern const Uint op_transform[];\n";
print "\n";
for ($i = 0; $i < @op_to_name; $i++) {
@@ -708,7 +717,7 @@ sub emulator_output {
print "#define DEFINE_COUNTING_LABELS";
for ($i = 0; $i < @op_to_name; $i++) {
my($name) = $op_to_name[$i];
- print " \\\nCountCase($name): opc[$i].count++; goto lb_$name;";
+ print " \\\nCountCase($name): erts_instr_count[$i]++; goto lb_$name;";
}
print "\n\n";
@@ -1417,7 +1426,7 @@ sub tr_gen {
# Print the generated transformation engine.
#
my($offset) = 0;
- print "Uint op_transform[] = {\n";
+ print "const Uint op_transform[] = {\n";
foreach $key (sort keys %gen_transform) {
$gen_transform_offset{$key} = $offset;
my @instr = @{$gen_transform{$key}};
diff --git a/erts/emulator/utils/make_preload b/erts/emulator/utils/make_preload
index f489bc2a39..bcb2e42614 100755
--- a/erts/emulator/utils/make_preload
+++ b/erts/emulator/utils/make_preload
@@ -90,12 +90,12 @@ foreach $file (@ARGV) {
open(FILE, $file) or error("failed to read $file: $!");
binmode(FILE);
$_ = <FILE>;
- $_ = beam_strip($_);
+ $_ = beam_strip($_, $file);
close(FILE);
push(@modules, " {\"$module\", " . length($_) . ", preloaded_$module},\n");
- print "unsigned preloaded_size_$module = ", length($_), ";\n";
- print "unsigned char preloaded_$module", "[] = {\n";
+ print "const unsigned preloaded_size_$module = ", length($_), ";\n";
+ print "const unsigned char preloaded_$module", "[] = {\n";
for ($i = 0; $i < length($_); $i++) {
if ($i % 8 == 0 && $comment ne '') {
$comment =~ s@/\*@..@g; # Comment start -- avoid warning.
@@ -125,10 +125,10 @@ if ($gen_rc) {
print @modules;
print "END\n";
} elsif ($gen_old) {
- print "struct {\n";
+ print "const struct {\n";
print " char* name;\n";
print " int size;\n";
- print " unsigned char* code;\n";
+ print " const unsigned char* code;\n";
print "} pre_loaded[] = {\n";
foreach (@modules) {
print;
@@ -147,20 +147,20 @@ sub error {
}
sub beam_strip {
- my($beam) = @_;
+ my($beam,$file) = @_;
my $size_left = length($beam);
my %chunk;
my %needed_chunk = ('Code' => 1,
- 'Atom' => 1,
+ 'AtU8' => 1,
'ImpT' => 1,
'ExpT' => 1,
'StrT' => 1,
'FunT' => 1,
'LitT' => 1);
- die "can't read Beam files for OTP R4 or earlier (sorry)"
+ die "$file: can't read Beam files for OTP R4 or earlier (sorry)"
if $beam =~ /^\x7fBEAM!/;
#
@@ -177,7 +177,7 @@ sub beam_strip {
die "form size $size greater than size ", $size_left, " of module"
if $size > $size_left;
$size_left -= 4;
- die "not a BEAM file: IFF form type is not 'BEAM'"
+ die "$file: not a BEAM file: IFF form type is not 'BEAM'"
unless $beam_id eq 'BEAM';
#
@@ -197,6 +197,14 @@ sub beam_strip {
}
#
+ # Abort if there is no new-style 'AtU8' atom chunk.
+ #
+
+ exists $chunk{'AtU8'} or
+ die "$file: no 'AtU8' chunk (re-compile with " .
+ "OTP 20 or later)\n";
+
+ #
# Create a new beam file with only the useful chunk types.
#
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index c158778f43..47e1528958 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -36,7 +36,10 @@ use File::Basename;
# <-src>/erl_am.c
# <-src>/erl_bif_table.c
# <-src>/erl_bif_wrap.c
-# <-src>/erl_pbifs.c
+# <-src>/erl_dirty_bif_wrap.c
+# <-src>/erl_guard_bifs.c
+# <-src>/hipe_nbif_impl.c
+# <-include>/hipe_nbif_impl.h
# <-include>/erl_atom_table.h
# <-include>/erl_bif_table.h
#
@@ -52,10 +55,13 @@ my %atom;
my %atom_alias;
my %aliases;
my $auto_alias_num = 0;
+my %dirty_bif_tab;
my @bif;
-my @implementation;
-my @pbif;
+my @bif_info;
+my $dirty_schedulers = 'no';
+my $dirty_schedulers_test = 'no';
+my $hipe = 'no';
while (@ARGV && $ARGV[0] =~ /^-(\w+)/) {
my $opt = shift;
@@ -67,6 +73,18 @@ while (@ARGV && $ARGV[0] =~ /^-(\w+)/) {
$include = shift;
die "No directory for -include argument specified"
unless defined $include;
+ } elsif($opt eq '-ds') {
+ $dirty_schedulers = shift;
+ die "No -ds argument specified"
+ unless defined $dirty_schedulers;
+ } elsif($opt eq '-dst') {
+ $dirty_schedulers_test = shift;
+ die "No -dst argument specified"
+ unless defined $dirty_schedulers_test;
+ } elsif($opt eq '-hipe') {
+ $hipe = shift;
+ die "No -hipe argument specified"
+ unless defined $hipe;
} else {
usage("bad option: $opt");
}
@@ -79,23 +97,64 @@ while (<>) {
my($type, @args) = split;
if ($type eq 'atom') {
save_atoms(@args);
- } elsif ($type eq 'bif' or $type eq 'ubif') {
- my($bif,$alias,$alias2) = (@args);
+ } elsif ($type eq 'bif' or $type eq 'ubif' or $type eq 'gcbif') {
+ if (@args > 2) {
+ error("$type only allows two arguments");
+ }
+ my($bif,$alias) = (@args);
$bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF");
my($mod,$name,$arity) = ($1,$2,$3);
+ my $mfa = "$mod:$name/$arity";
save_atoms($mod, $name);
unless (defined $alias) {
$alias = "";
$alias = "${mod}_" unless $mod eq 'erlang';
$alias .= "${name}_$arity";
}
+ my $sched_type;
+ my $alias3 = $alias;
+
+ $sched_type = $dirty_bif_tab{$mfa};
+
+ if (!$sched_type or ($type eq 'ubif')) {
+ $sched_type = 'normal';
+ }
+ elsif ($sched_type eq 'dirty_cpu') {
+ $alias3 = "schedule_dirty_cpu_$alias"
+ }
+ elsif ($sched_type eq 'dirty_io') {
+ $alias3 = "schedule_dirty_io_$alias"
+ }
+ else {
+ error("invalid sched_type: $sched_type");
+ }
+
my $wrapper;
- $wrapper = "wrap_$alias" if $type eq 'bif';
- $wrapper = $alias if $type eq 'ubif';
+ if ($type eq 'bif') {
+ $wrapper = "wrap_$alias";
+ } else {
+ $wrapper = $alias;
+ }
push(@bif, ["am_$atom_alias{$mod}","am_$atom_alias{$name}",$arity,
- $alias,$wrapper]);
- push(@pbif, $bif =~ m/^'/ && $alias =~ m/^ebif_/);
- push(@implementation, $alias2);
+ $alias3,$wrapper,$alias]);
+ push(@bif_info, [$type, $sched_type, $alias3, $alias]);
+ } elsif ($type eq 'dirty-cpu' or $type eq 'dirty-io'
+ or $type eq 'dirty-cpu-test' or $type eq 'dirty-io-test') {
+ if ($dirty_schedulers eq 'yes') {
+ my($bif,$other) = (@args);
+ $bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF");
+ my($mod,$name,$arity) = ($1,$2,$3);
+ my $mfa = "$mod:$name/$arity";
+ if (($type eq 'dirty-cpu')
+ or (($dirty_schedulers_test eq 'yes')
+ and ($type eq 'dirty-cpu-test'))) {
+ $dirty_bif_tab{$mfa} = 'dirty_cpu';
+ } elsif (($type eq 'dirty-io')
+ or (($dirty_schedulers_test eq 'yes')
+ and ($type eq 'dirty-io-test'))) {
+ $dirty_bif_tab{$mfa} = 'dirty_io';
+ }
+ }
} else {
error("invalid line");
}
@@ -144,7 +203,7 @@ open_file("$include/erl_bif_list.h");
my $i;
for ($i = 0; $i < @bif; $i++) {
# module atom, function atom, arity, C function, table index
- print "BIF_LIST($bif[$i]->[0],$bif[$i]->[1],$bif[$i]->[2],$bif[$i]->[3],$i)\n";
+ print "BIF_LIST($bif[$i]->[0],$bif[$i]->[1],$bif[$i]->[2],$bif[$i]->[3],$bif[$i]->[5],$i)\n";
}
#
@@ -164,10 +223,24 @@ typedef struct bif_entry {
int arity;
BifFunction f;
BifFunction traced;
+ BifFunction impl;
} BifEntry;
+typedef struct erts_gc_bif {
+ BifFunction bif;
+ BifFunction gc_bif;
+ int exp_ix;
+} ErtsGcBif;
+
+typedef struct erts_u_bif {
+ BifFunction bif;
+ int exp_ix;
+} ErtsUBif;
+
extern BifEntry bif_table[];
extern Export* bif_export[];
+extern const ErtsGcBif erts_gc_bifs[];
+extern const ErtsUBif erts_u_bifs[];
#define BIF_SIZE $bif_size
@@ -175,17 +248,28 @@ EOF
my $i;
for ($i = 0; $i < @bif; $i++) {
- print "#define BIF_$bif[$i]->[3] $i\n";
+ print "#define BIF_$bif_info[$i]->[3] $i\n";
}
print "\n";
for ($i = 0; $i < @bif; $i++) {
- my $args = join(', ', 'Process*', 'Eterm*');
- print "Eterm $bif[$i]->[3]($args);\n";
- print "Eterm wrap_$bif[$i]->[3]($args, UWord *I);\n";
+ my $args = join(', ', 'Process*', 'Eterm*', 'UWord*');
+ my $name = $bif_info[$i]->[3];
+ print "Eterm $name($args);\n";
+ print "Eterm wrap_$name($args);\n";
+ print "Eterm erts_gc_$name(Process* p, Eterm* reg, Uint live);\n"
+ if $bif_info[$i]->[0] eq 'gcbif';
+ print "Eterm $bif_info[$i]->[2]($args);\n"
+ unless $bif_info[$i]->[1] eq 'normal';
+ print "\n";
+}
+
+if ($hipe eq 'yes') {
+ print "\n#include \"hipe_nbif_impl.h\"\n";
}
-print "#endif\n";
+
+print "\n#endif\n";
#
# Generate the bif table file.
@@ -216,7 +300,7 @@ includes("export.h", "sys.h", "erl_vm.h", "global.h", "erl_process.h", "bif.h",
for ($i = 0; $i < @bif; $i++) {
next if $bif[$i]->[3] eq $bif[$i]->[4]; # Skip unwrapped bifs
my $arity = $bif[$i]->[2];
- my $func = $bif[$i]->[3];
+ my $func = $bif_info[$i]->[3];
print "Eterm\n";
print "wrap_$func(Process* p, Eterm* args, UWord* I)\n";
print "{\n";
@@ -225,28 +309,107 @@ for ($i = 0; $i < @bif; $i++) {
}
#
-# Generate the package bif file.
+# Generate erl_gc_bifs.c.
#
-open_file("$src/erl_pbifs.c");
+open_file("$src/erl_guard_bifs.c");
my $i;
includes("export.h", "sys.h", "erl_vm.h", "global.h", "erl_process.h", "bif.h",
- "erl_bif_table.h", "erl_atom_table.h");
+ "erl_bif_table.h");
+print "const ErtsGcBif erts_gc_bifs[] = {\n";
for ($i = 0; $i < @bif; $i++) {
- my $arity = $bif[$i]->[2];
- my $func = $bif[$i]->[3];
- my $arg;
- next unless $pbif[$i];
- next unless $func =~ m/^ebif_(.*)/;
- my $orig_func = $1;
- $orig_func = $implementation[$i] if $implementation[$i];
- print "Eterm\n";
- print "$func(Process* p, Eterm* BIF__ARGS)\n";
- print "{\n";
- print " return $orig_func(p, BIF__ARGS);\n";
- print "}\n\n";
+ next unless $bif_info[$i]->[0] eq 'gcbif';
+ print " {$bif[$i]->[3], erts_gc_$bif[$i]->[3], BIF_$bif[$i]->[5]},\n";
+}
+print " {NULL, NULL, -1}\n";
+print "};\n";
+
+print "const ErtsUBif erts_u_bifs[] = {\n";
+for ($i = 0; $i < @bif; $i++) {
+ next unless $bif_info[$i]->[0] eq 'ubif';
+ print " {$bif[$i]->[3], BIF_$bif[$i]->[5]},\n";
+}
+print " {NULL, -1}\n";
+print "};\n";
+
+#
+# Generate the dirty bif wrappers file.
+#
+
+open_file("$src/erl_dirty_bif_wrap.c");
+my $i;
+includes("erl_process.h", "erl_nfunc_sched.h", "erl_bif_table.h", "erl_atom_table.h");
+for ($i = 0; $i < @bif_info; $i++) {
+ next if $bif_info[$i]->[1] eq 'normal';
+ my $dtype;
+ if ($bif_info[$i]->[1] eq 'dirty_cpu') {
+ $dtype = "ERTS_SCHED_DIRTY_CPU";
+ }
+ else {
+ $dtype = "ERTS_SCHED_DIRTY_IO";
+ }
+print <<EOF;
+Eterm $bif_info[$i]->[2](Process *c_p, Eterm *regs, BeamInstr *I)
+{
+ return erts_reschedule_bif(c_p, regs, I, $bif_info[$i]->[3], $dtype);
}
+EOF
+
+}
+
+if ($hipe eq 'yes') {
+
+ #
+ # Generate the nbif_impl bif wrappers file.
+ #
+
+ open_file("$src/hipe_nbif_impl.h");
+ print <<EOF;
+
+#ifndef HIPE_NBIF_IMPL_H__
+#define HIPE_NBIF_IMPL_H__
+
+EOF
+
+ my $i;
+ for ($i = 0; $i < @bif; $i++) {
+ print <<EOF;
+Eterm nbif_impl_$bif[$i]->[5](Process *c_p, Eterm *regs);
+EOF
+ }
+
+ print <<EOF;
+
+#endif /* HIPE_NBIF_IMPL_H__ */
+
+EOF
+
+ #
+ # Generate the nbif_impl bif wrappers file.
+ #
+
+ open_file("$src/hipe_nbif_impl.c");
+ my $i;
+ includes("erl_process.h", "erl_nfunc_sched.h", "erl_bif_table.h", "erl_atom_table.h");
+ for ($i = 0; $i < @bif; $i++) {
+
+ print <<EOF;
+Eterm nbif_impl_$bif[$i]->[5](Process *c_p, Eterm *regs)
+{
+ return $bif[$i]->[3](c_p, regs, (UWord *) bif_export\[BIF_$bif[$i]->[5]\]);
+}
+
+EOF
+
+ }
+
+} # hipe
+
+#
+# Utilities follow.
+#
+
sub open_file { # or die
my($name) = @_;
diff --git a/erts/epmd/test/epmd_SUITE.erl b/erts/epmd/test/epmd_SUITE.erl
index 0f0a5acde7..81433c39c4 100644
--- a/erts/epmd/test/epmd_SUITE.erl
+++ b/erts/epmd/test/epmd_SUITE.erl
@@ -838,7 +838,7 @@ no_live_killing(Config) when is_list(Config) ->
%% sends TCP RST at wrong time
socket_reset_before_alive2_reply_is_written(Config) when is_list(Config) ->
%% - delay_write for easier triggering of race condition
- %% - relaxed_command_check for gracefull shutdown of epmd even if there
+ %% - relaxed_command_check for graceful shutdown of epmd even if there
%% is stuck node.
ok = epmdrun("-delay_write 1 -relaxed_command_check"),
diff --git a/erts/etc/common/escript.c b/erts/etc/common/escript.c
index 71c278881c..4134a3ff36 100644
--- a/erts/etc/common/escript.c
+++ b/erts/etc/common/escript.c
@@ -428,14 +428,6 @@ main(int argc, char** argv)
argv[argc] = NULL;
#endif
- emulator = env = get_env("ESCRIPT_EMULATOR");
- if (emulator == NULL) {
- emulator = get_default_emulator(argv[0]);
- }
-
- if (strlen(emulator) >= PMAX)
- error("Value of environment variable ESCRIPT_EMULATOR is too large");
-
/*
* Allocate the argv vector to be used for arguments to Erlang.
* Arrange for starting to pushing information in the middle of
@@ -446,21 +438,10 @@ main(int argc, char** argv)
eargv_base = (char **) emalloc(eargv_size*sizeof(char*));
eargv = eargv_base;
eargc = 0;
- push_words(emulator);
eargc_base = eargc;
eargv = eargv + eargv_size/2;
eargc = 0;
- free_env_val(env);
-
- /*
- * Push initial arguments.
- */
-
- PUSH("+B");
- PUSH2("-boot", "start_clean");
- PUSH("-noshell");
-
/* Determine basename of the executable */
for (basename = argv[0]+strlen(argv[0]);
basename > argv[0] && !(IS_DIRSEP(basename[-1]));
@@ -510,6 +491,27 @@ main(int argc, char** argv)
efree(absname);
}
+ /* Determine path to emulator */
+ emulator = env = get_env("ESCRIPT_EMULATOR");
+
+ if (emulator == NULL) {
+ emulator = get_default_emulator(scriptname);
+ }
+
+ if (strlen(emulator) >= PMAX)
+ error("Value of environment variable ESCRIPT_EMULATOR is too large");
+
+ /*
+ * Push initial arguments.
+ */
+
+ push_words(emulator);
+ free_env_val(env);
+
+ PUSH("+B");
+ PUSH2("-boot", "start_clean");
+ PUSH("-noshell");
+
/*
* Read options from the %%! row in the script and add them as args
*/
diff --git a/erts/etc/common/inet_gethost.c b/erts/etc/common/inet_gethost.c
index bc4893b0eb..ccafd95e51 100644
--- a/erts/etc/common/inet_gethost.c
+++ b/erts/etc/common/inet_gethost.c
@@ -2717,7 +2717,7 @@ BOOL close_mesq(MesQ *q)
LeaveCriticalSection(&(q->crit));
return FALSE;
}
- /* Noone else is supposed to use this object any more */
+ /* No one else is supposed to use this object any more */
LeaveCriticalSection(&(q->crit));
DeleteCriticalSection(&(q->crit));
CloseHandle(q->data_present);
diff --git a/erts/etc/unix/README b/erts/etc/unix/README
index adc6db4300..9985f2675d 100644
--- a/erts/etc/unix/README
+++ b/erts/etc/unix/README
@@ -42,7 +42,7 @@ Note that the Install script will terminate if it detects problems -
you will have to correct them and re-run the script. If everything
goes well, the last printout should be:
-Erlang installation sucessfully completed
+Erlang installation successfully completed
If it isn't, something went wrong - check the printouts to find out
what it was.
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index f2babc48d2..c689d495e6 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -775,7 +775,7 @@ define etp-pid-1
set $etp_pid_1 = (Eterm)($arg0)
if ($etp_pid_1 & 0xF) == 0x3
if (etp_arch_bits == 64)
- if (etp_big_endian)
+ if (etp_endianness > 0)
set $etp_pid_data = (unsigned) ((((Uint64) $etp_pid_1) >> 35) & 0x0fffffff)
else
set $etp_pid_data = (unsigned) ((((Uint64) $etp_pid_1) >> 4) & 0x0fffffff)
@@ -835,7 +835,7 @@ define etp-port-1
set $etp_port_1 = (Eterm)($arg0)
if ($etp_port_1 & 0xF) == 0x7
if (etp_arch_bits == 64)
- if (etp_big_endian)
+ if (etp_endianness > 0)
set $etp_port_data = (unsigned) ((((Uint64) $etp_port_1) >> 36) & 0x0fffffff)
else
set $etp_port_data = (unsigned) ((((Uint64) $etp_port_1) >> 4) & 0x0fffffff)
@@ -952,30 +952,31 @@ define etp-ref-1
if ((Eterm)($arg0) & 0x3) != 0x2
printf "#NotBoxed<%#x>", (Eterm)($arg0)
else
- set $etp_ref_1_p = (RefThing *)((Eterm)($arg0) & ~0x3)
+ set $etp_ref_1_p = (ErtsORefThing *)((Eterm)($arg0) & ~0x3)
if ($etp_ref_1_p->header & 0x3b) != 0x10
printf "#NotRef<%#x>", $etp_ref_1_p->header
else
- set $etp_ref_1_nump = (Uint32 *) 0
- set $etp_ref_1_error = 0
- if ($etp_ref_1_p->header >> 6) == 0
- set $etp_ref_1_error = 1
+ if $etp_ref_1_p->header != etp_ref_header && $etp_ref_1_p->header != etp_magic_ref_header
+ printf "#InternalRefError<%#x>", ($arg0)
else
- if $etp_arch64
- set $etp_ref_1_i = (int) $etp_ref_1_p->data.ui32[0]
- if (($etp_ref_1_i + 1) > (2 * ($etp_ref_1_p->header >> 6)))
- set $etp_ref_1_error = 1
- else
- set $etp_ref_1_nump = &$etp_ref_1_p->data.ui32[1]
+ set $etp_magic_ref = 0
+ set $etp_ref_1_i = 3
+ set $etp_ref_1_error = 0
+ set $etp_ref_1_nump = (Uint32 *) 0
+ if etp_ref_header == etp_magic_ref_header
+ if $etp_ref_1_p->marker != 0xffffffff
+ set $etp_magic_ref = 1
+ end
+ else
+ if $etp_ref_1_p->header == etp_magic_ref_header
+ set $etp_magic_ref = 1
end
+ end
+ if $etp_magic_ref == 0
+ set $etp_ref_1_nump = $etp_ref_1_p->num
else
- set $etp_ref_1_i = (int) ($etp_ref_1_p->header >> 6)
- set $etp_ref_1_nump = &$etp_ref_1_p->data.ui32[0]
+ set $etp_ref_1_nump = ((ErtsMRefThing *) $etp_ref_1_p)->mb->refn
end
- end
- if $etp_ref_1_error
- printf "#InternalRefError<%#x>", ($arg0)
- else
printf "#Ref<0"
set $etp_ref_1_i--
while $etp_ref_1_i >= 0
@@ -1312,27 +1313,30 @@ document etpf-msgq
%---------------------------------------------------------------------------
end
-
+define etp-stack-preamble
+ set $etp_stack_p = ($arg0)->stop
+ set $etp_stack_end = ($arg0)->hend
+ printf "%% Stacktrace (%u)\n", $etp_stack_end-$etp_stack_p
+ etp-1 ((Eterm)($arg0)->i) 0
+ printf " (I)\n"
+ if ($arg0)->cp != 0
+ etp-1 ((Eterm)($arg0)->cp) 0
+ printf " (cp)\n"
+ end
+end
define etp-stacktrace
# Args: Process*
#
# Non-reentrant
#
- set $etp_stacktrace_p = ($arg0)->stop
- set $etp_stacktrace_end = ($arg0)->hend
- printf "%% Stacktrace (%u): ", $etp_stacktrace_end-$etp_stacktrace_p
- if ($arg0)->cp == 0x0
- printf "NULL\n"
- else
- etp ($arg0)->cp
- end
- while $etp_stacktrace_p < $etp_stacktrace_end
- if ($etp_stacktrace_p[0] & 0x3) == 0x0
+ etp-stack-preamble ($arg0)
+ while $etp_stack_p < $etp_stack_end
+ if ($etp_stack_p[0] & 0x3) == 0x0
# Continuation pointer
- etp $etp_stacktrace_p[0]
+ etp $etp_stack_p[0]
end
- set $etp_stacktrace_p++
+ set $etp_stack_p++
end
end
@@ -1351,17 +1355,10 @@ define etp-stackdump
#
# Non-reentrant
#
- set $etp_stackdump_p = ($arg0)->stop
- set $etp_stackdump_end = ($arg0)->hend
- printf "%% Stackdump (%u): ", $etp_stackdump_end-$etp_stackdump_p
- if ($arg0)->cp == 0x0
- printf "NULL\n"
- else
- etp ($arg0)->cp
- end
- while $etp_stackdump_p < $etp_stackdump_end
- etp $etp_stackdump_p[0]
- set $etp_stackdump_p++
+ etp-stack-preamble ($arg0)
+ while $etp_stack_p < $etp_stack_end
+ etp $etp_stack_p[0]
+ set $etp_stack_p++
end
end
@@ -1597,7 +1594,7 @@ define etp-term-dump-pid
set $etp_pid_1 = (Eterm)($arg0)
if ($etp_pid_1 & 0xF) == 0x3
if (etp_arch_bits == 64)
- if (etp_big_endian)
+ if (etp_endianness > 0)
set $etp_pid_data = (unsigned) ((((Uint64) $etp_pid_1) >> 36) & 0x0fffffff)
else
set $etp_pid_data = (unsigned) ((((Uint64) $etp_pid_1) >> 4) & 0x0fffffff)
@@ -1642,7 +1639,7 @@ define etp-pid2pix-1
# Args: Eterm
#
if (etp_arch_bits == 64)
- if (etp_big_endian)
+ if (etp_endianness > 0)
set $etp_pix = (int) (((Uint64) $arg0) & 0x0fffffff)
else
set $etp_pix = (int) ((((Uint64) $arg0) >> 32) & 0x0fffffff)
@@ -1821,6 +1818,262 @@ document etp-proc-state
% Print state of process
%---------------------------------------------------------------------------
end
+define etp-proc-state-int
+# Args: int
+#
+ if ($arg0 & 0x80000000)
+ printf "GARBAGE<0x80000000> | "
+ end
+ if ($arg0 & 0x40000000)
+ printf "dirty-running-sys | "
+ end
+ if ($arg0 & 0x20000000)
+ printf "dirty-running | "
+ end
+ if ($arg0 & 0x10000000)
+ printf "dirty-active-sys | "
+ end
+ if ($arg0 & 0x8000000)
+ printf "dirty-io-proc | "
+ end
+ if ($arg0 & 0x4000000)
+ printf "dirty-cpu-proc | "
+ end
+ if ($arg0 & 0x2000000)
+ printf "on-heap-msgq | "
+ end
+ if ($arg0 & 0x1000000)
+ printf "off-heap-msgq | "
+ end
+ if ($arg0 & 0x800000)
+ printf "delayed-sys | "
+ end
+ if ($arg0 & 0x400000)
+ printf "proxy | "
+ set $proxy_process = 1
+ else
+ set $proxy_process = 0
+ end
+ if ($arg0 & 0x200000)
+ printf "running-sys | "
+ end
+ if ($arg0 & 0x100000)
+ printf "active-sys | "
+ end
+ if ($arg0 & 0x80000)
+ printf "trapping-exit | "
+ end
+ if ($arg0 & 0x40000)
+ printf "bound | "
+ end
+ if ($arg0 & 0x20000)
+ printf "garbage-collecting | "
+ end
+ if ($arg0 & 0x10000)
+ printf "suspended | "
+ end
+ if ($arg0 & 0x8000)
+ printf "running | "
+ end
+ if ($arg0 & 0x4000)
+ printf "in-run-queue | "
+ end
+ if ($arg0 & 0x2000)
+ printf "active | "
+ end
+ if ($arg0 & 0x1000)
+ printf "pending-exit | "
+ end
+ if ($arg0 & 0x800)
+ printf "exiting | "
+ end
+ if ($arg0 & 0x400)
+ printf "free | "
+ end
+ if ($arg0 & 0x200)
+ printf "in-prq-low | "
+ end
+ if ($arg0 & 0x100)
+ printf "in-prq-normal | "
+ end
+ if ($arg0 & 0x80)
+ printf "in-prq-high | "
+ end
+ if ($arg0 & 0x40)
+ printf "in-prq-max | "
+ end
+ if ($arg0 & 0x30) == 0x0
+ printf "prq-prio-max | "
+ else
+ if ($arg0 & 0x30) == 0x10
+ printf "prq-prio-high | "
+ else
+ if ($arg0 & 0x30) == 0x20
+ printf "prq-prio-normal | "
+ else
+ printf "prq-prio-low | "
+ end
+ end
+ end
+ if ($arg0 & 0xc) == 0x0
+ printf "usr-prio-max | "
+ else
+ if ($arg0 & 0xc) == 0x4
+ printf "usr-prio-high | "
+ else
+ if ($arg0 & 0xc) == 0x8
+ printf "usr-prio-normal | "
+ else
+ printf "usr-prio-low | "
+ end
+ end
+ end
+ if ($arg0 & 0x3) == 0x0
+ printf "act-prio-max\n"
+ else
+ if ($arg0 & 0x3) == 0x1
+ printf "act-prio-high\n"
+ else
+ if ($arg0 & 0x3) == 0x2
+ printf "act-prio-normal\n"
+ else
+ printf "act-prio-low\n"
+ end
+ end
+ end
+end
+
+document etp-proc-state-int
+%---------------------------------------------------------------------------
+% etp-proc-state-int int
+%
+% Print state of process state value
+%---------------------------------------------------------------------------
+end
+
+
+define etp-proc-state
+# Args: Process*
+#
+ set $state_int = *(((Uint32 *) &(((Process *) $arg0)->state)))
+ etp-proc-state-int $state_int
+end
+
+document etp-proc-state
+%---------------------------------------------------------------------------
+% etp-proc-state Process*
+%
+% Print state of process
+%---------------------------------------------------------------------------
+end
+
+define etp-proc-flags-int
+# Args: int
+#
+ if ($arg0 & ~0x1ffffff)
+ printf "GARBAGE<%x> ", ($arg0 & ~0x1ffffff)
+ end
+ if ($arg0 & 0x1000000)
+ printf "dirty-minor-gc "
+ end
+ if ($arg0 & 0x800000)
+ printf "dirty-major-gc "
+ end
+ if ($arg0 & 0x400000)
+ printf "dirty-gc-hibernate "
+ end
+ if ($arg0 & 0x200000)
+ printf "dirty-cla "
+ end
+ if ($arg0 & 0x100000)
+ printf "delayed-del-proc "
+ end
+ if ($arg0 & 0x80000)
+ printf "hipe-mode "
+ end
+ if ($arg0 & 0x40000)
+ printf "have-blocked-nmsb "
+ end
+ if ($arg0 & 0x20000)
+ printf "shdlr-onln-wait-q "
+ end
+ if ($arg0 & 0x10000)
+ printf "delay-gc "
+ end
+ if ($arg0 & 0x8000)
+ printf "abandoned-heap-use "
+ end
+ if ($arg0 & 0x4000)
+ printf "off-heap-msgq-chng "
+ end
+ if ($arg0 & 0x2000)
+ printf "on-heap-msgq "
+ end
+ if ($arg0 & 0x1000)
+ printf "off-heap-msgq "
+ end
+ if ($arg0 & 0x800)
+ printf "disable-gc "
+ end
+ if ($arg0 & 0x400)
+ printf "force-gc "
+ end
+ if ($arg0 & 0x200)
+ printf "p2pnr-resched "
+ end
+ if ($arg0 & 0x100)
+ printf "have-blocked-msb "
+ end
+ if ($arg0 & 0x80)
+ printf "using-ddll "
+ end
+ if ($arg0 & 0x40)
+ printf "distribution "
+ end
+ if ($arg0 & 0x20)
+ printf "using-db "
+ end
+ if ($arg0 & 0x10)
+ printf "need-fullsweep "
+ end
+ if ($arg0 & 0x8)
+ printf "heap-grow "
+ end
+ if ($arg0 & 0x4)
+ printf "timo "
+ end
+ if ($arg0 & 0x2)
+ printf "inslpqueue "
+ end
+ if ($arg0 & 0x1)
+ printf "hibernate-sched "
+ end
+ printf "\n"
+end
+
+document etp-proc-flags-int
+%---------------------------------------------------------------------------
+% etp-proc-flags-int int
+%
+% Print the process flags encoded in an integer flags value
+%---------------------------------------------------------------------------
+end
+
+
+define etp-proc-flags
+# Args: Process*
+#
+ set $flags_int = ((Process *) $arg0)->flags
+ etp-proc-flags-int $flags_int
+end
+
+document etp-proc-flags
+%---------------------------------------------------------------------------
+% etp-proc-flags Process*
+%
+% Print flags of process
+%---------------------------------------------------------------------------
+end
define etp-process-info
# Args: Process*
@@ -1830,6 +2083,8 @@ define etp-process-info
etp-1 $etp_proc->common.id
printf "\n State: "
etp-proc-state $etp_proc
+ printf "\n Flags: "
+ etp-proc-flags $etp_proc
if $proxy_process != 0
printf " Pointer: (Process *) %p\n", $etp_proc
printf " *** PROXY process struct *** refer to: \n"
@@ -1843,26 +2098,35 @@ define etp-process-info
printf "\n"
end
end
+ printf " Current function: "
if ($etp_proc->current)
- printf " Current function: "
- etp-1 $etp_proc->current[0]
+ etp-1 $etp_proc->current->module
printf ":"
- etp-1 $etp_proc->current[1]
- printf "/%d\n", $etp_proc->current[2]
+ etp-1 $etp_proc->current->function
+ printf "/%d\n", $etp_proc->current->arity
+ else
+ printf "unknown\n"
end
+ printf " CP: "
if ($etp_proc->cp)
- printf " CP: "
etp-cp-1 $etp_proc->cp
printf "\n"
+ else
+ printf "unknown\n"
end
+ printf " I: "
if ($etp_proc->i)
- printf " I: "
etp-cp-1 $etp_proc->i
printf "\n"
+ else
+ printf "unknown\n"
end
printf " Heap size: %ld\n", $etp_proc->heap_sz
+ printf " Old-heap size: "
if ($etp_proc->old_heap)
- printf " Old-heap size: %ld\n", $etp_proc->old_hend - $etp_proc->old_heap
+ printf "%ld\n", $etp_proc->old_hend - $etp_proc->old_heap
+ else
+ printf "0\n"
end
printf " Mbuf size: %ld\n", $etp_proc->mbuf_sz
if (etp_smp_compiled)
@@ -2008,7 +2272,7 @@ define etp-port-id2pix-1
# Args: Eterm
#
if (etp_arch_bits == 64)
- if (etp_big_endian)
+ if (etp_endianness > 0)
set $etp_pix = (int) (((Uint64) $arg0) & 0x0fffffff)
else
set $etp_pix = (int) ((((Uint64) $arg0) >> 32) & 0x0fffffff)
@@ -2351,8 +2615,20 @@ define etp-rq-flags-int
if ($arg0 & 0x4000000)
printf " protected"
end
- if ($arg0 & ~0x7ffffff)
- printf " GARBAGE(0x%x)", ($arg0 & ~0x3ffffff)
+ if ($arg0 & 0x8000000)
+ printf " exec"
+ end
+ if ($arg0 & 0x10000000)
+ printf " msb_exec"
+ end
+ if ($arg0 & 0x20000000)
+ printf " misc_op"
+ end
+ if ($arg0 & 0x40000000)
+ printf " halting"
+ end
+ if ($arg0 & ~0x7fffffff)
+ printf " GARBAGE(0x%x)", ($arg0 & ~0x7fffffff)
end
printf "\n"
end
@@ -2384,6 +2660,9 @@ define etp-ssi-flags
if ($arg0 & 0x10)
printf " suspended"
end
+ if ($arg0 & 0x20)
+ printf " msb_exec"
+ end
printf "\n"
end
@@ -2551,7 +2830,7 @@ define etp-run-queue-info-internal
set $rq_flags = *((Uint32 *) &($runq->flags))
etp-rq-flags-int $rq_flags
printf " Pointer: (ErtsRunQueue *) %p\n", $runq
-
+end
define etp-disasm-1
set $code_ptr = ((BeamInstr*)$arg0)
@@ -2596,7 +2875,7 @@ document etp-disasm
%---------------------------------------------------------------------------
% etp-disasm StartI EndI
%
-% Disassemble the code inbetween StartI and EndI
+% Disassemble the code between StartI and EndI
%---------------------------------------------------------------------------
end
@@ -2628,10 +2907,14 @@ define etp-system-info
printf "Compile date: %s\n", etp_compile_date
printf "Arch: %s\n", etp_arch
printf "Endianness: "
- if (etp_big_endian)
+ if (etp_endianness > 0)
printf "Big\n"
else
- printf "Little\n"
+ if (etp_endianness < 0)
+ printf "Little\n"
+ else
+ printf "Unknown\n"
+ end
end
printf "Word size: %d-bit\n", etp_arch_bits
printf "HiPE support: "
diff --git a/erts/etc/unix/run_erl.c b/erts/etc/unix/run_erl.c
index debd643da7..8f87c59131 100644
--- a/erts/etc/unix/run_erl.c
+++ b/erts/etc/unix/run_erl.c
@@ -553,7 +553,7 @@ static void pass_on(pid_t childpid)
FD_ZERO(&readfds);
FD_ZERO(&writefds);
} else {
- /* Some error occured */
+ /* Some error occurred */
ERRNO_ERR0(LOG_ERR,"Error in select.");
exit(1);
}
@@ -863,7 +863,7 @@ static int open_log(int log_num, int flags)
if (write_all(lfd, buf, strlen(buf)) < 0)
status("Error in writing to log.\n");
-#if USE_FSYNC
+#ifdef USE_FSYNC
fsync(lfd);
#endif
@@ -893,7 +893,7 @@ static void write_to_log(int* lfd, int* log_num, char* buf, int len)
status("Error in writing to log.\n");
}
-#if USE_FSYNC
+#ifdef USE_FSYNC
fsync(*lfd);
#endif
}
diff --git a/erts/example/matrix_nif.c b/erts/example/matrix_nif.c
index 6452084eb7..f42204d10f 100644
--- a/erts/example/matrix_nif.c
+++ b/erts/example/matrix_nif.c
@@ -22,7 +22,7 @@
* for matrix calculations.
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stddef.h>
#include <assert.h>
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 231eaa6927..4e252ec3f9 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -28,7 +28,7 @@
*
* Due to this we cannot use the __ATOMIC_SEQ_CST
* memory model. For more information see the comment
- * in the begining of ethr_membar.h in this directory.
+ * in the beginning of ethr_membar.h in this directory.
*/
#undef ETHR_INCLUDE_ATOMIC_IMPL__
diff --git a/erts/include/internal/gcc/ethr_dw_atomic.h b/erts/include/internal/gcc/ethr_dw_atomic.h
index 675912d86e..df20d0f1ef 100644
--- a/erts/include/internal/gcc/ethr_dw_atomic.h
+++ b/erts/include/internal/gcc/ethr_dw_atomic.h
@@ -28,7 +28,7 @@
*
* Due to this we cannot use the __ATOMIC_SEQ_CST
* memory model. For more information see the comment
- * in the begining of ethr_membar.h in this directory.
+ * in the beginning of ethr_membar.h in this directory.
*/
#undef ETHR_INCLUDE_DW_ATOMIC_IMPL__
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index d91b5ed079..5b03019d8e 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erl_tracer.beam b/erts/preloaded/ebin/erl_tracer.beam
index d6c987178b..4cf1b5ed82 100644
--- a/erts/preloaded/ebin/erl_tracer.beam
+++ b/erts/preloaded/ebin/erl_tracer.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 72898e6d8a..149d30d299 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_code_purger.beam b/erts/preloaded/ebin/erts_code_purger.beam
index 21463a2286..0a318b70bb 100644
--- a/erts/preloaded/ebin/erts_code_purger.beam
+++ b/erts/preloaded/ebin/erts_code_purger.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_dirty_process_code_checker.beam b/erts/preloaded/ebin/erts_dirty_process_code_checker.beam
index 423f33b99a..20ee82a134 100644
--- a/erts/preloaded/ebin/erts_dirty_process_code_checker.beam
+++ b/erts/preloaded/ebin/erts_dirty_process_code_checker.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index 4f81b5089d..c0ff53f503 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_literal_area_collector.beam b/erts/preloaded/ebin/erts_literal_area_collector.beam
index 0926cf1f95..e925636787 100644
--- a/erts/preloaded/ebin/erts_literal_area_collector.beam
+++ b/erts/preloaded/ebin/erts_literal_area_collector.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index c8db150d64..fdd87ef739 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index dc29a3eaad..b91fa63e7d 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_eval.beam b/erts/preloaded/ebin/prim_eval.beam
index 49fd62490e..a011890c1a 100644
--- a/erts/preloaded/ebin/prim_eval.beam
+++ b/erts/preloaded/ebin/prim_eval.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 6bf3c7313c..b5e5ff9f88 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index bae6291d87..994677872c 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index 134497411f..6f1c82509f 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index 34c342371b..eb9e07af7e 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/Makefile b/erts/preloaded/src/Makefile
index 2ab9edaf5e..edb9f35258 100644
--- a/erts/preloaded/src/Makefile
+++ b/erts/preloaded/src/Makefile
@@ -73,7 +73,7 @@ KERNEL_SRC=$(ERL_TOP)/lib/kernel/src
KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include
STDLIB_INCLUDE=$(ERL_TOP)/lib/stdlib/include
-ERL_COMPILE_FLAGS += +warn_obsolete_guard +debug_info -I$(KERNEL_SRC) -I$(KERNEL_INCLUDE)
+ERL_COMPILE_FLAGS += +debug_info -I$(KERNEL_SRC) -I$(KERNEL_INCLUDE)
debug opt: $(TARGET_FILES)
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 8771089b65..2b0c9ff2af 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -48,7 +48,7 @@
await_sched_wall_time_modifications/2,
gather_gc_info_result/1]).
--deprecated([hash/2, now/0]).
+-deprecated([now/0]).
%% Get rid of autoimports of spawn to avoid clashes with ourselves.
-compile({no_auto_import,[spawn_link/1]}).
@@ -83,6 +83,10 @@
| 'micro_seconds'
| 'nano_seconds'.
+-opaque prepared_code() :: reference().
+
+-export_type([prepared_code/0]).
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Native code BIF stubs and their types
%% (BIF's actually implemented in this module goes last in the file)
@@ -100,7 +104,8 @@
-export([binary_to_list/3, binary_to_term/1, binary_to_term/2]).
-export([bit_size/1, bitsize/1, bitstring_to_list/1]).
-export([bump_reductions/1, byte_size/1, call_on_load_function/1]).
--export([cancel_timer/1, cancel_timer/2, check_old_code/1, check_process_code/2,
+-export([cancel_timer/1, cancel_timer/2, ceil/1,
+ check_old_code/1, check_process_code/2,
check_process_code/3, crc32/1]).
-export([crc32/2, crc32_combine/3, date/0, decode_packet/3]).
-export([delete_element/2]).
@@ -109,13 +114,13 @@
-export([error/1, error/2, exit/1, exit/2, external_size/1]).
-export([external_size/2, finish_after_on_load/2, finish_loading/1, float/1]).
-export([float_to_binary/1, float_to_binary/2,
- float_to_list/1, float_to_list/2]).
+ float_to_list/1, float_to_list/2, floor/1]).
-export([fun_info/2, fun_info_mfa/1, fun_to_list/1, function_exported/3]).
-export([garbage_collect/0, garbage_collect/1, garbage_collect/2]).
-export([garbage_collect_message_area/0, get/0, get/1, get_keys/0, get_keys/1]).
-export([get_module_info/1, get_stacktrace/0, group_leader/0]).
-export([group_leader/2]).
--export([halt/0, halt/1, halt/2, hash/2,
+-export([halt/0, halt/1, halt/2,
has_prepared_code_on_load/1, hibernate/3]).
-export([insert_element/3]).
-export([integer_to_binary/1, integer_to_list/1]).
@@ -474,6 +479,13 @@ cancel_timer(_TimerRef) ->
cancel_timer(_TimerRef, _Options) ->
erlang:nif_error(undefined).
+%% ceil/1
+%% Shadowed by erl_bif_types: erlang:ceil/1
+-spec ceil(Number) -> integer() when
+ Number :: number().
+ceil(_) ->
+ erlang:nif_error(undef).
+
%% check_old_code/1
-spec check_old_code(Module) -> boolean() when
Module :: module().
@@ -783,9 +795,9 @@ external_size(_Term, _Options) ->
erlang:nif_error(undefined).
%% finish_loading/2
--spec erlang:finish_loading(PreparedCodeBinaries) -> ok | Error when
- PreparedCodeBinaries :: [PreparedCodeBinary],
- PreparedCodeBinary :: binary(),
+-spec erlang:finish_loading(PreparedCodeList) -> ok | Error when
+ PreparedCodeList :: [PreparedCode],
+ PreparedCode :: prepared_code(),
ModuleList :: [module()],
Error :: {not_purged,ModuleList} | {on_load,ModuleList}.
finish_loading(_List) ->
@@ -837,6 +849,13 @@ float_to_list(_Float) ->
float_to_list(_Float, _Options) ->
erlang:nif_error(undefined).
+%% floor/1
+%% Shadowed by erl_bif_types: erlang:floor/1
+-spec floor(Number) -> integer() when
+ Number :: number().
+floor(_) ->
+ erlang:nif_error(undef).
+
%% fun_info/2
-spec erlang:fun_info(Fun, Item) -> {Item, Info} when
Fun :: function(),
@@ -871,7 +890,7 @@ function_exported(_Module, _Function, _Arity) ->
%% garbage_collect/0
-spec garbage_collect() -> true.
garbage_collect() ->
- erlang:nif_error(undefined).
+ erts_internal:garbage_collect(major).
%% garbage_collect/1
-spec garbage_collect(Pid) -> GCResult when
@@ -884,36 +903,39 @@ garbage_collect(Pid) ->
error:Error -> erlang:error(Error, [Pid])
end.
+-record(gcopt, {
+ async = sync :: sync | {async, _},
+ type = major % default major, can also be minor
+ }).
+
%% garbage_collect/2
-spec garbage_collect(Pid, OptionList) -> GCResult | async when
Pid :: pid(),
RequestId :: term(),
- Option :: {async, RequestId},
+ Option :: {async, RequestId} | {type, 'major' | 'minor'},
OptionList :: [Option],
GCResult :: boolean().
garbage_collect(Pid, OptionList) ->
try
- Async = get_gc_opts(OptionList, sync),
- case Async of
+ GcOpts = get_gc_opts(OptionList, #gcopt{}),
+ case GcOpts#gcopt.async of
{async, ReqId} ->
{priority, Prio} = erlang:process_info(erlang:self(),
priority),
- erts_internal:request_system_task(Pid,
- Prio,
- {garbage_collect, ReqId}),
+ erts_internal:request_system_task(
+ Pid, Prio, {garbage_collect, ReqId, GcOpts#gcopt.type}),
async;
sync ->
case Pid == erlang:self() of
true ->
- erlang:garbage_collect();
+ erts_internal:garbage_collect(GcOpts#gcopt.type);
false ->
{priority, Prio} = erlang:process_info(erlang:self(),
priority),
ReqId = erlang:make_ref(),
- erts_internal:request_system_task(Pid,
- Prio,
- {garbage_collect,
- ReqId}),
+ erts_internal:request_system_task(
+ Pid, Prio,
+ {garbage_collect, ReqId, GcOpts#gcopt.type}),
receive
{garbage_collect, ReqId, GCResult} ->
GCResult
@@ -925,10 +947,12 @@ garbage_collect(Pid, OptionList) ->
end.
% gets async opt and verify valid option list
-get_gc_opts([{async, _ReqId} = AsyncTuple | Options], _OldAsync) ->
- get_gc_opts(Options, AsyncTuple);
-get_gc_opts([], Async) ->
- Async.
+get_gc_opts([{async, _ReqId} = AsyncTuple | Options], GcOpt = #gcopt{}) ->
+ get_gc_opts(Options, GcOpt#gcopt{ async = AsyncTuple });
+get_gc_opts([{type, T} | Options], GcOpt = #gcopt{}) ->
+ get_gc_opts(Options, GcOpt#gcopt{ type = T });
+get_gc_opts([], GcOpt) ->
+ GcOpt.
%% garbage_collect_message_area/0
-spec erlang:garbage_collect_message_area() -> boolean().
@@ -963,9 +987,10 @@ get_keys(_Val) ->
erlang:nif_error(undefined).
%% get_module_info/1
--spec erlang:get_module_info(P1) -> [{atom(), [{atom(), term()}]}] when
- P1 :: atom().
-get_module_info(_P1) ->
+-spec erlang:get_module_info(Module) -> [{Item, term()}] when
+ Item :: module | exports | attributes | compile | native | md5,
+ Module :: atom().
+get_module_info(_Module) ->
erlang:nif_error(undefined).
%% get_stacktrace/0
@@ -1007,16 +1032,9 @@ halt(Status) ->
halt(_Status, _Options) ->
erlang:nif_error(undefined).
-%% hash/2
--spec erlang:hash(Term, Range) -> pos_integer() when
- Term :: term(),
- Range :: pos_integer().
-hash(_Term, _Range) ->
- erlang:nif_error(undefined).
-
%% has_prepared_code_on_load/1
-spec erlang:has_prepared_code_on_load(PreparedCode) -> boolean() when
- PreparedCode :: binary().
+ PreparedCode :: prepared_code().
has_prepared_code_on_load(_PreparedCode) ->
erlang:nif_error(undefined).
@@ -1433,7 +1451,7 @@ timestamp() ->
-spec erlang:prepare_loading(Module, Code) -> PreparedCode | {error, Reason} when
Module :: module(),
Code :: binary(),
- PreparedCode :: binary(),
+ PreparedCode :: prepared_code(),
Reason :: bad_file.
prepare_loading(_Module, _Code) ->
erlang:nif_error(undefined).
@@ -1862,10 +1880,12 @@ element(_N, _Tuple) ->
erlang:nif_error(undefined).
%% Not documented
+-type module_info_key() :: attributes | compile | exports | functions | md5
+ | module | native | native_addresses.
-spec erlang:get_module_info(Module, Item) -> ModuleInfo when
Module :: atom(),
- Item :: module | exports | functions | attributes | compile | native_addresses | md5,
- ModuleInfo :: atom() | [] | [{atom(), arity()}] | [{atom(), term()}] | [{atom(), arity(), integer()}].
+ Item :: module_info_key(),
+ ModuleInfo :: term().
get_module_info(_Module, _Item) ->
erlang:nif_error(undefined).
@@ -1991,8 +2011,8 @@ load_module(Mod, Code) ->
case erlang:prepare_loading(Mod, Code) of
{error,_}=Error ->
Error;
- Bin when erlang:is_binary(Bin) ->
- case erlang:finish_loading([Bin]) of
+ Prep when erlang:is_reference(Prep) ->
+ case erlang:finish_loading([Prep]) of
ok ->
{module,Mod};
{Error,[Mod]} ->
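
Since prepare_loading/2 now hands back an opaque prepared_code() term (a magic reference) rather than a binary, callers should branch on the error tuple instead of testing is_binary/1. A minimal sketch mirroring the load_module/2 clause above (load_prepared/2 is an illustrative name):

    load_prepared(Mod, Beam) when is_atom(Mod), is_binary(Beam) ->
        case erlang:prepare_loading(Mod, Beam) of
            {error, _} = Error ->
                Error;
            Prepared ->                       %% opaque prepared_code() handle
                case erlang:finish_loading([Prepared]) of
                    ok -> {module, Mod};
                    {not_purged, [Mod]} -> {error, not_purged};
                    {on_load, [Mod]} -> {error, on_load}
                end
        end.
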
@@ -2301,8 +2321,8 @@ spawn_opt(_Tuple) ->
Total_Reductions :: non_neg_integer(),
Reductions_Since_Last_Call :: non_neg_integer();
(run_queue) -> non_neg_integer();
- (run_queue_lengths) -> [RunQueueLenght] when
- RunQueueLenght :: non_neg_integer();
+ (run_queue_lengths) -> [RunQueueLength] when
+ RunQueueLength :: non_neg_integer();
(runtime) -> {Total_Run_Time, Time_Since_Last_Call} when
Total_Run_Time :: non_neg_integer(),
Time_Since_Last_Call :: non_neg_integer();
@@ -2310,10 +2330,14 @@ spawn_opt(_Tuple) ->
SchedulerId :: pos_integer(),
ActiveTime :: non_neg_integer(),
TotalTime :: non_neg_integer();
+ (scheduler_wall_time_all) -> [{SchedulerId, ActiveTime, TotalTime}] | undefined when
+ SchedulerId :: pos_integer(),
+ ActiveTime :: non_neg_integer(),
+ TotalTime :: non_neg_integer();
(total_active_tasks) -> ActiveTasks when
ActiveTasks :: non_neg_integer();
- (total_run_queue_lengths) -> TotalRunQueueLenghts when
- TotalRunQueueLenghts :: non_neg_integer();
+ (total_run_queue_lengths) -> TotalRunQueueLengths when
+ TotalRunQueueLengths :: non_neg_integer();
(wall_clock) -> {Total_Wallclock_Time,
Wallclock_Time_Since_Last_Call} when
Total_Wallclock_Time :: non_neg_integer(),
@@ -2509,6 +2533,8 @@ tuple_to_list(_Tuple) ->
Alloc :: atom();
({allocator_sizes, Alloc}) -> [_] when %% More or less anything
Alloc :: atom();
+ (atom_count) -> pos_integer();
+ (atom_limit) -> pos_integer();
(build_type) -> opt | debug | purify | quantify | purecov |
gcov | valgrind | gprof | lcnt | frmptr;
(c_compiler_used) -> {atom(), term()};
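
The two new system_info/1 items expose how full the atom table is, which makes it easy to watch atom usage at run time. A small hedged sketch (atom_fill_ratio/0 is an illustrative helper):

    %% Fraction of the atom table currently in use, e.g. 0.02 for 2%.
    atom_fill_ratio() ->
        erlang:system_info(atom_count) / erlang:system_info(atom_limit).
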
@@ -3989,6 +4015,7 @@ sched_wall_time(Ref, N, undefined) ->
sched_wall_time(Ref, N, Acc) ->
receive
{Ref, undefined} -> sched_wall_time(Ref, N-1, undefined);
+ {Ref, SWTL} when erlang:is_list(SWTL) -> sched_wall_time(Ref, N-1, Acc ++ SWTL);
{Ref, SWT} -> sched_wall_time(Ref, N-1, [SWT|Acc])
end.
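
statistics(scheduler_wall_time_all) behaves like scheduler_wall_time but also reports dirty schedulers, which is why the receive loop above folds list-shaped replies into the accumulator. A hedged utilization sketch along the lines of the documented scheduler_wall_time recipe (the helper name and the one-second window are arbitrary):

    scheduler_utilization() ->
        _ = erlang:system_flag(scheduler_wall_time, true),
        S0 = lists:sort(erlang:statistics(scheduler_wall_time_all)),
        timer:sleep(1000),
        S1 = lists:sort(erlang:statistics(scheduler_wall_time_all)),
        {Active, Total} =
            lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {ASum, TSum}) ->
                                {ASum + (A1 - A0), TSum + (T1 - T0)}
                        end, {0, 0}, lists:zip(S0, S1)),
        Active / Total.
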
diff --git a/erts/preloaded/src/erts_code_purger.erl b/erts/preloaded/src/erts_code_purger.erl
index 28d71fd07e..fd214228c7 100644
--- a/erts/preloaded/src/erts_code_purger.erl
+++ b/erts/preloaded/src/erts_code_purger.erl
@@ -29,29 +29,34 @@
start() ->
register(erts_code_purger, self()),
process_flag(trap_exit, true),
- loop().
-
-loop() ->
- _ = receive
- {purge,Mod,From,Ref} when is_atom(Mod), is_pid(From) ->
- Res = do_purge(Mod),
- From ! {reply, purge, Res, Ref};
-
- {soft_purge,Mod,From,Ref} when is_atom(Mod), is_pid(From) ->
- Res = do_soft_purge(Mod),
- From ! {reply, soft_purge, Res, Ref};
-
- {finish_after_on_load,{Mod,Keep},From,Ref}
- when is_atom(Mod), is_pid(From) ->
- Res = do_finish_after_on_load(Mod, Keep),
- From ! {reply, finish_after_on_load, Res, Ref};
-
- {test_purge, Mod, From, Type, Ref} when is_atom(Mod), is_pid(From) ->
- do_test_purge(Mod, From, Type, Ref);
-
- _Other -> ignore
- end,
- loop().
+ wait_for_request().
+
+wait_for_request() ->
+ handle_request(receive Msg -> Msg end, []).
+
+handle_request({purge, Mod, From, Ref}, Reqs) when is_atom(Mod), is_pid(From) ->
+ {Res, NewReqs} = do_purge(Mod, Reqs),
+ From ! {reply, purge, Res, Ref},
+ check_requests(NewReqs);
+handle_request({soft_purge, Mod, From, Ref}, Reqs) when is_atom(Mod), is_pid(From) ->
+ {Res, NewReqs} = do_soft_purge(Mod, Reqs),
+ From ! {reply, soft_purge, Res, Ref},
+ check_requests(NewReqs);
+handle_request({finish_after_on_load, {Mod,Keep}, From, Ref}, Reqs)
+ when is_atom(Mod), is_boolean(Keep), is_pid(From) ->
+ NewReqs = do_finish_after_on_load(Mod, Keep, Reqs),
+ From ! {reply, finish_after_on_load, ok, Ref},
+ check_requests(NewReqs);
+handle_request({test_purge, Mod, From, Type, Ref}, Reqs) when is_atom(Mod), is_pid(From) ->
+ NewReqs = do_test_purge(Mod, From, Type, Ref, Reqs),
+ check_requests(NewReqs);
+handle_request(_Garbage, Reqs) ->
+ check_requests(Reqs).
+
+check_requests([]) ->
+ wait_for_request();
+check_requests([R|Rs]) ->
+ handle_request(R, Rs).
%%
%% Processes that try to call a fun that belongs to
@@ -99,14 +104,15 @@ purge(Mod) when is_atom(Mod) ->
Result
end.
-do_purge(Mod) ->
+do_purge(Mod, Reqs) ->
case erts_internal:purge_module(Mod, prepare) of
false ->
- {false, false};
+ {{false, false}, Reqs};
true ->
- DidKill = check_proc_code(erlang:processes(), Mod, true),
+ {DidKill, NewReqs} = check_proc_code(erlang:processes(),
+ Mod, true, Reqs),
true = erts_internal:purge_module(Mod, complete),
- {true, DidKill}
+ {{true, DidKill}, NewReqs}
end.
%% soft_purge(Module)
@@ -122,17 +128,14 @@ soft_purge(Mod) ->
Result
end.
-do_soft_purge(Mod) ->
+do_soft_purge(Mod, Reqs) ->
case erts_internal:purge_module(Mod, prepare) of
false ->
- true;
+ {true, Reqs};
true ->
- Res = check_proc_code(erlang:processes(), Mod, false),
- erts_internal:purge_module(Mod,
- case Res of
- false -> abort;
- true -> complete
- end)
+ {PurgeOp, NewReqs} = check_proc_code(erlang:processes(),
+ Mod, false, Reqs),
+ {erts_internal:purge_module(Mod, PurgeOp), NewReqs}
end.
%% finish_after_on_load(Module, Keep)
@@ -147,179 +150,130 @@ finish_after_on_load(Mod, Keep) ->
Result
end.
-do_finish_after_on_load(Mod, Keep) ->
+do_finish_after_on_load(Mod, Keep, Reqs) ->
erlang:finish_after_on_load(Mod, Keep),
case Keep of
true ->
- ok;
+ Reqs;
false ->
case erts_internal:purge_module(Mod, prepare_on_load) of
false ->
- true;
+ Reqs;
true ->
- _ = check_proc_code(erlang:processes(), Mod, true),
- true = erts_internal:purge_module(Mod, complete)
+ {_DidKill, NewReqs} =
+ check_proc_code(erlang:processes(),
+ Mod, true, Reqs),
+ true = erts_internal:purge_module(Mod, complete),
+ NewReqs
end
end.
-
%%
-%% check_proc_code(Pids, Mod, Hard) - Send asynchronous
+%% check_proc_code(Pids, Mod, Hard, Preqs) - Send asynchronous
%% requests to all processes to perform a check_process_code
%% operation. Each process will check its own state and
%% reply with the result. If 'Hard' equals
%% - true, processes that refer 'Mod' will be killed. If
%% any processes were killed true is returned; otherwise,
%% false.
-%% - false, and any processes refer 'Mod', false will
-%% returned; otherwise, true.
+%% - false, and any processes refer 'Mod', 'abort' will
+%% be returned; otherwise, 'complete'.
%%
-%% Requests will be sent to all processes identified by
-%% Pids at once, but without allowing GC to be performed.
-%% Check process code operations that are aborted due to
-%% GC need, will be restarted allowing GC. However, only
-%% ?MAX_CPC_GC_PROCS outstanding operation allowing GC at
-%% a time will be allowed. This in order not to blow up
-%% memory wise.
-%%
-%% We also only allow ?MAX_CPC_NO_OUTSTANDING_KILLS
+%% We only allow ?MAX_CPC_NO_OUTSTANDING_KILLS
%% outstanding kills. This both in order to avoid flooding
%% our message queue with 'DOWN' messages and limiting the
%% amount of memory used to keep references to all
%% outstanding kills.
%%
-%% We maybe should allow more than two outstanding
-%% GC requests, but for now we play it safe...
--define(MAX_CPC_GC_PROCS, 2).
-define(MAX_CPC_NO_OUTSTANDING_KILLS, 10).
--record(cpc_static, {hard, module, tag}).
+-record(cpc_static, {hard, module, tag, purge_requests}).
-record(cpc_kill, {outstanding = [],
no_outstanding = 0,
waiting = [],
killed = false}).
-check_proc_code(Pids, Mod, Hard) ->
+check_proc_code(Pids, Mod, Hard, PReqs) ->
Tag = erlang:make_ref(),
CpcS = #cpc_static{hard = Hard,
module = Mod,
- tag = Tag},
- check_proc_code(CpcS, cpc_init(CpcS, Pids, 0), 0, [], #cpc_kill{}, true).
-
-check_proc_code(#cpc_static{hard = true}, 0, 0, [],
- #cpc_kill{outstanding = [], waiting = [], killed = Killed},
- true) ->
- %% No outstanding requests. We did a hard check, so result is whether or
- %% not we killed any processes...
- Killed;
-check_proc_code(#cpc_static{hard = false}, 0, 0, [], _KillState, Success) ->
- %% No outstanding requests and we did a soft check...
- Success;
-check_proc_code(#cpc_static{hard = false, tag = Tag} = CpcS, NoReq0, NoGcReq0,
- [], _KillState, false) ->
- %% Failed soft check; just cleanup the remaining replies corresponding
- %% to the requests we've sent...
- {NoReq1, NoGcReq1} = receive
- {check_process_code, {Tag, _P, GC}, _Res} ->
- case GC of
- false -> {NoReq0-1, NoGcReq0};
- true -> {NoReq0, NoGcReq0-1}
- end
- end,
- check_proc_code(CpcS, NoReq1, NoGcReq1, [], _KillState, false);
-check_proc_code(#cpc_static{tag = Tag} = CpcS, NoReq0, NoGcReq0, NeedGC0,
- KillState0, Success) ->
-
- %% Check if we should request a GC operation
- {NoGcReq1, NeedGC1} = case NoGcReq0 < ?MAX_CPC_GC_PROCS of
- GcOpAllowed when GcOpAllowed == false;
- NeedGC0 == [] ->
- {NoGcReq0, NeedGC0};
- _ ->
- {NoGcReq0+1, cpc_request_gc(CpcS,NeedGC0)}
- end,
-
- %% Wait for a cpc reply or 'DOWN' message
- {NoReq1, NoGcReq2, Pid, Result, KillState1} = cpc_recv(Tag,
- NoReq0,
- NoGcReq1,
- KillState0),
-
- %% Check the result of the reply
- case Result of
- aborted ->
- %% Operation aborted due to the need to GC in order to
- %% determine if the process is referring the module.
- %% Schedule the operation for restart allowing GC...
- check_proc_code(CpcS, NoReq1, NoGcReq2, [Pid|NeedGC1], KillState1,
- Success);
- false ->
+ tag = Tag,
+ purge_requests = PReqs},
+ cpc_receive(CpcS, cpc_init(CpcS, Pids, 0), #cpc_kill{}, []).
+
+cpc_receive(#cpc_static{hard = true} = CpcS,
+ 0,
+ #cpc_kill{outstanding = [], waiting = [], killed = Killed},
+ PReqs) ->
+ %% No outstanding cpc requests. We did a hard check, so result is
+ %% whether or not we killed any processes...
+ cpc_result(CpcS, PReqs, Killed);
+cpc_receive(#cpc_static{hard = false} = CpcS, 0, _KillState, PReqs) ->
+ %% No outstanding cpc requests and we did a soft check that succeeded...
+ cpc_result(CpcS, PReqs, complete);
+cpc_receive(#cpc_static{tag = Tag} = CpcS, NoReq, KillState0, PReqs) ->
+ receive
+ {check_process_code, {Tag, _Pid}, false} ->
%% Process not referring the module; done with this process...
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1, KillState1,
- Success);
- true ->
+ cpc_receive(CpcS, NoReq-1, KillState0, PReqs);
+ {check_process_code, {Tag, Pid}, true} ->
%% Process referring the module...
case CpcS#cpc_static.hard of
false ->
%% ... and soft check. The whole operation failed so
- %% no point continuing; clean up and fail...
- check_proc_code(CpcS, NoReq1, NoGcReq2, [], KillState1,
- false);
+ %% no point continuing; fail straight away. Garbage
+ %% messages from this session will be ignored
+ %% by following sessions...
+ cpc_result(CpcS, PReqs, abort);
true ->
%% ... and hard check; schedule kill of it...
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1,
- cpc_sched_kill(Pid, KillState1), Success)
+ KillState1 = cpc_sched_kill(Pid, KillState0),
+ cpc_receive(CpcS, NoReq-1, KillState1, PReqs)
end;
- 'DOWN' ->
- %% Handled 'DOWN' message
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1,
- KillState1, Success)
+ {'DOWN', MonRef, process, _, _} ->
+ KillState1 = cpc_handle_down(MonRef, KillState0),
+ cpc_receive(CpcS, NoReq, KillState1, PReqs);
+ PReq when element(1, PReq) == purge;
+ element(1, PReq) == soft_purge;
+ element(1, PReq) == test_purge ->
+ %% A new purge request; save it until later...
+ cpc_receive(CpcS, NoReq, KillState0, [PReq | PReqs]);
+ _Garbage ->
+ %% Garbage message; ignore it...
+ cpc_receive(CpcS, NoReq, KillState0, PReqs)
end.
-cpc_recv(Tag, NoReq, NoGcReq, #cpc_kill{outstanding = []} = KillState) ->
- receive
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
- end;
-cpc_recv(Tag, NoReq, NoGcReq,
- #cpc_kill{outstanding = [R0, R1, R2, R3, R4 | _]} = KillState) ->
- receive
- {'DOWN', R, process, _, _} when R == R0;
- R == R1;
- R == R2;
- R == R3;
- R == R4 ->
- cpc_handle_down(NoReq, NoGcReq, R, KillState);
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
- end;
-cpc_recv(Tag, NoReq, NoGcReq, #cpc_kill{outstanding = [R|_]} = KillState) ->
- receive
- {'DOWN', R, process, _, _} ->
- cpc_handle_down(NoReq, NoGcReq, R, KillState);
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
+cpc_result(#cpc_static{purge_requests = PReqs}, NewPReqs, Res) ->
+ {Res, PReqs ++ cpc_reverse(NewPReqs)}.
+
+cpc_reverse([_] = L) -> L;
+cpc_reverse(Xs) -> cpc_reverse(Xs, []).
+
+cpc_reverse([], Ys) -> Ys;
+cpc_reverse([X|Xs], Ys) -> cpc_reverse(Xs, [X|Ys]).
+
+cpc_handle_down(R, #cpc_kill{outstanding = Rs,
+ no_outstanding = N} = KillState0) ->
+ try
+ NewOutst = cpc_list_rm(R, Rs),
+ KillState1 = KillState0#cpc_kill{outstanding = NewOutst,
+ no_outstanding = N-1},
+ cpc_sched_kill_waiting(KillState1)
+ catch
+ throw : undefined -> %% Triggered by garbage message...
+ KillState0
end.
-cpc_handle_down(NoReq, NoGcReq, R, #cpc_kill{outstanding = Rs,
- no_outstanding = N} = KillState) ->
- {NoReq, NoGcReq, undefined, 'DOWN',
- cpc_sched_kill_waiting(KillState#cpc_kill{outstanding = cpc_list_rm(R, Rs),
- no_outstanding = N-1})}.
-
+cpc_list_rm(_R, []) ->
+ throw(undefined);
cpc_list_rm(R, [R|Rs]) ->
Rs;
cpc_list_rm(R0, [R1|Rs]) ->
[R1|cpc_list_rm(R0, Rs)].
-cpc_handle_cpc(NoReq, NoGcReq, false, Pid, Res, KillState) ->
- {NoReq-1, NoGcReq, Pid, Res, KillState};
-cpc_handle_cpc(NoReq, NoGcReq, true, Pid, Res, KillState) ->
- {NoReq, NoGcReq-1, Pid, Res, KillState}.
-
cpc_sched_kill_waiting(#cpc_kill{waiting = []} = KillState) ->
KillState;
cpc_sched_kill_waiting(#cpc_kill{outstanding = Rs,
@@ -343,18 +297,13 @@ cpc_sched_kill(Pid,
no_outstanding = N+1,
killed = true}.
-cpc_request(#cpc_static{tag = Tag, module = Mod}, Pid, AllowGc) ->
- erts_internal:check_process_code(Pid, Mod, [{async, {Tag, Pid, AllowGc}},
- {allow_gc, AllowGc}]).
-
-cpc_request_gc(CpcS, [Pid|Pids]) ->
- cpc_request(CpcS, Pid, true),
- Pids.
+cpc_request(#cpc_static{tag = Tag, module = Mod}, Pid) ->
+ erts_internal:check_process_code(Pid, Mod, [{async, {Tag, Pid}}]).
cpc_init(_CpcS, [], NoReqs) ->
NoReqs;
cpc_init(CpcS, [Pid|Pids], NoReqs) ->
- cpc_request(CpcS, Pid, false),
+ cpc_request(CpcS, Pid),
cpc_init(CpcS, Pids, NoReqs+1).
% end of check_proc_code() implementation.
@@ -366,64 +315,63 @@ cpc_init(CpcS, [Pid|Pids], NoReqs) ->
%% as usual, but the tester can control when to enter the
%% specific phases.
%%
-do_test_purge(Mod, From, Type, Ref) when Type == true; Type == false ->
- Mon = erlang:monitor(process, From),
- Res = case Type of
- true -> do_test_hard_purge(Mod, From, Ref, Mon);
- false -> do_test_soft_purge(Mod, From, Ref, Mon)
- end,
+do_test_purge(Mod, From, true, Ref, Reqs) ->
+ {Res, NewReqs} = do_test_hard_purge(Mod, From, Ref, Reqs),
+ From ! {test_purge, Res, Ref},
+ NewReqs;
+do_test_purge(Mod, From, false, Ref, Reqs) ->
+ {Res, NewReqs} = do_test_soft_purge(Mod, From, Ref, Reqs),
From ! {test_purge, Res, Ref},
- erlang:demonitor(Mon, [flush]),
- ok;
-do_test_purge(_, _, _, _) ->
- ok.
+ NewReqs;
+do_test_purge(_, _, _, _, Reqs) ->
+ Reqs.
-do_test_soft_purge(Mod, From, Ref, Mon) ->
+do_test_soft_purge(Mod, From, Ref, Reqs) ->
PrepRes = erts_internal:purge_module(Mod, prepare),
- TestRes = test_progress(started, From, Mon, Ref, ok),
+ TestRes = test_progress(started, From, Ref, ok),
case PrepRes of
false ->
- _ = test_progress(continued, From, Mon, Ref, TestRes),
- true;
+ _ = test_progress(continued, From, Ref, TestRes),
+ {true, Reqs};
true ->
- Res = check_proc_code(erlang:processes(), Mod, false),
- _ = test_progress(continued, From, Mon, Ref, TestRes),
- erts_internal:purge_module(Mod,
- case Res of
- false -> abort;
- true -> complete
- end)
+ {PurgeOp, NewReqs} = check_proc_code(erlang:processes(),
+ Mod, false, Reqs),
+ _ = test_progress(continued, From, Ref, TestRes),
+ {erts_internal:purge_module(Mod, PurgeOp), NewReqs}
end.
-do_test_hard_purge(Mod, From, Ref, Mon) ->
+do_test_hard_purge(Mod, From, Ref, Reqs) ->
PrepRes = erts_internal:purge_module(Mod, prepare),
- TestRes = test_progress(started, From, Mon, Ref, ok),
+ TestRes = test_progress(started, From, Ref, ok),
case PrepRes of
false ->
- _ = test_progress(continued, From, Mon, Ref, TestRes),
- {false, false};
+ _ = test_progress(continued, From, Ref, TestRes),
+ {{false, false}, Reqs};
true ->
- DidKill = check_proc_code(erlang:processes(), Mod, true),
- _ = test_progress(continued, From, Mon, Ref, TestRes),
+ {DidKill, NewReqs} = check_proc_code(erlang:processes(),
+ Mod, true, Reqs),
+ _ = test_progress(continued, From, Ref, TestRes),
true = erts_internal:purge_module(Mod, complete),
- {true, DidKill}
+ {{true, DidKill}, NewReqs}
end.
-test_progress(_State, _From, _Mon, _Ref, died) ->
+test_progress(_State, _From, _Ref, died) ->
%% Test process died; continue so we won't
%% leave the system in an inconsistent
%% state...
died;
-test_progress(started, From, Mon, Ref, ok) ->
+test_progress(started, From, Ref, ok) ->
From ! {started, Ref},
+ Mon = erlang:monitor(process, From),
receive
{'DOWN', Mon, process, From, _} -> died;
- {continue, Ref} -> ok
+ {continue, Ref} -> erlang:demonitor(Mon, [flush]), ok
end;
-test_progress(continued, From, Mon, Ref, ok) ->
+test_progress(continued, From, Ref, ok) ->
From ! {continued, Ref},
+ Mon = erlang:monitor(process, From),
receive
{'DOWN', Mon, process, From, _} -> died;
- {complete, Ref} -> ok
+ {complete, Ref} -> erlang:demonitor(Mon, [flush]), ok
end.
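
After this rewrite erts_code_purger serializes its own work: purge requests that arrive while check_proc_code/4 is still collecting replies are parked in the request list and handled afterwards by check_requests/1. The caller-side protocol is unchanged; a hedged sketch of the exchange that purge/1 above performs (normally reached through code:purge/1 rather than by messaging the purger directly):

    request_purge(Mod) when is_atom(Mod) ->
        Ref = erlang:make_ref(),
        erts_code_purger ! {purge, Mod, self(), Ref},
        receive
            {reply, purge, {WasPurged, DidKill}, Ref} -> {WasPurged, DidKill}
        end.
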
diff --git a/erts/preloaded/src/erts_dirty_process_code_checker.erl b/erts/preloaded/src/erts_dirty_process_code_checker.erl
index 911642082c..7d3fa264be 100644
--- a/erts/preloaded/src/erts_dirty_process_code_checker.erl
+++ b/erts/preloaded/src/erts_dirty_process_code_checker.erl
@@ -48,8 +48,7 @@ handle_request({Requester,
Prio,
{check_process_code,
ReqId,
- Module,
- _Flags} = Op}) ->
+ Module} = Op}) ->
%%
%% Target may have stopped executing dirty since the
%% initial request was made. Check its current state
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index f4518c4008..bcc779e6f6 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -31,7 +31,8 @@
-export([await_port_send_result/3]).
-export([cmp_term/2]).
--export([map_to_tuple_keys/1, term_type/1, map_hashmap_children/1]).
+-export([map_to_tuple_keys/1, term_type/1, map_hashmap_children/1,
+ maps_to_list/2]).
-export([open_port/2, port_command/3, port_connect/2, port_close/1,
port_control/3, port_call/3, port_info/1, port_info/2]).
@@ -39,6 +40,7 @@
gather_system_check_result/1]).
-export([request_system_task/3, request_system_task/4]).
+-export([garbage_collect/1]).
-export([check_process_code/3]).
-export([check_dirty_process_code/2]).
@@ -60,7 +62,7 @@
-export([trace/3, trace_pattern/3]).
%% Auto import name clash
--export([check_process_code/2]).
+-export([check_process_code/1]).
%%
%% Await result of send to port
@@ -205,8 +207,9 @@ port_info(_Result, _Item) ->
-spec request_system_task(Pid, Prio, Request) -> 'ok' when
Prio :: 'max' | 'high' | 'normal' | 'low',
- Request :: {'garbage_collect', term()}
- | {'check_process_code', term(), module(), non_neg_integer()}
+ Type :: 'major' | 'minor',
+ Request :: {'garbage_collect', term(), Type}
+ | {'check_process_code', term(), module()}
| {'copy_literals', term(), boolean()},
Pid :: pid().
@@ -216,7 +219,7 @@ request_system_task(_Pid, _Prio, _Request) ->
-spec request_system_task(RequesterPid, TargetPid, Prio, Request) -> 'ok' | 'dirty_execution' when
Prio :: 'max' | 'high' | 'normal' | 'low',
Request :: {'garbage_collect', term()}
- | {'check_process_code', term(), module(), non_neg_integer()}
+ | {'check_process_code', term(), module()}
| {'copy_literals', term(), boolean()},
RequesterPid :: pid(),
TargetPid :: pid().
@@ -224,12 +227,14 @@ request_system_task(_Pid, _Prio, _Request) ->
request_system_task(_RequesterPid, _TargetPid, _Prio, _Request) ->
erlang:nif_error(undefined).
--define(ERTS_CPC_ALLOW_GC, (1 bsl 0)).
+-spec garbage_collect(Mode) -> 'true' when Mode :: 'major' | 'minor'.
--spec check_process_code(Module, Flags) -> boolean() when
- Module :: module(),
- Flags :: non_neg_integer().
-check_process_code(_Module, _Flags) ->
+garbage_collect(_Mode) ->
+ erlang:nif_error(undefined).
+
+-spec check_process_code(Module) -> boolean() when
+ Module :: module().
+check_process_code(_Module) ->
erlang:nif_error(undefined).
-spec check_process_code(Pid, Module, OptionList) -> CheckResult | async when
@@ -240,7 +245,7 @@ check_process_code(_Module, _Flags) ->
OptionList :: [Option],
CheckResult :: boolean() | aborted.
check_process_code(Pid, Module, OptionList) ->
- {Async, Flags} = get_cpc_opts(OptionList, sync, ?ERTS_CPC_ALLOW_GC),
+ Async = get_cpc_opts(OptionList, sync),
case Async of
{async, ReqId} ->
{priority, Prio} = erlang:process_info(erlang:self(),
@@ -249,13 +254,12 @@ check_process_code(Pid, Module, OptionList) ->
Prio,
{check_process_code,
ReqId,
- Module,
- Flags}),
+ Module}),
async;
sync ->
case Pid == erlang:self() of
true ->
- erts_internal:check_process_code(Module, Flags);
+ erts_internal:check_process_code(Module);
false ->
{priority, Prio} = erlang:process_info(erlang:self(),
priority),
@@ -264,8 +268,7 @@ check_process_code(Pid, Module, OptionList) ->
Prio,
{check_process_code,
ReqId,
- Module,
- Flags}),
+ Module}),
receive
{check_process_code, ReqId, CheckResult} ->
CheckResult
@@ -273,18 +276,14 @@ check_process_code(Pid, Module, OptionList) ->
end
end.
-% gets async and flag opts and verify valid option list
-get_cpc_opts([{async, _ReqId} = AsyncTuple | Options], _OldAsync, Flags) ->
- get_cpc_opts(Options, AsyncTuple, Flags);
-get_cpc_opts([{allow_gc, AllowGC} | Options], Async, Flags) ->
- get_cpc_opts(Options, Async, cpc_flags(Flags, ?ERTS_CPC_ALLOW_GC, AllowGC));
-get_cpc_opts([], Async, Flags) ->
- {Async, Flags}.
-
-cpc_flags(OldFlags, Bit, true) ->
- OldFlags bor Bit;
-cpc_flags(OldFlags, Bit, false) ->
- OldFlags band (bnot Bit).
+% gets async opt and verify valid option list
+get_cpc_opts([{async, _ReqId} = AsyncTuple | Options], _OldAsync) ->
+ get_cpc_opts(Options, AsyncTuple);
+get_cpc_opts([{allow_gc, AllowGC} | Options], Async) when AllowGC == true;
+ AllowGC == false ->
+ get_cpc_opts(Options, Async);
+get_cpc_opts([], Async) ->
+ Async.
-spec check_dirty_process_code(Pid,Module) -> 'true' | 'false' when
Pid :: pid(),
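
With the allow_gc flag gone, get_cpc_opts/2 above still accepts {allow_gc, boolean()} for compatibility, but only the async tag influences the request. A hedged sketch of an asynchronous check in the style erts_code_purger now issues them (async_check/2 is an illustrative helper; this is an internal API):

    async_check(Pid, Mod) ->
        ReqId = {erlang:make_ref(), Pid},
        async = erts_internal:check_process_code(Pid, Mod, [{async, ReqId}]),
        receive
            %% CheckResult is boolean() | aborted, per the spec above.
            {check_process_code, ReqId, CheckResult} -> CheckResult
        end.
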
@@ -371,6 +370,15 @@ map_hashmap_children(_M) ->
Multi :: boolean(),
Res :: term().
+%% Return a list of key-value pairs, of length at most N
+-spec maps_to_list(M,N) -> Pairs when
+ M :: map(),
+ N :: integer(),
+ Pairs :: list().
+
+maps_to_list(_M, _N) ->
+ erlang:nif_error(undefined).
+
%% erlang:demonitor(Ref, [flush]) traps to
%% erts_internal:flush_monitor_messages(Ref, Res) when
%% it needs to flush monitor messages.
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 551ca4ea40..86dc9a2957 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -932,15 +932,15 @@ load_rest([], _) ->
prepare_loading_fun() ->
fun(Mod, FullName, Beam) ->
case erlang:prepare_loading(Mod, Beam) of
- Prepared when is_binary(Prepared) ->
+ {error,_}=Error ->
+ Error;
+ Prepared ->
case erlang:has_prepared_code_on_load(Prepared) of
true ->
{ok,{on_load,Beam,FullName}};
false ->
{ok,{prepared,Prepared,FullName}}
- end;
- {error,_}=Error ->
- Error
+ end
end
end.
diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl
index fa0f28c5c3..8cd3e39fd7 100644
--- a/erts/preloaded/src/zlib.erl
+++ b/erts/preloaded/src/zlib.erl
@@ -23,7 +23,8 @@
-export([open/0,close/1,deflateInit/1,deflateInit/2,deflateInit/6,
deflateSetDictionary/2,deflateReset/1,deflateParams/3,
deflate/2,deflate/3,deflateEnd/1,
- inflateInit/1,inflateInit/2,inflateSetDictionary/2,
+ inflateInit/1,inflateInit/2,
+ inflateSetDictionary/2,inflateGetDictionary/1,
inflateSync/1,inflateReset/1,inflate/2,inflateEnd/1,
inflateChunk/1, inflateChunk/2,
setBufSize/2,getBufSize/1,
@@ -98,25 +99,26 @@
-define(INFLATE_INIT, 8).
-define(INFLATE_INIT2, 9).
-define(INFLATE_SETDICT, 10).
--define(INFLATE_SYNC, 11).
--define(INFLATE_RESET, 12).
--define(INFLATE_END, 13).
--define(INFLATE, 14).
--define(INFLATE_CHUNK, 25).
+-define(INFLATE_GETDICT, 11).
+-define(INFLATE_SYNC, 12).
+-define(INFLATE_RESET, 13).
+-define(INFLATE_END, 14).
+-define(INFLATE, 15).
+-define(INFLATE_CHUNK, 26).
--define(CRC32_0, 15).
--define(CRC32_1, 16).
--define(CRC32_2, 17).
+-define(CRC32_0, 16).
+-define(CRC32_1, 17).
+-define(CRC32_2, 18).
--define(SET_BUFSZ, 18).
--define(GET_BUFSZ, 19).
--define(GET_QSIZE, 20).
+-define(SET_BUFSZ, 19).
+-define(GET_BUFSZ, 20).
+-define(GET_QSIZE, 21).
--define(ADLER32_1, 21).
--define(ADLER32_2, 22).
+-define(ADLER32_1, 22).
+-define(ADLER32_2, 23).
--define(CRC32_COMBINE, 23).
--define(ADLER32_COMBINE, 24).
+-define(CRC32_COMBINE, 24).
+-define(ADLER32_COMBINE, 25).
%%------------------------------------------------------------------------
@@ -242,6 +244,13 @@ inflateInit(Z, WindowBits) ->
inflateSetDictionary(Z, Dictionary) ->
call(Z, ?INFLATE_SETDICT, Dictionary).
+-spec inflateGetDictionary(Z) -> Dictionary when
+ Z :: zstream(),
+ Dictionary :: iolist().
+inflateGetDictionary(Z) ->
+ _ = call(Z, ?INFLATE_GETDICT, []),
+ collect(Z).
+
-spec inflateSync(zstream()) -> 'ok'.
inflateSync(Z) ->
call(Z, ?INFLATE_SYNC, []).
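
inflateGetDictionary/1 returns the dictionary currently installed in the inflate stream as an iolist. A hedged round-trip sketch using a raw stream (negative window bits), where zlib permits setting the dictionary right after init; the helper name is illustrative and the comparison assumes a dictionary smaller than the 32 KB window:

    raw_dict_roundtrip(Dict) when is_binary(Dict) ->
        Z = zlib:open(),
        ok = zlib:inflateInit(Z, -15),
        ok = zlib:inflateSetDictionary(Z, Dict),
        Copy = iolist_to_binary(zlib:inflateGetDictionary(Z)),
        ok = zlib:inflateEnd(Z),
        ok = zlib:close(Z),
        Copy =:= Dict.
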
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 4578d4e0b1..453df6ca83 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 8.3
+VSN = 9.0
# Port number 4365 in 4.2
# Port number 4366 in 4.3