Diffstat (limited to 'erts')
-rwxr-xr-x  erts/autoconf/configure.vxworks  8
-rw-r--r--  erts/configure.in  586
-rw-r--r--  erts/doc/src/Makefile  1
-rw-r--r--  erts/doc/src/absform.xml  10
-rw-r--r--  erts/doc/src/alt_disco.xml  93
-rw-r--r--  erts/doc/src/erl_driver.xml  6
-rw-r--r--  erts/doc/src/erlang.xml  86
-rw-r--r--  erts/doc/src/erts_alloc.xml  36
-rw-r--r--  erts/doc/src/match_spec.xml  30
-rw-r--r--  erts/doc/src/notes.xml  15
-rw-r--r--  erts/doc/src/part.xml  1
-rw-r--r--  erts/emulator/Makefile.in  4
-rw-r--r--  erts/emulator/beam/atom.names  4
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  38
-rw-r--r--  erts/emulator/beam/beam_debug.c  2
-rw-r--r--  erts/emulator/beam/beam_emu.c  3
-rw-r--r--  erts/emulator/beam/bif.c  181
-rw-r--r--  erts/emulator/beam/bif.h  37
-rw-r--r--  erts/emulator/beam/bif.tab  13
-rw-r--r--  erts/emulator/beam/bif_instrs.tab  8
-rw-r--r--  erts/emulator/beam/break.c  15
-rw-r--r--  erts/emulator/beam/dist.c  422
-rw-r--r--  erts/emulator/beam/erl_alloc.c  98
-rw-r--r--  erts/emulator/beam/erl_alloc.types  3
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c  1384
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h  35
-rw-r--r--  erts/emulator/beam/erl_bif_info.c  306
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c  190
-rw-r--r--  erts/emulator/beam/erl_db.c  2
-rw-r--r--  erts/emulator/beam/erl_db_util.c  14
-rw-r--r--  erts/emulator/beam/erl_gc.c  49
-rw-r--r--  erts/emulator/beam/erl_init.c  2
-rw-r--r--  erts/emulator/beam/erl_instrument.c  1257
-rw-r--r--  erts/emulator/beam/erl_instrument.h  42
-rw-r--r--  erts/emulator/beam/erl_lock_check.c  41
-rw-r--r--  erts/emulator/beam/erl_map.c  56
-rw-r--r--  erts/emulator/beam/erl_message.c  153
-rw-r--r--  erts/emulator/beam/erl_message.h  27
-rw-r--r--  erts/emulator/beam/erl_monitor_link.c  97
-rw-r--r--  erts/emulator/beam/erl_monitor_link.h  57
-rw-r--r--  erts/emulator/beam/erl_nif.c  23
-rw-r--r--  erts/emulator/beam/erl_port.h  16
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c  793
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h  153
-rw-r--r--  erts/emulator/beam/erl_process.c  954
-rw-r--r--  erts/emulator/beam/erl_process.h  66
-rw-r--r--  erts/emulator/beam/erl_process_lock.h  18
-rw-r--r--  erts/emulator/beam/erl_term.h  48
-rw-r--r--  erts/emulator/beam/erl_trace.c  44
-rw-r--r--  erts/emulator/beam/erl_trace.h  2
-rw-r--r--  erts/emulator/beam/global.h  2
-rw-r--r--  erts/emulator/beam/io.c  38
-rw-r--r--  erts/emulator/beam/msg_instrs.tab  3
-rw-r--r--  erts/emulator/beam/utils.c  174
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c  13
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c  26
-rw-r--r--  erts/emulator/hipe/hipe_process.h  9
-rw-r--r--  erts/emulator/nifs/unix/unix_prim_file.c  2
-rw-r--r--  erts/emulator/nifs/win32/win_prim_file.c  50
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c  2
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h  2
-rw-r--r--  erts/emulator/test/code_SUITE.erl  2
-rw-r--r--  erts/emulator/test/dgawd_handler.erl  4
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl  13
-rw-r--r--  erts/emulator/test/dump_SUITE.erl  4
-rw-r--r--  erts/emulator/test/lcnt_SUITE.erl  17
-rw-r--r--  erts/emulator/test/map_SUITE.erl  86
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl  20
-rw-r--r--  erts/emulator/test/nif_SUITE.erl  19
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c  10
-rw-r--r--  erts/emulator/test/port_SUITE.erl  8
-rw-r--r--  erts/emulator/test/process_SUITE.erl  56
-rw-r--r--  erts/emulator/test/sensitive_SUITE.erl  2
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl  7
-rw-r--r--  erts/emulator/test/system_profile_SUITE.erl  86
-rw-r--r--  erts/emulator/test/trace_SUITE.erl  62
-rw-r--r--  erts/emulator/test/tracer_SUITE.erl  6
-rw-r--r--  erts/etc/common/erlexec.c  1
-rw-r--r--  erts/etc/common/heart.c  5
-rw-r--r--  erts/etc/unix/etp-commands.in  161
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam  bin  54160 -> 54432 bytes
-rw-r--r--  erts/preloaded/ebin/erl_tracer.beam  bin  2184 -> 2168 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam  bin  102732 -> 103344 bytes
-rw-r--r--  erts/preloaded/ebin/erts_code_purger.beam  bin  11376 -> 11348 bytes
-rw-r--r--  erts/preloaded/ebin/erts_dirty_process_signal_handler.beam  bin  2740 -> 2760 bytes
-rw-r--r--  erts/preloaded/ebin/erts_internal.beam  bin  15104 -> 16672 bytes
-rw-r--r--  erts/preloaded/ebin/erts_literal_area_collector.beam  bin  3288 -> 3268 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam  bin  50596 -> 51408 bytes
-rw-r--r--  erts/preloaded/ebin/otp_ring0.beam  bin  1424 -> 1404 bytes
-rw-r--r--  erts/preloaded/ebin/prim_buffer.beam  bin  3620 -> 3568 bytes
-rw-r--r--  erts/preloaded/ebin/prim_eval.beam  bin  1496 -> 1472 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam  bin  27496 -> 27724 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam  bin  77928 -> 77884 bytes
-rw-r--r--  erts/preloaded/ebin/prim_zip.beam  bin  22956 -> 22864 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam  bin  19724 -> 19704 bytes
-rw-r--r--  erts/preloaded/src/erl_prim_loader.erl  8
-rw-r--r--  erts/preloaded/src/erlang.erl  167
-rw-r--r--  erts/preloaded/src/erts_dirty_process_signal_handler.erl  34
-rw-r--r--  erts/preloaded/src/erts_internal.erl  72
-rw-r--r--  erts/preloaded/src/init.erl  58
-rw-r--r--  erts/preloaded/src/prim_file.erl  9
-rw-r--r--  erts/test/erlc_SUITE.erl  4
-rw-r--r--  erts/test/upgrade_SUITE.erl  44
-rw-r--r--  erts/vsn.mk  2
104 files changed, 4795 insertions, 4022 deletions
diff --git a/erts/autoconf/configure.vxworks b/erts/autoconf/configure.vxworks
index a13e0a6c56..18ca1718d6 100755
--- a/erts/autoconf/configure.vxworks
+++ b/erts/autoconf/configure.vxworks
@@ -68,10 +68,6 @@ WIND_BASE=${WIND_BASE:=`ypmatch tornado passwd | awk -F: '{print $6}'`/$VXTOP}
# These are created by autoconf.
MKDIRS="${ERL_TOP}/lib/os_mon/priv/bin/$target
${ERL_TOP}/lib/os_mon/priv/obj/$target
- ${ERL_TOP}/lib/orber/priv/obj/$target
- ${ERL_TOP}/lib/orber/priv/bin/$target
- ${ERL_TOP}/lib/ic/priv/lib/$target
- ${ERL_TOP}/lib/ic/priv/obj/$target
${ERL_TOP}/lib/asn1/priv/lib/$target
${ERL_TOP}/lib/asn1/priv/obj/$target
${ERL_TOP}/lib/erl_interface/obj/$target
@@ -100,8 +96,6 @@ etcdir=${ERL_TOP}/erts/etc/common
erlint_dir=${ERL_TOP}/lib/erl_interface/src
epmd_dir=${ERL_TOP}/erts/epmd/src
os_mon_dir=${ERL_TOP}/lib/os_mon/c_src
-orber_dir=${ERL_TOP}/lib/orber/c_src
-ic_dir=${ERL_TOP}/lib/ic/c_src
internal_tools_dir=${ERL_TOP}
libdir=${ERL_TOP}/lib
tsdir=$libdir/test_server/src
@@ -121,10 +115,8 @@ CONFIG_FILES="${ERL_TOP}/erts/emulator/$host/Makefile
$internal_tools_dir/make/$host/otp.mk
$os_mon_dir/$host/Makefile
$zlibdir/$host/Makefile
- $ic_dir/$host/Makefile
$runtime_tools_dir/$host/Makefile
$tools_dir/$host/Makefile
- $orber_dir/$host/Makefile"
for file in $CONFIG_FILES; do
new_name=`echo $file|sed "s%/$host/%/$target/%"`
diff --git a/erts/configure.in b/erts/configure.in
index 2d0d6c6444..f66445ec25 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2661,18 +2661,6 @@ AC_CHECK_PROG(M4, m4, m4)
if test X${enable_hipe} != Xno; then
- if test X$ac_cv_sizeof_void_p != X4 && test X$ARCH = Xamd64; then
- dnl HiPE cannot run on x86_64 without MAP_FIXED and MAP_NORESERVE
- AC_CHECK_DECLS([MAP_FIXED, MAP_NORESERVE], [], [], [#include <sys/mman.h>])
- if test X$ac_cv_have_decl_MAP_FIXED != Xyes || test X$ac_cv_have_decl_MAP_NORESERVE != Xyes; then
- if test X${enable_hipe} = Xyes; then
- AC_MSG_ERROR([HiPE on x86_64 needs MAP_FIXED and MAP_NORESERVE flags for mmap()])
- else
- enable_hipe=no
- AC_MSG_WARN([Disable HiPE due to lack of MAP_FIXED and MAP_NORESERVE flags for mmap()])
- fi
- fi
- else
dnl HiPE cannot run without mprotect()
if test X$ac_cv_func_mprotect != Xyes; then
if test X${enable_hipe} = Xyes; then
@@ -2682,7 +2670,6 @@ if test X${enable_hipe} != Xno; then
AC_MSG_WARN([Disable HiPE due to lack of mprotect()])
fi
fi
- fi
fi
dnl check to auto-enable hipe here...
@@ -2714,570 +2701,13 @@ if test X${enable_hipe} = Xyes; then
fi
AC_SUBST(HIPEBEAMLDFLAGS)
-if test X${enable_fp_exceptions} = Xauto ; then
- case $host_os in
- *linux*)
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default on Linux]) ;;
- darwin*)
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default on MacOS X]) ;;
- *)
- ;;
- esac
-fi
-
-if test X${enable_fp_exceptions} = Xauto ; then
- if test X${enable_hipe} = Xyes; then
- enable_fp_exceptions=yes
- else
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default in this configuration])
- fi
-fi
-
-if test X${enable_fp_exceptions} != Xyes ; then
- AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
- FPE=unreliable
-else
-
- AC_MSG_CHECKING([for unreliable floating point exceptions])
-
-
- AC_TRY_RUN([
-/* fpe-test.c */
-#include <stdio.h>
-#include <signal.h>
-#include <stdlib.h>
-
-#if defined(__clang__) || defined(__llvm__)
-#error "Clang/LLVM generates broken code for FP exceptions"
-#endif
-
-volatile int erl_fp_exception;
-
-/*
- * We expect a single SIGFPE in this test program.
- * Getting many more indicates an inadequate SIGFPE handler,
- * e.g. using the generic handler on x86.
- */
-static void new_fp_exception(void)
-{
- if (++erl_fp_exception > 50) {
- fprintf(stderr, "SIGFPE loop detected, bailing out\n");
- exit(1);
- }
-}
-
-/* Is there no standard identifier for Darwin/MacOSX ? */
-#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
-#define __DARWIN__ 1
-#endif
-
-/*
- * Implement unmask_fpe() and check_fpe() based on CPU/OS combination
- */
-
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
-
-static void unmask_x87(void)
-{
- unsigned short cw;
- __asm__ __volatile__("fstcw %0" : "=m"(cw));
- cw &= ~(0x01|0x04|0x08); /* unmask IM, ZM, OM */
- __asm__ __volatile__("fldcw %0" : : "m"(cw));
-}
-
-static void unmask_sse2(void)
-{
- unsigned int mxcsr;
- __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
- mxcsr &= ~(0x003F|0x0680); /* clear exn flags, unmask OM, ZM, IM (not PM, UM, DM) */
- __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
-}
-
-#if defined(__x86_64__)
-
-static inline int cpu_has_sse2(void) { return 1; }
-
-#else /* !__x86_64__ */
-
-/*
- * Check if an x86-32 processor has SSE2.
- */
-static unsigned int xor_eflags(unsigned int mask)
-{
- unsigned int eax, edx;
-
- eax = mask; /* eax = mask */
- __asm__("pushfl\n\t"
- "popl %0\n\t" /* edx = original EFLAGS */
- "xorl %0, %1\n\t" /* eax = mask ^ EFLAGS */
- "pushl %1\n\t"
- "popfl\n\t" /* new EFLAGS = mask ^ original EFLAGS */
- "pushfl\n\t"
- "popl %1\n\t" /* eax = new EFLAGS */
- "xorl %0, %1\n\t" /* eax = new EFLAGS ^ old EFLAGS */
- "pushl %0\n\t"
- "popfl" /* restore original EFLAGS */
- : "=d"(edx), "=a"(eax)
- : "1"(eax));
- return eax;
-}
-
-static __inline__ unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax, save_ebx;
-
- /* In PIC mode i386 reserves EBX. So we must save
- and restore it ourselves to not upset gcc. */
- __asm__(
- "movl %%ebx, %1\n\t"
- "cpuid\n\t"
- "movl %1, %%ebx"
- : "=a"(eax), "=m"(save_ebx)
- : "0"(op)
- : "cx", "dx");
- return eax;
-}
-
-static __inline__ unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx, save_ebx;
-
- /* In PIC mode i386 reserves EBX. So we must save
- and restore it ourselves to not upset gcc. */
- __asm__(
- "movl %%ebx, %2\n\t"
- "cpuid\n\t"
- "movl %2, %%ebx"
- : "=a"(eax), "=d"(edx), "=m"(save_ebx)
- : "0"(op)
- : "cx");
- return edx;
-}
-
-/* The AC bit, bit #18, is a new bit introduced in the EFLAGS
- * register on the Intel486 processor to generate alignment
- * faults. This bit cannot be set on the Intel386 processor.
- */
-static __inline__ int is_386(void)
-{
- return ((xor_eflags(1<<18) >> 18) & 1) == 0;
-}
-
-/* Newer x86 processors have a CPUID instruction, as indicated by
- * the ID bit (#21) in EFLAGS being modifiable.
- */
-static __inline__ int has_CPUID(void)
-{
- return (xor_eflags(1<<21) >> 21) & 1;
-}
-
-static int cpu_has_sse2(void)
-{
- unsigned int maxlev, features;
- static int has_sse2 = -1;
-
- if (has_sse2 >= 0)
- return has_sse2;
- has_sse2 = 0;
-
- if (is_386())
- return 0;
- if (!has_CPUID())
- return 0;
- maxlev = cpuid_eax(0);
- /* Intel A-step Pentium had a preliminary version of CPUID.
- It also didn't have SSE2. */
- if ((maxlev & 0xFFFFFF00) == 0x0500)
- return 0;
- /* If max level is zero then CPUID cannot report any features. */
- if (maxlev == 0)
- return 0;
- features = cpuid_edx(1);
- has_sse2 = (features & (1 << 26)) != 0;
-
- return has_sse2;
-}
-#endif /* !__x86_64__ */
-
-static void unmask_fpe(void)
-{
- unmask_x87();
- if (cpu_has_sse2())
- unmask_sse2();
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("fwait" : "=m"(erl_fp_exception) : "m"(f));
- if (!erl_fp_exception)
- return 0;
- __asm__ __volatile__("fninit");
- unmask_fpe();
- return 1;
-}
-
-#elif defined(__sparc__) && defined(__linux__)
-
-#if defined(__arch64__)
-#define LDX "ldx"
-#define STX "stx"
-#else
-#define LDX "ld"
-#define STX "st"
-#endif
-
-static void unmask_fpe(void)
-{
- unsigned long fsr;
-
- __asm__(STX " %%fsr, %0" : "=m"(fsr));
- fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
- fsr |= (0x1AUL << 23); /* enable NV, OF, DZ exceptions */
- __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "em"(f));
- return erl_fp_exception;
-}
-
-#elif (defined(__powerpc__) && defined(__linux__)) || (defined(__ppc__) && defined(__DARWIN__))
-
-#if defined(__linux__)
-
-#include <sys/prctl.h>
-
-static void set_fpexc_precise(void)
-{
- if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0) {
- perror("PR_SET_FPEXC");
- exit(1);
- }
-}
-
-#elif defined(__DARWIN__)
-
-#include <mach/mach.h>
-#include <pthread.h>
-
-/*
- * FE0 FE1 MSR bits
- * 0 0 floating-point exceptions disabled
- * 0 1 floating-point imprecise nonrecoverable
- * 1 0 floating-point imprecise recoverable
- * 1 1 floating-point precise mode
- *
- * Apparently:
- * - Darwin 5.5 (MacOS X <= 10.1) starts with FE0 == FE1 == 0,
- * and resets FE0 and FE1 to 0 after each SIGFPE.
- * - Darwin 6.0 (MacOS X 10.2) starts with FE0 == FE1 == 1,
- * and does not reset FE0 or FE1 after a SIGFPE.
- */
-#define FE0_MASK (1<<11)
-#define FE1_MASK (1<<8)
-
-/* a thread cannot get or set its own MSR bits */
-static void *fpu_fpe_enable(void *arg)
-{
- thread_t t = *(thread_t*)arg;
- struct ppc_thread_state state;
- unsigned int state_size = PPC_THREAD_STATE_COUNT;
-
- if (thread_get_state(t, PPC_THREAD_STATE, (natural_t*)&state, &state_size) != KERN_SUCCESS) {
- perror("thread_get_state");
- exit(1);
- }
- if ((state.srr1 & (FE1_MASK|FE0_MASK)) != (FE1_MASK|FE0_MASK)) {
-#if 0
- /* This would also have to be performed in the SIGFPE handler
- to work around the MSR reset older Darwin releases do. */
- state.srr1 |= (FE1_MASK|FE0_MASK);
- thread_set_state(t, PPC_THREAD_STATE, (natural_t*)&state, state_size);
-#else
- fprintf(stderr, "srr1 == 0x%08x, your Darwin is too old\n", state.srr1);
- exit(1);
-#endif
- }
- return NULL; /* Ok, we appear to be on Darwin 6.0 or later */
-}
-
-static void set_fpexc_precise(void)
-{
- thread_t self = mach_thread_self();
- pthread_t enabler;
-
- if (pthread_create(&enabler, NULL, fpu_fpe_enable, &self)) {
- perror("pthread_create");
- } else if (pthread_join(enabler, NULL)) {
- perror("pthread_join");
- }
-}
-
-#endif
-
-static void set_fpscr(unsigned int fpscr)
-{
- union {
- double d;
- unsigned int fpscr[2];
- } u;
- u.fpscr[0] = 0xFFF80000;
- u.fpscr[1] = fpscr;
- __asm__ __volatile__("mtfsf 255,%0" : : "f"(u.d));
-}
-
-static void unmask_fpe(void)
-{
- set_fpexc_precise();
- set_fpscr(0x80|0x40|0x10); /* VE, OE, ZE; not UE or XE */
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "fm"(f));
- return erl_fp_exception;
-}
-
-#else
-
-#include <ieeefp.h>
-
-#define unmask_fpe() fpsetmask(FP_X_INV | FP_X_OFL | FP_X_DZ)
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "g"(f));
- return erl_fp_exception;
-}
-
-#endif
-
-/*
- * Implement SIGFPE handler based on CPU/OS combination
- */
-
-#if (defined(__linux__) && (defined(__i386__) || defined(__x86_64__) || defined(__sparc__) || defined(__powerpc__))) || (defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__) || defined(__ppc__))) || (defined(__FreeBSD__) && (defined(__i386__) || defined(__x86_64__))) || ((defined(__OpenBSD__) || defined(__NetBSD__)) && defined(__x86_64__)) || (defined(__sun__) && defined(__x86_64__))
-
-#if defined(__linux__) && defined(__i386__)
-#if !defined(X86_FXSR_MAGIC)
-#define X86_FXSR_MAGIC 0x0000
-#endif
-#elif defined(__FreeBSD__) && defined(__i386__)
-#include <sys/types.h>
-#include <machine/npx.h>
-#elif defined(__FreeBSD__) && defined(__x86_64__)
-#include <sys/types.h>
-#include <machine/fpu.h>
-#elif defined(__DARWIN__)
-#include <machine/signal.h>
-#elif defined(__OpenBSD__) && defined(__x86_64__)
-#include <sys/types.h>
-#include <machine/fpu.h>
-#endif
-#if !(defined(__OpenBSD__) && defined(__x86_64__))
-#include <ucontext.h>
-#endif
-#include <string.h>
-
-static void fpe_sig_action(int sig, siginfo_t *si, void *puc)
-{
- ucontext_t *uc = puc;
-#if defined(__linux__)
-#if defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- fpregset_t fpstate = mc->fpregs;
- fpstate->mxcsr = 0x1F80;
- fpstate->swd &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t *mc = &uc->uc_mcontext;
- fpregset_t fpstate = mc->fpregs;
- if ((fpstate->status >> 16) == X86_FXSR_MAGIC)
- ((struct _fpstate*)fpstate)->mxcsr = 0x1F80;
- fpstate->sw &= ~0xFF;
-#elif defined(__sparc__) && defined(__arch64__)
- /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
- struct sigcontext *sc = (struct sigcontext*)puc;
- sc->sigc_regs.tpc = sc->sigc_regs.tnpc;
- sc->sigc_regs.tnpc += 4;
-#elif defined(__sparc__)
- /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
- struct sigcontext *sc = (struct sigcontext*)puc;
- sc->si_regs.pc = sc->si_regs.npc;
- sc->si_regs.npc = (unsigned long)sc->si_regs.npc + 4;
-#elif defined(__powerpc__)
-#if defined(__powerpc64__)
- mcontext_t *mc = &uc->uc_mcontext;
- unsigned long *regs = &mc->gp_regs[0];
-#else
- mcontext_t *mc = uc->uc_mcontext.uc_regs;
- unsigned long *regs = &mc->gregs[0];
-#endif
- regs[PT_NIP] += 4;
- regs[PT_FPSCR] = 0x80|0x40|0x10; /* VE, OE, ZE; not UE or XE */
-#endif
-#elif defined(__DARWIN__)
-#if defined(DARWIN_MODERN_MCONTEXT)
-#if defined(__x86_64__)
- mcontext_t mc = uc->uc_mcontext;
- struct __darwin_x86_float_state64 *fpstate = &mc->__fs;
- fpstate->__fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->__fpu_fsw &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t mc = uc->uc_mcontext;
- struct __darwin_i386_float_state *fpstate = &mc->__fs;
- fpstate->__fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->__fpu_fsw &= ~0xFF;
-#elif defined(__ppc__)
- mcontext_t mc = uc->uc_mcontext;
- mc->ss.srr0 += 4;
- mc->fs.fpscr = 0x80|0x40|0x10;
-#endif
-#else
-#if defined(__x86_64__)
- mcontext_t mc = uc->uc_mcontext;
- struct x86_float_state64_t *fpstate = &mc->fs;
- fpstate->fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->fpu_fsw &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t mc = uc->uc_mcontext;
- x86_float_state32_t *fpstate = &mc->fs;
- fpstate->fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->fpu_fsw &= ~0xFF;
-#elif defined(__ppc__)
- mcontext_t mc = uc->uc_mcontext;
- mc->ss.srr0 += 4;
- mc->fs.fpscr = 0x80|0x40|0x10;
-#endif
-#endif
-#elif defined(__FreeBSD__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct savefpu *savefpu = (struct savefpu*)&mc->mc_fpstate;
- struct envxmm *envxmm = &savefpu->sv_env;
- envxmm->en_mxcsr = 0x1F80;
- envxmm->en_sw &= ~0xFF;
-#elif defined(__FreeBSD__) && defined(__i386__)
- mcontext_t *mc = &uc->uc_mcontext;
- union savefpu *savefpu = (union savefpu*)&mc->mc_fpstate;
- if (mc->mc_fpformat == _MC_FPFMT_XMM) {
- struct envxmm *envxmm = &savefpu->sv_xmm.sv_env;
- envxmm->en_mxcsr = 0x1F80;
- envxmm->en_sw &= ~0xFF;
- } else {
- struct env87 *env87 = &savefpu->sv_87.sv_env;
- env87->en_sw &= ~0xFF;
- }
-#elif defined(__OpenBSD__) && defined(__x86_64__)
- struct fxsave64 *fxsave = uc->sc_fpstate;
- fxsave->fx_mxcsr = 0x1F80;
- fxsave->fx_fsw &= ~0xFF;
-#elif defined(__NetBSD__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct fxsave64 *fxsave = (struct fxsave64 *)&mc->__fpregs;
- fxsave->fx_mxcsr = 0x1F80;
- fxsave->fx_fsw &= ~0xFF;
-#elif defined(__sun__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct fpchip_state *fpstate = &mc->fpregs.fp_reg_set.fpchip_state;
- fpstate->mxcsr = 0x1F80;
- fpstate->sw &= ~0xFF;
-#endif
- new_fp_exception();
-}
-
-static void catch_sigfpe(void)
-{
- struct sigaction act;
-
- memset(&act, 0, sizeof act);
- act.sa_sigaction = fpe_sig_action;
- act.sa_flags = SA_SIGINFO;
- sigaction(SIGFPE, &act, NULL);
-}
-
-#else
-
-static void fpe_sig_handler(int sig)
-{
- new_fp_exception();
-}
-
-static void catch_sigfpe(void)
-{
- signal(SIGFPE, fpe_sig_handler);
-}
-
-#endif
-
-/*
- * Generic test code
- */
-
-static void do_init(void)
-{
- catch_sigfpe();
- unmask_fpe();
-}
-
-double a = 3.23e133;
-double b = 3.57e257;
-double res;
-
-void do_fmul(void)
-{
- res = a * b;
-}
-
-int do_check(void)
-{
- if (check_fpe(res)) {
- fprintf(stderr, "res = %g, FPE worked\n", res);
- return 0;
- } else {
- fprintf(stderr, "res = %g, FPE failed\n", res);
- return 1;
- }
-}
-
-int main(int argc, const char **argv)
-{
- if (argc == 3) {
- a = atof(argv[1]);
- b = atof(argv[2]);
- }
- do_init();
- do_fmul();
- return do_check();
-}
-],
-erl_ok=yes,
-erl_ok=no,
-[
-case X$erl_xcomp_reliable_fpe in
- X) erl_ok=cross;;
- Xyes|Xno) erl_ok=$erl_xcomp_reliable_fpe;;
- *) AC_MSG_ERROR([Bad erl_xcomp_reliable_fpe value: $erl_xcomp_reliable_fpe]);;
-esac
-])
-
- if test $erl_ok = yes; then
- FPE=reliable
- AC_MSG_RESULT(reliable)
- else
- FPE=unreliable
- AC_MSG_RESULT([unreliable; testing in software instead])
- AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
- if test $erl_ok = cross; then
- AC_MSG_WARN([result unreliable guessed because of cross compilation])
- fi
- fi
-fi
-
-AC_SUBST(FPE)
-
+dnl Permanently disable floating point exceptions.
+dnl On x86/amd64, floating point exceptions have
+dnl unresolved stability issues.
+AC_MSG_CHECKING([for unreliable floating point exceptions])
+FPE=unreliable
+AC_MSG_RESULT([unreliable])
+AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
dnl
dnl Some operating systems allow you to redefine FD_SETSIZE to be able
@@ -4831,10 +4261,8 @@ dnl The ones below should be moved to their respective lib
dnl
dnl ../lib/ssl/c_src/$host/Makefile:../lib/ssl/c_src/Makefile.in
AC_CONFIG_FILES([
- ../lib/ic/c_src/$host/Makefile:../lib/ic/c_src/Makefile.in
../lib/os_mon/c_src/$host/Makefile:../lib/os_mon/c_src/Makefile.in
../lib/crypto/c_src/$host/Makefile:../lib/crypto/c_src/Makefile.in
- ../lib/orber/c_src/$host/Makefile:../lib/orber/c_src/Makefile.in
../lib/runtime_tools/c_src/$host/Makefile:../lib/runtime_tools/c_src/Makefile.in
../lib/tools/c_src/$host/Makefile:../lib/tools/c_src/Makefile.in
])
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index 5fa8b0673a..96cc4413a9 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -74,6 +74,7 @@ XML_CHAPTER_FILES = \
match_spec.xml \
crash_dump.xml \
alt_dist.xml \
+ alt_disco.xml \
driver.xml \
absform.xml \
inet_cfg.xml \
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index 2ada903edb..158f4dc4e8 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -407,9 +407,8 @@
</item>
<item>
<p>If E is a map creation <c>#{A_1, ..., A_k}</c>,
- where each <c>A_i</c> is an association <c>E_i_1 => E_i_2</c>
- or <c>E_i_1 := E_i_2</c>, then Rep(E) =
- <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
+ where each <c>A_i</c> is an association <c>E_i_1 => E_i_2</c>,
+ then Rep(E) = <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
For Rep(A), see below.</p>
</item>
<item>
@@ -731,9 +730,8 @@
</item>
<item>
<p>If Gt is a map creation <c>#{A_1, ..., A_k}</c>,
- where each <c>A_i</c> is an association <c>Gt_i_1 => Gt_i_2</c>
- or <c>Gt_i_1 := Gt_i_2</c>, then Rep(Gt) =
- <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
+ where each <c>A_i</c> is an association <c>Gt_i_1 => Gt_i_2</c>,
+ then Rep(Gt) = <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
For Rep(A), see above.</p>
</item>
<item>
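For context, a minimal shell sketch (not from this patch) of the map-creation
representation described above, using only the standard erl_scan/erl_parse
modules and an arbitrary example map:

    1> {ok, Ts, _} = erl_scan:string("#{a => 1}.").
    2> {ok, [Expr]} = erl_parse:parse_exprs(Ts).
    3> Expr.
    {map,1,[{map_field_assoc,1,{atom,1,a},{integer,1,1}}]}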
diff --git a/erts/doc/src/alt_disco.xml b/erts/doc/src/alt_disco.xml
new file mode 100644
index 0000000000..d04221b9b3
--- /dev/null
+++ b/erts/doc/src/alt_disco.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>How to Implement an Alternative Service Discovery for Erlang Distribution
+ </title>
+ <prepared>Timmo Verlaan</prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date>2018-04-25</date>
+ <rev>PA1</rev>
+ <file>alt_disco.xml</file>
+ </header>
+ <p>
+ This section describes how to implement an alternative discovery mechanism
+ for Erlang distribution. Discovery is normally done using DNS and the
+ Erlang Port Mapper Daemon (EPMD) for port discovery.
+ </p>
+
+ <note><p>
+ Support for alternative service discovery mechanisms was added in Erlang/OTP
+ 21.
+ </p></note>
+
+
+ <section>
+ <title>Introduction</title>
+ <p>To implement your own service discovery module you have to write your own
+ EPMD module. The <seealso marker="kernel:erl_epmd">EPMD module</seealso> is
+ responsible for providing the location of another node. The distribution
+ modules (<c>inet_tcp_dist</c>/<c>inet_tls_dist</c>) call the EPMD module to
+ get the IP address and port of the other node. The EPMD module that is part
+    of Erlang/OTP resolves the hostname using DNS and queries the EPMD
+    daemon on the other host for the distribution port of the other node.
+    The EPMD daemon listens on the well-known port 4369.</p>
+ </section>
+
+ <section>
+ <title>Discovery module</title>
+ <p>The discovery module needs to implement the same API as the regular
+ <seealso marker="kernel:erl_epmd">EPMD module</seealso>. However, instead of
+ communicating with EPMD you can connect to any service to find out
+ connection details of other nodes. A discovery module is enabled
+ by setting <seealso marker="erts:erl#epmd_module">-epmd_module</seealso>
+    when starting Erlang. The discovery module must implement the following
+ callbacks:</p>
+
+ <taglist>
+ <tag><seealso marker="kernel:erl_epmd#start_link/0">start_link/0</seealso></tag>
+ <item>Start any processes needed by the discovery module.</item>
+ <tag><seealso marker="kernel:erl_epmd#names/1">names/1</seealso></tag>
+ <item>Return node names held by the registrar for the given host.</item>
+ <tag><seealso marker="kernel:erl_epmd#register_node/2">register_node/2</seealso></tag>
+ <item>Register the given node name with the registrar.</item>
+ <tag><seealso marker="kernel:erl_epmd#port_please/3">port_please/3</seealso></tag>
+ <item>Return the distribution port used by the given node.</item>
+ </taglist>
+
+ <p>The discovery module may implement the following callback:</p>
+
+ <taglist>
+ <tag><seealso marker="kernel:erl_epmd#address_please/3">address_please/3</seealso></tag>
+ <item><p>Return the address of the given node.
+ If not implemented, <seealso marker="kernel:inet#gethostbyname/1">
+    inet:gethostbyname/1</seealso> will be used instead.</p>
+ <p>This callback may also return the port of the given node. In that case
+ <seealso marker="kernel:erl_epmd#port_please/3">port_please/3</seealso>
+ may be omitted.</p></item>
+ </taglist>
+ </section>
+</chapter>
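For context, a minimal sketch (not part of this patch) of what such a
discovery module can look like. It assumes the erl_epmd-style callback
signatures listed above; the module name my_static_epmd, the fixed
distribution port 4370, and the protocol version 5 are illustrative
assumptions only:

    %% my_static_epmd.erl -- hypothetical example where every node is
    %% assumed to listen on the same fixed distribution port.
    -module(my_static_epmd).
    -export([start_link/0, register_node/2, names/1,
             port_please/3, address_please/3]).

    start_link() ->
        ignore.                            %% no helper process needed

    register_node(_Name, _Port) ->
        {ok, 1}.                           %% "creation" number

    names(_Host) ->
        {error, address}.                  %% name listing not supported

    port_please(_Name, _Host, _Timeout) ->
        {port, 4370, 5}.                   %% fixed port, protocol version 5

    address_please(_Name, Host, AddressFamily) ->
        inet:getaddr(Host, AddressFamily).

Such a module would then be selected with -epmd_module my_static_epmd on
the command line, as described above.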
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index c790872fe4..e6c9905039 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -429,7 +429,7 @@
<taglist>
<tag>Return types for driver callbacks</tag>
<item>
- <p>Rrewrite driver callback
+ <p>Rewrite driver callback
<seealso marker="driver_entry#control"><c>control</c></seealso>
to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.</p>
<p>Rewrite driver callback
@@ -841,7 +841,7 @@ int suggested_stack_size;</code>
<p>Thread options structure passed to
<seealso marker="#erl_drv_thread_create">
<c>erl_drv_thread_create</c></seealso>.
- The following fields exists:</p>
+ The following field exists:</p>
<taglist>
<tag><c>suggested_stack_size</c></tag>
<item>A suggestion, in kilowords, on how large a stack to use.
@@ -3220,6 +3220,6 @@ erl_drv_output_term(driver_mk_port(drvport), spec, sizeof(spec) / sizeof(spec[0]
<seealso marker="erlang"><c>erlang(3)</c></seealso>,
<seealso marker="kernel:erl_ddll"><c>erl_ddll(3)</c></seealso>,
section <seealso marker="alt_dist">How to Implement an Alternative
- Carrier for the Erlang Distribution></seealso> in the User's Guide</p>
+ Carrier for the Erlang Distribution</seealso> in the User's Guide</p>
</section>
</cref>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index d4d4dd7f31..cff56b9cb8 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -2432,6 +2432,26 @@ os_prompt%</pre>
</func>
<func>
+ <name name="is_map_key" arity="2"/>
+      <fsummary>Check whether a map contains a key.</fsummary>
+ <desc>
+        <p>Returns <c>true</c> if map <c><anno>Map</anno></c> contains
+          <c><anno>Key</anno></c>, and <c>false</c> otherwise.</p>
+ <p>The call fails with a <c>{badmap,Map}</c> exception if
+ <c><anno>Map</anno></c> is not a map.</p>
+ <p><em>Example:</em></p>
+ <code type="none">
+> Map = #{"42" => value}.
+#{"42" => value}
+> is_map_key("42",Map).
+true
+> is_map_key(value,Map).
+false</code>
+ </desc>
+ </func>
+
+ <func>
<name name="is_number" arity="1"/>
<fsummary>Check whether a term is a number.</fsummary>
<desc>
@@ -2965,6 +2985,25 @@ os_prompt%</pre>
</func>
<func>
+ <name name="map_get" arity="2" />
+      <fsummary>Extract a value from a map.</fsummary>
+ <desc>
+ <p>Returns value <c><anno>Value</anno></c> associated with
+ <c><anno>Key</anno></c> if <c><anno>Map</anno></c> contains
+ <c><anno>Key</anno></c>.</p>
+ <p>The call fails with a <c>{badmap,Map}</c> exception if
+ <c><anno>Map</anno></c> is not a map, or with a <c>{badkey,Key}</c>
+ exception if no value is associated with <c><anno>Key</anno></c>.</p>
+ <p><em>Example:</em></p>
+ <code type="none">
+> Key = 1337,
+ Map = #{42 => value_two,1337 => "value one","a" => 1},
+ map_get(Key,Map).
+"value one"</code>
+ </desc>
+ </func>
+
+ <func>
<name name="map_size" arity="1"/>
<fsummary>Return the size of a map.</fsummary>
<desc>
@@ -6966,10 +7005,47 @@ ok
from other events in the system. It is only guaranteed that
<c><anno>Suspendee</anno></c> <em>eventually</em> suspends
(unless it
- is resumed). If option <c>asynchronous</c> has <em>not</em>
+          is resumed). If no <c>asynchronous</c> option has
been passed, the caller of <c>erlang:suspend_process/2</c> is
blocked until <c><anno>Suspendee</anno></c> has suspended.</p>
</item>
+ <tag><c>{asynchronous, ReplyTag}</c></tag>
+ <item>
+ <p>A suspend request is sent to the process identified by
+ <c><anno>Suspendee</anno></c>. When the suspend request
+ has been processed, a reply message is sent to the caller
+          of this function. The reply is of the form <c>{ReplyTag,
+ State}</c> where <c>State</c> is either:</p>
+ <taglist>
+ <tag><c>exited</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> has exited.
+ </p>
+ </item>
+ <tag><c>suspended</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> is now suspended.
+ </p>
+ </item>
+ <tag><c>not_suspended</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> is not suspended.
+                This can only happen when the process that
+                issued this request has called
+                <c>resume_process(<anno>Suspendee</anno>)</c>
+                before getting the reply.
+ </p>
+ </item>
+ </taglist>
+ <p>
+            Apart from the reply message, the <c>{asynchronous,
+            ReplyTag}</c> option behaves exactly the same as the
+            <c>asynchronous</c> option without a reply tag.
+ </p>
+ </item>
<tag><c>unless_suspending</c></tag>
<item>
<p>The process identified by <c><anno>Suspendee</anno></c> is
@@ -6993,6 +7069,13 @@ ok
<warning>
<p>This BIF is intended for debugging only.</p>
</warning>
+ <warning>
+        <p>You can easily create deadlocks if processes suspend
+        each other (directly or in circles). In ERTS versions prior
+        to 10.0, the runtime system prevented such deadlocks, but this
+        prevention has now been removed for performance reasons.</p>
+ </warning>
<p>Failures:</p>
<taglist>
<tag><c>badarg</c></tag>
@@ -10999,4 +11082,3 @@ true</pre>
</func>
</funcs>
</erlref>
-
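For context, a brief sketch (not from this patch) of how the
{asynchronous, ReplyTag} option described above can be used; Pid is
assumed to be bound to the pid of some running process:

    Tag = make_ref(),
    true = erlang:suspend_process(Pid, [{asynchronous, Tag}]),
    receive
        {Tag, suspended}     -> ok;                 %% Pid is now suspended
        {Tag, exited}        -> no_such_process;    %% Pid terminated first
        {Tag, not_suspended} -> raced_with_resume   %% resumed before the reply
    end.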
diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml
index 53e136d76c..0893eb291c 100644
--- a/erts/doc/src/erts_alloc.xml
+++ b/erts/doc/src/erts_alloc.xml
@@ -559,6 +559,20 @@
than this threshold, otherwise the carrier is shrunk.
See also <seealso marker="#M_rsbcst"><c>rsbcst</c></seealso>.</p>
</item>
+ <tag><marker id="M_atags"/><c><![CDATA[+M<S>atags true|false]]></c></tag>
+ <item>
+ <p>Adds a small tag to each allocated block that contains basic
+ information about what it is and who allocated it. Use the
+ <seealso marker="tools:instrument"><c>instrument</c></seealso>
+ module to inspect this information.</p>
+
+ <p>The runtime overhead is one word per allocation when enabled. This
+ may change at any time in the future.</p>
+
+ <p>The default is <c>true</c> for <c>binary_alloc</c> and
+ <c>driver_alloc</c>, and <c>false</c> for the other allocator
+ types.</p>
+ </item>
<tag><marker id="M_e"/><c><![CDATA[+M<S>e true|false]]></c></tag>
<item>
<p>Enables allocator <c><![CDATA[<S>]]></c>.</p>
@@ -724,22 +738,12 @@
<section>
<title>Instrumentation Flags</title>
<taglist>
- <tag><marker id="Mim"/><c>+Mim true|false</c></tag>
- <item>
- <p>A map over current allocations is kept by the emulator.
- The allocation map can be retrieved through module
- <seealso marker="tools:instrument">
- <c>instrument(3)</c></seealso>. <c>+Mim true</c>
- implies <c>+Mis true</c>. <c>+Mim true</c> is the same as flag
- <seealso marker="erl#instr"><c>-instr</c></seealso> in
- <c>erl(1)</c>.</p>
- </item>
- <tag><marker id="Mis"/><c>+Mis true|false</c></tag>
- <item>
- <p>Status over allocated memory is kept by the emulator.
- The allocation status can be retrieved through module
- <seealso marker="tools:instrument">
- <c>instrument(3)</c></seealso>.</p>
+ <tag><c>+M&lt;S&gt;atags</c></tag>
+ <item>
+ <p>Adds a small tag to each allocated block that contains basic
+ information about what it is and who allocated it. See
+ <seealso marker="#M_atags"><c>+M&lt;S&gt;atags</c></seealso> for a
+ more complete description.</p>
</item>
<tag><marker id="Mit"/><c>+Mit X</c></tag>
<item>
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index 6cf0a0e677..46a3daebe8 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -86,12 +86,12 @@
<c><![CDATA[is_list]]></c> | <c><![CDATA[is_number]]></c> |
<c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> |
- <c><![CDATA[is_map]]></c> | <c><![CDATA[is_binary]]></c> |
- <c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> |
- <c><![CDATA[is_seq_trace]]></c> | <c><![CDATA['and']]></c> |
- <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> |
- <c><![CDATA['xor']]></c> | <c><![CDATA['andalso']]></c> |
- <c><![CDATA['orelse']]></c>
+ <c><![CDATA[is_map]]></c> | <c><![CDATA[is_map_key]]></c> |
+ <c><![CDATA[is_binary]]></c> | <c><![CDATA[is_function]]></c> |
+ <c><![CDATA[is_record]]></c> | <c><![CDATA[is_seq_trace]]></c> |
+ <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> |
+ <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
+ <c><![CDATA['andalso']]></c> | <c><![CDATA['orelse']]></c>
</item>
<item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
{ GuardFunction, ConditionExpression, ... } | TermConstruct
@@ -110,7 +110,8 @@
</item>
<item>GuardFunction ::= BoolFunction | <c><![CDATA[abs]]></c> |
<c><![CDATA[element]]></c> | <c><![CDATA[hd]]></c> |
- <c><![CDATA[length]]></c> | <c><![CDATA[node]]></c> |
+ <c><![CDATA[length]]></c> | <c><![CDATA[map_get]]></c> |
+ <c><![CDATA[map_size]]></c> | <c><![CDATA[node]]></c> |
<c><![CDATA[round]]></c> | <c><![CDATA[size]]></c> |
<c><![CDATA[tl]]></c> | <c><![CDATA[trunc]]></c> |
<c><![CDATA['+']]></c> | <c><![CDATA['-']]></c> |
@@ -167,11 +168,12 @@
<c><![CDATA[is_list]]></c> | <c><![CDATA[is_number]]></c> |
<c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> |
- <c><![CDATA[is_map]]></c> | <c><![CDATA[is_binary]]></c> |
- <c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> |
- <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> |
- <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
- <c><![CDATA['andalso']]></c> | <c><![CDATA['orelse']]></c>
+      <c><![CDATA[is_map]]></c> | <c><![CDATA[is_map_key]]></c> |
+ <c><![CDATA[is_binary]]></c> | <c><![CDATA[is_function]]></c> |
+ <c><![CDATA[is_record]]></c> | <c><![CDATA['and']]></c> |
+ <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> |
+ <c><![CDATA['xor']]></c> | <c><![CDATA['andalso']]></c> |
+ <c><![CDATA['orelse']]></c>
</item>
<item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
{ GuardFunction, ConditionExpression, ... } | TermConstruct
@@ -189,7 +191,8 @@
</item>
<item>GuardFunction ::= BoolFunction | <c><![CDATA[abs]]></c> |
<c><![CDATA[element]]></c> | <c><![CDATA[hd]]></c> |
- <c><![CDATA[length]]></c> | <c><![CDATA[node]]></c> |
+ <c><![CDATA[length]]></c> | <c><![CDATA[map_get]]></c> |
+ <c><![CDATA[map_size]]></c> | <c><![CDATA[node]]></c> |
<c><![CDATA[round]]></c> | <c><![CDATA[size]]></c> |
<c><![CDATA[tl]]></c> | <c><![CDATA[trunc]]></c> |
<c><![CDATA['+']]></c> | <c><![CDATA['-']]></c> |
@@ -865,4 +868,3 @@
can be useful for testing complicated ETS matches.</p>
</section>
</chapter>
-
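For context, a small sketch (not from this patch) showing the new map guard
functions in a match specification, checked with ets:test_ms/2; the tuple
and map contents are arbitrary:

    MS = [{{'$1'},
           [{is_map_key, key, '$1'}],
           [{map_get, key, '$1'}]}],
    {ok, value} = ets:test_ms({#{key => value}}, MS).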
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index fcc7ec5b66..929d569f16 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -31,6 +31,21 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 9.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a crash in <c>heart:get_cmd/0</c> when the
+ stored command was too long.</p>
+ <p>
+ Own Id: OTP-15034</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 9.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/doc/src/part.xml b/erts/doc/src/part.xml
index d583b873a0..fc39cb30e6 100644
--- a/erts/doc/src/part.xml
+++ b/erts/doc/src/part.xml
@@ -37,6 +37,7 @@
<xi:include href="match_spec.xml"/>
<xi:include href="crash_dump.xml"/>
<xi:include href="alt_dist.xml"/>
+ <xi:include href="alt_disco.xml"/>
<xi:include href="absform.xml"/>
<xi:include href="tty.xml"/>
<xi:include href="driver.xml"/>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 92cf40c1ae..221cf84622 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -570,7 +570,7 @@ $(TTF_DIR)/OPCODES-GENERATED: $(OPCODE_TABLES) utils/beam_makeops
-code-model @CODE_MODEL@ \
-outdir $(TTF_DIR) \
-DUSE_VM_PROBES=$(if $(USE_VM_PROBES),1,0) \
- -DNO_FPE_SIGNALS=$(if $filter(unreliable,$(FPE)),1,0) \
+ -DNO_FPE_SIGNALS=$(if $(filter unreliable,$(FPE)),1,0) \
-emulator $(OPCODE_TABLES) && echo $? >$(TTF_DIR)/OPCODES-GENERATED
GENERATE += $(TTF_DIR)/OPCODES-GENERATED
@@ -832,7 +832,7 @@ RUN_OBJS += \
$(OBJDIR)/erl_alloc.o $(OBJDIR)/erl_mtrace.o \
$(OBJDIR)/erl_alloc_util.o $(OBJDIR)/erl_goodfit_alloc.o \
$(OBJDIR)/erl_bestfit_alloc.o $(OBJDIR)/erl_afit_alloc.o \
- $(OBJDIR)/erl_instrument.o $(OBJDIR)/erl_init.o \
+ $(OBJDIR)/erl_init.o \
$(OBJDIR)/erl_atom_table.o $(OBJDIR)/erl_bif_table.o \
$(OBJDIR)/erl_bif_ddll.o $(OBJDIR)/erl_bif_guard.o \
$(OBJDIR)/erl_bif_info.o $(OBJDIR)/erl_bif_op.o \
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index cceca66850..45b7540aeb 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -142,6 +142,7 @@ atom bsr
atom bsr_anycrlf
atom bsr_unicode
atom build_type
+atom busy
atom busy_dist_port
atom busy_port
atom call
@@ -252,6 +253,7 @@ atom exception_from
atom exception_trace
atom exclusive
atom exit_status
+atom exited
atom existing
atom existing_processes
atom existing_ports
@@ -358,6 +360,7 @@ atom loaded
atom load_cancelled
atom load_failure
atom local
+atom logger
atom long_gc
atom long_schedule
atom low
@@ -444,6 +447,7 @@ atom no_float
atom no_integer
atom no_network
atom no_start_optimize
+atom not_suspended
atom not
atom not_a_list
atom not_loaded
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 5c76aafae7..a0dbd9ec7b 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -603,8 +603,9 @@ badarg:
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
+ erts_aint32_t state;
Process *rp;
- int reds = 0;
+ int dirty, busy, reds = 0;
Eterm res;
if (BIF_P != erts_dirty_process_signal_handler
@@ -618,20 +619,29 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
if (is_not_atom(BIF_ARG_2))
BIF_ERROR(BIF_P, BADARG);
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (BIF_ARG_1 == BIF_P->common.id)
+ BIF_RET(am_normal);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
if (!rp)
- BIF_RET(am_false);
-
+ BIF_RET(am_false);
+
+ state = erts_atomic32_read_nob(&rp->state);
+ dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (!dirty)
+ BIF_RET(am_normal);
+
+ busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
+
+ if (busy)
+ BIF_RET(am_busy);
+
res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
- if (BIF_P != rp)
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
- ASSERT(is_value(res));
+ ASSERT(res == am_true || res == am_false);
BIF_RET2(res, reds);
}
@@ -1757,11 +1767,11 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
release_literal_areas.last = ref;
}
erts_mtx_unlock(&release_literal_areas.mtx);
- erts_queue_message(erts_literal_area_collector,
+ erts_queue_proc_message(BIF_P,
+ erts_literal_area_collector,
0,
erts_alloc_message(0, NULL),
- am_copy_literals,
- BIF_P->common.id);
+ am_copy_literals);
}
return ret;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 5eb68b817e..b8a8d06315 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1191,7 +1191,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag)
mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
msg = TUPLE2(hp, tag, c_p->common.id);
- erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
if (rp == real_c_p)
rp_locks &= ~c_p_locks;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ee287243a4..ab5920a67e 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1166,6 +1166,9 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
}
+ if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
+ erts_proc_sig_handle_pending_suspend(c_p);
+
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 79244b8544..97e1ee1286 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1364,13 +1364,14 @@ BIF_RETTYPE exit_signal_2(BIF_ALIST_2)
/* Handle flags common to both process_flag_2 and process_flag_3. */
-static BIF_RETTYPE process_flag_aux(Process *BIF_P,
- Process *rp,
- Eterm flag,
- Eterm val)
+static Eterm process_flag_aux(Process *c_p, int *redsp, Eterm flag, Eterm val)
{
Eterm old_value = NIL; /* shut up warning about use before set */
Sint i;
+
+ if (redsp)
+ *redsp = 1;
+
if (flag == am_save_calls) {
struct saved_calls *scb;
if (!is_small(val))
@@ -1390,30 +1391,89 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
}
#ifdef HIPE
- if (rp->flags & F_HIPE_MODE) {
- ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
- scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+ if (c_p->flags & F_HIPE_MODE) {
+ ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p));
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(c_p, scb);
}
else
#endif
{
#ifdef HIPE
- ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+ ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(c_p));
#endif
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
- if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
- /* Adjust fcalls to match save calls setting... */
- if (i == 0)
- BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
- else
- BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
-
- /*
- * Make sure we reschedule immediately so the
- * change take effect at once.
- */
- ERTS_VBUMP_ALL_REDS(BIF_P);
- }
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(c_p, scb);
+
+ if (((scb && i == 0) || (!scb && i != 0))) {
+
+ /*
+ * Make sure we reschedule immediately so the
+ * change take effect at once.
+ */
+ if (!redsp) {
+ /* Executed via BIF call.. */
+ via_bif:
+
+ /* Adjust fcalls to match save calls setting... */
+ if (i == 0)
+ c_p->fcalls += CONTEXT_REDS; /* disabled it */
+ else
+ c_p->fcalls -= CONTEXT_REDS; /* enabled it */
+
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+ else {
+ erts_aint32_t state;
+ /*
+ * Executed via signal handler. Try to figure
+ * out in what context we are executing...
+ */
+
+ state = erts_atomic32_read_nob(&c_p->state);
+ if (state & (ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING)) {
+ /*
+ * We are either processing signals before
+ * being executed or executing dirty. That
+ * is, no need to adjust anything...
+ */
+ *redsp = 1;
+ }
+ else {
+ ErtsSchedulerData *esdp;
+ ASSERT(state & ERTS_PSFLG_RUNNING);
+
+ /*
+ * F_DELAY_GC is currently only set when
+ * we handle signals in state running via
+ * receive helper...
+ */
+
+ if (!(c_p->flags & F_DELAY_GC)) {
+ *redsp = 1;
+ goto via_bif;
+ }
+
+ /*
+ * Executing via receive helper...
+ *
+ * We utilize the virtual reds counter
+ * in order to get correct calculation
+ * of reductions consumed when scheduling
+ * out the process...
+ */
+
+ esdp = erts_get_scheduler_data();
+
+ if (i == 0)
+ esdp->virtual_reds += CONTEXT_REDS; /* disabled it */
+ else
+ esdp->virtual_reds -= CONTEXT_REDS; /* enabled it */
+
+ *redsp = -1;
+ }
+ }
+ }
}
if (!scb)
@@ -1423,11 +1483,12 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
}
- BIF_RET(old_value);
+ ASSERT(is_immed(old_value));
+ return old_value;
}
error:
- BIF_ERROR(BIF_P, BADARG);
+ return am_badarg;
}
BIF_RETTYPE process_flag_2(BIF_ALIST_2)
@@ -1596,29 +1657,73 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
/* Fall through and try process_flag_aux() ... */
}
- BIF_RET(process_flag_aux(BIF_P, BIF_P, BIF_ARG_1, BIF_ARG_2));
+ old_value = process_flag_aux(BIF_P, NULL, BIF_ARG_1, BIF_ARG_2);
+ if (old_value != am_badarg)
+ BIF_RET(old_value);
error:
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_flag_3(BIF_ALIST_3)
+typedef struct {
+ Eterm flag;
+ Eterm value;
+ ErlOffHeap oh;
+ Eterm heap[1];
+} ErtsProcessFlag3Args;
+
+static Eterm
+exec_process_flag_3(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
- Eterm res;
+ ErtsProcessFlag3Args *pf3a = arg;
+ Eterm res;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ res = am_badarg;
+ else
+ res = process_flag_aux(c_p, redsp, pf3a->flag, pf3a->value);
+ erts_cleanup_offheap(&pf3a->oh);
+ erts_free(ERTS_ALC_T_PF3_ARGS, arg);
+ return res;
+}
+
+
+BIF_RETTYPE erts_internal_process_flag_3(BIF_ALIST_3)
+{
+ Eterm res, *hp;
+ ErlOffHeap *ohp;
+ ErtsProcessFlag3Args *pf3a;
+ Uint flag_sz, value_sz;
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_flag_aux(BIF_P, NULL, BIF_ARG_2, BIF_ARG_3);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ flag_sz = is_immed(BIF_ARG_2) ? 0 : size_object(BIF_ARG_2);
+ value_sz = is_immed(BIF_ARG_3) ? 0 : size_object(BIF_ARG_3);
+
+ pf3a = erts_alloc(ERTS_ALC_T_PF3_ARGS,
+ sizeof(ErtsProcessFlag3Args)
+ + sizeof(Eterm)*(flag_sz+value_sz-1));
+
+ ohp = &pf3a->oh;
+ ERTS_INIT_OFF_HEAP(&pf3a->oh);
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ hp = &pf3a->heap[0];
- if (!rp)
- BIF_ERROR(BIF_P, BADARG);
+ pf3a->flag = copy_struct(BIF_ARG_2, flag_sz, &hp, ohp);
+ pf3a->value = copy_struct(BIF_ARG_3, value_sz, &hp, ohp);
- res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ exec_process_flag_3,
+ (void *) pf3a);
- if (rp != BIF_P)
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
return res;
}
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index a47339253e..cf9f61c0b8 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -295,6 +295,19 @@ do { \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_TRAP4(Ret, Trap, Proc, A0, A1, A2, A3) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->arity = 4; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ reg[3] = (Eterm) (A3); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
@@ -343,6 +356,18 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define BIF_TRAP4(Trap_, p, A0, A1, A2, A3) do { \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
+ (p)->arity = 4; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ reg[2] = (A2); \
+ reg[3] = (A3); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
(p)->arity = 0; \
(p)->i = (BeamInstr*) (Code_); \
@@ -401,6 +426,12 @@ do { \
ERTS_BIF_PREP_TRAP3(RET, (TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_PREP_YIELD4(RET, TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP4(RET, (TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
#define ERTS_BIF_YIELD0(TRP, P) \
do { \
ERTS_VBUMP_ALL_REDS((P)); \
@@ -425,6 +456,12 @@ do { \
BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_YIELD4(TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP4((TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
#define ERTS_BIF_PREP_EXITED(RET, PROC) \
do { \
KILL_CATCHES((PROC)); \
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 1276048317..7548924178 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -125,7 +125,7 @@ bif erlang:pid_to_list/1
bif erlang:ports/0
bif erlang:pre_loaded/0
bif erlang:process_flag/2
-bif erlang:process_flag/3
+bif erts_internal:process_flag/3
bif erlang:process_info/1
bif erlang:process_info/2
bif erlang:processes/0
@@ -154,7 +154,6 @@ bif erlang:unregister/1
bif erlang:whereis/1
bif erlang:spawn_opt/1
bif erlang:setnode/2
-bif erlang:setnode/3
bif erlang:dist_get_stat/1
bif erlang:dist_ctrl_input_handler/2
bif erlang:dist_ctrl_put_data/2
@@ -191,6 +190,8 @@ bif erts_internal:scheduler_wall_time/1
bif erts_internal:dirty_process_handle_signals/1
+bif erts_internal:create_dist_channel/4
+
# inet_db support
bif erlang:port_set_data/2
bif erlang:port_get_data/1
@@ -204,9 +205,9 @@ bif erlang:seq_trace/2
bif erlang:seq_trace_info/1
bif erlang:seq_trace_print/1
bif erlang:seq_trace_print/2
-bif erlang:suspend_process/2
+bif erts_internal:suspend_process/2
bif erlang:resume_process/1
-bif erlang:process_display/2
+bif erts_internal:process_display/2
bif erlang:bump_reductions/1
@@ -691,5 +692,9 @@ bif erts_internal:new_connection/1
bif erts_internal:abort_connection/2
bif erts_internal:map_next/3
bif ets:whereis/1
+bif erts_internal:gather_alloc_histograms/1
+bif erts_internal:gather_carrier_info/1
+ubif erlang:map_get/2
+ubif erlang:is_map_key/2
bif ets:internal_delete_all/2
bif ets:internal_select_delete/2
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
index 0932b8b985..0f074280db 100644
--- a/erts/emulator/beam/bif_instrs.tab
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -432,9 +432,17 @@ nif_bif.call_nif() {
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
+
+ ASSERT((c_p->scheduler_data)->current_nif == NULL);
+ (c_p->scheduler_data)->current_nif = &env;
+
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
nif_bif_result = THE_NON_VALUE;
+
+ ASSERT((c_p->scheduler_data)->current_nif == &env);
+ (c_p->scheduler_data)->current_nif = NULL;
+
erts_post_nif(&env);
ERTS_CHK_MBUF_SZ(c_p);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index ba8cc5e2ba..9ff52c92b8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -36,7 +36,6 @@
#include "hash.h"
#include "atom.h"
#include "beam_load.h"
-#include "erl_instrument.h"
#include "erl_hl_timer.h"
#include "erl_thr_progress.h"
#include "erl_proc_sig_queue.h"
@@ -955,20 +954,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_cbprintf(to, to_arg, "=atoms\n");
dump_atoms(to, to_arg);
- /* Keep the instrumentation data at the end of the dump */
- if (erts_instr_memory_map || erts_instr_stat) {
- erts_cbprintf(to, to_arg, "=instr_data\n");
-
- if (erts_instr_stat) {
- erts_cbprintf(to, to_arg, "=memory_status\n");
- erts_instr_dump_stat_to(to, to_arg, 0);
- }
- if (erts_instr_memory_map) {
- erts_cbprintf(to, to_arg, "=memory_map\n");
- erts_instr_dump_memory_map_to(to, to_arg);
- }
- }
-
erts_cbprintf(to, to_arg, "=end\n");
if (fp) {
fclose(fp);
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index f203d85ca9..70474898b2 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1471,22 +1471,16 @@ int erts_net_message(Port *prt,
mdp = erts_monitor_create(ERTS_MON_TYPE_DIST_PROC,
ref, watcher, pid, name);
-#ifdef DEBUG
- code =
-#endif
- erts_monitor_dist_insert(&mdp->origin, dep->mld);
- ASSERT(code);
+ code = erts_monitor_dist_insert(&mdp->origin, dep->mld);
+ ASSERT(code); (void)code;
if (erts_proc_sig_send_monitor(&mdp->target, pid))
break; /* done */
/* Failed to send to local proc; cleanup reply noproc... */
-#ifdef DEBUG
- code =
-#endif
- erts_monitor_dist_delete(&mdp->origin);
- ASSERT(code);
+ code = erts_monitor_dist_delete(&mdp->origin);
+ ASSERT(code); (void)code;
erts_monitor_release_both(mdp);
}
@@ -3144,60 +3138,60 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-/**********************************************************************
- ** Allocate a dist entry, set node name install the connection handler
- ** setnode_3({name@host, Creation}, Cid, {Type, Version, Initial, IC, OC})
- ** Type = flag field, where the flags are specified in dist.h
- ** Version = distribution version, >= 1
- ** IC = in_cookie (ignored)
- ** OC = out_cookie (ignored)
- **
- ** Note that in distribution protocols above 1, the Initial parameter
- ** is always NIL and the cookies are always the atom '', cookies are not
- ** sent in the distribution messages but are only used in
- ** the handshake.
- **
- ***********************************************************************/
+/*
+ * erts_internal:create_dist_channel/4 is used by
+ * erlang:setnode/3.
+ */
+
+typedef struct {
+ DistEntry *dep;
+ Uint flags;
+ Uint version;
+} ErtsSetupConnDistCtrl;
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version);
-BIF_RETTYPE setnode_3(BIF_ALIST_3)
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg,
+ int *redsp, ErlHeapFragment **bpp);
+
+BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
{
BIF_RETTYPE ret;
Uint flags;
- unsigned long version;
- Eterm ic, oc;
- Eterm *tp;
+ Uint version;
+ Eterm *hp, res_tag = THE_NON_VALUE, res = THE_NON_VALUE;
DistEntry *dep = NULL;
- ErtsProcLocks proc_unlock = 0;
- Process *proc;
+ int de_locked = 0;
Port *pp = NULL;
- Eterm notify_proc;
- erts_aint32_t qflgs;
/*
* Check and pick out arguments
*/
- if (!is_node_name_atom(BIF_ARG_1) ||
- !(is_internal_port(BIF_ARG_2)
- || is_internal_pid(BIF_ARG_2))
- || (erts_this_node->sysname == am_Noname)) {
- goto badarg;
- }
+ /* Node name... */
+ if (!is_node_name_atom(BIF_ARG_1))
+ goto badarg;
- if (!is_tuple(BIF_ARG_3))
- goto badarg;
- tp = tuple_val(BIF_ARG_3);
- if (*tp++ != make_arityval(4))
- goto badarg;
- if (!is_small(*tp))
- goto badarg;
- flags = unsigned_val(*tp++);
- if (!is_small(*tp) || (version = unsigned_val(*tp)) == 0)
- goto badarg;
- ic = *(++tp);
- oc = *(++tp);
- if (!is_atom(ic) || !is_atom(oc))
- goto badarg;
+ /* Distribution controller... */
+ if (!is_internal_port(BIF_ARG_2) && !is_internal_pid(BIF_ARG_2))
+ goto badarg;
+
+ /* Dist flags... */
+ if (!is_small(BIF_ARG_3))
+ goto badarg;
+ flags = unsigned_val(BIF_ARG_3);
+
+ /* Version... */
+ if (!is_small(BIF_ARG_4))
+ goto badarg;
+ version = unsigned_val(BIF_ARG_4);
+
+ if (version == 0)
+ goto badarg;
if (~flags & DFLAG_DIST_MANDATORY) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -3228,74 +3222,79 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
+ erts_de_rlock(dep);
+ de_locked = -1;
+
+ if (dep->state == ERTS_DE_STATE_EXITING) {
+ /* Suspend on dist entry waiting for the exit to finish */
+ ErtsProcList *plp = erts_proclist_create(BIF_P);
+ plp->next = NULL;
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_mtx_lock(&dep->qlock);
+ erts_proclist_store_last(&dep->suspended, plp);
+ erts_mtx_unlock(&dep->qlock);
+ goto yield;
+ }
+
+ erts_de_runlock(dep);
+ de_locked = 0;
+
if (is_internal_pid(BIF_ARG_2)) {
if (BIF_P->common.id == BIF_ARG_2) {
- proc_unlock = 0;
- proc = BIF_P;
- }
- else {
- proc_unlock = ERTS_PROC_LOCK_MAIN;
- proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2, proc_unlock);
- }
- erts_de_rwlock(dep);
-
- if (!proc)
- goto badarg;
- else if (proc == ERTS_PROC_LOCK_BUSY) {
- proc_unlock = 0;
- goto yield;
- }
+ ErtsSetupConnDistCtrl scdc;
- erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
- proc_unlock |= ERTS_PROC_LOCK_STATUS;
+ scdc.dep = dep;
+ scdc.flags = flags;
+ scdc.version = version;
- if (ERTS_PROC_GET_DIST_ENTRY(proc)) {
- if (dep == ERTS_PROC_GET_DIST_ENTRY(proc)
- && (proc->flags & F_DISTRIBUTION)
- && dep->cid == BIF_ARG_2) {
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
- goto done;
- }
- goto badarg;
- }
+ res = setup_connection_distctrl(BIF_P, &scdc, NULL, NULL);
+ BUMP_REDS(BIF_P, 5);
+ dep = NULL;
- if (dep->state == ERTS_DE_STATE_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_mtx_unlock(&dep->qlock);
- goto yield;
- }
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
+ if (res == am_badarg)
goto badarg;
+
+ ASSERT(is_internal_magic_ref(res));
+ res_tag = am_ok; /* Connection up */
}
+ else {
+ ErtsSetupConnDistCtrl *scdcp;
- if (is_not_nil(dep->cid))
- goto badarg;
+ scdcp = erts_alloc(ERTS_ALC_T_SETUP_CONN_ARG,
+ sizeof(ErtsSetupConnDistCtrl));
- proc->flags |= F_DISTRIBUTION;
- ERTS_PROC_SET_DIST_ENTRY(proc, dep);
+ scdcp->dep = dep;
+ scdcp->flags = flags;
+ scdcp->version = version;
- proc_unlock &= ~ERTS_PROC_LOCK_STATUS;
- erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+ res = erts_proc_sig_send_rpc_request(BIF_P,
+ BIF_ARG_2,
+ !0,
+ setup_connection_distctrl,
+ (void *) scdcp);
+ if (is_non_value(res))
+ goto badarg;
+
+ dep = NULL;
- dep->send = NULL; /* Only for distr ports... */
+ ASSERT(is_internal_ordinary_ref(res));
+ res_tag = am_message; /* Caller needs to wait for dhandle in message */
+ }
+ hp = HAlloc(BIF_P, 3);
}
else {
+ int new;
pp = erts_id2port_sflgs(BIF_ARG_2,
BIF_P,
ERTS_PROC_LOCK_MAIN,
ERTS_PORT_SFLGS_INVALID_LOOKUP);
erts_de_rwlock(dep);
+ de_locked = 1;
+
+ if (dep->state == ERTS_DE_STATE_EXITING)
+ goto badarg;
if (!pp || (erts_atomic32_read_nob(&pp->state)
& ERTS_PORT_SFLG_EXITING))
@@ -3304,65 +3303,108 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
goto badarg;
- if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) {
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
- goto done; /* Already set */
- }
+ if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
+ new = 0;
+ else {
+ if (dep->state != ERTS_DE_STATE_PENDING) {
+ if (dep->state == ERTS_DE_STATE_IDLE)
+ erts_set_dist_entry_pending(dep);
+ else
+ goto badarg;
+ }
- if (dep->state == ERTS_DE_STATE_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_mtx_unlock(&dep->qlock);
- goto yield;
- }
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
+ if (pp->dist_entry || is_not_nil(dep->cid))
goto badarg;
- }
- if (pp->dist_entry || is_not_nil(dep->cid))
- goto badarg;
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
- erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+ pp->dist_entry = dep;
- pp->dist_entry = dep;
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
- ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+ ASSERT(dep->send);
- dep->send = (pp->drv_ptr->outputv
- ? dist_port_commandv
- : dist_port_command);
- ASSERT(dep->send);
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ }
- /*
- * Dist-ports do not use the "busy port message queue" functionality, but
- * instead use "busy dist entry" functionality.
- */
- {
- ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
- erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
+ de_locked = 0;
+ new = !0;
}
+ hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep);
+ res_tag = am_ok; /* Connection up */
+ if (new)
+ dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ }
+
+ ASSERT(is_value(res) && is_value(res_tag));
+
+ res = TUPLE2(hp, res_tag, res);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ done:
+
+ if (dep && dep != erts_this_dist_entry) {
+ if (de_locked) {
+ if (de_locked > 0)
+ erts_de_rwunlock(dep);
+ else
+ erts_de_runlock(dep);
+ }
+ erts_deref_dist_entry(dep);
}
+ if (pp)
+ erts_port_release(pp);
+
+ return ret;
+
+ yield:
+ ERTS_BIF_PREP_YIELD4(ret,
+ bif_export[BIF_erts_internal_create_dist_channel_4],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+ goto done;
+
+ badarg:
+ ERTS_BIF_PREP_RET(ret, am_badarg);
+ goto done;
+
+ system_limit:
+ ERTS_BIF_PREP_RET(ret, am_system_limit);
+ goto done;
+}
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version)
+{
+ Eterm notify_proc = NIL;
+ erts_aint32_t qflgs;
+
dep->version = version;
dep->creation = 0;
-#ifdef DEBUG
+ ASSERT(is_internal_port(ctrlr) || is_internal_pid(ctrlr));
ASSERT(erts_atomic_read_nob(&dep->qsize) == 0
|| (dep->state == ERTS_DE_STATE_PENDING));
-#endif
if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
create_cache(dep);
- erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+ erts_set_dist_entry_connected(dep, ctrlr, flags);
notify_proc = NIL;
if (erts_atomic_read_nob(&dep->qsize)) {
@@ -3381,50 +3423,100 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
}
}
}
- erts_de_rwunlock(dep);
- if (is_internal_pid(notify_proc))
- notify_dist_data(BIF_P, notify_proc);
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+ erts_de_rwunlock(dep);
- dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ if (is_internal_pid(notify_proc))
+ notify_dist_data(c_p, notify_proc);
inc_no_nodes();
- send_nodes_mon_msgs(BIF_P,
+ send_nodes_mon_msgs(c_p,
am_nodeup,
- BIF_ARG_1,
+ dep->sysname,
flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
NIL);
- done:
+}
- if (dep && dep != erts_this_dist_entry) {
- erts_de_rwunlock(dep);
- erts_deref_dist_entry(dep);
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErtsSetupConnDistCtrl *scdcp = (ErtsSetupConnDistCtrl *) arg;
+ DistEntry *dep = scdcp->dep;
+ int dep_locked = 0;
+ Eterm *hp;
+ erts_aint32_t state;
+
+ if (redsp)
+ *redsp = 1;
+
+ state = erts_atomic32_read_nob(&c_p->state);
+
+ if (state & ERTS_PSFLG_EXITING)
+ goto badarg;
+
+ erts_de_rwlock(dep);
+ dep_locked = !0;
+
+ if (dep->state == ERTS_DE_STATE_EXITING)
+ goto badarg;
+
+ if (ERTS_PROC_GET_DIST_ENTRY(c_p)) {
+ if (dep == ERTS_PROC_GET_DIST_ENTRY(c_p)
+ && (c_p->flags & F_DISTRIBUTION)
+ && dep->cid == c_p->common.id) {
+ goto connected;
+ }
+ goto badarg;
}
- if (pp)
- erts_port_release(pp);
+ if (dep->state != ERTS_DE_STATE_PENDING) {
+ if (dep->state == ERTS_DE_STATE_IDLE)
+ erts_set_dist_entry_pending(dep);
+ else
+ goto badarg;
+ }
- if (proc_unlock)
- erts_proc_unlock(proc, proc_unlock);
+ if (is_not_nil(dep->cid))
+ goto badarg;
- return ret;
+ c_p->flags |= F_DISTRIBUTION;
+ ERTS_PROC_SET_DIST_ENTRY(c_p, dep);
- yield:
- ERTS_BIF_PREP_YIELD3(ret, bif_export[BIF_setnode_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- goto done;
+ dep->send = NULL; /* Only for distr ports... */
- badarg:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
- goto done;
+ if (redsp)
+ *redsp = 5;
- system_limit:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
- goto done;
+ setup_connection_epiloge_rwunlock(c_p, dep, c_p->common.id,
+ scdcp->flags, scdcp->version);
+connected:
+
+ /* we take over previous inc in refc of dep */
+
+ if (!bpp) /* called directly... */
+ return erts_make_dhandle(c_p, dep);
+
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+ *bpp = new_message_buffer(ERTS_MAGIC_REF_THING_SIZE);
+ hp = (*bpp)->mem;
+ return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep);
+
+badarg:
+
+ if (bpp) /* not called directly */
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+ if (dep_locked)
+ erts_de_rwunlock(dep);
+
+ erts_deref_dist_entry(dep);
+
+ return am_badarg;
}
+
BIF_RETTYPE erts_internal_get_dflags_0(BIF_ALIST_0)
{
return erts_dflags_record;
@@ -3593,7 +3685,7 @@ int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks)
dhandle = erts_build_dhandle(&hp, ohp, dep);
msg = TUPLE4(hp, am_auto_connect, dep->sysname, make_small(conn_id),
dhandle);
- erts_queue_message(net_kernel, nk_locks, mp, msg, proc->common.id);
+ erts_queue_proc_message(proc, net_kernel, nk_locks, mp, msg);
erts_proc_unlock(net_kernel, nk_locks);
}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 061b9df627..d99d2ea57b 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -38,7 +38,7 @@
#include "erl_db.h"
#include "erl_binary.h"
#include "erl_bits.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "erl_mseg.h"
#include "erl_monitor_link.h"
#include "erl_hl_timer.h"
@@ -202,8 +202,6 @@ typedef struct {
int top_pad;
AlcUInit_t alloc_util;
struct {
- int stat;
- int map;
char *mtrace;
char *nodename;
} instr;
@@ -428,6 +426,7 @@ set_default_binary_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_BINARY;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
}
static void
@@ -464,6 +463,7 @@ set_default_driver_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
}
static void
@@ -501,6 +501,7 @@ set_default_test_alloc_opts(struct au_init *ip)
ip->init.util.mmbcs = 0; /* Main carrier size */
ip->init.util.ts = ERTS_ALC_MTA_TEST;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
/* Use a constant minimal MBC size */
#if ERTS_SA_MB_CARRIERS
@@ -906,7 +907,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
&test_alloc_state);
erts_mtrace_install_wrapper_functions();
- extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
init_aireq_alloc();
@@ -1411,7 +1411,9 @@ handle_au_arg(struct au_init *auip,
}
if (!strategy_support_carrier_migration(auip))
auip->init.util.acul = 0;
- }
+ } else if (has_prefix("atags", sub_param)) {
+ auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
+ }
else
goto bad_switch;
break;
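The new "atags" sub-option follows the existing alloc_util option style: the parameter name is matched by prefix and the remainder is read as a boolean, apparently surfaced to users as a +M<S>atags emulator flag (the erts_alloc documentation is updated elsewhere in this patch). A simplified, hypothetical stand-in for the has_prefix()/get_bool_value() pair used here:

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the option helpers used in handle_au_arg(). */
static int has_prefix(const char *prefix, const char *arg)
{
    return strncmp(prefix, arg, strlen(prefix)) == 0;
}

/* Returns 1 for "true", 0 for "false", -1 on a bad value. */
static int get_bool_value(const char *value)
{
    if (strcmp(value, "true") == 0)  return 1;
    if (strcmp(value, "false") == 0) return 0;
    return -1;
}

int main(void)
{
    const char *sub_param = "atags"; /* e.g. from a "+MBatags true" style flag */
    const char *value = "true";      /* value token following the name */

    if (has_prefix("atags", sub_param))
        printf("atags = %d\n", get_bool_value(value));
    return 0;
}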
@@ -1741,24 +1743,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
break;
case 'i':
switch (argv[i][3]) {
- case 's':
- arg = get_value(argv[i]+4, argv, &i);
- if (sys_strcmp("true", arg) == 0)
- init->instr.stat = 1;
- else if (sys_strcmp("false", arg) == 0)
- init->instr.stat = 0;
- else
- bad_value(param, param+3, arg);
- break;
- case 'm':
- arg = get_value(argv[i]+4, argv, &i);
- if (sys_strcmp("true", arg) == 0)
- init->instr.map = 1;
- else if (sys_strcmp("false", arg) == 0)
- init->instr.map = 0;
- else
- bad_value(param, param+3, arg);
- break;
case 't':
init->instr.mtrace = get_value(argv[i]+4, argv, &i);
break;
@@ -1817,9 +1801,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case '-':
if (argv[i][2] == '\0') {
/* End of system flags reached */
- if (init->instr.mtrace
- /* || init->instr.stat
- || init->instr.map */) {
+ if (init->instr.mtrace) {
while (i < *argc) {
if(sys_strcmp(argv[i], "-sname") == 0
|| sys_strcmp(argv[i], "-name") == 0) {
@@ -2097,7 +2079,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
* NOTE! When updating this function, make sure to also update
* erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
*/
-#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
+#define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
struct {
int total;
int processes;
@@ -2108,7 +2090,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
int binary;
int code;
int ets;
- int maximum;
} want = {0};
struct {
UWord total;
@@ -2120,7 +2101,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
UWord binary;
UWord code;
UWord ets;
- UWord maximum;
} size = {0};
Eterm atoms[sizeof(size)/sizeof(UWord)];
UWord *uintps[sizeof(size)/sizeof(UWord)];
@@ -2173,12 +2153,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
want.ets = 1;
atoms[length] = am_ets;
uintps[length++] = &size.ets;
-
- want.maximum = erts_instr_stat;
- if (want.maximum) {
- atoms[length] = am_maximum;
- uintps[length++] = &size.maximum;
- }
}
else {
DeclareTmpHeapNoproc(tmp_heap,2);
@@ -2260,18 +2234,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
uintps[length++] = &size.ets;
}
break;
- case am_maximum:
- if (erts_instr_stat) {
- if (!want.maximum) {
- want.maximum = 1;
- atoms[length] = am_maximum;
- uintps[length++] = &size.maximum;
- }
- } else {
- UnUseTmpHeapNoproc(2);
- return am_badarg;
- }
- break;
default:
UnUseTmpHeapNoproc(2);
return am_badarg;
@@ -2437,14 +2399,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
size.ets += erts_get_ets_misc_mem_size();
}
- if (erts_instr_stat && (want_tot_or_sys || want.maximum)) {
- if (want_tot_or_sys) {
- size.total = erts_instr_get_total();
- size.system = size.total - size.processes;
- }
- size.maximum = erts_instr_get_max_total();
- }
- else if (want_tot_or_sys) {
+ if (want_tot_or_sys) {
size.system = size.total - size.processes;
}
@@ -2522,18 +2477,6 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
i = 0;
- if (erts_instr_stat) {
- values[i].arity = 2;
- values[i].name = "total";
- values[i].ui[0] = erts_instr_get_total();
- i++;
-
- values[i].arity = 2;
- values[i].name = "maximum";
- values[i].ui[0] = erts_instr_get_max_total();
- i++;
- }
-
values[i].arity = 2;
values[i].name = "sys_misc";
values[i].ui[0] = erts_sys_misc_mem_sz();
@@ -2824,10 +2767,7 @@ erts_allocator_info(fmtfn_t to, void *arg)
erts_alcu_au_info_options(&to, arg, NULL, NULL);
erts_print(to, arg, "=allocator:instr\n");
- erts_print(to, arg, "option m: %s\n",
- erts_instr_memory_map ? "true" : "false");
- erts_print(to, arg, "option s: %s\n",
- erts_instr_stat ? "true" : "false");
+
erts_print(to, arg, "option t: %s\n",
erts_mtrace_enabled ? "true" : "false");
@@ -2933,16 +2873,12 @@ erts_allocator_options(void *proc)
NULL, hpp, szp);
#endif
{
- Eterm o[3], v[3];
- o[0] = am_atom_put("m", 1);
- v[0] = erts_instr_memory_map ? am_true : am_false;
- o[1] = am_atom_put("s", 1);
- v[1] = erts_instr_stat ? am_true : am_false;
- o[2] = am_atom_put("t", 1);
- v[2] = erts_mtrace_enabled ? am_true : am_false;
+ Eterm o[1], v[1];
+ o[0] = am_atom_put("t", 1);
+ v[0] = erts_mtrace_enabled ? am_true : am_false;
atoms[length] = am_atom_put("instr", 5);
- terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
+ terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
}
atoms[length] = am_atom_put("lock_physical_memory", 20);
@@ -3458,8 +3394,8 @@ badarg:
/*
* The allocator wrapper prelocking stuff below is about the locking order.
- * It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks
- * during alloc/realloc/free.
+ * It only affects wrappers (erl_mtrace.c) that keep locks during
+ * alloc/realloc/free.
*
* Some query functions in erl_alloc_util.c lock the allocator mutex and then
* use erts_printf that in turn may call the sys allocator through the wrappers.
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4a6a19b210..9db600dce0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -287,6 +287,8 @@ type DIST_DEMONITOR SHORT_LIVED PROCESSES dist_demonitor
type CML_CLEANUP SHORT_LIVED SYSTEM connection_ml_cleanup
type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
type ML_DIST STANDARD SYSTEM monitor_link_dist
+type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
+type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
type ENVIRONMENT SYSTEM SYSTEM environment
@@ -346,6 +348,7 @@ type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR FIXED_SIZE PROCESSES monitor
+type MONITOR_SUSPEND STANDARD PROCESSES monitor_suspend
type LINK FIXED_SIZE PROCESSES link
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index e148be7af6..fdf355d503 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -48,6 +48,8 @@
#include "erl_mseg.h"
#include "erl_threads.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
+#include "erl_nif.h"
#ifdef ERTS_ENABLE_LOCK_COUNT
#include "erl_lock_count.h"
@@ -139,6 +141,33 @@ MBC after deallocating first block:
[Carrier_t|pad|Block_t 111| udata... ]
*/
+/* Allocation tags ...
+ *
+ * These are added to the footer of every block when enabled. Currently they
+ * consist of the allocation type and an atom identifying the allocating
+ * driver/nif (or 'system' if that can't be determined), but the format is not
+ * supposed to be set in stone.
+ *
+ * The packing scheme requires that the atom values are small enough to fit
+ * into a word with ERTS_ALC_N_BITS to spare. Users must check for overflow
+ * before MAKE_ATAG(). */
+
+typedef UWord alcu_atag_t;
+
+#define MAKE_ATAG(IdAtom, Type) \
+ (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \
+ ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
+ (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type))
+
+#define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
+#define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
+
+#define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
+
+#define DBG_IS_VALID_ATAG(Allocator, AT) \
+ (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
+ ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
+ (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT))))
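MAKE_ATAG() packs the allocating module/driver atom into the high bits of a word and the allocation type number into the low ERTS_ALC_N_BITS, which is why callers must check the atom index against MAX_ATAG_ATOM_ID first. A minimal standalone sketch of the same packing, assuming a 6-bit type field in place of the real ERTS_ALC_N_BITS:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TYPE_BITS 6                           /* assumed width */
#define TYPE_MASK ((1u << TYPE_BITS) - 1)
#define MAX_ATOM_ID (UINTPTR_MAX >> TYPE_BITS)

typedef uintptr_t atag_t;

static atag_t make_atag(uintptr_t atom_id, unsigned type)
{
    assert(atom_id <= MAX_ATOM_ID);           /* callers must check for overflow */
    assert(type <= TYPE_MASK);
    return (atom_id << TYPE_BITS) | type;
}

int main(void)
{
    atag_t tag = make_atag(1234, 17);         /* hypothetical atom index and type */

    printf("atom=%lu type=%u\n",
           (unsigned long)(tag >> TYPE_BITS),
           (unsigned)(tag & TYPE_MASK));
    return 0;
}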
/* Blocks ... */
@@ -153,10 +182,17 @@ MBC after deallocating first block:
#endif
#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
+#define GET_BLK_ATAG(B) \
+ (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
+#define SET_BLK_ATAG(B, T) \
+ (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
+
+#define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
+
#define UMEMSZ2BLKSZ(AP, SZ) \
- (ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
+ (ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ) <= (AP)->min_block_size \
? (AP)->min_block_size \
- : UNIT_CEILING(ABLK_HDR_SZ + (SZ)))
+ : UNIT_CEILING(ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ)))
#define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
@@ -688,6 +724,62 @@ static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int);
+static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
+{
+ ErtsSchedulerData *esdp;
+ Eterm id;
+
+ ERTS_CT_ASSERT(_unchecked_atom_val(am_system) <= MAX_ATAG_ATOM_ID);
+ ASSERT(allocator->atags);
+
+ esdp = erts_get_scheduler_data();
+ id = am_system;
+
+ if (esdp) {
+ if (esdp->current_nif) {
+ Module *mod = erts_nif_get_module((esdp->current_nif)->mod_nif);
+
+ /* Mod can be NULL if a resource destructor allocates memory after
+ * the module has been unloaded. */
+ if (mod) {
+ id = make_atom(mod->module);
+ }
+ } else if (esdp->current_port) {
+ Port *p = esdp->current_port;
+ id = (p->drv_ptr)->name_atom;
+ }
+
+ /* We fall back to 'system' if we can't pack the driver/NIF name into
+ * the tag. This may be a bit misleading but we've made no promises
+ * that the information is complete.
+ *
+ * This can only happen on 32-bit emulators when a new driver/NIF has
+ * been loaded *after* 16 million atoms have been used, and supporting
+ * that fringe case is not worth an extra word. 64-bit emulators are
+ * unaffected since the atom cache limits atom indexes to 32 bits. */
+ if(MAX_ATOM_TABLE_SIZE > MAX_ATAG_ATOM_ID) {
+ if (atom_val(id) > MAX_ATAG_ATOM_ID) {
+ id = am_system;
+ }
+ }
+ }
+
+ return MAKE_ATAG(id, type);
+}
+
+static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
+{
+ Block_t *block;
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+ ASSERT(allocator->atags && p);
+ (void)allocator;
+
+ block = UMEM2BLK(p);
+
+ SET_BLK_ATAG(block, tag);
+}
+
/* internal data... */
#if 0
@@ -4242,6 +4334,7 @@ static struct {
Eterm e;
Eterm t;
Eterm ramv;
+ Eterm atags;
#if HAVE_ERTS_MSEG
Eterm asbcst;
Eterm rsbcst;
@@ -4311,7 +4404,7 @@ static struct {
#endif
} am;
-static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES];
+static Eterm alloc_type_atoms[ERTS_ALC_N_MAX + 1];
static ERTS_INLINE void atom_init(Eterm *atom, char *name)
{
@@ -4342,6 +4435,7 @@ init_atoms(Allctr_t *allctr)
AM_INIT(e);
AM_INIT(t);
AM_INIT(ramv);
+ AM_INIT(atags);
#if HAVE_ERTS_MSEG
AM_INIT(asbcst);
AM_INIT(rsbcst);
@@ -4413,12 +4507,12 @@ init_atoms(Allctr_t *allctr)
}
#endif
- for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
- ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
- char *name = (char *) ERTS_ALC_N2TD(n);
- size_t len = sys_strlen(name);
- fix_type_atoms[ix] = am_atom_put(name, len);
- }
+ for (ix = ERTS_ALC_N_MIN; ix <= ERTS_ALC_N_MAX; ix++) {
+ const char *name = ERTS_ALC_N2TD(ix);
+ size_t len = sys_strlen(name);
+
+ alloc_type_atoms[ix] = am_atom_put(name, len);
+ }
}
if (allctr && !allctr->atoms_initialized) {
@@ -4531,6 +4625,7 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.cpool.allocated;
UWord used = fix->type_size * fix->u.cpool.used;
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
fmtfn_t to = *print_to_p;
@@ -4538,15 +4633,14 @@ sz_info_fix(Allctr_t *allctr,
erts_print(to,
arg,
"fix type internal: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
- + ix),
+ (char *) ERTS_ALC_N2TD(n),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- fix_type_atoms[ix],
+ alloc_type_atoms[n],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -4559,6 +4653,7 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.nocpool.allocated;
UWord used = fix->type_size*fix->u.nocpool.used;
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
fmtfn_t to = *print_to_p;
@@ -4566,15 +4661,14 @@ sz_info_fix(Allctr_t *allctr,
erts_print(to,
arg,
"fix type: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
- + ix),
+ (char *) ERTS_ALC_N2TD(n),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- fix_type_atoms[ix],
+ alloc_type_atoms[n],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -5000,6 +5094,7 @@ info_options(Allctr_t *allctr,
"option e: true\n"
"option t: %s\n"
"option ramv: %s\n"
+ "option atags: %s\n"
"option sbct: %beu\n"
#if HAVE_ERTS_MSEG
"option asbcst: %bpu\n"
@@ -5018,6 +5113,7 @@ info_options(Allctr_t *allctr,
"option acul: %bpu\n",
topt,
allctr->ramv ? "true" : "false",
+ allctr->atags ? "true" : "false",
allctr->sbc_threshold,
#if HAVE_ERTS_MSEG
allctr->mseg_opt.abs_shrink_th,
@@ -5087,6 +5183,7 @@ info_options(Allctr_t *allctr,
am_sbct,
bld_uint(hpp, szp, allctr->sbc_threshold));
add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
+ add_2tup(hpp, szp, &res, am.atags, allctr->atags ? am_true : am_false);
add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
add_2tup(hpp, szp, &res, am.e, am_true);
}
@@ -5408,9 +5505,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
/* ----------------------------------------------------------------------- */
static ERTS_INLINE void *
-do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
+do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
{
- Allctr_t *allctr = (Allctr_t *) extra;
void *res;
ASSERT(initialized);
@@ -5449,10 +5545,19 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *) extra;
void *res;
+
ASSERT(!"This is not thread safe");
- res = do_erts_alcu_alloc(type, extra, size);
+
+ res = do_erts_alcu_alloc(type, allctr, size);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
@@ -5462,13 +5567,25 @@ void *
erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, extra, size);
- DEBUG_CHECK_ALIGNMENT(res);
+ res = do_erts_alcu_alloc(type, allctr, size);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
erts_mtx_unlock(&allctr->mutex);
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
@@ -5478,6 +5595,7 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5487,11 +5605,19 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -5504,10 +5630,15 @@ void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *pref_allctr;
+ alcu_atag_t tag = 0;
void *res;
pref_allctr = get_pref_allctr(extra);
+ if (pref_allctr->atags) {
+ tag = determine_alloc_tag(pref_allctr, type);
+ }
+
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
@@ -5523,12 +5654,15 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
res = do_erts_alcu_alloc(type, pref_allctr, size);
}
+ if (pref_allctr->atags && res) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
+
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
-
return res;
}
@@ -5537,10 +5671,9 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void
-do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
+do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
Carrier_t **busy_pcrr_pp)
{
- Allctr_t *allctr = (Allctr_t *) extra;
ASSERT(initialized);
ASSERT(allctr);
@@ -5572,7 +5705,8 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
- do_erts_alcu_free(type, extra, p, NULL);
+ Allctr_t *allctr = (Allctr_t *) extra;
+ do_erts_alcu_free(type, allctr, p, NULL);
}
@@ -5581,7 +5715,7 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
{
Allctr_t *allctr = (Allctr_t *) extra;
erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
erts_mtx_unlock(&allctr->mutex);
}
@@ -5641,13 +5775,12 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
static ERTS_INLINE void *
do_erts_alcu_realloc(ErtsAlcType_t type,
- void *extra,
+ Allctr_t *allctr,
void *p,
Uint size,
Uint32 alcu_flgs,
Carrier_t **busy_pcrr_pp)
{
- Allctr_t *allctr = (Allctr_t *) extra;
Block_t *blk;
void *res;
@@ -5661,7 +5794,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (!p) {
- res = do_erts_alcu_alloc(type, extra, size);
+ res = do_erts_alcu_alloc(type, allctr, size);
INC_CC(allctr->calls.this_realloc);
DEC_CC(allctr->calls.this_alloc);
return res;
@@ -5670,7 +5803,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
#if ALLOC_ZERO_EQ_NULL
if (!size) {
ASSERT(p);
- do_erts_alcu_free(type, extra, p, busy_pcrr_pp);
+ do_erts_alcu_free(type, allctr, p, busy_pcrr_pp);
INC_CC(allctr->calls.this_realloc);
DEC_CC(allctr->calls.this_free);
return NULL;
@@ -5755,19 +5888,29 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
void *
erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *)extra;
void *res;
- res = do_erts_alcu_realloc(type, extra, p, size, 0, NULL);
+
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
+
DEBUG_CHECK_ALIGNMENT(res);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
return res;
}
void *
erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *)extra;
void *res;
- res = do_erts_alcu_alloc(type, extra, size);
+
+ res = do_erts_alcu_alloc(type, allctr, size);
if (!res)
- res = erts_alcu_realloc(type, extra, p, size);
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
else {
Block_t *blk;
size_t cpy_size;
@@ -5777,23 +5920,42 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
}
+
DEBUG_CHECK_ALIGNMENT(res);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
return res;
}
-
void *
erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_realloc(type, extra, ptr, size, 0, NULL);
+
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
@@ -5801,11 +5963,17 @@ void *
erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, extra, size);
+ res = do_erts_alcu_alloc(type, allctr, size);
if (!res)
- res = erts_alcu_realloc_ts(type, extra, p, size);
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
else {
Block_t *blk;
size_t cpy_size;
@@ -5815,10 +5983,17 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
}
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
@@ -5829,6 +6004,7 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5838,11 +6014,19 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -5857,6 +6041,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5866,14 +6051,16 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
if (!res) {
- if (allctr->thread_safe)
- erts_mtx_unlock(&allctr->mutex);
- res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
}
else {
Block_t *blk;
@@ -5885,29 +6072,34 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
cpy_size = size;
sys_memcpy(res, ptr, cpy_size);
do_erts_alcu_free(type, allctr, ptr, NULL);
- if (allctr->thread_safe)
- erts_mtx_unlock(&allctr->mutex);
}
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
return res;
}
static ERTS_INLINE void *
-realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
+realloc_thr_pref(ErtsAlcType_t type, Allctr_t *pref_allctr, void *p, Uint size,
int force_move)
{
void *res;
- Allctr_t *pref_allctr, *used_allctr;
+ Allctr_t *used_allctr;
UWord old_user_size;
Carrier_t *busy_pcrr_p;
+ alcu_atag_t tag = 0;
int retried;
- if (!p)
- return erts_alcu_alloc_thr_pref(type, extra, size);
-
- pref_allctr = get_pref_allctr(extra);
+ if (pref_allctr->atags) {
+ tag = determine_alloc_tag(pref_allctr, type);
+ }
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
@@ -5936,6 +6128,11 @@ restart:
retried = 1;
goto restart;
}
+
+ if (pref_allctr->atags && res) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
+
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
}
@@ -5944,6 +6141,9 @@ restart:
if (!res)
goto unlock_ts_return;
else {
+ if (pref_allctr->atags) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
DEBUG_CHECK_ALIGNMENT(res);
@@ -5974,20 +6174,34 @@ restart:
}
}
+ DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
void *
erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
- return realloc_thr_pref(type, extra, p, size, 0);
+ if (p) {
+ Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+ return realloc_thr_pref(type, pref_allctr, p, size, 0);
+ }
+
+ return erts_alcu_alloc_thr_pref(type, extra, size);
}
void *
erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
void *p, Uint size)
{
- return realloc_thr_pref(type, extra, p, size, 1);
+ if (p) {
+ Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+ return realloc_thr_pref(type, pref_allctr, p, size, 1);
+ }
+
+ return erts_alcu_alloc_thr_pref(type, extra, size);
}
@@ -6071,6 +6285,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->t = 0;
allctr->ramv = init->ramv;
+ allctr->atags = init->atags;
allctr->main_carrier_size = init->mmbcs;
#if HAVE_ERTS_MSEG
@@ -6320,6 +6535,1072 @@ erts_alcu_init(AlcUInit_t *init)
initialized = 1;
}
+/* ------------------------------------------------------------------------- */
+
+/* Allocation histograms and carrier information are gathered by walking through
+ * all carriers associated with each allocator instance. This is done as
+ * aux_yield_work on the scheduler that owns each instance.
+ *
+ * Yielding is implemented by temporarily inserting a "dummy carrier" at the
+ * last position. It's permanently "busy" so it won't get picked up by someone
+ * else when in the carrier pool, and we never make the employer aware of it
+ * through callbacks so we can't accidentally allocate on it.
+ *
+ * Plain malloc/free is used to guarantee we won't allocate with the allocator
+ * we're scanning. */
+
+/* Yield between carriers once this many blocks have been processed. Note that
+ * a single carrier scan may exceed this figure. */
+#ifndef DEBUG
+ #define BLOCKSCAN_REDUCTIONS (8000)
+#else
+ #define BLOCKSCAN_REDUCTIONS (400)
+#endif
+
+/* Abort a single carrier scan after this many blocks to prevent really large
+ * MBCs from blocking forever. */
+#define BLOCKSCAN_BAILOUT_THRESHOLD (16000)
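Scanning is budgeted: each carrier callback returns the number of blocks it visited, the scanner's reduction counter is decremented by that amount, and when it reaches zero the dummy carrier is linked in at the current position so the scan can resume from there on the next slice. A small sketch of the same budgeted-cursor idea over a plain linked list, with hypothetical types (the real code resumes via the dummy carrier rather than a saved pointer):

#include <stddef.h>

/* Hypothetical carrier list node. */
struct node { struct node *next; int blocks; };

struct scan_state {
    struct node *cursor;   /* where to resume */
    int reductions;        /* budget for this slice */
};

/* Scan until the list ends (return 0) or the budget runs out (return 1). */
static int scan_slice(struct scan_state *st, int budget,
                      int (*scan_node)(struct node *))
{
    st->reductions = budget;

    while (st->cursor) {
        st->reductions -= scan_node(st->cursor);
        st->cursor = st->cursor->next;

        if (st->reductions <= 0 && st->cursor)
            return 1;      /* yield; resume from st->cursor next time */
    }
    return 0;              /* done */
}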
+
+typedef struct alcu_blockscan {
+ /* A per-scheduler list used when multiple scans have been queued. The
+ * current scanner will always run until completion/abort before moving on
+ * to the next. */
+ struct alcu_blockscan *scanner_queue;
+
+ Allctr_t *allocator;
+ Process *process;
+
+ int (*current_op)(struct alcu_blockscan *scanner);
+ int (*next_op)(struct alcu_blockscan *scanner);
+ int reductions;
+
+ ErtsAlcCPoolData_t *cpool_cursor;
+ CarrierList_t *current_clist;
+ Carrier_t *clist_cursor;
+ Carrier_t dummy_carrier;
+
+ /* Called if the process that started this job dies before we're done. */
+ void (*abort)(void *user_data);
+
+ /* Called on each carrier. The callback must return the number of blocks
+ * scanned to yield properly between carriers.
+ *
+ * Note that it's not possible to "yield back" into a carrier. */
+ int (*scan)(Allctr_t *, void *user_data, Carrier_t *);
+
+ /* Called when all carriers have been scanned. The callback may return
+ * non-zero to yield. */
+ int (*finish)(void *user_data);
+
+ void *user_data;
+} blockscan_t;
+
+static Carrier_t *blockscan_restore_clist_cursor(blockscan_t *state)
+{
+ Carrier_t *cursor = state->clist_cursor;
+
+ ASSERT(state->clist_cursor == (state->current_clist)->first ||
+ state->clist_cursor == &state->dummy_carrier);
+
+ if (cursor == &state->dummy_carrier) {
+ cursor = cursor->next;
+
+ unlink_carrier(state->current_clist, state->clist_cursor);
+ }
+
+ return cursor;
+}
+
+static void blockscan_save_clist_cursor(blockscan_t *state, Carrier_t *after)
+{
+ ASSERT(state->clist_cursor == (state->current_clist)->first ||
+ state->clist_cursor == &state->dummy_carrier);
+
+ state->clist_cursor = &state->dummy_carrier;
+
+ (state->clist_cursor)->next = after->next;
+ (state->clist_cursor)->prev = after;
+
+ relink_carrier(state->current_clist, state->clist_cursor);
+}
+
+static int blockscan_clist_yielding(blockscan_t *state)
+{
+ Carrier_t *cursor = blockscan_restore_clist_cursor(state);
+
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ return 0;
+ }
+
+ while (cursor) {
+ /* Skip dummy carriers inserted by another (concurrent) block scan.
+ * This can happen when scanning thread-safe allocators from multiple
+ * schedulers. */
+ if (CARRIER_SZ(cursor) > 0) {
+ int blocks_scanned = state->scan(state->allocator,
+ state->user_data,
+ cursor);
+
+ state->reductions -= blocks_scanned;
+
+ if (state->reductions <= 0) {
+ blockscan_save_clist_cursor(state, cursor);
+ return 1;
+ }
+ }
+
+ cursor = cursor->next;
+ }
+
+ return 0;
+}
+
+static ErtsAlcCPoolData_t *blockscan_restore_cpool_cursor(blockscan_t *state)
+{
+ ErtsAlcCPoolData_t *cursor;
+
+ cursor = cpool_aint2cpd(cpool_read(&(state->cpool_cursor)->next));
+
+ if (state->cpool_cursor == &state->dummy_carrier.cpool) {
+ cpool_delete(state->allocator, state->allocator, &state->dummy_carrier);
+ }
+
+ return cursor;
+}
+
+static void blockscan_save_cpool_cursor(blockscan_t *state,
+ ErtsAlcCPoolData_t *after)
+{
+ ErtsAlcCPoolData_t *dummy_carrier, *prev_carrier, *next_carrier;
+
+ dummy_carrier = &state->dummy_carrier.cpool;
+
+ next_carrier = cpool_aint2cpd(cpool_mod_mark(&after->next));
+ prev_carrier = cpool_aint2cpd(cpool_mod_mark(&next_carrier->prev));
+
+ cpool_init(&dummy_carrier->next, (erts_aint_t)next_carrier);
+ cpool_init(&dummy_carrier->prev, (erts_aint_t)prev_carrier);
+
+ cpool_set_mod_marked(&prev_carrier->next,
+ (erts_aint_t)dummy_carrier,
+ (erts_aint_t)next_carrier);
+ cpool_set_mod_marked(&next_carrier->prev,
+ (erts_aint_t)dummy_carrier,
+ (erts_aint_t)prev_carrier);
+
+ state->cpool_cursor = dummy_carrier;
+}
+
+static int blockscan_cpool_yielding(blockscan_t *state)
+{
+ ErtsAlcCPoolData_t *sentinel, *cursor;
+
+ sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
+ cursor = blockscan_restore_cpool_cursor(state);
+
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ return 0;
+ }
+
+ while (cursor != sentinel) {
+ Carrier_t *carrier;
+ erts_aint_t exp;
+
+ /* When a deallocation happens on a pooled carrier it will be routed to
+ * its owner, so the only way to be sure that it isn't modified while
+ * scanning is to skip all carriers that aren't ours. The deallocations
+ * deferred to us will get handled when we're done. */
+ while (cursor->orig_allctr != state->allocator) {
+ cursor = cpool_aint2cpd(cpool_read(&cursor->next));
+
+ if (cursor == sentinel) {
+ return 0;
+ }
+ }
+
+ carrier = ErtsContainerStruct(cursor, Carrier_t, cpool);
+ exp = erts_atomic_read_rb(&carrier->allctr);
+
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ ASSERT(state->allocator == (Allctr_t*)(exp & ~ERTS_CRR_ALCTR_FLG_MASK));
+ ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
+
+ if (erts_atomic_cmpxchg_acqb(&carrier->allctr,
+ exp | ERTS_CRR_ALCTR_FLG_BUSY,
+ exp) == exp) {
+ /* Skip dummy carriers inserted by another (concurrent) block
+ * scan. This can happen when scanning thread-safe allocators
+ * from multiple schedulers. */
+ if (CARRIER_SZ(carrier) > 0) {
+ int blocks_scanned = state->scan(state->allocator,
+ state->user_data,
+ carrier);
+
+ state->reductions -= blocks_scanned;
+
+ if (state->reductions <= 0) {
+ blockscan_save_cpool_cursor(state, cursor);
+ erts_atomic_set_relb(&carrier->allctr, exp);
+
+ return 1;
+ }
+ }
+
+ erts_atomic_set_relb(&carrier->allctr, exp);
+ }
+ }
+
+ cursor = cpool_aint2cpd(cpool_read(&cursor->next));
+ }
+
+ return 0;
+}
+
+static int blockscan_yield_helper(blockscan_t *state,
+ int (*yielding_op)(blockscan_t*))
+{
+ /* Note that we don't check whether to abort here; only yielding_op knows
+ * whether the carrier is still in the list/pool. */
+
+ if ((state->allocator)->thread_safe) {
+ /* Locked scans have to be as short as possible. */
+ state->reductions = 1;
+
+ erts_mtx_lock(&(state->allocator)->mutex);
+ } else {
+ state->reductions = BLOCKSCAN_REDUCTIONS;
+ }
+
+ if (yielding_op(state)) {
+ state->next_op = state->current_op;
+ }
+
+ if ((state->allocator)->thread_safe) {
+ erts_mtx_unlock(&(state->allocator)->mutex);
+ }
+
+ return 1;
+}
+
+/* */
+
+static int blockscan_finish(blockscan_t *state)
+{
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ state->abort(state->user_data);
+ return 0;
+ }
+
+ state->current_op = blockscan_finish;
+
+ return state->finish(state->user_data);
+}
+
+static int blockscan_sweep_sbcs(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_sbcs) {
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator);
+ state->current_clist = &(state->allocator)->sbc_list;
+ state->clist_cursor = (state->current_clist)->first;
+ }
+
+ state->current_op = blockscan_sweep_sbcs;
+ state->next_op = blockscan_finish;
+
+ return blockscan_yield_helper(state, blockscan_clist_yielding);
+}
+
+static int blockscan_sweep_mbcs(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_mbcs) {
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
+ state->current_clist = &(state->allocator)->mbc_list;
+ state->clist_cursor = (state->current_clist)->first;
+ }
+
+ state->current_op = blockscan_sweep_mbcs;
+ state->next_op = blockscan_sweep_sbcs;
+
+ return blockscan_yield_helper(state, blockscan_clist_yielding);
+}
+
+static int blockscan_sweep_cpool(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_cpool) {
+ ErtsAlcCPoolData_t *sentinel;
+
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
+ sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
+ state->cpool_cursor = sentinel;
+ }
+
+ state->current_op = blockscan_sweep_cpool;
+ state->next_op = blockscan_sweep_mbcs;
+
+ return blockscan_yield_helper(state, blockscan_cpool_yielding);
+}
+
+static int blockscan_get_specific_allocator(int allocator_num,
+ int sched_id,
+ Allctr_t **out)
+{
+ ErtsAllocatorInfo_t *ai;
+ Allctr_t *allocator;
+
+ ASSERT(allocator_num >= ERTS_ALC_A_MIN &&
+ allocator_num <= ERTS_ALC_A_MAX);
+ ASSERT(sched_id >= 0 && sched_id <= erts_no_schedulers);
+
+ ai = &erts_allctrs_info[allocator_num];
+
+ if (!ai->enabled || !ai->alloc_util) {
+ return 0;
+ }
+
+ if (!ai->thr_spec) {
+ if (sched_id != 0) {
+ /* Only thread-specific allocators can be scanned on a specific
+ * scheduler. */
+ return 0;
+ }
+
+ allocator = (Allctr_t*)ai->extra;
+ ASSERT(allocator->thread_safe);
+ } else {
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t*)ai->extra;
+
+ ASSERT(sched_id < tspec->size);
+
+ allocator = tspec->allctr[sched_id];
+ }
+
+ *out = allocator;
+
+ return 1;
+}
+
+static void blockscan_sched_trampoline(void *arg)
+{
+ ErtsAlcuBlockscanYieldData *yield;
+ ErtsSchedulerData *esdp;
+ blockscan_t *scanner;
+
+ esdp = erts_get_scheduler_data();
+ scanner = (blockscan_t*)arg;
+
+ yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
+
+ ASSERT((yield->last == NULL) == (yield->current == NULL));
+
+ if (yield->last != NULL) {
+ blockscan_t *prev_scanner = yield->last;
+
+ ASSERT(prev_scanner->scanner_queue == NULL);
+
+ prev_scanner->scanner_queue = scanner;
+ } else {
+ yield->current = scanner;
+ }
+
+ scanner->scanner_queue = NULL;
+ yield->last = scanner;
+
+ erts_notify_new_aux_yield_work(esdp);
+}
+
+static void blockscan_dispatch(blockscan_t *scanner, Process *owner,
+ Allctr_t *allocator, int sched_id)
+{
+ ASSERT(erts_get_scheduler_id() != 0);
+
+ if (sched_id == 0) {
+ /* Global instances are always handled on the current scheduler. */
+ sched_id = ERTS_ALC_GET_THR_IX();
+ ASSERT(allocator->thread_safe);
+ }
+
+ scanner->allocator = allocator;
+ scanner->process = owner;
+
+ erts_proc_inc_refc(scanner->process);
+
+ cpool_init_carrier_data(scanner->allocator, &scanner->dummy_carrier);
+ erts_atomic_init_nob(&(scanner->dummy_carrier).allctr,
+ (erts_aint_t)allocator | ERTS_CRR_ALCTR_FLG_BUSY);
+
+ if (ERTS_ALC_IS_CPOOL_ENABLED(scanner->allocator)) {
+ scanner->next_op = blockscan_sweep_cpool;
+ } else {
+ scanner->next_op = blockscan_sweep_mbcs;
+ }
+
+ /* Aux yield jobs can only be set up while running on the scheduler that
+ * services them, so we move there before continuing.
+ *
+ * We can't drive the scan itself through this since the scheduler will
+ * always finish *all* misc aux work in one go which makes it impossible to
+ * yield. */
+ erts_schedule_misc_aux_work(sched_id, blockscan_sched_trampoline, scanner);
+}
+
+int erts_handle_yielded_alcu_blockscan(ErtsSchedulerData *esdp,
+ ErtsAlcuBlockscanYieldData *yield)
+{
+ blockscan_t *scanner = yield->current;
+
+ (void)esdp;
+
+ ASSERT((yield->last == NULL) == (yield->current == NULL));
+
+ if (scanner) {
+ if (scanner->next_op(scanner)) {
+ return 1;
+ }
+
+ ASSERT(ERTS_PROC_IS_EXITING(scanner->process) ||
+ scanner->current_op == blockscan_finish);
+
+ yield->current = scanner->scanner_queue;
+
+ if (yield->current == NULL) {
+ ASSERT(scanner == yield->last);
+ yield->last = NULL;
+ }
+
+ erts_proc_dec_refc(scanner->process);
+
+ /* Plain free is intentional. */
+ free(scanner);
+
+ return yield->current != NULL;
+ }
+
+ return 0;
+}
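Per scheduler, queued scanners form a simple singly linked FIFO: new jobs are appended through the 'last' pointer, only the head ('current') makes progress, and when it finishes (or its requesting process exits) the head is dropped and the next scanner takes over. A tiny sketch of that enqueue/advance discipline, with hypothetical types:

#include <stddef.h>

/* Hypothetical scanner node and per-scheduler yield data. */
struct scanner { struct scanner *queue_next; };
struct yield_data { struct scanner *current, *last; };

/* Append a scanner; it only runs once everything before it has finished. */
static void enqueue_scanner(struct yield_data *yd, struct scanner *s)
{
    s->queue_next = NULL;
    if (yd->last)
        yd->last->queue_next = s;
    else
        yd->current = s;
    yd->last = s;
}

/* Drop the finished head and return the next scanner to run, if any. */
static struct scanner *dequeue_scanner(struct yield_data *yd)
{
    struct scanner *next = yd->current->queue_next;

    yd->current = next;
    if (!next)
        yd->last = NULL;
    return next;
}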
+
+void erts_alcu_sched_spec_data_init(ErtsSchedulerData *esdp)
+{
+ ErtsAlcuBlockscanYieldData *yield;
+
+ yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
+
+ yield->current = NULL;
+ yield->last = NULL;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static ERTS_INLINE int u64_log2(Uint64 v)
+{
+ static const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
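u64_log2() first ORs v with shifted copies of itself so that every bit below the highest set bit becomes 1, isolates that highest bit with v - (v >> 1), and then maps the single-bit value to its position through a de Bruijn-style multiply and 64-entry table, giving floor(log2(v)) without a loop. A small standalone check of the smear-and-isolate steps against a naive shift count:

#include <assert.h>
#include <stdint.h>

/* Naive floor(log2(v)), for comparison. */
static int naive_log2(uint64_t v)
{
    int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

/* The smear-and-isolate steps used above: after OR-ing v with its shifted
 * copies, all bits below the highest set bit are 1, and v - (v >> 1) leaves
 * only the highest set bit. */
static uint64_t isolate_msb(uint64_t v)
{
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
    v |= v >> 8;  v |= v >> 16; v |= v >> 32;
    return v - (v >> 1);
}

int main(void)
{
    uint64_t v;

    for (v = 1; v < (UINT64_C(1) << 20); v++)
        assert(isolate_msb(v) == (UINT64_C(1) << naive_log2(v)));
    return 0;
}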
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct hist_tree__ {
+ struct hist_tree__ *parent;
+ struct hist_tree__ *left;
+ struct hist_tree__ *right;
+
+ int is_red;
+
+ alcu_atag_t tag;
+ UWord histogram[1];
+} hist_tree_t;
+
+#define ERTS_RBT_PREFIX hist_tree
+#define ERTS_RBT_T hist_tree_t
+#define ERTS_RBT_KEY_T UWord
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) ((void)0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->tag)
+#define ERTS_RBT_IS_LT(KX, KY) (KX < KY)
+#define ERTS_RBT_IS_EQ(KX, KY) (KX == KY)
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+typedef struct {
+ blockscan_t common;
+
+ ErtsIRefStorage iref;
+ Process *process;
+
+ hist_tree_rbt_yield_state_t hist_tree_yield;
+ hist_tree_t *hist_tree;
+ UWord hist_count;
+
+ UWord hist_slot_start;
+ int hist_slot_count;
+
+ UWord unscanned_size;
+
+ ErtsHeapFactory msg_factory;
+ int building_result;
+ Eterm result_list;
+} gather_ahist_t;
+
+static void gather_ahist_update(gather_ahist_t *state, UWord tag, UWord size)
+{
+ hist_tree_t *hist_node;
+ UWord size_interval;
+ int hist_slot;
+
+ hist_node = hist_tree_rbt_lookup(state->hist_tree, tag);
+
+ if (hist_node == NULL) {
+ /* Plain calloc is intentional. */
+ hist_node = (hist_tree_t*)calloc(1, sizeof(hist_tree_t) +
+ (state->hist_slot_count - 1) *
+ sizeof(hist_node->histogram[0]));
+ hist_node->tag = tag;
+
+ hist_tree_rbt_insert(&state->hist_tree, hist_node);
+ state->hist_count++;
+ }
+
+ size_interval = (size / state->hist_slot_start);
+ size_interval = u64_log2(size_interval + 1);
+
+ hist_slot = MIN(size_interval, state->hist_slot_count - 1);
+
+ hist_node->histogram[hist_slot]++;
+}
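gather_ahist_update() buckets each block by size: the size is divided by the start of the first histogram slot, floor(log2(quotient + 1)) selects the slot, and the result is capped at the last slot, so each successive slot roughly doubles in width. A small sketch of the same rule with the logarithm spelled out and assumed parameters (a 128-byte first slot and 18 slots):

#include <assert.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int naive_log2(uint64_t v)
{
    int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

/* Same slotting rule as gather_ahist_update(), with the log2 spelled out. */
static int hist_slot(uint64_t size, uint64_t slot_start, int slot_count)
{
    uint64_t interval = size / slot_start;
    return MIN(naive_log2(interval + 1), slot_count - 1);
}

int main(void)
{
    /* Assumed parameters: first slot starts at 128 bytes, 18 slots wide. */
    assert(hist_slot(100,      128, 18) == 0);
    assert(hist_slot(300,      128, 18) == 1);
    assert(hist_slot(1000,     128, 18) == 3);
    assert(hist_slot(1u << 30, 128, 18) == 17);
    return 0;
}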
+
+static int gather_ahist_scan(Allctr_t *allocator,
+ void *user_data,
+ Carrier_t *carrier)
+{
+ gather_ahist_t *state;
+ int blocks_scanned;
+ Block_t *block;
+
+ state = (gather_ahist_t*)user_data;
+ blocks_scanned = 1;
+
+ if (IS_SB_CARRIER(carrier)) {
+ alcu_atag_t tag;
+
+ block = SBC2BLK(allocator, carrier);
+ tag = GET_BLK_ATAG(block);
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+
+ gather_ahist_update(state, tag, SBC_BLK_SZ(block));
+ } else {
+ UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
+
+ ASSERT(IS_MB_CARRIER(carrier));
+
+ block = MBC_TO_FIRST_BLK(allocator, carrier);
+
+ while (1) {
+ UWord block_size = MBC_BLK_SZ(block);
+
+ if (IS_ALLOCED_BLK(block)) {
+ alcu_atag_t tag = GET_BLK_ATAG(block);
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+
+ gather_ahist_update(state, tag, block_size);
+ }
+
+ scanned_bytes += block_size;
+
+ if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
+ state->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
+ break;
+ } else if (IS_LAST_BLK(block)) {
+ break;
+ }
+
+ block = NXT_BLK(block);
+ blocks_scanned++;
+ }
+ }
+
+ return blocks_scanned;
+}
+
+static void gather_ahist_append_result(hist_tree_t *node, void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ Eterm histogram_tuple, tag_tuple;
+
+ Eterm *hp;
+ int ix;
+
+ ASSERT(state->building_result);
+
+ hp = erts_produce_heap(&state->msg_factory, 7 + state->hist_slot_count, 0);
+
+ hp[0] = make_arityval(state->hist_slot_count);
+
+ for (ix = 0; ix < state->hist_slot_count; ix++) {
+ hp[1 + ix] = make_small(node->histogram[ix]);
+ }
+
+ histogram_tuple = make_tuple(hp);
+ hp += 1 + state->hist_slot_count;
+
+ hp[0] = make_arityval(3);
+ hp[1] = ATAG_ID(node->tag);
+ hp[2] = alloc_type_atoms[ATAG_TYPE(node->tag)];
+ hp[3] = histogram_tuple;
+
+ tag_tuple = make_tuple(hp);
+ hp += 4;
+
+ state->result_list = CONS(hp, tag_tuple, state->result_list);
+
+ /* Plain free is intentional. */
+ free(node);
+}
+
+static void gather_ahist_send(gather_ahist_t *state)
+{
+ Eterm result_tuple, unscanned_size, task_ref;
+
+ Uint term_size;
+ Eterm *hp;
+
+ ASSERT((state->result_list == NIL) ^ (state->hist_count > 0));
+ ASSERT(state->building_result);
+
+ term_size = 4 + erts_iref_storage_heap_size(&state->iref);
+ term_size += IS_USMALL(0, state->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
+
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
+ &(state->msg_factory.message)->hfrag.off_heap, 0);
+
+ unscanned_size = bld_unstable_uint(&hp, NULL, state->unscanned_size);
+
+ hp[0] = make_arityval(3);
+ hp[1] = task_ref;
+ hp[2] = unscanned_size;
+ hp[3] = state->result_list;
+
+ result_tuple = make_tuple(hp);
+
+ erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
+
+ erts_queue_message(state->process, 0, state->msg_factory.message,
+ result_tuple, am_system);
+}
+
+static int gather_ahist_finish(void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ if (!state->building_result) {
+ ErtsMessage *message;
+ Uint minimum_size;
+ Eterm *hp;
+
+ /* {Ref, unscanned size, [{Tag, {Histogram}} | Rest]} */
+ minimum_size = 4 + erts_iref_storage_heap_size(&state->iref) +
+ state->hist_count * (7 + state->hist_slot_count);
+
+ message = erts_alloc_message(minimum_size, &hp);
+ erts_factory_selfcontained_message_init(&state->msg_factory,
+ message, hp);
+
+ ERTS_RBT_YIELD_STAT_INIT(&state->hist_tree_yield);
+
+ state->result_list = NIL;
+ state->building_result = 1;
+ }
+
+ if (hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
+ &gather_ahist_append_result,
+ state,
+ &state->hist_tree_yield,
+ BLOCKSCAN_REDUCTIONS)) {
+ return 1;
+ }
+
+ gather_ahist_send(state);
+
+ return 0;
+}
+
+static void gather_ahist_destroy_result(hist_tree_t *node, void *arg)
+{
+ (void)arg;
+ free(node);
+}
+
+static void gather_ahist_abort(void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ if (state->building_result) {
+ erts_factory_undo(&state->msg_factory);
+ }
+
+ hist_tree_rbt_foreach_destroy(&state->hist_tree,
+ &gather_ahist_destroy_result,
+ NULL);
+}
+
+int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref)
+{
+ gather_ahist_t *gather_state;
+ blockscan_t *scanner;
+ Allctr_t *allocator;
+
+ ASSERT(is_internal_ref(ref));
+
+ if (!blockscan_get_specific_allocator(allocator_num,
+ sched_id,
+ &allocator)) {
+ return 0;
+ } else if (!allocator->atags) {
+ return 0;
+ }
+
+ ensure_atoms_initialized(allocator);
+
+ /* Plain calloc is intentional. */
+ gather_state = (gather_ahist_t*)calloc(1, sizeof(gather_ahist_t));
+ scanner = &gather_state->common;
+
+ scanner->abort = gather_ahist_abort;
+ scanner->scan = gather_ahist_scan;
+ scanner->finish = gather_ahist_finish;
+ scanner->user_data = gather_state;
+
+ erts_iref_storage_save(&gather_state->iref, ref);
+ gather_state->hist_slot_start = hist_start;
+ gather_state->hist_slot_count = hist_width;
+ gather_state->process = p;
+
+ blockscan_dispatch(scanner, p, allocator, sched_id);
+
+ return 1;
+}
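
erts_alcu_gather_alloc_histograms() only wires a blockscan_t up with scan/finish/abort callbacks plus a user_data pointer and hands it to blockscan_dispatch(); the driver decides when each callback runs and how often the job yields. A stripped-down sketch of that callback-struct pattern, with a trivial synchronous driver standing in for the real scheduler integration (all names here are hypothetical, none are ERTS APIs):

    #include <stdio.h>

    /* A cooperative job described purely by callbacks, like blockscan_t. */
    typedef struct job {
        int  (*scan)(struct job *job, int item);   /* work units consumed */
        int  (*finish)(struct job *job);           /* nonzero: call again later */
        void (*abort)(struct job *job);            /* cleanup on cancellation */
        void *user_data;
    } job_t;

    typedef struct {
        job_t common;
        long  total;
    } sum_job_t;

    static int sum_scan(job_t *job, int item)
    {
        ((sum_job_t *)job)->total += item;
        return 1; /* one work unit */
    }

    static int sum_finish(job_t *job)
    {
        printf("total = %ld\n", ((sum_job_t *)job)->total);
        return 0; /* done, no need to resume */
    }

    static void sum_abort(job_t *job)
    {
        (void)job; /* nothing allocated, nothing to free */
    }

    /* Trivial driver: scan every item, then run finish until it reports done. */
    static void dispatch(job_t *job, const int *items, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            job->scan(job, items[i]);
        while (job->finish(job))
            ;
    }

    int main(void)
    {
        int items[] = { 1, 2, 3, 4 };
        sum_job_t job = { { sum_scan, sum_finish, sum_abort, NULL }, 0 };

        job.common.user_data = &job;
        dispatch(&job.common, items, 4);
        return 0;
    }
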
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct chist_node__ {
+ struct chist_node__ *next;
+
+ UWord carrier_size;
+ UWord unscanned_size;
+ UWord allocated_size;
+
+ /* BLOCKSCAN_BAILOUT_THRESHOLD guarantees we won't overflow this or the
+ * counters in the free block histogram. */
+ int allocated_count;
+ int flags;
+
+ int histogram[1];
+} chist_node_t;
+
+typedef struct {
+ blockscan_t common;
+
+ ErtsIRefStorage iref;
+ Process *process;
+
+ Eterm allocator_desc;
+
+ chist_node_t *info_list;
+ UWord info_count;
+
+ UWord hist_slot_start;
+ int hist_slot_count;
+
+ ErtsHeapFactory msg_factory;
+ int building_result;
+ Eterm result_list;
+} gather_cinfo_t;
+
+static int gather_cinfo_scan(Allctr_t *allocator,
+ void *user_data,
+ Carrier_t *carrier)
+{
+ gather_cinfo_t *state;
+ chist_node_t *node;
+ int blocks_scanned;
+ Block_t *block;
+
+ state = (gather_cinfo_t*)user_data;
+ node = calloc(1, sizeof(chist_node_t) +
+ (state->hist_slot_count - 1) *
+ sizeof(node->histogram[0]));
+ blocks_scanned = 1;
+
+ /* ERTS_CRR_ALCTR_FLG_BUSY is ignored since we've set it ourselves and it
+ * would be misleading to include it. */
+ node->flags = erts_atomic_read_rb(&carrier->allctr) &
+ (ERTS_CRR_ALCTR_FLG_MASK & ~ERTS_CRR_ALCTR_FLG_BUSY);
+ node->carrier_size = CARRIER_SZ(carrier);
+
+ if (IS_SB_CARRIER(carrier)) {
+ UWord block_size;
+
+ block = SBC2BLK(allocator, carrier);
+ block_size = SBC_BLK_SZ(block);
+
+ node->allocated_size = block_size;
+ node->allocated_count = 1;
+ } else {
+ UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
+
+ block = MBC_TO_FIRST_BLK(allocator, carrier);
+
+ while (1) {
+ UWord block_size = MBC_BLK_SZ(block);
+
+ scanned_bytes += block_size;
+
+ if (IS_ALLOCED_BLK(block)) {
+ node->allocated_size += block_size;
+ node->allocated_count++;
+ } else {
+ UWord size_interval;
+ int hist_slot;
+
+ size_interval = (block_size / state->hist_slot_start);
+ size_interval = u64_log2(size_interval + 1);
+
+ hist_slot = MIN(size_interval, state->hist_slot_count - 1);
+
+ node->histogram[hist_slot]++;
+ }
+
+ if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
+ node->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
+ break;
+ } else if (IS_LAST_BLK(block)) {
+ break;
+ }
+
+ block = NXT_BLK(block);
+ blocks_scanned++;
+ }
+ }
+
+ node->next = state->info_list;
+ state->info_list = node;
+ state->info_count++;
+
+ return blocks_scanned;
+}
+
+static void gather_cinfo_append_result(gather_cinfo_t *state,
+ chist_node_t *info)
+{
+ Eterm carrier_size, unscanned_size, allocated_size;
+ Eterm histogram_tuple, carrier_tuple;
+
+ Uint term_size;
+ Eterm *hp;
+ int ix;
+
+ ASSERT(state->building_result);
+
+ term_size = 11 + state->hist_slot_count;
+ term_size += IS_USMALL(0, info->carrier_size) ? 0 : BIG_UINT_HEAP_SIZE;
+ term_size += IS_USMALL(0, info->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
+ term_size += IS_USMALL(0, info->allocated_size) ? 0 : BIG_UINT_HEAP_SIZE;
+
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ hp[0] = make_arityval(state->hist_slot_count);
+
+ for (ix = 0; ix < state->hist_slot_count; ix++) {
+ hp[1 + ix] = make_small(info->histogram[ix]);
+ }
+
+ histogram_tuple = make_tuple(hp);
+ hp += 1 + state->hist_slot_count;
+
+ carrier_size = bld_unstable_uint(&hp, NULL, info->carrier_size);
+ unscanned_size = bld_unstable_uint(&hp, NULL, info->unscanned_size);
+ allocated_size = bld_unstable_uint(&hp, NULL, info->allocated_size);
+
+ hp[0] = make_arityval(7);
+ hp[1] = state->allocator_desc;
+ hp[2] = carrier_size;
+ hp[3] = unscanned_size;
+ hp[4] = allocated_size;
+ hp[5] = make_small(info->allocated_count);
+ hp[6] = (info->flags & ERTS_CRR_ALCTR_FLG_IN_POOL) ? am_true : am_false;
+ hp[7] = histogram_tuple;
+
+ carrier_tuple = make_tuple(hp);
+ hp += 8;
+
+ state->result_list = CONS(hp, carrier_tuple, state->result_list);
+
+ free(info);
+}
+
+static void gather_cinfo_send(gather_cinfo_t *state)
+{
+ Eterm result_tuple, task_ref;
+
+ int term_size;
+ Eterm *hp;
+
+ ASSERT((state->result_list == NIL) ^ (state->info_count > 0));
+ ASSERT(state->building_result);
+
+ term_size = 3 + erts_iref_storage_heap_size(&state->iref);
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
+ &(state->msg_factory.message)->hfrag.off_heap, 0);
+
+ hp[0] = make_arityval(2);
+ hp[1] = task_ref;
+ hp[2] = state->result_list;
+
+ result_tuple = make_tuple(hp);
+
+ erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
+
+ erts_queue_message(state->process, 0, state->msg_factory.message,
+ result_tuple, am_system);
+}
+
+static int gather_cinfo_finish(void *arg)
+{
+ gather_cinfo_t *state = (gather_cinfo_t*)arg;
+ int reductions = BLOCKSCAN_REDUCTIONS;
+
+ if (!state->building_result) {
+ ErtsMessage *message;
+ Uint minimum_size;
+ Eterm *hp;
+
+ /* {Ref, [{Allocator, Carrier size, unscanned size, allocated size,
+ * allocated block count, in pool, {Free block histogram}} | Rest]} */
+ minimum_size = 3 + erts_iref_storage_heap_size(&state->iref) +
+ state->info_count * (11 + state->hist_slot_count);
+
+ message = erts_alloc_message(minimum_size, &hp);
+ erts_factory_selfcontained_message_init(&state->msg_factory,
+ message, hp);
+
+ state->result_list = NIL;
+ state->building_result = 1;
+ }
+
+ while (state->info_list) {
+ chist_node_t *current = state->info_list;
+ state->info_list = current->next;
+
+ gather_cinfo_append_result(state, current);
+
+ if (reductions-- <= 0) {
+ return 1;
+ }
+ }
+
+ gather_cinfo_send(state);
+
+ return 0;
+}
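
gather_cinfo_finish() drains the info list under a fixed reduction budget per invocation and returns 1 whenever it runs out, so the scheduler can resume the job later instead of blocking on a long carrier list. A minimal standalone sketch of that resumable-drain pattern, with a hypothetical budget:

    #include <stdio.h>

    #define BUDGET 2 /* hypothetical work budget per invocation */

    struct item {
        int value;
        struct item *next;
    };

    struct drain_state {
        struct item *pending; /* items not yet consumed */
    };

    /* Consume up to BUDGET items; return 1 if work remains (caller should
     * invoke again later), 0 once the list is empty. */
    static int drain(struct drain_state *state)
    {
        int budget = BUDGET;

        while (state->pending) {
            struct item *current = state->pending;
            state->pending = current->next;

            printf("consumed %d\n", current->value);

            if (--budget <= 0 && state->pending)
                return 1; /* out of budget: yield */
        }
        return 0; /* all done */
    }

    int main(void)
    {
        struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct drain_state state = { &a };
        int rounds = 0;

        while (drain(&state))
            rounds++;
        printf("yielded %d time(s)\n", rounds);
        return 0;
    }
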
+
+static void gather_cinfo_abort(void *arg)
+{
+ gather_cinfo_t *state = (gather_cinfo_t*)arg;
+
+ if (state->building_result) {
+ erts_factory_undo(&state->msg_factory);
+ }
+
+ while (state->info_list) {
+ chist_node_t *current = state->info_list;
+ state->info_list = current->next;
+
+ free(current);
+ }
+}
+
+int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref)
+{
+ gather_cinfo_t *gather_state;
+ blockscan_t *scanner;
+
+ const char *allocator_desc;
+ Allctr_t *allocator;
+
+ ASSERT(is_internal_ref(ref));
+
+ if (!blockscan_get_specific_allocator(allocator_num,
+ sched_id,
+ &allocator)) {
+ return 0;
+ }
+
+ allocator_desc = ERTS_ALC_A2AD(allocator_num);
+
+ /* Plain calloc is intentional. */
+ gather_state = (gather_cinfo_t*)calloc(1, sizeof(gather_cinfo_t));
+ scanner = &gather_state->common;
+
+ scanner->abort = gather_cinfo_abort;
+ scanner->scan = gather_cinfo_scan;
+ scanner->finish = gather_cinfo_finish;
+ scanner->user_data = gather_state;
+
+ gather_state->allocator_desc = erts_atom_put((byte *)allocator_desc,
+ sys_strlen(allocator_desc),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ erts_iref_storage_save(&gather_state->iref, ref);
+ gather_state->hist_slot_start = hist_start * 2;
+ gather_state->hist_slot_count = hist_width;
+ gather_state->process = p;
+
+ blockscan_dispatch(scanner, p, allocator, sched_id);
+
+ return 1;
+}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_alcu_test() is only supposed to be used for testing. *
@@ -6441,6 +7722,7 @@ erts_alcu_verify_unused_ts(Allctr_t *allctr)
erts_mtx_unlock(&allctr->mutex);
}
+
#ifdef DEBUG
int is_sbc_blk(Block_t* blk)
{
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 05c8a0db3b..ff4d10b206 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -50,6 +50,7 @@ typedef struct {
int tspec;
int tpref;
int ramv;
+ int atags;
UWord sbct;
UWord asbcst;
UWord rsbcst;
@@ -106,6 +107,7 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
+ 0, /* (bool) atags: tagged allocations */\
512*1024, /* (bytes) sbct: sbc threshold */\
2*1024*2024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -142,6 +144,7 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
+ 0, /* (bool) atags: tagged allocations */\
64*1024, /* (bytes) sbct: sbc threshold */\
2*1024*2024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -224,6 +227,36 @@ void erts_lcnt_update_allocator_locks(int enable);
int erts_alcu_try_set_dyn_param(Allctr_t*, Eterm param, Uint value);
+/* Gathers per-tag allocation histograms from the given allocator number
+ * (ERTS_ALC_A_*) and scheduler id. An id of 0 means the global instance will
+ * be used.
+ *
+ * The results are sent to `p`, and it returns the number of messages to wait
+ * for. */
+int erts_alcu_gather_alloc_histograms(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref);
+
+/* Gathers per-carrier info from the given allocator number (ERTS_ALC_A_*) and
+ * scheduler id. An id of 0 means the global instance will be used.
+ *
+ * The results are sent to `p`, and it returns the number of messages to wait
+ * for. */
+int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref);
+
+struct alcu_blockscan;
+
+typedef struct {
+ struct alcu_blockscan *current;
+ struct alcu_blockscan *last;
+} ErtsAlcuBlockscanYieldData;
+
+int erts_handle_yielded_alcu_blockscan(struct ErtsSchedulerData_ *esdp,
+ ErtsAlcuBlockscanYieldData *yield);
+void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
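
ErtsAlcuBlockscanYieldData keeps each scheduler's queue of yielded scans as a pair of pointers: the scan currently being resumed and the last one enqueued. A standalone sketch of that current/last FIFO shape (the two field names mirror the struct above; everything else is hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    struct scan {
        const char *name;
        struct scan *next;
    };

    struct yield_data {
        struct scan *current; /* head: next scan to resume */
        struct scan *last;    /* tail: most recently enqueued scan */
    };

    static void enqueue(struct yield_data *yd, struct scan *s)
    {
        s->next = NULL;
        if (yd->last)
            yd->last->next = s;
        else
            yd->current = s; /* queue was empty */
        yd->last = s;
    }

    static struct scan *dequeue(struct yield_data *yd)
    {
        struct scan *s = yd->current;
        if (s) {
            yd->current = s->next;
            if (!yd->current)
                yd->last = NULL; /* queue drained */
        }
        return s;
    }

    int main(void)
    {
        struct yield_data yd = { NULL, NULL };
        struct scan a = { "ahist", NULL }, c = { "cinfo", NULL };
        struct scan *s;

        enqueue(&yd, &a);
        enqueue(&yd, &c);
        while ((s = dequeue(&yd)) != NULL)
            printf("resuming %s scan\n", s->name);
        return 0;
    }
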
#endif /* !ERL_ALLOC_UTIL__ */
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
@@ -548,6 +581,7 @@ struct Allctr_t_ {
/* Options */
int t;
int ramv;
+ int atags;
Uint sbc_threshold;
Uint sbc_move_threshold;
Uint mbc_move_threshold;
@@ -684,6 +718,7 @@ struct Allctr_t_ {
#endif
};
+
int erts_alcu_start(Allctr_t *, AllctrInit_t *);
void erts_alcu_stop(Allctr_t *);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 3c13991fb6..8b2b1a58c7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -38,7 +38,7 @@
#include "erl_message.h"
#include "erl_binary.h"
#include "erl_db.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "dist.h"
#include "erl_gc.h"
#include "erl_cpu_topology.h"
@@ -51,6 +51,7 @@
#include "erl_ptab.h"
#include "erl_time.h"
#include "erl_proc_sig_queue.h"
+#include "erl_alloc_util.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -616,17 +617,13 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
}
typedef struct {
- Process *c_p;
- ErtsProcLocks c_p_locks;
ErtsMonitorSuspend **smi;
Uint smi_i;
Uint smi_max;
- int sz;
+ Uint sz;
} ErtsSuspendMonitorInfoCollection;
-#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
- (SMIC).c_p = (CP); \
- (SMIC).c_p_locks = (CPL); \
+#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
(SMIC).smi = NULL; \
(SMIC).smi_i = (SMIC).smi_max = 0; \
(SMIC).sz = 0; \
@@ -659,34 +656,26 @@ do { \
static void
collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
{
- ErtsMonitorSuspend *smon = erts_monitor_suspend(mon);
- ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
- Process *suspendee = erts_pid2proc(smicp->c_p,
- smicp->c_p_locks,
- mon->other.item,
- 0);
- if (suspendee) { /* suspendee is alive */
- Sint a, p;
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ Sint count;
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsSuspendMonitorInfoCollection *smicp;
- ASSERT((smon->active && !smon->pending)
- || (smon->pending && !smon->active));
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ smicp = vsmicp;
ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
- smicp->smi[smicp->smi_i] = smon;
+ smicp->smi[smicp->smi_i] = msp;
smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
- a = (Sint) smon->active; /* quiet compiler warnings */
- p = (Sint) smon->pending; /* on 64-bit machines */
+ mstate = erts_atomic_read_nob(&msp->state);
- if (!IS_SSMALL(a))
- smicp->sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
+ count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(count))
smicp->sz += BIG_UINT_HEAP_SIZE;
+
smicp->smi_i++;
}
}
@@ -756,7 +745,7 @@ typedef struct {
static ErtsProcessInfoArgs pi_args[] = {
{am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
- {am_current_function, 4, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
{am_status, 0, 0, 0},
{am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
@@ -778,15 +767,15 @@ static ErtsProcessInfoArgs pi_args[] = {
{am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
- {am_backtrace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
{am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
- {am_current_location, 0, 0, ERTS_PROC_LOCK_MAIN},
- {am_current_stacktrace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
{am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
@@ -1074,8 +1063,10 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
if (c_p->common.id == pid) {
int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
- int sreds = ERTS_BIF_REDS_LEFT(c_p);
- int sres;
+ int sres, sreds, reds_left;
+
+ reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ sreds = reds_left;
if (!local_only) {
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
@@ -1084,15 +1075,19 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
}
sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
+
+ BUMP_REDS(c_p, (int) sreds);
+ reds_left -= sreds;
+
if (state & ERTS_PSFLG_EXITING) {
c_p->flags &= ~F_LOCAL_SIGS_ONLY;
goto exited;
}
- if (!sres) {
+ if (!sres | (reds_left <= 0)) {
/*
- * More signals to handle; need to yield and continue.
- * Prevent fetching of more signals by setting
- * local-sigs-only flag.
+ * More signals to handle or out of reds; need
+ * to yield and continue. Prevent fetching of
+ * more signals by setting local-sigs-only flag.
*/
c_p->flags |= F_LOCAL_SIGS_ONLY;
goto yield;
@@ -1165,6 +1160,7 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
else {
if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
goto send_signal;
+ state = ERTS_PSFLG_RUNNING; /* fail state... */
rp = erts_try_lock_sig_free_proc(pid, locks, &state);
if (!rp)
goto undefined;
@@ -1626,56 +1622,56 @@ process_info_aux(Process *c_p,
case ERTS_PI_IX_SUSPENDING: {
ErtsSuspendMonitorInfoCollection smic;
int i;
- Eterm item;
- erts_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
+ ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);
- ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
- c_p,
- (c_p == rp
- ? ERTS_PROC_LOCK_MAIN
- : 0) | ERTS_PROC_LOCK_STATUS);
-
- erts_monitor_tree_foreach(rp->suspend_monitors,
- &collect_one_suspend_monitor,
- &smic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_suspend_monitor,
+ (void *) &smic);
reserve_size += smic.sz;
res = NIL;
for (i = 0; i < smic.smi_i; i++) {
- Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
- Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
- Eterm active;
- Eterm pending;
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+ Sint ci;
+ Eterm ct, active, pending, item;
Uint sz = 4 + 2;
- if (!IS_SSMALL(a))
- sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
- sz += BIG_UINT_HEAP_SIZE;
+
+ msp = smic.smi[i];
+ mstate = erts_atomic_read_nob(&msp->state);
+
+ ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(ci))
+ sz += BIG_UINT_HEAP_SIZE;
ERTS_PI_UNRESERVE(reserve_size, sz);
hp = erts_produce_heap(hfact, sz, reserve_size);
- if (IS_SSMALL(a))
- active = make_small(a);
- else {
- active = small_to_big(a, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- if (IS_SSMALL(p))
- pending = make_small(p);
- else {
- pending = small_to_big(p, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- item = TUPLE3(hp, smic.smi[i]->mon.other.item, active, pending);
+ if (IS_SSMALL(ci))
+ ct = make_small(ci);
+ else {
+ ct = small_to_big(ci, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
+ active = ct;
+ pending = make_small(0);
+ }
+ else {
+ active = make_small(0);
+ pending = ct;
+ }
+
+ ASSERT(is_internal_pid(msp->md.origin.other.item));
+
+ item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
hp += 4;
res = CONS(hp, item, res);
}
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
*reds += (Uint) smic.smi_i / 4;
ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
@@ -2151,45 +2147,6 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
return make_small(sizeof(UWord));
}
goto badarg;
- } else if (sel == am_allocated) {
- if (arity == 2) {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(*tp);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(*tp, buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- else if (arity == 3 && tp[0] == am_status) {
- if (is_atom(tp[1]))
- return erts_instr_get_stat(BIF_P, tp[1], 1);
- else {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(tp[1]);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(tp[1], buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- }
- else
- goto badarg;
} else if (sel == am_allocator) {
switch (arity) {
case 2:
@@ -2557,8 +2514,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (BIF_ARG_1 == am_allocated_areas) {
res = erts_allocated_areas(NULL, NULL, BIF_P);
BIF_RET(res);
- } else if (BIF_ARG_1 == am_allocated) {
- BIF_RET(erts_instr_get_memory_map(BIF_P));
} else if (BIF_ARG_1 == am_hipe_architecture) {
#if defined(HIPE)
BIF_RET(hipe_arch_name);
@@ -2699,9 +2654,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
sizeof(ERLANG_ARCHITECTURE)-1,
NIL));
}
- else if (BIF_ARG_1 == am_memory_types) {
- return erts_instr_get_type_info(BIF_P);
- }
else if (BIF_ARG_1 == am_os_type) {
BIF_RET(os_type_tuple);
}
@@ -3680,26 +3632,46 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_display_2(BIF_ALIST_2)
+static Eterm
+process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
+ if (redsp)
+ *redsp = 1;
- if (BIF_ARG_2 != am_backtrace)
- BIF_ERROR(BIF_P, BADARG);
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_badarg;
- rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
- if(!rp) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
- erts_proc_unlock(rp, (BIF_P == rp
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
- BIF_RET(am_true);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ return am_true;
+}
+
+
+BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
+{
+ Eterm res;
+
+ if (BIF_ARG_2 != am_backtrace)
+ BIF_RET(am_badarg);
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_display(BIF_P, NULL, NULL, NULL);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ process_display,
+ NULL);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
+
+ BIF_RET(res);
}
/* this is a general call which return some possibly useful information */
@@ -4640,27 +4612,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_true);
}
}
- else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
- int old_use_opt, use_opt;
- switch (BIF_ARG_2) {
- case am_true:
- use_opt = 1;
- break;
- case am_false:
- use_opt = 0;
- break;
- default:
- BIF_ERROR(BIF_P, BADARG);
- }
-
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_thr_progress_block();
- old_use_opt = !erts_disable_proc_not_running_opt;
- erts_disable_proc_not_running_opt = !use_opt;
- erts_thr_progress_unblock();
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(old_use_opt ? am_true : am_false);
- }
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
@@ -4744,6 +4695,55 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+static BIF_RETTYPE
+gather_histograms_helper(Process * c_p, Eterm arg_tuple,
+ int gather(Process *, int, int, int, UWord, Eterm))
+{
+ SWord hist_start, hist_width, sched_id;
+ int msg_count, alloc_num;
+ Eterm *args;
+
+ /* This is an internal BIF, so the error checking is mostly left to Erlang
+ * code. */
+
+ ASSERT(is_tuple_arity(arg_tuple, 5));
+ args = tuple_val(arg_tuple);
+
+ for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
+ if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
+ break;
+ }
+ }
+
+ if (alloc_num > ERTS_ALC_A_MAX) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ sched_id = signed_val(args[2]);
+ hist_width = signed_val(args[3]);
+ hist_start = signed_val(args[4]);
+
+ if (sched_id < 0 || sched_id > erts_no_schedulers) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
+
+ BIF_RET(make_small(msg_count));
+}
+
+BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_alloc_histograms);
+}
+
+BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_carrier_info);
+}
+
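
gather_histograms_helper() resolves the allocator argument by walking ERTS_ALC_A_MIN..ERTS_ALC_A_MAX and comparing each allocator's description against args[1], then range-checks the scheduler id (0 meaning the global instance) before dispatching. A standalone sketch of that lookup-and-validate step, using a hypothetical name table and scheduler count in place of the ERTS macros:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for ERTS_ALC_A2AD() and erts_no_schedulers. */
    static const char *allocator_names[] = {
        "sys_alloc", "temp_alloc", "eheap_alloc", "binary_alloc"
    };
    #define NUM_ALLOCATORS 4
    #define NO_SCHEDULERS 8

    /* Return the allocator index for `name`, or -1 if it is unknown or the
     * scheduler id is outside 0..NO_SCHEDULERS. */
    static int resolve_request(const char *name, long sched_id)
    {
        int i;

        if (sched_id < 0 || sched_id > NO_SCHEDULERS)
            return -1;

        for (i = 0; i < NUM_ALLOCATORS; i++)
            if (strcmp(allocator_names[i], name) == 0)
                return i;

        return -1; /* badarg in the real BIF */
    }

    int main(void)
    {
        printf("binary_alloc on scheduler 2 -> %d\n",
               resolve_request("binary_alloc", 2));
        printf("bogus_alloc on scheduler 2  -> %d\n",
               resolve_request("bogus_alloc", 2));
        return 0;
    }
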
#ifdef ERTS_ENABLE_LOCK_COUNT
typedef struct {
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 1953f79d79..f9d351e69e 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -809,10 +809,129 @@ Eterm trace_info_2(BIF_ALIST_2)
BIF_ERROR(p, BADARG);
}
erts_release_code_write_permission();
+
+ if (is_internal_ref(res))
+ BIF_TRAP1(erts_await_result, BIF_P, res);
+
BIF_RET(res);
}
static Eterm
+build_trace_flags_term(Eterm **hpp, Uint *szp, Uint trace_flags)
+{
+
+#define ERTS_TFLAG__(F, FN) \
+ if (trace_flags & F) { \
+ if (szp) \
+ sz += 2; \
+ if (hp) { \
+ res = CONS(hp, FN, res); \
+ hp += 2; \
+ } \
+ }
+
+ Eterm res;
+ Uint sz = 0;
+ Eterm *hp;
+
+ if (hpp) {
+ hp = *hpp;
+ res = NIL;
+ }
+ else {
+ hp = NULL;
+ res = THE_NON_VALUE;
+ }
+
+ ERTS_TFLAG__(F_NOW_TS, am_timestamp);
+ ERTS_TFLAG__(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
+ ERTS_TFLAG__(F_MON_TS, am_monotonic_timestamp);
+ ERTS_TFLAG__(F_TRACE_SEND, am_send);
+ ERTS_TFLAG__(F_TRACE_RECEIVE, am_receive);
+ ERTS_TFLAG__(F_TRACE_SOS, am_set_on_spawn);
+ ERTS_TFLAG__(F_TRACE_CALLS, am_call);
+ ERTS_TFLAG__(F_TRACE_PROCS, am_procs);
+ ERTS_TFLAG__(F_TRACE_SOS1, am_set_on_first_spawn);
+ ERTS_TFLAG__(F_TRACE_SOL, am_set_on_link);
+ ERTS_TFLAG__(F_TRACE_SOL1, am_set_on_first_link);
+ ERTS_TFLAG__(F_TRACE_SCHED, am_running);
+ ERTS_TFLAG__(F_TRACE_SCHED_EXIT, am_exiting);
+ ERTS_TFLAG__(F_TRACE_GC, am_garbage_collection);
+ ERTS_TFLAG__(F_TRACE_ARITY_ONLY, am_arity);
+ ERTS_TFLAG__(F_TRACE_RETURN_TO, am_return_to);
+ ERTS_TFLAG__(F_TRACE_SILENT, am_silent);
+ ERTS_TFLAG__(F_TRACE_SCHED_NO, am_scheduler_id);
+ ERTS_TFLAG__(F_TRACE_PORTS, am_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PORTS, am_running_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PROCS, am_running_procs);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ *hpp = hp;
+
+ return res;
+
+#undef ERTS_TFLAG__
+}
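
build_trace_flags_term() is called twice per flag set: once with a size pointer and no heap pointer to learn how many words the list needs, and once with a heap pointer and no size pointer to actually build it, so a single flag table drives both passes. A standalone sketch of that size-then-build idiom applied to an ordinary string buffer (nothing here is an ERTS API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* One function, two passes: with out == NULL it only accumulates the size
     * needed; with sizep == NULL it writes into *out and advances it. */
    static void build_flag_names(char **out, size_t *sizep, unsigned flags)
    {
    #define FLAG_NAME(mask, name)                                            \
        if (flags & (mask)) {                                                \
            if (sizep) *sizep += strlen(name) + 1;                           \
            if (out) {                                                       \
                strcpy(*out, name);                                          \
                *out += strlen(name);                                        \
                **out = ' ';                                                 \
                (*out)++;                                                    \
            }                                                                \
        }

        FLAG_NAME(1u << 0, "send");
        FLAG_NAME(1u << 1, "receive");
        FLAG_NAME(1u << 2, "garbage_collection");

    #undef FLAG_NAME
    }

    int main(void)
    {
        unsigned flags = (1u << 0) | (1u << 2);
        size_t size = 1; /* room for the terminating NUL */
        char *buffer, *cursor;

        build_flag_names(NULL, &size, flags);   /* pass 1: size only */

        buffer = malloc(size);
        cursor = buffer;
        build_flag_names(&cursor, NULL, flags); /* pass 2: build */
        *cursor = '\0';

        printf("flags: %s\n", buffer);
        free(buffer);
        return 0;
    }
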
+
+static Eterm
+trace_info_tracee(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErlHeapFragment *bp;
+ Eterm *hp, res, key;
+ Uint sz;
+
+ *redsp = 1;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_undefined;
+
+ key = (Eterm) arg;
+ sz = 3;
+
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p)))
+ erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN,
+ &c_p->common);
+
+ switch (key) {
+ case am_tracer:
+
+ erts_build_tracer_to_term(NULL, NULL, &sz, ERTS_TRACER(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = erts_build_tracer_to_term(&hp, &bp->off_heap,
+ NULL, ERTS_TRACER(c_p));
+ if (res == am_false)
+ res = NIL;
+ break;
+
+ case am_flags:
+
+ build_trace_flags_term(NULL, &sz, ERTS_TRACE_FLAGS(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = build_trace_flags_term(&hp, NULL, ERTS_TRACE_FLAGS(c_p));
+ break;
+
+ default:
+
+ ERTS_INTERNAL_ERROR("Key not supported");
+ res = NIL;
+ bp = NULL;
+ hp = NULL;
+ break;
+ }
+
+ *redsp += 2;
+
+ res = TUPLE2(hp, key, res);
+ *bpp = bp;
+ return res;
+}
+
+static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
@@ -846,24 +965,19 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
erts_port_release(tracee);
} else if (is_internal_pid(pid_spec)) {
- Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCK_MAIN);
-
- if (tracee == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
+ Eterm ref;
- if (!tracee)
- return am_undefined;
+ if (key != am_flags && key != am_tracer)
+ goto error;
- if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
- erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
- &tracee->common);
+ ref = erts_proc_sig_send_rpc_request(p, pid_spec, !0,
+ trace_info_tracee,
+ (void *) key);
- tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
- trace_flags = ERTS_TRACE_FLAGS(tracee);
+ if (is_non_value(ref))
+ return am_undefined;
- if (tracee != p)
- erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
+ return ref;
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -873,48 +987,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
}
if (key == am_flags) {
- int num_flags = 21; /* MAXIMUM number of flags. */
- Uint needed = 3+2*num_flags;
- Eterm flag_list = NIL;
- Eterm* limit;
+ Eterm flag_list;
+ Uint sz = 3;
+ Eterm *hp;
-#define FLAG0(flag_mask,flag) \
- if (trace_flags & (flag_mask)) { flag_list = CONS(hp, flag, flag_list); hp += 2; } else {}
+ build_trace_flags_term(NULL, &sz, trace_flags);
+
+ hp = HAlloc(p, sz);
+
+ flag_list = build_trace_flags_term(&hp, NULL, trace_flags);
-#if defined(DEBUG)
- /*
- * Check num_flags if this assertion fires.
- */
-# define FLAG ASSERT(num_flags-- > 0); FLAG0
-#else
-# define FLAG FLAG0
-#endif
- hp = HAlloc(p, needed);
- limit = hp+needed;
- FLAG(F_NOW_TS, am_timestamp);
- FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
- FLAG(F_MON_TS, am_monotonic_timestamp);
- FLAG(F_TRACE_SEND, am_send);
- FLAG(F_TRACE_RECEIVE, am_receive);
- FLAG(F_TRACE_SOS, am_set_on_spawn);
- FLAG(F_TRACE_CALLS, am_call);
- FLAG(F_TRACE_PROCS, am_procs);
- FLAG(F_TRACE_SOS1, am_set_on_first_spawn);
- FLAG(F_TRACE_SOL, am_set_on_link);
- FLAG(F_TRACE_SOL1, am_set_on_first_link);
- FLAG(F_TRACE_SCHED, am_running);
- FLAG(F_TRACE_SCHED_EXIT, am_exiting);
- FLAG(F_TRACE_GC, am_garbage_collection);
- FLAG(F_TRACE_ARITY_ONLY, am_arity);
- FLAG(F_TRACE_RETURN_TO, am_return_to);
- FLAG(F_TRACE_SILENT, am_silent);
- FLAG(F_TRACE_SCHED_NO, am_scheduler_id);
- FLAG(F_TRACE_PORTS, am_ports);
- FLAG(F_TRACE_SCHED_PORTS, am_running_ports);
- FLAG(F_TRACE_SCHED_PROCS, am_running_procs);
-#undef FLAG0
-#undef FLAG
- HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
if (tracer == am_false)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 468e22ed59..3a29f8cf56 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -3643,7 +3643,7 @@ send_ets_transfer_message(Process *c_p, Process *proc,
hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
sender = c_p->common.id;
msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
- erts_queue_message(proc, *locks, mp, msg, sender);
+ erts_queue_proc_message(c_p, proc, *locks, mp, msg);
}
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index fc23b793c8..37d261d0df 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -638,6 +638,18 @@ static DMCGuardBif guard_tab[] =
DBIF_ALL
},
{
+ am_map_get,
+ &map_get_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_is_map_key,
+ &is_map_key_2,
+ 2,
+ DBIF_ALL
+ },
+ {
am_bit_size,
&bit_size_1,
1,
@@ -5737,5 +5749,3 @@ void db_match_dis(Binary *bp)
}
#endif /* DMC_DEBUG */
-
-
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index b498fd9cf9..a65dbbf42b 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -61,7 +61,7 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
-#if defined(DEBUG) && 1
+#if defined(DEBUG) && 0
# define HARDDEBUG 1
#endif
@@ -223,23 +223,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
5,
ERTS_ALC_T_GC_INFO_REQ)
-static ERTS_INLINE void
-ensure_sigq_roots_available(Process *p)
-{
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
- switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) {
- case F_OFF_HEAP_MSGQ_CHNG:
- case 0:
- erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
- erts_proc_sig_fetch(p);
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
- break;
- default:
- break;
- }
-}
-
-
/*
* Initialize GC global data.
*/
@@ -430,28 +413,25 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
{
int cost;
- if (p->flags & F_HIBERNATE_SCHED) {
+ if (p->flags & (F_HIBERNATE_SCHED|F_HIPE_RECV_LOCKED)) {
/*
* We just hibernated. We do *not* want to mess
* up the hibernation by an ordinary GC...
+ *
+ * OR
+ *
+ * We left a receive in HiPE with message
+ * queue lock locked, and we do not want to
+ * do a GC with message queue locked...
*/
return result;
}
-#ifdef HIPE
- if (p->hipe_smp.have_receive_locks) {
- /* Do not want to GC with message queue locked... */
- return result;
- }
-#endif
-
if (!p->mbuf) {
/* Must have GC:d in BIF call... invalidate live_hf_end */
live_hf_end = ERTS_INVALID_HFRAG_PTR;
}
- ensure_sigq_roots_available(p);
-
if (is_non_value(result)) {
if (p->freason == TRAP) {
#ifdef HIPE
@@ -895,11 +875,8 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds;
- int reds_left;
- ensure_sigq_roots_available(p);
- reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
- reds_left = ERTS_REDS_LEFT(p, fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
+ int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
@@ -909,9 +886,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds;
- ensure_sigq_roots_available(p);
- reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
@@ -1137,8 +1112,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* First an ordinary major collection...
*/
- ensure_sigq_roots_available(p);
-
p->flags |= F_NEED_FULLSWEEP;
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 179822cc0b..57c6c10c7f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -37,7 +37,7 @@
#include "erl_mseg.h"
#include "erl_threads.h"
#include "erl_hl_timer.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "erl_printf_term.h"
#include "erl_misc_utils.h"
#include "packet_parser.h"
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
deleted file mode 100644
index 2f70e7996e..0000000000
--- a/erts/emulator/beam/erl_instrument.c
+++ /dev/null
@@ -1,1257 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "global.h"
-#include "big.h"
-#include "erl_instrument.h"
-#include "erl_threads.h"
-
-typedef union { long l; double d; } Align_t;
-
-typedef struct {
- Uint size;
-#ifdef VALGRIND
- void* valgrind_leak_suppressor;
-#endif
- Align_t mem[1];
-} StatBlock_t;
-
-#define STAT_BLOCK_HEADER_SIZE (sizeof(StatBlock_t) - sizeof(Align_t))
-
-typedef struct MapStatBlock_t_ MapStatBlock_t;
-struct MapStatBlock_t_ {
- Uint size;
- ErtsAlcType_t type_no;
- Eterm pid;
- MapStatBlock_t *prev;
- MapStatBlock_t *next;
- Align_t mem[1];
-};
-
-#define MAP_STAT_BLOCK_HEADER_SIZE (sizeof(MapStatBlock_t) - sizeof(Align_t))
-
-typedef struct {
- Uint size;
- Uint max_size;
- Uint max_size_ever;
-
- Uint blocks;
- Uint max_blocks;
- Uint max_blocks_ever;
-} Stat_t;
-
-static erts_mtx_t instr_mutex;
-static erts_mtx_t instr_x_mutex;
-
-int erts_instr_memory_map;
-int erts_instr_stat;
-
-static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
-
-struct stats_ {
- Stat_t tot;
- Stat_t a[ERTS_ALC_A_MAX+1];
- Stat_t *ap[ERTS_ALC_A_MAX+1];
- Stat_t c[ERTS_ALC_C_MAX+1];
- Stat_t n[ERTS_ALC_N_MAX+1];
-};
-
-static struct stats_ *stats;
-
-static MapStatBlock_t *mem_anchor;
-
-static Eterm *am_tot;
-static Eterm *am_n;
-static Eterm *am_a;
-static Eterm *am_c;
-
-static int atoms_initialized;
-
-static struct {
- Eterm total;
- Eterm allocators;
- Eterm classes;
- Eterm types;
- Eterm sizes;
- Eterm blocks;
- Eterm instr_hdr;
-#ifdef DEBUG
- Eterm end_of_atoms;
-#endif
-} am;
-
-static void ERTS_INLINE atom_init(Eterm *atom, const char *name)
-{
- *atom = am_atom_put((char *) name, sys_strlen(name));
-}
-#define AM_INIT(AM) atom_init(&am.AM, #AM)
-
-static void
-init_atoms(void)
-{
-#ifdef DEBUG
- Eterm *atom;
- for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
- *atom = THE_NON_VALUE;
- }
-#endif
-
- AM_INIT(total);
- AM_INIT(allocators);
- AM_INIT(classes);
- AM_INIT(types);
- AM_INIT(sizes);
- AM_INIT(blocks);
- AM_INIT(instr_hdr);
-
-#ifdef DEBUG
- for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
- ASSERT(*atom != THE_NON_VALUE);
- }
-#endif
-
- atoms_initialized = 1;
-}
-
-#undef AM_INIT
-
-static void
-init_am_tot(void)
-{
- am_tot = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- sizeof(Eterm));
- atom_init(am_tot, "total");
-}
-
-
-static void
-init_am_n(void)
-{
- int i;
- am_n = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_N_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
- atom_init(&am_n[i], ERTS_ALC_N2TD(i));
- }
-
-}
-
-static void
-init_am_c(void)
-{
- int i;
- am_c = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_C_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
- atom_init(&am_c[i], ERTS_ALC_C2CD(i));
- }
-
-}
-
-static void
-init_am_a(void)
-{
- int i;
- am_a = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_A_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- atom_init(&am_a[i], ERTS_ALC_A2AD(i));
- }
-
-}
-
-static ERTS_INLINE void
-stat_upd_alloc(ErtsAlcType_t n, Uint size)
-{
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- stats->ap[a]->size += size;
- if (stats->ap[a]->max_size < stats->ap[a]->size)
- stats->ap[a]->max_size = stats->ap[a]->size;
-
- stats->c[c].size += size;
- if (stats->c[c].max_size < stats->c[c].size)
- stats->c[c].max_size = stats->c[c].size;
-
- stats->n[n].size += size;
- if (stats->n[n].max_size < stats->n[n].size)
- stats->n[n].max_size = stats->n[n].size;
-
- stats->tot.size += size;
- if (stats->tot.max_size < stats->tot.size)
- stats->tot.max_size = stats->tot.size;
-
- stats->ap[a]->blocks++;
- if (stats->ap[a]->max_blocks < stats->ap[a]->blocks)
- stats->ap[a]->max_blocks = stats->ap[a]->blocks;
-
- stats->c[c].blocks++;
- if (stats->c[c].max_blocks < stats->c[c].blocks)
- stats->c[c].max_blocks = stats->c[c].blocks;
-
- stats->n[n].blocks++;
- if (stats->n[n].max_blocks < stats->n[n].blocks)
- stats->n[n].max_blocks = stats->n[n].blocks;
-
- stats->tot.blocks++;
- if (stats->tot.max_blocks < stats->tot.blocks)
- stats->tot.max_blocks = stats->tot.blocks;
-
-}
-
-
-static ERTS_INLINE void
-stat_upd_free(ErtsAlcType_t n, Uint size)
-{
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- ASSERT(stats->ap[a]->size >= size);
- stats->ap[a]->size -= size;
-
- ASSERT(stats->c[c].size >= size);
- stats->c[c].size -= size;
-
- ASSERT(stats->n[n].size >= size);
- stats->n[n].size -= size;
-
- ASSERT(stats->tot.size >= size);
- stats->tot.size -= size;
-
- ASSERT(stats->ap[a]->blocks > 0);
- stats->ap[a]->blocks--;
-
- ASSERT(stats->c[c].blocks > 0);
- stats->c[c].blocks--;
-
- ASSERT(stats->n[n].blocks > 0);
- stats->n[n].blocks--;
-
- ASSERT(stats->tot.blocks > 0);
- stats->tot.blocks--;
-
-}
-
-
-static ERTS_INLINE void
-stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size)
-{
- if (old_size)
- stat_upd_free(n, old_size);
- stat_upd_alloc(n, size);
-}
-
-/*
- * stat instrumentation callback functions
- */
-
-static void stat_pre_lock(void)
-{
- erts_mtx_lock(&instr_mutex);
-}
-
-static void stat_pre_unlock(void)
-{
- erts_mtx_unlock(&instr_mutex);
-}
-
-static ErtsAllocatorWrapper_t instr_wrapper;
-
-static void *
-stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint ssize;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- ssize = size + STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->alloc)(n, real_af->extra, ssize);
- if (res) {
- stat_upd_alloc(n, size);
- ((StatBlock_t *) res)->size = size;
-#ifdef VALGRIND
- /* Suppress "possibly leaks" by storing an actual dummy pointer
- to the _start_ of the allocated block.*/
- ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
-#endif
- res = (void *) ((StatBlock_t *) res)->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void *
-stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint old_size;
- Uint ssize;
- void *sptr;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
- old_size = ((StatBlock_t *) sptr)->size;
- }
- else {
- sptr = NULL;
- old_size = 0;
- }
-
- ssize = size + STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->realloc)(n, real_af->extra, sptr, ssize);
- if (res) {
- stat_upd_realloc(n, size, old_size);
- ((StatBlock_t *) res)->size = size;
-#ifdef VALGRIND
- ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
-#endif
- res = (void *) ((StatBlock_t *) res)->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void
-stat_free(ErtsAlcType_t n, void *extra, void *ptr)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- void *sptr;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
- stat_upd_free(n, ((StatBlock_t *) sptr)->size);
- }
- else {
- sptr = NULL;
- }
-
- (*real_af->free)(n, real_af->extra, sptr);
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
-}
-
-/*
- * map stat instrumentation callback functions
- */
-
-static void map_stat_pre_lock(void)
-{
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
-}
-
-static void map_stat_pre_unlock(void)
-{
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
-}
-
-static void *
-map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint msize;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->alloc)(n, real_af->extra, msize);
- if (res) {
- MapStatBlock_t *mb = (MapStatBlock_t *) res;
- stat_upd_alloc(n, size);
-
- mb->size = size;
- mb->type_no = n;
- mb->pid = erts_get_current_pid();
-
- mb->prev = NULL;
- mb->next = mem_anchor;
- if (mem_anchor)
- mem_anchor->prev = mb;
- mem_anchor = mb;
-
- res = (void *) mb->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void *
-map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint old_size;
- Uint msize;
- void *mptr;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
- old_size = ((MapStatBlock_t *) mptr)->size;
- }
- else {
- mptr = NULL;
- old_size = 0;
- }
-
- msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->realloc)(n, real_af->extra, mptr, msize);
- if (res) {
- MapStatBlock_t *mb = (MapStatBlock_t *) res;
-
- mb->size = size;
- mb->type_no = n;
- mb->pid = erts_get_current_pid();
-
- stat_upd_realloc(n, size, old_size);
-
- if (mptr != res) {
-
- if (mptr) {
- if (mb->prev)
- mb->prev->next = mb;
- else {
- ASSERT(mem_anchor == (MapStatBlock_t *) mptr);
- mem_anchor = mb;
- }
- if (mb->next)
- mb->next->prev = mb;
- }
- else {
- mb->prev = NULL;
- mb->next = mem_anchor;
- if (mem_anchor)
- mem_anchor->prev = mb;
- mem_anchor = mb;
- }
-
- }
-
- res = (void *) mb->mem;
- }
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
- }
-
- return res;
-}
-
-static void
-map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- void *mptr;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- MapStatBlock_t *mb;
-
- mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
- mb = (MapStatBlock_t *) mptr;
-
- stat_upd_free(n, mb->size);
-
- if (mb->prev)
- mb->prev->next = mb->next;
- else
- mem_anchor = mb->next;
- if (mb->next)
- mb->next->prev = mb->prev;
- }
- else {
- mptr = NULL;
- }
-
- (*real_af->free)(n, real_af->extra, mptr);
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
- }
-
-}
-
-static void dump_memory_map_to_stream(fmtfn_t to, void* to_arg)
-{
- ErtsAlcType_t n;
- MapStatBlock_t *bp;
- int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock) {
- ASSERT(!erts_is_allctr_wrapper_prelocked());
- erts_mtx_lock(&instr_mutex);
- }
-
- /* Write header */
-
- erts_cbprintf(to, to_arg,
- "{instr_hdr,\n"
- " %lu,\n"
- " %lu,\n"
- " {",
- (unsigned long) ERTS_INSTR_VSN,
- (unsigned long) MAP_STAT_BLOCK_HEADER_SIZE);
-
-#if ERTS_ALC_N_MIN != 1
-#error ERTS_ALC_N_MIN is not 1
-#endif
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
- const char *astr;
-
- if (erts_allctrs_info[a].enabled)
- astr = ERTS_ALC_A2AD(a);
- else
- astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
-
- erts_cbprintf(to, to_arg,
- "%s{%s,%s,%s}%s",
- (n == ERTS_ALC_N_MIN) ? "" : " ",
- ERTS_ALC_N2TD(n),
- astr,
- ERTS_ALC_C2CD(c),
- (n == ERTS_ALC_N_MAX) ? "" : ",\n");
- }
-
- erts_cbprintf(to, to_arg, "}}.\n");
-
- /* Write memory data */
- for (bp = mem_anchor; bp; bp = bp->next) {
- if (is_internal_pid(bp->pid))
- erts_cbprintf(to, to_arg,
- "{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
- (UWord) bp->type_no,
- (UWord) bp->mem,
- (UWord) bp->size,
- (UWord) pid_channel_no(bp->pid),
- (UWord) pid_number(bp->pid),
- (UWord) pid_serial(bp->pid));
- else
- erts_cbprintf(to, to_arg,
- "{%lu, %lu, %lu, undefined}.\n",
- (UWord) bp->type_no,
- (UWord) bp->mem,
- (UWord) bp->size);
- }
-
- if (lock)
- erts_mtx_unlock(&instr_mutex);
-}
-
-int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg)
-{
- if (!erts_instr_memory_map)
- return 0;
-
- dump_memory_map_to_stream(to, to_arg);
- return 1;
-}
-
-int erts_instr_dump_memory_map(const char *name)
-{
- int fd;
-
- if (!erts_instr_memory_map)
- return 0;
-
- fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0640);
- if (fd < 0)
- return 0;
-
- dump_memory_map_to_stream(erts_write_fd, (void*)&fd);
-
- close(fd);
- return 1;
-}
-
-Eterm erts_instr_get_memory_map(Process *proc)
-{
- MapStatBlock_t *org_mem_anchor;
- Eterm hdr_tuple, md_list, res;
- Eterm *hp;
- Uint hsz;
- MapStatBlock_t *bp;
-#ifdef DEBUG
- Eterm *end_hp;
-#endif
-
- if (!erts_instr_memory_map)
- return am_false;
-
- if (!atoms_initialized)
- init_atoms();
- if (!am_n)
- init_am_n();
- if (!am_c)
- init_am_c();
- if (!am_a)
- init_am_a();
-
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
-
- /* Header size */
- hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4);
-
- /* Memory data list */
- for (bp = mem_anchor; bp; bp = bp->next) {
- if (is_internal_pid(bp->pid)) {
-#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
- if (internal_pid_number(bp->pid) > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-#endif
-#if (_PID_SER_SIZE - 1 > MAX_SMALL)
- if (internal_pid_serial(bp->pid) > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-#endif
- hsz += 4;
- }
-
- if ((UWord) bp->mem > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
- if (bp->size > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-
- hsz += 5 + 2;
- }
-
- hsz += 3; /* Root tuple */
-
- org_mem_anchor = mem_anchor;
- mem_anchor = NULL;
-
- erts_mtx_unlock(&instr_mutex);
-
- hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */
-
- erts_mtx_lock(&instr_mutex);
-
-#ifdef DEBUG
- end_hp = hp + hsz;
-#endif
-
- { /* Build header */
- ErtsAlcType_t n;
- Eterm type_map;
- Uint *hp2 = hp;
-#ifdef DEBUG
- Uint *hp2_end;
-#endif
-
- hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4;
-
-#ifdef DEBUG
- hp2_end = hp;
-#endif
-
- type_map = make_tuple(hp);
- *(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN);
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- if (!erts_allctrs_info[a].enabled)
- a = ERTS_ALC_A_SYSTEM;
-
- *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]);
- hp2 += 4;
- }
-
- ASSERT(hp2 == hp2_end);
-
- hdr_tuple = TUPLE4(hp,
- am.instr_hdr,
- make_small(ERTS_INSTR_VSN),
- make_small(MAP_STAT_BLOCK_HEADER_SIZE),
- type_map);
-
- hp += 5;
- }
-
- /* Build memory data list */
-
- for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) {
- Eterm tuple;
- Eterm type;
- Eterm ptr;
- Eterm size;
- Eterm pid;
-
- if (is_not_internal_pid(bp->pid))
- pid = am_undefined;
- else {
- Eterm c;
- Eterm n;
- Eterm s;
-
-#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL)
-#error Oversized internal channel number
-#endif
- c = make_small(ERST_INTERNAL_CHANNEL_NO);
-
-#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
- if (internal_pid_number(bp->pid) > MAX_SMALL) {
- n = uint_to_big(internal_pid_number(bp->pid), hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
-#endif
- n = make_small(internal_pid_number(bp->pid));
-
-#if (_PID_SER_SIZE - 1 > MAX_SMALL)
- if (internal_pid_serial(bp->pid) > MAX_SMALL) {
- s = uint_to_big(internal_pid_serial(bp->pid), hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
-#endif
- s = make_small(internal_pid_serial(bp->pid));
- pid = TUPLE3(hp, c, n, s);
- hp += 4;
- }
-
-
-#if ERTS_ALC_N_MAX > MAX_SMALL
-#error Oversized memory type number
-#endif
- type = make_small(bp->type_no);
-
- if ((UWord) bp->mem > MAX_SMALL) {
- ptr = uint_to_big((UWord) bp->mem, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
- ptr = make_small((UWord) bp->mem);
-
- if (bp->size > MAX_SMALL) {
- size = uint_to_big(bp->size, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
- size = make_small(bp->size);
-
- tuple = TUPLE4(hp, type, ptr, size, pid);
- hp += 5;
-
- md_list = CONS(hp, tuple, md_list);
- hp += 2;
- }
-
- res = TUPLE2(hp, hdr_tuple, md_list);
-
- ASSERT(hp + 3 == end_hp);
-
- if (mem_anchor) {
- for (bp = mem_anchor; bp->next; bp = bp->next)
- ;
- ASSERT(org_mem_anchor);
- org_mem_anchor->prev = bp;
- bp->next = org_mem_anchor;
- }
- else {
- mem_anchor = org_mem_anchor;
- }
-
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
-
- return res;
-}
-
-static ERTS_INLINE void
-begin_new_max_period(Stat_t *stat, int min, int max)
-{
- int i;
- for (i = min; i <= max; i++) {
- stat[i].max_size = stat[i].size;
- stat[i].max_blocks = stat[i].blocks;
- }
-}
-
-static ERTS_INLINE void
-update_max_ever_values(Stat_t *stat, int min, int max)
-{
- int i;
- for (i = min; i <= max; i++) {
- if (stat[i].max_size_ever < stat[i].max_size)
- stat[i].max_size_ever = stat[i].max_size;
- if (stat[i].max_blocks_ever < stat[i].max_blocks)
- stat[i].max_blocks_ever = stat[i].max_blocks;
- }
-}
-
-#define bld_string erts_bld_string
-#define bld_tuple erts_bld_tuple
-#define bld_tuplev erts_bld_tuplev
-#define bld_list erts_bld_list
-#define bld_2tup_list erts_bld_2tup_list
-#define bld_uint erts_bld_uint
-
-Eterm
-erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period)
-{
- int i, len, max, min, allctr;
- Eterm *names, *values, res;
- Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp;
- Stat_t *stat_src, *stat;
-
- if (!erts_instr_stat)
- return am_false;
-
- if (!atoms_initialized)
- init_atoms();
-
- if (what == am.total) {
- min = 0;
- max = 0;
- allctr = 0;
- stat_size = sizeof(Stat_t);
- stat_src = &stats->tot;
- if (!am_tot)
- init_am_tot();
- names = am_tot;
- }
- else if (what == am.allocators) {
- min = ERTS_ALC_A_MIN;
- max = ERTS_ALC_A_MAX;
- allctr = 1;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_A_MAX+1);
- stat_src = stats->a;
- if (!am_a)
- init_am_a();
- names = am_a;
- }
- else if (what == am.classes) {
- min = ERTS_ALC_C_MIN;
- max = ERTS_ALC_C_MAX;
- allctr = 0;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_C_MAX+1);
- stat_src = stats->c;
- if (!am_c)
- init_am_c();
- names = &am_c[ERTS_ALC_C_MIN];
- }
- else if (what == am.types) {
- min = ERTS_ALC_N_MIN;
- max = ERTS_ALC_N_MAX;
- allctr = 0;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_N_MAX+1);
- stat_src = stats->n;
- if (!am_n)
- init_am_n();
- names = &am_n[ERTS_ALC_N_MIN];
- }
- else {
- return THE_NON_VALUE;
- }
-
- stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size);
-
- arr_size = (max - min + 1)*sizeof(Eterm);
-
- if (allctr)
- names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
-
- values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
-
- erts_mtx_lock(&instr_mutex);
-
- update_max_ever_values(stat_src, min, max);
-
- sys_memcpy((void *) stat, (void *) stat_src, stat_size);
-
- if (begin_max_period)
- begin_new_max_period(stat_src, min, max);
-
- erts_mtx_unlock(&instr_mutex);
-
- hsz = 0;
- hszp = &hsz;
- hpp = NULL;
-
- restart_bld:
-
- len = 0;
- for (i = min; i <= max; i++) {
- if (!allctr || erts_allctrs_info[i].enabled) {
- Eterm s[2];
-
- if (allctr)
- names[len] = am_a[i];
-
- s[0] = bld_tuple(hpp, hszp, 4,
- am.sizes,
- bld_uint(hpp, hszp, stat[i].size),
- bld_uint(hpp, hszp, stat[i].max_size),
- bld_uint(hpp, hszp, stat[i].max_size_ever));
-
- s[1] = bld_tuple(hpp, hszp, 4,
- am.blocks,
- bld_uint(hpp, hszp, stat[i].blocks),
- bld_uint(hpp, hszp, stat[i].max_blocks),
- bld_uint(hpp, hszp, stat[i].max_blocks_ever));
-
- values[len] = bld_list(hpp, hszp, 2, s);
-
- len++;
- }
- }
-
- res = bld_2tup_list(hpp, hszp, len, names, values);
-
- if (!hpp) {
- hp = HAlloc(proc, hsz);
- hszp = NULL;
- hpp = &hp;
- goto restart_bld;
- }
-
- erts_free(ERTS_ALC_T_TMP, (void *) stat);
- erts_free(ERTS_ALC_T_TMP, (void *) values);
- if (allctr)
- erts_free(ERTS_ALC_T_TMP, (void *) names);
-
- return res;
-}
-
-static void
-dump_stat_to_stream(fmtfn_t to, void* to_arg, int begin_max_period)
-{
- ErtsAlcType_t i, a_max, a_min;
-
- erts_mtx_lock(&instr_mutex);
-
- erts_cbprintf(to, to_arg,
- "{instr_vsn,%lu}.\n",
- (unsigned long) ERTS_INSTR_VSN);
-
- update_max_ever_values(&stats->tot, 0, 0);
-
- erts_cbprintf(to, to_arg,
- "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
- (UWord) stats->tot.size,
- (UWord) stats->tot.max_size,
- (UWord) stats->tot.max_size_ever,
- (UWord) stats->tot.blocks,
- (UWord) stats->tot.max_blocks,
- (UWord) stats->tot.max_blocks_ever);
-
- a_max = 0;
- a_min = ~0;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled) {
- if (a_min > i)
- a_min = i;
- if (a_max < i)
- a_max = i;
- }
- }
-
- ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX);
- ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX);
- ASSERT(a_min <= a_max);
-
- update_max_ever_values(stats->a, a_min, a_max);
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled) {
- erts_cbprintf(to, to_arg,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == a_min ? "{allocators,\n [" : " ",
- ERTS_ALC_A2AD(i),
- (UWord) stats->a[i].size,
- (UWord) stats->a[i].max_size,
- (UWord) stats->a[i].max_size_ever,
- (UWord) stats->a[i].blocks,
- (UWord) stats->a[i].max_blocks,
- (UWord) stats->a[i].max_blocks_ever,
- i == a_max ? "]}.\n" : ",\n");
- }
- }
-
- update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
-
- for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
- erts_cbprintf(to, to_arg,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ",
- ERTS_ALC_C2CD(i),
- (UWord) stats->c[i].size,
- (UWord) stats->c[i].max_size,
- (UWord) stats->c[i].max_size_ever,
- (UWord) stats->c[i].blocks,
- (UWord) stats->c[i].max_blocks,
- (UWord) stats->c[i].max_blocks_ever,
- i == ERTS_ALC_C_MAX ? "]}.\n" : ",\n" );
- }
-
- update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
-
- for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
- erts_cbprintf(to, to_arg,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == ERTS_ALC_N_MIN ? "{types,\n [" : " ",
- ERTS_ALC_N2TD(i),
- (UWord) stats->n[i].size,
- (UWord) stats->n[i].max_size,
- (UWord) stats->n[i].max_size_ever,
- (UWord) stats->n[i].blocks,
- (UWord) stats->n[i].max_blocks,
- (UWord) stats->n[i].max_blocks_ever,
- i == ERTS_ALC_N_MAX ? "]}.\n" : ",\n" );
- }
-
- if (begin_max_period) {
- begin_new_max_period(&stats->tot, 0, 0);
- begin_new_max_period(stats->a, a_min, a_max);
- begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
- begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
- }
-
- erts_mtx_unlock(&instr_mutex);
-
-}
-
-int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period)
-{
- if (!erts_instr_stat)
- return 0;
-
- dump_stat_to_stream(to, to_arg, begin_max_period);
- return 1;
-}
-
-int erts_instr_dump_stat(const char *name, int begin_max_period)
-{
- int fd;
-
- if (!erts_instr_stat)
- return 0;
-
- fd = open(name, O_WRONLY | O_CREAT | O_TRUNC,0640);
- if (fd < 0)
- return 0;
-
- dump_stat_to_stream(erts_write_fd, (void*)&fd, begin_max_period);
-
- close(fd);
- return 1;
-}
-
-
-Uint
-erts_instr_get_total(void)
-{
- return erts_instr_stat ? stats->tot.size : 0;
-}
-
-Uint
-erts_instr_get_max_total(void)
-{
- if (erts_instr_stat) {
- update_max_ever_values(&stats->tot, 0, 0);
- return stats->tot.max_size_ever;
- }
- return 0;
-}
-
-Eterm
-erts_instr_get_type_info(Process *proc)
-{
- Eterm res, *tpls;
- Uint hsz, *hszp, *hp, **hpp;
- ErtsAlcType_t n;
-
- if (!am_n)
- init_am_n();
- if (!am_a)
- init_am_a();
- if (!am_c)
- init_am_c();
-
- tpls = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
- (ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1)
- * sizeof(Eterm));
- hsz = 0;
- hszp = &hsz;
- hpp = NULL;
-
- restart_bld:
-
-#if ERTS_ALC_N_MIN != 1
-#error ERTS_ALC_N_MIN is not 1
-#endif
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- if (!erts_allctrs_info[a].enabled)
- a = ERTS_ALC_A_SYSTEM;
-
- tpls[n - ERTS_ALC_N_MIN]
- = bld_tuple(hpp, hszp, 3, am_n[n], am_a[a], am_c[c]);
- }
-
- res = bld_tuplev(hpp, hszp, ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1, tpls);
-
- if (!hpp) {
- hp = HAlloc(proc, hsz);
- hszp = NULL;
- hpp = &hp;
- goto restart_bld;
- }
-
- erts_free(ERTS_ALC_T_TMP, tpls);
-
- return res;
-}
-
-Uint
-erts_instr_init(int stat, int map_stat)
-{
- Uint extra_sz;
- int i;
-
- am_tot = NULL;
- am_n = NULL;
- am_c = NULL;
- am_a = NULL;
-
- erts_instr_memory_map = 0;
- erts_instr_stat = 0;
- atoms_initialized = 0;
-
- if (!stat && !map_stat)
- return 0;
-
- stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
-
- erts_mtx_init(&instr_mutex, "instr", NIL,
- ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
-
- mem_anchor = NULL;
-
- /* Install instrumentation functions */
- ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
-
- sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
-
- sys_memzero((void *) &stats->tot, sizeof(Stat_t));
- sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1));
- sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1));
- sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled)
- stats->ap[i] = &stats->a[i];
- else
- stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM];
- }
-
- if (map_stat) {
-
- erts_mtx_init(&instr_x_mutex, "instr_x", NIL,
- ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
-
- erts_instr_memory_map = 1;
- erts_instr_stat = 1;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- erts_allctrs[i].alloc = map_stat_alloc;
- erts_allctrs[i].realloc = map_stat_realloc;
- erts_allctrs[i].free = map_stat_free;
- erts_allctrs[i].extra = (void *) &real_allctrs[i];
- }
- instr_wrapper.lock = map_stat_pre_lock;
- instr_wrapper.unlock = map_stat_pre_unlock;
- extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
- }
- else {
- erts_instr_stat = 1;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- erts_allctrs[i].alloc = stat_alloc;
- erts_allctrs[i].realloc = stat_realloc;
- erts_allctrs[i].free = stat_free;
- erts_allctrs[i].extra = (void *) &real_allctrs[i];
- }
- instr_wrapper.lock = stat_pre_lock;
- instr_wrapper.unlock = stat_pre_unlock;
- extra_sz = STAT_BLOCK_HEADER_SIZE;
- }
- erts_allctr_wrapper_prelock_init(&instr_wrapper);
- return extra_sz;
-}
-
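The removed erts_instr_get_stat() and erts_instr_get_type_info() above both rely on a two-pass heap build: the loop first runs with a NULL heap pointer and only accumulates the required size via hszp, then HAlloc is called once and the same loop is replayed through the restart_bld label to emit the terms. A minimal standalone sketch of that pattern in plain C (hypothetical names, not ERTS code):

#include <stdio.h>
#include <stdlib.h>

static size_t emit(unsigned *out, size_t pos, unsigned val)
{
    if (out)                  /* build pass: actually write */
        out[pos] = val;
    return pos + 1;           /* both passes: advance position */
}

int main(void)
{
    const unsigned stats[] = {3, 1, 4, 1, 5};
    const size_t nstats = sizeof(stats) / sizeof(stats[0]);
    unsigned *heap = NULL;
    size_t need, i;

restart_bld:
    need = 0;
    for (i = 0; i < nstats; i++)
        need = emit(heap, need, stats[i] * 2);

    if (!heap) {              /* sizing pass done: allocate once, rebuild */
        heap = malloc(need * sizeof(unsigned));
        if (!heap)
            return 1;
        goto restart_bld;
    }

    for (i = 0; i < need; i++)
        printf("%u\n", heap[i]);
    free(heap);
    return 0;
}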
diff --git a/erts/emulator/beam/erl_instrument.h b/erts/emulator/beam/erl_instrument.h
deleted file mode 100644
index 351172b2fa..0000000000
--- a/erts/emulator/beam/erl_instrument.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef ERL_INSTRUMENT_H__
-#define ERL_INSTRUMENT_H__
-
-#include "erl_mtrace.h"
-
-#define ERTS_INSTR_VSN 2
-
-extern int erts_instr_memory_map;
-extern int erts_instr_stat;
-
-Uint erts_instr_init(int stat, int map_stat);
-int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg);
-int erts_instr_dump_memory_map(const char *name);
-Eterm erts_instr_get_memory_map(Process *process);
-int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period);
-int erts_instr_dump_stat(const char *name, int begin_max_period);
-Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period);
-Eterm erts_instr_get_type_info(Process *proc);
-Uint erts_instr_get_total(void);
-Uint erts_instr_get_max_total(void);
-
-#endif
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index d66410367b..463ae898a3 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -217,6 +217,14 @@ typedef struct {
static lc_matrix_t tot_lc_matrix;
+#define ERTS_LC_FB_CHUNK_SIZE 10
+
+typedef struct lc_alloc_chunk_t_ lc_alloc_chunk_t;
+struct lc_alloc_chunk_t_ {
+ lc_alloc_chunk_t* next;
+ lc_free_block_t array[ERTS_LC_FB_CHUNK_SIZE];
+};
+
typedef struct lc_thread_t_ lc_thread_t;
struct lc_thread_t_ {
char *thread_name;
@@ -227,6 +235,7 @@ struct lc_thread_t_ {
lc_locked_lock_list_t locked;
lc_locked_lock_list_t required;
lc_free_block_t *free_blocks;
+ lc_alloc_chunk_t *chunks;
lc_matrix_t matrix;
};
@@ -235,14 +244,6 @@ static ethr_tsd_key locks_key;
static lc_thread_t *lc_threads = NULL;
static ethr_spinlock_t lc_threads_lock;
-
-#ifdef ERTS_LC_STATIC_ALLOC
-#define ERTS_LC_FB_CHUNK_SIZE 10000
-#else
-#define ERTS_LC_FB_CHUNK_SIZE 10
-#endif
-
-
static ERTS_INLINE void
lc_lock_threads(void)
{
@@ -268,12 +269,16 @@ static ERTS_INLINE void lc_free(lc_thread_t* thr, lc_locked_lock_t *p)
static lc_locked_lock_t *lc_core_alloc(lc_thread_t* thr)
{
int i;
- lc_free_block_t *fbs;
- fbs = (lc_free_block_t *) malloc(sizeof(lc_free_block_t)
- * ERTS_LC_FB_CHUNK_SIZE);
- if (!fbs) {
+ lc_alloc_chunk_t* chunk;
+ lc_free_block_t* fbs;
+ chunk = (lc_alloc_chunk_t*) malloc(sizeof(lc_alloc_chunk_t));
+ if (!chunk) {
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
+ chunk->next = thr->chunks;
+ thr->chunks = chunk;
+
+ fbs = chunk->array;
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
sys_memset((void *) &fbs[i], 0xdf, sizeof(lc_free_block_t));
@@ -321,6 +326,7 @@ create_thread_data(char *thread_name)
thr->locked.last = NULL;
thr->prev = NULL;
thr->free_blocks = NULL;
+ thr->chunks = NULL;
sys_memzero(&thr->matrix, sizeof(thr->matrix));
lc_lock_threads();
@@ -336,7 +342,7 @@ create_thread_data(char *thread_name)
static void collect_matrix(lc_matrix_t*);
static void
-destroy_locked_locks(lc_thread_t *thr)
+destroy_thread_data(lc_thread_t *thr)
{
ASSERT(thr->thread_name);
free((void *) thr->thread_name);
@@ -359,8 +365,13 @@ destroy_locked_locks(lc_thread_t *thr)
lc_unlock_threads();
- free((void *) thr);
+ while (thr->chunks) {
+ lc_alloc_chunk_t* free_me = thr->chunks;
+ thr->chunks = thr->chunks->next;
+ free(free_me);
+ }
+ free((void *) thr);
}
static ERTS_INLINE lc_thread_t *
@@ -615,7 +626,7 @@ thread_exit_handler(void)
print_curr_locks(thr);
lc_abort();
}
- destroy_locked_locks(thr);
+ destroy_thread_data(thr);
/* erts_tsd_set(locks_key, NULL); */
}
}
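The lock-checker change above stops malloc'ing free-block arrays that were never reclaimed: blocks are now carved out of fixed-size chunks, each chunk is linked onto the owning thread's chunks list, and destroy_thread_data() walks that list and frees it when the thread exits. A standalone sketch of the scheme in plain C (hypothetical names, not the ERTS structures):

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 10                     /* cf. ERTS_LC_FB_CHUNK_SIZE */

typedef struct block { struct block *next_free; } block_t;

typedef struct chunk {
    struct chunk *next;
    block_t blocks[CHUNK_SIZE];
} chunk_t;

typedef struct {
    chunk_t *chunks;       /* every chunk ever allocated by this thread */
    block_t *free_blocks;  /* blocks currently free */
} pool_t;

static block_t *pool_alloc(pool_t *p)
{
    block_t *b;
    if (!p->free_blocks) {                /* refill from a fresh chunk */
        chunk_t *c = malloc(sizeof(chunk_t));
        int i;
        if (!c)
            abort();
        c->next = p->chunks;
        p->chunks = c;
        for (i = 0; i < CHUNK_SIZE; i++) {
            c->blocks[i].next_free = p->free_blocks;
            p->free_blocks = &c->blocks[i];
        }
    }
    b = p->free_blocks;
    p->free_blocks = b->next_free;
    return b;
}

static void pool_destroy(pool_t *p)       /* cf. destroy_thread_data() */
{
    while (p->chunks) {
        chunk_t *free_me = p->chunks;
        p->chunks = p->chunks->next;
        free(free_me);
    }
    p->free_blocks = NULL;
}

int main(void)
{
    pool_t pool = { NULL, NULL };
    printf("%p\n", (void *) pool_alloc(&pool));
    pool_destroy(&pool);
    return 0;
}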
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 4ec6960997..48154b5d0f 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -43,7 +43,9 @@
*
* DONE:
* - erlang:is_map/1
+ * - erlang:is_map_key/2
* - erlang:map_size/1
+ * - erlang:map_get/2
*
* - maps:find/2
* - maps:from_list/1
@@ -202,7 +204,7 @@ BIF_RETTYPE maps_find_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:get/2
+/* maps:get/2 and erlang:map_get/2
* return value if key *matches* a key in the map
* exception badkey if none matches
*/
@@ -223,6 +225,10 @@ BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
+BIF_RETTYPE map_get_2(BIF_ALIST_2) {
+ BIF_RET(maps_get_2(BIF_CALL_ARGS));
+}
+
/* maps:from_list/1
* List may be unsorted [{K,V}]
*/
@@ -914,7 +920,7 @@ static int hxnodecmp(hxnode_t *a, hxnode_t *b) {
return -1;
}
-/* maps:is_key/2 */
+/* maps:is_key/2 and erlang:is_map_key/2 */
BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
@@ -924,6 +930,10 @@ BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
+BIF_RETTYPE is_map_key_2(BIF_ALIST_2) {
+ BIF_RET(maps_is_key_2(BIF_CALL_ARGS));
+}
+
/* maps:keys/1 */
BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
@@ -3048,7 +3058,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
Uint path_length = 0;
Uint *path_rest = NULL;
int i, elems, orig_elems;
- Eterm node = map, res, *path_ptr = NULL, *hp;
+ Eterm node = map, res, *patch_ptr = NULL, *hp;
/* A stack WSTACK is used when traversing the hashmap.
* It contains: node, idx, sz, ptr
@@ -3107,15 +3117,22 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
}
if (type == iterator) {
- /* iterator uses the format {K, V, {K, V, {K, V, [Path | Map]}}},
- * so each element is 4 words large */
+ /*
+ * Iterator uses the format {K1, V1, {K2, V2, {K3, V3, [Path | Map]}}},
+ * so each element is 4 words large.
+ * To make iteration order independent of input reductions
+ * the KV-pairs are here built in DESTRUCTIVE non-reverse order.
+ */
hp = HAlloc(BIF_P, 4 * elems);
- res = am_none;
} else {
- /* list used the format [Path, Map, {K,V}, {K,V} | BIF_ARG_3],
- * so each element is 2+3 words large */
+ /*
+ * List used the format [Path, Map, {K3,V3}, {K2,V2}, {K1,V1} | BIF_ARG_3],
+ * so each element is 2+3 words large.
+ * To make list order independent of input reductions
+ * the KV-pairs are here built in FUNCTIONAL reverse order
+ * as this is how the list as a whole is constructed.
+ */
hp = HAlloc(BIF_P, (2 + 3) * elems);
- res = BIF_ARG_3;
}
orig_elems = elems;
@@ -3139,12 +3156,15 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
if (is_list(ptr[PATH_ELEM(curr_path)])) {
Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]);
if (type == iterator) {
- res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4;
- /* Note where we should patch the Iterator is needed */
- path_ptr = hp-1;
+ res = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
} else {
Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
- res = CONS(hp, tup, res); hp += 2;
+ res = CONS(hp, tup, BIF_ARG_3); hp += 2;
}
elems--;
break;
@@ -3178,7 +3198,12 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
while (idx < sz && elems != 0 && is_list(ptr[idx])) {
Eterm *lst = list_val(ptr[idx]);
if (type == iterator) {
- res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4;
+ *patch_ptr = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
} else {
Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
res = CONS(hp, tup, res); hp += 2;
@@ -3276,7 +3301,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
if (type == iterator) {
hp = HAlloc(BIF_P, 2);
- *path_ptr = CONS(hp, path, map); hp += 2;
+ *patch_ptr = CONS(hp, path, map); hp += 2;
} else {
hp = HAlloc(BIF_P, 4);
res = CONS(hp, map, res); hp += 2;
@@ -3284,6 +3309,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
}
} else {
if (type == iterator) {
+ *patch_ptr = am_none;
HRelease(BIF_P, hp + 4 * elems, hp);
} else {
HRelease(BIF_P, hp + (2+3) * elems, hp);
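The iterator branch above now builds the nested {K, V, Tail} tuples in forward order: the newest tuple is emitted with its tail slot left open, patch_ptr remembers the address of that slot, and the next element (or the final [Path | Map] cons or am_none terminator) is written through it. A standalone model of that patch-pointer idea in plain C (hypothetical types, not Erlang terms):

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
    int key, val;
    struct node *tail;            /* the slot that gets patched later */
} node_t;

int main(void)
{
    const int keys[] = {1, 2, 3}, vals[] = {10, 20, 30};
    node_t *head = NULL;
    node_t **patch_ptr = &head;   /* address of the currently open slot */
    size_t i;

    for (i = 0; i < 3; i++) {
        node_t *n = malloc(sizeof(node_t));
        if (!n)
            return 1;
        n->key = keys[i];
        n->val = vals[i];
        n->tail = NULL;
        *patch_ptr = n;           /* fill the previously open slot */
        patch_ptr = &n->tail;     /* this node's tail is the new open slot */
    }
    *patch_ptr = NULL;            /* terminator, cf. '*patch_ptr = am_none' */

    while (head) {
        node_t *next = head->tail;
        printf("{%d,%d}\n", head->key, head->val);
        free(head);
        head = next;
    }
    return 0;
}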
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 34bd11d87c..507cc989d2 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -303,8 +303,10 @@ erts_queue_dist_message(Process *rcvr,
erts_cleanup_messages(mp);
}
else {
+ LINK_MESSAGE(rcvr, mp);
- LINK_MESSAGE(rcvr, mp, &mp->next, 1);
+ if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(rcvr);
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
@@ -314,9 +316,8 @@ erts_queue_dist_message(Process *rcvr,
}
/* Add messages last in message queue */
-static Sint
+static void
queue_messages(Process* receiver,
- erts_aint32_t *receiver_state,
ErtsProcLocks receiver_locks,
ErtsMessage* first,
ErtsMessage** last,
@@ -325,86 +326,124 @@ queue_messages(Process* receiver,
int locked_msgq = 0;
erts_aint32_t state;
- ASSERT(is_value(ERL_MESSAGE_TERM(first)));
- ASSERT(is_value(ERL_MESSAGE_FROM(first)));
- ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
- ERL_MESSAGE_TOKEN(first) == NIL ||
- is_tuple(ERL_MESSAGE_TOKEN(first)));
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
- receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#ifdef DEBUG
+ {
+ ErtsMessage* fmsg = ERTS_SIG_IS_MSG(first) ? first : first->next;
+ ASSERT(fmsg);
+ ASSERT(is_value(ERL_MESSAGE_TERM(fmsg)));
+ ASSERT(is_value(ERL_MESSAGE_FROM(fmsg)));
+ ASSERT(ERL_MESSAGE_TOKEN(fmsg) == am_undefined ||
+ ERL_MESSAGE_TOKEN(fmsg) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(fmsg)));
+ }
#endif
- if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks;
-
- if (receiver_state)
- state = *receiver_state;
- else
- state = erts_atomic32_read_nob(&receiver->state);
- if (state & ERTS_PSFLG_EXITING)
- goto exiting;
+ ERTS_LC_ASSERT((erts_proc_lc_my_proc_locks(receiver) & ERTS_PROC_LOCK_MSGQ)
+ == (receiver_locks & ERTS_PROC_LOCK_MSGQ));
- need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
- if (need_locks) {
- erts_proc_unlock(receiver, need_locks);
- }
- need_locks |= ERTS_PROC_LOCK_MSGQ;
- erts_proc_lock(receiver, need_locks);
- }
+ if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ erts_proc_lock(receiver, ERTS_PROC_LOCK_MSGQ);
locked_msgq = 1;
}
-
state = erts_atomic32_read_nob(&receiver->state);
if (state & ERTS_PSFLG_EXITING) {
- exiting:
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
- erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ ErtsSchedulerData* esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(!esdp->pending_signal.sig);
+ esdp->pending_signal.sig = (ErtsSignal*) first;
+ esdp->pending_signal.to = receiver->common.id;
+ first = first->next;
+ }
erts_cleanup_messages(first);
- return 0;
+ return;
+ }
+
+ if (last == &first->next) {
+ ASSERT(len == 1);
+ LINK_MESSAGE(receiver, first);
+ }
+ else {
+ erts_enqueue_signals(receiver, first, last, NULL, len, state);
}
- LINK_MESSAGE(receiver, first, last, len);
+ if (receiver_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(receiver);
if (locked_msgq) {
erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
- erts_proc_notify_new_message(receiver, receiver_locks);
- return 0;
+ if (last == &first->next)
+ erts_proc_notify_new_message(receiver, receiver_locks);
+ else
+ erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE);
}
-static Sint
-queue_message(Process* receiver,
- erts_aint32_t *receiver_state,
- ErtsProcLocks receiver_locks,
- ErtsMessage* mp, Eterm msg, Eterm from)
+static ERTS_INLINE
+ErtsMessage* prepend_pending_sig_maybe(Process* sender, Process* receiver,
+ ErtsMessage* mp)
{
- ERL_MESSAGE_TERM(mp) = msg;
- ERL_MESSAGE_FROM(mp) = from;
- return queue_messages(receiver, receiver_state, receiver_locks,
- mp, &mp->next, 1);
+ ErtsSchedulerData* esdp = sender->scheduler_data;
+ ErtsSignal* pend_sig;
+
+ if (!esdp || esdp->pending_signal.to != receiver->common.id)
+ return mp;
+
+ pend_sig = esdp->pending_signal.sig;
+
+ ASSERT(esdp->pending_signal.dbg_from == sender);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+ pend_sig->common.next = mp;
+ pend_sig->common.specific.next = NULL;
+ return (ErtsMessage*) pend_sig;
}
-Sint
+/**
+ *
+ * @brief Send one message whose sender is *not* a local process.
+ *
+ */
+void
erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
ErtsMessage* mp, Eterm msg, Eterm from)
{
- return queue_message(receiver, NULL, receiver_locks, mp, msg, from);
+ ASSERT(is_not_internal_pid(from));
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = from;
+ queue_messages(receiver, receiver_locks, mp, &mp->next, 1);
}
+/**
+ * @brief Send one message from a local process.
+ */
+void
+erts_queue_proc_message(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg)
+{
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = sender->common.id;
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, mp),
+ &mp->next, 1);
+}
-Sint
-erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks,
- ErtsMessage* first, ErtsMessage** last, Uint len)
+
+void
+erts_queue_proc_messages(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len)
{
- return queue_messages(receiver, NULL, receiver_locks,
- first, last, len);
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, first),
+ last, len);
}
void
@@ -541,7 +580,7 @@ erts_try_alloc_message_on_heap(Process *pp,
* Send a local message when sender & receiver processes are known.
*/
-Sint
+void
erts_send_message(Process* sender,
Process* receiver,
ErtsProcLocks *receiver_locks,
@@ -552,7 +591,6 @@ erts_send_message(Process* sender,
ErtsMessage* mp;
ErlOffHeap *ohp;
Eterm token = NIL;
- Sint res = 0;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(sender_name, 64);
DTRACE_CHARBUF(receiver_name, 64);
@@ -695,13 +733,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
- res = queue_message(receiver,
- &receiver_state,
- *receiver_locks,
- mp, message,
- sender->common.id);
- return res;
+ erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);
}
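prepend_pending_sig_maybe() above supports the new pending-signal optimization: a monitor signal created by the sender can be parked in the scheduler's pending_signal slot and is linked in front of the next message sent to the same receiver, so signal order is preserved while the receiver's queue is only locked once. A standalone model of that hand-off in plain C (hypothetical names):

#include <stdio.h>

typedef struct item { const char *what; struct item *next; } item_t;

typedef struct {
    item_t *pending;   /* deferred signal, if any (cf. esdp->pending_signal.sig) */
    int pending_to;    /* receiver it is destined for */
} sched_t;

static item_t *maybe_prepend_pending(sched_t *s, int receiver, item_t *msg)
{
    if (!s->pending || s->pending_to != receiver)
        return msg;                /* nothing parked for this receiver */
    s->pending->next = msg;        /* deferred signal goes in front */
    msg = s->pending;
    s->pending = NULL;
    s->pending_to = -1;
    return msg;
}

int main(void)
{
    item_t monitor = { "monitor-signal", NULL };
    item_t message = { "message", NULL };
    sched_t sched = { &monitor, 42 };          /* signal parked for receiver 42 */
    item_t *i;

    for (i = maybe_prepend_pending(&sched, 42, &message); i; i = i->next)
        printf("%s\n", i->what);               /* monitor-signal, then message */
    return 0;
}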
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index ee87297ba4..d120111634 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -229,7 +229,7 @@ typedef union {
typedef struct {
/* pointers to next pointers pointing to... */
ErtsMessage **next; /* ... next (non-message) signal */
- ErtsMessage **last; /* ... next (non-message) signal */
+ ErtsMessage **last; /* ... last (non-message) signal */
} ErtsMsgQNMSigs;
/* Size of default message buffer (erl_message.c) */
@@ -296,8 +296,11 @@ typedef struct {
typedef struct {
ErtsMessage* first;
ErtsMessage** last; /* point to the last next pointer */
- Sint len; /* queue length */
+ Sint len; /* number of messages in queue */
ErtsMsgQNMSigs nmsigs;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ int may_contain_heap_terms;
+#endif
} ErtsSignalInQueue;
typedef struct erl_trace_message_queue__ {
@@ -364,13 +367,14 @@ typedef struct erl_trace_message_queue__ {
#endif
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, num_msgs) \
+/* Add one message last in message queue */
+#define LINK_MESSAGE(p, msg) \
do { \
+ ASSERT(ERTS_SIG_IS_MSG(msg)); \
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
- *(p)->sig_inq.last = (first_msg); \
- (p)->sig_inq.last = (last_msg); \
- (p)->sig_inq.len += (num_msgs); \
+ *(p)->sig_inq.last = (msg); \
+ (p)->sig_inq.last = &(msg)->next; \
+ (p)->sig_inq.len++; \
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
} while(0)
@@ -437,11 +441,12 @@ ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
-Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
-Sint erts_queue_messages(Process*, ErtsProcLocks,
- ErtsMessage*, ErtsMessage**, Uint);
+void erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
+void erts_queue_proc_message(Process* from,Process* to, ErtsProcLocks,ErtsMessage*, Eterm);
+void erts_queue_proc_messages(Process* from, Process* to, ErtsProcLocks,
+ ErtsMessage*, ErtsMessage**, Uint);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
-Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
+void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
Uint erts_msg_attached_data_size_aux(ErtsMessage *msg);
diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c
index 70f36fb6b7..48d9bd4ca5 100644
--- a/erts/emulator/beam/erl_monitor_link.c
+++ b/erts/emulator/beam/erl_monitor_link.c
@@ -630,7 +630,9 @@ erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created, Uint16 type,
ErtsMonitor *res;
ErtsMonitorCreateCtxt cctxt = {type, origin};
- ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE || type == ERTS_MON_TYPE_NODES);
+ ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE
+ || type == ERTS_MON_TYPE_NODES
+ || type == ERTS_MON_TYPE_SUSPEND);
res = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
target, create_monitor,
@@ -760,11 +762,13 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name)
switch (type) {
case ERTS_MON_TYPE_PROC:
case ERTS_MON_TYPE_PORT:
- case ERTS_MON_TYPE_TIME_OFFSET:
if (is_nil(name)) {
ErtsMonitorDataHeap *mdhp;
ErtsORefThing *ortp;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+
+ ERTS_ML_ASSERT(is_nil(name));
ERTS_ML_ASSERT(is_immed(orgn) && is_immed(trgt));
ERTS_ML_ASSERT(is_internal_ordinary_ref(ref));
@@ -860,10 +864,38 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name)
mdep->dist = NULL;
break;
}
- case ERTS_MON_TYPE_SUSPEND:
- ERTS_INTERNAL_ERROR("Use erts_monitor_suspend_create() instead...");
- mdp = NULL;
+ case ERTS_MON_TYPE_SUSPEND: {
+ ErtsMonitorSuspend *msp;
+
+ ERTS_ML_ASSERT(is_nil(name));
+ ERTS_ML_ASSERT(is_nil(ref));
+ ERTS_ML_ASSERT(is_internal_pid(orgn) && is_internal_pid(trgt));
+
+ msp = erts_alloc(ERTS_ALC_T_MONITOR_SUSPEND,
+ sizeof(ErtsMonitorSuspend));
+ mdp = &msp->md;
+ ERTS_ML_ASSERT(((void *) mdp) == ((void *) msp));
+
+ mdp->ref = NIL;
+
+ mdp->origin.other.item = trgt;
+ mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin);
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset);
+ mdp->origin.flags = (Uint16) ERTS_ML_FLG_EXTENDED;
+ mdp->origin.type = type;
+
+ mdp->target.other.item = orgn;
+ mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target);
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ mdp->target.flags = ERTS_ML_FLG_TARGET|ERTS_ML_FLG_EXTENDED;
+ mdp->target.type = type;
+
+ msp->next = NULL;
+ erts_atomic_init_relb(&msp->state, 0);
+
break;
+ }
default:
ERTS_INTERNAL_ERROR("Invalid monitor type");
mdp = NULL;
@@ -887,10 +919,11 @@ erts_monitor_destroy__(ErtsMonitorData *mdp)
ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
== (mdp->target.flags & ERTS_ML_FLGS_SAME));
- ERTS_ML_ASSERT(mdp->origin.type != ERTS_MON_TYPE_SUSPEND);
if (!(mdp->origin.flags & ERTS_ML_FLG_EXTENDED))
erts_free(ERTS_ALC_T_MONITOR, mdp);
+ else if (mdp->origin.type == ERTS_MON_TYPE_SUSPEND)
+ erts_free(ERTS_ALC_T_MONITOR_SUSPEND, mdp);
else {
ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
ErlOffHeap oh;
@@ -927,10 +960,10 @@ erts_monitor_size(ErtsMonitor *mon)
Uint size, refc;
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
- ERTS_ML_ASSERT(mon->type != ERTS_MON_TYPE_SUSPEND);
-
if (!(mon->flags & ERTS_ML_FLG_EXTENDED))
size = sizeof(ErtsMonitorDataHeap);
+ else if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ size = sizeof(ErtsMonitorSuspend);
else {
ErtsMonitorDataExtended *mdep;
Uint hsz = 0;
@@ -957,54 +990,6 @@ erts_monitor_size(ErtsMonitor *mon)
return size / refc;
}
-
-/* suspend monitors... */
-
-ErtsMonitorSuspend *
-erts_monitor_suspend_create(Eterm pid)
-{
- ErtsMonitorSuspend *msp;
-
- ERTS_ML_ASSERT(is_internal_pid(pid));
-
- msp = erts_alloc(ERTS_ALC_T_SUSPEND_MON,
- sizeof(ErtsMonitorSuspend));
- msp->mon.offset = (Uint16) offsetof(ErtsMonitorSuspend, mon);
- msp->mon.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
- msp->mon.other.item = pid;
- msp->mon.flags = 0;
- msp->mon.type = ERTS_MON_TYPE_SUSPEND;
- msp->pending = 0;
- msp->active = 0;
- return msp;
-}
-
-static ErtsMonLnkNode *
-create_monitor_suspend(Eterm pid, void *unused)
-{
- ErtsMonitorSuspend *msp = erts_monitor_suspend_create(pid);
- return (ErtsMonLnkNode *) &msp->mon;
-}
-
-ErtsMonitorSuspend *
-erts_monitor_suspend_tree_lookup_create(ErtsMonitor **root, int *created,
- Eterm pid)
-{
- ErtsMonitor *mon;
- mon = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
- pid, create_monitor_suspend,
- NULL,
- created);
- return erts_monitor_suspend(mon);
-}
-
-void
-erts_monitor_suspend_destroy(ErtsMonitorSuspend *msp)
-{
- ERTS_ML_ASSERT(!(msp->mon.flags & ERTS_ML_FLG_IN_TABLE));
- erts_free(ERTS_ALC_T_SUSPEND_MON, msp);
-}
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Link Operations *
* *
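Suspend monitors are now allocated and wired up by erts_monitor_create() itself, so the dedicated erts_monitor_suspend_create()/destroy() helpers removed above are no longer needed. As in the other monitor types, the origin and target halves each record their byte offset inside the shared allocation, so either half can recover the enclosing data block with one subtraction. A standalone sketch of that offsetof trick in plain C (hypothetical types, not the ERTS structures):

#include <stdio.h>
#include <stddef.h>

typedef struct { unsigned short offset; int item; } half_t;

typedef struct {
    half_t origin;
    half_t target;
    int refc;
} mon_data_t;

static mon_data_t *to_data(half_t *h)      /* cf. erts_monitor_to_data() */
{
    return (mon_data_t *) ((char *) h - h->offset);
}

int main(void)
{
    mon_data_t md;
    md.origin.offset = (unsigned short) offsetof(mon_data_t, origin);
    md.target.offset = (unsigned short) offsetof(mon_data_t, target);
    md.refc = 2;

    printf("%d %d\n", to_data(&md.origin)->refc, to_data(&md.target)->refc);
    return 0;
}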
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index 603aead8cc..9ff8aa509a 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -246,15 +246,28 @@
*
* --- ERTS_MON_TYPE_SUSPEND -------------------------------------
*
- * Suspend monitor.
+ * Suspend monitor. A local process (origin) suspends another
+ * local process (target).
*
- * Other Item: Suspendee process identifier
- * Key: Suspendee process identifier
- *
- * Valid keys are only ordinary internal references.
+ * Origin:
+ * Other Item: Process identifier of suspendee
+ * (target)
+ * Key: Process identifier of suspendee
+ * (target)
+ * Target:
+ * Other Item: Process identifier of suspender
+ * (origin)
+ * Key: Process identifier of suspender
+ * (origin)
+ * Shared:
+ * Next: Pointer to another suspend monitor
+ * State: Number of suspends and a flag
+ * indicating if the suspend is
+ * active or not.
*
- * This type of monitor is a bit strange and the whole process
- * suspend functionality should be improved...
+ * The origin part of the monitor is stored in the monitor tree of
+ * the origin process, and the target part of the monitor is stored
+ * in the monitor list for local targets on the target process.
*
*
*
@@ -638,11 +651,15 @@ struct ErtsMonitorDataExtended__ {
Eterm heap[1]; /* heap start... */
};
-typedef struct {
- ErtsMonitor mon;
- int pending;
- int active;
-} ErtsMonitorSuspend;
+typedef struct ErtsMonitorSuspend__ ErtsMonitorSuspend;
+
+struct ErtsMonitorSuspend__ {
+ ErtsMonitorData md; /* origin = suspender; target = suspendee */
+ ErtsMonitorSuspend *next;
+ erts_atomic_t state;
+};
+#define ERTS_MSUSPEND_STATE_FLG_ACTIVE ((erts_aint_t) (((Uint) 1) << (sizeof(Uint)*8 - 1)))
+#define ERTS_MSUSPEND_STATE_COUNTER_MASK (~ERTS_MSUSPEND_STATE_FLG_ACTIVE)
/*
* --- Monitor tree operations ---
@@ -1094,24 +1111,25 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
*
* @brief Create a monitor
*
- * Can create all types of monitors exept for suspend monitors
+ * Can create all types of monitors
*
 * When the function is called it is assumed that:
* - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE
- * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE or ERTS_MON_TYPE_NODES
+ * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or
+ * ERTS_MON_TYPE_SUSPEND
+ * - 'ref' is an ordinary internal reference or an external reference if
* type is ERTS_MON_TYPE_DIST_PROC
* - 'name' is an atom or NIL if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, or ERTS_MON_TYPE_DIST_PROC
+ * - 'name' is NIL if type is ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_RESOURCE,
- * ERTS_MON_TYPE_NODE, or ERTS_MON_TYPE_NODES
+ * ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
* If the above is not true, bad things will happen.
*
* @param[in] type ERTS_MON_TYPE_PROC, ERTS_MON_TYPE_PORT,
* ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_DIST_PROC,
* ERTS_MON_TYPE_RESOURCE, ERTS_MON_TYPE_NODE,
- * or ERTS_MON_TYPE_NODES
+ * ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
*
* @param[in] ref A reference or NIL depending on type
*
@@ -1119,6 +1137,10 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
*
* @param[in] target The key of the target
*
+ * @param[in] name An atom (the name) or NIL depending on type
+ *
+ * @returns A pointer to monitor data structure
+ *
*/
ErtsMonitorData *erts_monitor_create(Uint16 type, Eterm ref, Eterm origin,
Eterm target, Eterm name);
@@ -1347,7 +1369,8 @@ erts_monitor_to_data(ErtsMonitor *mon)
ERTS_ML_ASSERT(erts_monitor_origin_offset == (size_t) mdp->origin.offset);
ERTS_ML_ASSERT(!!(mdp->target.flags & ERTS_ML_FLG_TARGET));
ERTS_ML_ASSERT(erts_monitor_target_offset == (size_t) mdp->target.offset);
- if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES) {
+ if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES
+ || mon->type == ERTS_MON_TYPE_SUSPEND) {
ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->origin.key_offset);
ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->target.key_offset);
}
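The new ErtsMonitorSuspend keeps its bookkeeping in a single atomic word: ERTS_MSUSPEND_STATE_FLG_ACTIVE is the most significant bit, and ERTS_MSUSPEND_STATE_COUNTER_MASK covers the remaining bits that count suspend requests, so both can be read or updated in one word-sized operation. A standalone illustration of that packing in plain C (non-atomic, hypothetical names):

#include <stdio.h>
#include <stdint.h>

#define ACTIVE_FLG   ((uintptr_t) 1 << (sizeof(uintptr_t) * 8 - 1))
#define COUNTER_MASK (~ACTIVE_FLG)

int main(void)
{
    uintptr_t state = 0;

    state |= ACTIVE_FLG;                                           /* suspend active */
    state = (state & ACTIVE_FLG) | ((state & COUNTER_MASK) + 1);   /* 1st suspend */
    state = (state & ACTIVE_FLG) | ((state & COUNTER_MASK) + 1);   /* 2nd suspend */

    printf("active=%d count=%lu\n",
           (state & ACTIVE_FLG) ? 1 : 0,
           (unsigned long) (state & COUNTER_MASK));
    return 0;
}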
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 0e0013e8a4..0fbf0eb03a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -388,12 +388,18 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC));
+ ASSERT(esdp->current_nif == NULL);
+ esdp->current_nif = &env;
+
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ ASSERT(esdp->current_nif == &env);
+ esdp->current_nif = NULL;
+
ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
ASSERT(env.proc->next == c_p);
@@ -659,7 +665,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
rp_locks = 0;
if (rp->common.id == c_p->common.id)
rp_locks = c_p_locks;
- erts_queue_messages(rp, rp_locks, first, last, len);
+ erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
@@ -850,7 +856,10 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
}
}
- erts_queue_message(rp, rp_locks, mp, msg, from);
+ if (c_p)
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
+ else
+ erts_queue_message(rp, rp_locks, mp, msg, from);
done:
if (c_p == rp)
@@ -1246,8 +1255,10 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
if (is_non_value(*term)) {
return 0;
}
- erts_factory_close(&factory);
- cache_env(dst_env);
+ if (size > 0) {
+ erts_factory_close(&factory);
+ cache_env(dst_env);
+ }
ASSERT(bp > data);
return bp - data;
@@ -4258,6 +4269,10 @@ int erts_nif_get_funcs(struct erl_module_nif* mod,
return mod->entry.num_of_funcs;
}
+Module *erts_nif_get_module(struct erl_module_nif *nif_mod) {
+ return nif_mod->mod;
+}
+
Eterm erts_nif_call_function(Process *p, Process *tracee,
struct erl_module_nif* mod,
ErlNifFunc *fun, int argc, Eterm *argv)
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 2a98a6f00b..9b52b648e5 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -485,6 +485,8 @@ ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm);
ERTS_GLB_INLINE int erts_is_port_alive(Eterm);
ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm);
ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *);
+ERTS_GLB_INLINE Port *erts_get_current_port(void);
+ERTS_GLB_INLINE Eterm erts_get_current_port_id(void);
#define erts_drvport2port(Prt) erts_drvport2port_state((Prt), NULL)
@@ -812,6 +814,20 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
return reds;
}
+ERTS_GLB_INLINE
+Port *erts_get_current_port(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ return esdp ? esdp->current_port : NULL;
+}
+
+ERTS_GLB_INLINE
+Eterm erts_get_current_port_id(void)
+{
+ Port *port = erts_get_current_port();
+ return port ? port->common.id : THE_NON_VALUE;
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
void erts_port_resume_procs(Port *);
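erts_get_current_port_id() above is a NULL-safe chain: scheduler data may be missing (the caller may not be a scheduler thread) and the scheduler may not be executing a port, so each link falls back to a sentinel. A small standalone model of the same shape in plain C (hypothetical names; the real functions return Port * and THE_NON_VALUE):

#include <stdio.h>
#include <stddef.h>

#define NO_ID 0u      /* sentinel, playing the role of THE_NON_VALUE */

typedef struct { unsigned id; } port_t;
typedef struct { port_t *current_port; } sched_data_t;

static port_t current = { 42 };
static sched_data_t sched = { &current };

static sched_data_t *get_scheduler_data(void) { return &sched; }  /* may be NULL */

static port_t *get_current_port(void)
{
    sched_data_t *esdp = get_scheduler_data();
    return esdp ? esdp->current_port : NULL;
}

static unsigned get_current_port_id(void)
{
    port_t *port = get_current_port();
    return port ? port->id : NO_ID;
}

int main(void)
{
    printf("current port id: %u\n", get_current_port_id());
    return 0;
}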
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index bcc4fc6d9b..e9b41ad298 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -39,6 +39,7 @@
#include "big.h"
#include "erl_gc.h"
#include "bif.h"
+#include "erl_bif_unique.h"
#include "erl_proc_sig_queue.h"
#include "dtrace-wrapper.h"
@@ -49,7 +50,7 @@
* Note that not all signal are handled using this functionality!
*/
-#define ERTS_SIG_Q_OP_MAX 11
+#define ERTS_SIG_Q_OP_MAX 13
#define ERTS_SIG_Q_OP_EXIT 0
#define ERTS_SIG_Q_OP_EXIT_LINKED 1
@@ -62,7 +63,9 @@
#define ERTS_SIG_Q_OP_TRACE_CHANGE_STATE 8
#define ERTS_SIG_Q_OP_PERSISTENT_MON_MSG 9
#define ERTS_SIG_Q_OP_IS_ALIVE 10
-#define ERTS_SIG_Q_OP_PROCESS_INFO ERTS_SIG_Q_OP_MAX
+#define ERTS_SIG_Q_OP_PROCESS_INFO 11
+#define ERTS_SIG_Q_OP_SYNC_SUSPEND 12
+#define ERTS_SIG_Q_OP_RPC ERTS_SIG_Q_OP_MAX
#define ERTS_SIG_Q_TYPE_MAX (ERTS_MON_LNK_TYPE_MAX + 5)
@@ -154,6 +157,17 @@ typedef struct {
} ErtsIsAliveRequest;
typedef struct {
+ Eterm message;
+ Eterm requester;
+ int async;
+} ErtsSyncSuspendRequest;
+
+typedef struct {
+ ErtsMonitorSuspend *mon;
+ ErtsMessage *sync;
+} ErtsProcSigPendingSuspend;
+
+typedef struct {
ErtsSignalCommon common;
Sint refc;
Sint delayed_len;
@@ -176,6 +190,15 @@ typedef struct {
#define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1)
#define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2)
+typedef struct {
+ ErtsSignalCommon common;
+ Eterm requester;
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **);
+ void *arg;
+ Eterm ref;
+ ErtsORefThing oref_thing;
+} ErtsProcSigRPC;
+
static int handle_msg_tracing(Process *c_p,
ErtsSigRecvTracing *tracing,
ErtsMessage ***next_nm_sig);
@@ -308,9 +331,8 @@ destroy_sig_group_leader(ErtsSigGroupLeader *sgl)
}
static ERTS_INLINE void
-sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
- Process *rp, ErtsMessage **first,
- ErtsMessage **last, ErtsMessage ***last_next)
+sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op,
+ Process *rp, ErtsMessage ***last_next)
{
switch (op) {
case ERTS_SIG_Q_OP_LINK:
@@ -326,12 +348,11 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
* Prepend a trace-change-state signal before the
* link signal...
*/
-
tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
0);
ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
- ti->common.next = *last;
+ ti->common.next = *sigp;
ti->common.specific.next = &ti->common.next;
ti->common.tag = tag;
ti->flags_on = ERTS_TRACE_FLAGS(c_p) & TRACEE_FLAGS;
@@ -344,8 +365,9 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
erts_tracer_update(&ti->tracer, ERTS_TRACER(c_p));
- *first = (ErtsMessage *) ti;
- *last_next = &ti->common.next;
+ *sigp = (ErtsMessage *) ti;
+ if (!*last_next || *last_next == sigp)
+ *last_next = &ti->common.next;
}
break;
@@ -354,6 +376,7 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
case ERTS_SIG_Q_OP_EXIT_LINKED:
if (DTRACE_ENABLED(process_exit_signal)) {
+ ErtsMessage* sig = *sigp;
Uint16 type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag);
Eterm reason, from;
@@ -430,9 +453,24 @@ sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig, ErtsMessage *last
}
}
+#ifdef DEBUG
+static int dbg_count_nmsigs(ErtsMessage *first)
+{
+ ErtsMessage *sig;
+ int cnt = 0;
+
+ for (sig = first; sig; sig = sig->next) {
+ if (ERTS_SIG_IS_NON_MSG(sig))
+ ++cnt;
+ }
+ return cnt;
+}
+#endif
+
static ERTS_INLINE erts_aint32_t
enqueue_signals(Process *rp, ErtsMessage *first,
- ErtsMessage *last, ErtsMessage **last_next,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
erts_aint32_t in_state)
{
erts_aint32_t state = in_state;
@@ -442,13 +480,23 @@ enqueue_signals(Process *rp, ErtsMessage *first,
ASSERT(!*this);
*this = first;
- rp->sig_inq.last = &last->next;
+ rp->sig_inq.last = last;
if (!rp->sig_inq.nmsigs.next) {
ASSERT(!rp->sig_inq.nmsigs.last);
- rp->sig_inq.nmsigs.next = this;
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ rp->sig_inq.nmsigs.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ rp->sig_inq.nmsigs.next = &first->next;
+ }
+ else
+ goto no_nmsig;
+
state = erts_atomic32_read_bor_nob(&rp->state,
ERTS_PSFLG_SIG_IN_Q);
+ no_nmsig:
ASSERT(!(state & ERTS_PSFLG_SIG_IN_Q));
}
else {
@@ -459,83 +507,168 @@ enqueue_signals(Process *rp, ErtsMessage *first,
ASSERT(sig && !sig->common.specific.next);
ASSERT(state & ERTS_PSFLG_SIG_IN_Q);
- sig->common.specific.next = this;
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ sig->common.specific.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ sig->common.specific.next = &first->next;
+ }
}
if (last_next) {
- ASSERT(first != last);
+ ASSERT(dbg_count_nmsigs(first) >= 2);
rp->sig_inq.nmsigs.last = last_next;
}
- else {
- ASSERT(first == last);
+ else if (ERTS_SIG_IS_NON_MSG(first)) {
+ ASSERT(dbg_count_nmsigs(first) == 1);
rp->sig_inq.nmsigs.last = this;
}
+ else
+ ASSERT(dbg_count_nmsigs(first) == 0);
+
+ rp->sig_inq.len += num_msgs;
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);
return state;
}
-static ERTS_INLINE void
-ensure_dirty_proc_handled(Eterm pid,
- erts_aint32_t state,
- erts_aint32_t prio)
+erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
+ erts_aint32_t in_state)
{
- if (state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- Eterm *hp;
- ErtsMessage *mp;
- Process *sig_handler;
+ return enqueue_signals(rp, first, last, last_next, num_msgs, in_state);
+}
- if (prio < 0)
- prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
+void
+erts_make_dirty_proc_handled(Eterm pid,
+ erts_aint32_t state,
+ erts_aint32_t prio)
+{
+ Eterm *hp;
+ ErtsMessage *mp;
+ Process *sig_handler;
- switch (prio) {
- case PRIORITY_MAX:
- sig_handler = erts_dirty_process_signal_handler_max;
- break;
- case PRIORITY_HIGH:
- sig_handler = erts_dirty_process_signal_handler_high;
- break;
- default:
- sig_handler = erts_dirty_process_signal_handler;
- break;
- }
+ ASSERT(state & (ERTS_PSFLG_DIRTY_RUNNING |
+ ERTS_PSFLG_DIRTY_RUNNING_SYS));
+
+ if (prio < 0)
+ prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
- /* Make sure signals are handled... */
- mp = erts_alloc_message(0, &hp);
- erts_queue_message(sig_handler, 0, mp, pid, am_system);
+ switch (prio) {
+ case PRIORITY_MAX:
+ sig_handler = erts_dirty_process_signal_handler_max;
+ break;
+ case PRIORITY_HIGH:
+ sig_handler = erts_dirty_process_signal_handler_high;
+ break;
+ default:
+ sig_handler = erts_dirty_process_signal_handler;
+ break;
}
+
+ /* Make sure signals are handled... */
+ mp = erts_alloc_message(0, &hp);
+ erts_queue_message(sig_handler, 0, mp, pid, am_system);
}
static void
check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig);
+
static int
proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
{
int res;
Process *rp;
- ErtsMessage *first, *last, **last_next;
+ ErtsMessage *first, *last, **last_next, **sigp;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
erts_aint32_t state;
+ ErtsSignal *pend_sig;
- if (is_normal_sched)
- rp = erts_proc_lookup_raw(pid);
- else
- rp = erts_proc_lookup_raw_inc_refc(pid);
+ if (is_normal_sched) {
+ pend_sig = esdp->pending_signal.sig;
+ if (op == ERTS_SIG_Q_OP_MONITOR
+ && ((ErtsMonitor*)sig)->type == ERTS_MON_TYPE_PROC) {
- if (!rp)
- return 0;
+ if (!pend_sig) {
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = esdp->current_process;
+#endif
+ return 1;
+ }
+ ASSERT(esdp->pending_signal.dbg_from == esdp->current_process);
+ if (pend_sig != sig) {
+ /* Switch them and send previously pending signal instead */
+ Eterm pend_to = esdp->pending_signal.to;
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+ sig = pend_sig;
+ pid = pend_to;
+ }
+ else {
+ /* Caller wants to flush pending signal */
+ ASSERT(pid == esdp->pending_signal.to);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+ pend_sig = NULL;
+ }
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)sig, am_noproc);
+ return 1;
+ }
+ }
+ else if (pend_sig && pid == esdp->pending_signal.to) {
+ /* Flush pending signal to maintain signal order */
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ return 0;
+ }
+
+ /* Prepend pending signal */
+ pend_sig->common.next = (ErtsMessage*) sig;
+ pend_sig->common.specific.next = &pend_sig->common.next;
+ first = (ErtsMessage*) pend_sig;
+ last = (ErtsMessage*) sig;
+ sigp = last_next = &pend_sig->common.next;
+ goto first_last_done;
+ }
+ else {
+ pend_sig = NULL;
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp)
+ return 0;
+ }
+ }
+ else {
+ rp = erts_proc_lookup_raw_inc_refc(pid);
+ if (!rp)
+ return 0;
+ pend_sig = NULL;
+ }
- sig->common.specific.next = NULL;
first = last = (ErtsMessage *) sig;
last_next = NULL;
+ sigp = &first;
+
+first_last_done:
+ sig->common.specific.next = NULL;
/* may add signals before and/or after sig */
- sig_enqueue_trace(c_p, first, op, rp,
- &first, &last, &last_next);
+ sig_enqueue_trace(c_p, sigp, op, rp, &last_next);
last->next = NULL;
@@ -546,7 +679,7 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
if (ERTS_PSFLG_FREE & state)
res = 0;
else {
- state = enqueue_signals(rp, first, last, last_next, state);
+ state = enqueue_signals(rp, first, &last->next, last_next, 0, state);
if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO))
check_push_msgq_len_offs_marker(rp, sig);
res = !0;
@@ -554,17 +687,23 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- if (res == 0)
+ if (res == 0) {
+ if (pend_sig) {
+ if (sig == pend_sig) {
+ /* We did a switch, callers signal is now pending (still ok) */
+ ASSERT(esdp->pending_signal.sig);
+ res = 1;
+ }
+ else {
+ ASSERT(first == (ErtsMessage*)pend_sig);
+ first = first->next;
+ }
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ }
sig_enqueue_trace_cleanup(first, sig, last);
-
- if (!(state & (ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_SIG_IN_Q))) {
- /* Schedule process... */
- state = erts_proc_sys_schedule(rp, state, 0);
}
- ensure_dirty_proc_handled(rp->common.id, state, -1);
+ erts_proc_notify_new_sig(rp, state, 0);
if (!is_normal_sched)
erts_proc_dec_refc(rp);
@@ -572,6 +711,24 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
return res;
}
+void erts_proc_sig_send_pending(ErtsSchedulerData* esdp)
+{
+ ErtsSignal* sig = esdp->pending_signal.sig;
+ int op;
+
+ ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(sig);
+ ASSERT(is_internal_pid(esdp->pending_signal.to));
+
+ op = ERTS_SIG_Q_OP_MONITOR;
+ ASSERT(op == ERTS_PROC_SIG_OP(sig->common.tag));
+
+ if (!proc_queue_signal(NULL, esdp->pending_signal.to, sig, op)) {
+ ErtsMonitor* mon = (ErtsMonitor*)sig;
+ erts_proc_sig_send_monitor_down(mon, am_noproc);
+ }
+}
+
static int
maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
{
@@ -602,7 +759,10 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
if (res) {
/* ensure handled if dirty executing... */
state = erts_atomic32_read_nob(&rp->state);
- ensure_dirty_proc_handled(other, state, my_prio);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(other, state, my_prio);
+ }
}
}
}
@@ -612,11 +772,6 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
void
erts_proc_sig_fetch__(Process *proc)
{
-#ifdef ERTS_PROC_SIG_HARD_DEBUG
- ErtsSignalPrivQueues sig_qs = proc->sig_qs;
- ErtsSignalInQueue sig_inq = proc->sig_inq;
-#endif
-
ASSERT(proc->sig_inq.first);
if (!proc->sig_inq.nmsigs.next) {
@@ -634,9 +789,7 @@ erts_proc_sig_fetch__(Process *proc)
}
}
else {
-#ifdef DEBUG
erts_aint32_t s;
-#endif
ASSERT(proc->sig_inq.nmsigs.last);
if (!proc->sig_qs.nmsigs.last) {
ASSERT(!proc->sig_qs.nmsigs.next);
@@ -645,16 +798,13 @@ erts_proc_sig_fetch__(Process *proc)
else
proc->sig_qs.nmsigs.next = proc->sig_inq.nmsigs.next;
-#ifdef DEBUG
- s =
-#endif
- erts_atomic32_read_bset_nob(&proc->state,
+ s = erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_SIG_Q
| ERTS_PSFLG_SIG_IN_Q),
ERTS_PSFLG_SIG_Q);
ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
- == ERTS_PSFLG_SIG_IN_Q);
+ == ERTS_PSFLG_SIG_IN_Q); (void)s;
}
else {
ErtsSignal *sig;
@@ -667,14 +817,11 @@ erts_proc_sig_fetch__(Process *proc)
else
sig->common.specific.next = proc->sig_inq.nmsigs.next;
-#ifdef DEBUG
- s =
-#endif
- erts_atomic32_read_band_nob(&proc->state,
+ s = erts_atomic32_read_band_nob(&proc->state,
~ERTS_PSFLG_SIG_IN_Q);
ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
- == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q));
+ == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); (void)s;
}
if (proc->sig_inq.nmsigs.last == &proc->sig_inq.first)
proc->sig_qs.nmsigs.last = proc->sig_qs.cont_last;
@@ -1184,6 +1331,8 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
/* Pass signal using old monitor structure... */
ErtsSignal *sig;
+ send_using_monitor_struct:
+
mon->other.item = reason; /* Pass immed reason via other.item... */
sig = (ErtsSignal *) mon;
sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR_DOWN,
@@ -1195,6 +1344,18 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Eterm from_tag, monitored, heap[3];
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ /*
+ * Set reason to 'undefined', since exit
+ * reason is not used for suspend monitors,
+ * and send using monitor structure. This
+ * since we don't want to trigger
+ * unnecessary memory allocation etc...
+ */
+ reason = am_undefined;
+ goto send_using_monitor_struct;
+ }
+
if (!(mon->flags & ERTS_ML_FLG_NAME)) {
from_tag = monitored = mdp->origin.other.item;
if (is_external_pid(from_tag)) {
@@ -1417,8 +1578,7 @@ erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref)
/* It wasn't alive; reply to ourselves... */
mp->next = NULL;
mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
- erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
- mp, msg, am_system);
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, mp, msg, am_system);
}
}
@@ -1473,7 +1633,173 @@ erts_proc_sig_send_process_info_request(Process *c_p,
else
erts_free(ERTS_ALC_T_SIG_DATA, pis);
return res;
-}
+}
+
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to, Eterm tag, Eterm reply)
+{
+ ErlHeapFragment *hfrag;
+ Uint hsz, tag_sz;
+ Eterm *hp, *start_hp, tag_cpy, msg, default_reply;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ ErtsSyncSuspendRequest *ssusp;
+ int async_suspend;
+
+ tag_sz = size_object(tag);
+
+ hsz = 3 + tag_sz + sizeof(ErtsSyncSuspendRequest)/sizeof(Eterm);
+
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ tag_cpy = copy_struct(tag, tag_sz, &hp, ohp);
+
+ async_suspend = is_non_value(reply);
+ default_reply = async_suspend ? am_suspended : reply;
+
+ msg = TUPLE2(hp, tag_cpy, default_reply);
+ hp += 3;
+
+ hfrag->used_size = hp - start_hp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) hp;
+ ssusp->message = msg;
+ ssusp->requester = c_p->common.id;
+ ssusp->async = async_suspend;
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_SYNC_SUSPEND,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ ERL_MESSAGE_FROM(mp) = am_system;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+#endif
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_SYNC_SUSPEND))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ Eterm *tp;
+ /* It wasn't alive; reply to ourselves... */
+ mp->next = NULL;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ tp = tuple_val(msg);
+ tp[2] = async_suspend ? am_badarg : am_exited;
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
+ mp, msg, am_system);
+ }
+}
+
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg)
+{
+ Eterm res;
+ ErtsProcSigRPC *sig = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigRPC));
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_RPC,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+ sig->requester = reply ? c_p->common.id : NIL;
+ sig->func = func;
+ sig->arg = arg;
+
+ if (!reply) {
+ res = am_ok;
+ sig->ref = am_ok;
+ }
+ else {
+ res = erts_make_ref(c_p);
+
+ sys_memcpy((void *) &sig->oref_thing,
+ (void *) internal_ref_val(res),
+ sizeof(ErtsORefThing));
+
+ sig->ref = make_internal_ref(&sig->oref_thing);
+
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ }
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_RPC))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ erts_free(ERTS_ALC_T_SIG_DATA, sig);
+ res = THE_NON_VALUE;
+ if (reply)
+ JOIN_MESSAGE(c_p);
+ }
+
+ return res;
+}
+
+static int
+handle_rpc(Process *c_p, ErtsProcSigRPC *rpc, int cnt, int limit, int *yieldp)
+{
+ Process *rp;
+ ErlHeapFragment *bp = NULL;
+ Eterm res;
+ Uint hsz;
+ int reds, out_cnt;
+
+ /*
+ * reds in:
+ * Reductions left.
+ *
+ * reds out:
+ * Absolute value of reds out equals consumed
+ * amount of reds. If a negative value, force
+ * a yield.
+ */
+
+ reds = (limit - cnt) / ERTS_SIG_REDS_CNT_FACTOR;
+ if (reds <= 0)
+ reds = 1;
+
+ res = (*rpc->func)(c_p, rpc->arg, &reds, &bp);
+
+ if (reds < 0) {
+ /* Force yield... */
+ *yieldp = !0;
+ reds *= -1;
+ }
+
+ out_cnt = reds*ERTS_SIG_REDS_CNT_FACTOR;
+
+ hsz = 3 + sizeof(ErtsORefThing)/sizeof(Eterm);
+
+ rp = erts_proc_lookup(rpc->requester);
+ if (!rp) {
+ if (bp)
+ free_message_buffer(bp);
+ }
+ else {
+ Eterm *hp, msg, ref;
+ ErtsMessage *mp = erts_alloc_message(hsz, &hp);
+
+ sys_memcpy((void *) hp, (void *) &rpc->oref_thing,
+ sizeof(rpc->oref_thing));
+
+ ref = make_internal_ref(hp);
+ hp += sizeof(rpc->oref_thing)/sizeof(Eterm);
+ msg = TUPLE2(hp, ref, res);
+
+ mp->hfrag.next = bp;
+
+ erts_queue_proc_message(c_p, rp, 0, mp, msg);
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, rpc);
+
+ return out_cnt;
+}
static void
is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive)
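erts_proc_sig_send_rpc_request() and handle_rpc() above add an RPC-style signal: the requester ships a function pointer plus argument, the target executes it in its own context with a reduction budget, and the result comes back as a {Ref, Result} message keyed by the requester's reference. A standalone sketch of that closure-and-reply shape in plain C (hypothetical names, no reduction accounting):

#include <stdio.h>

typedef struct {
    int (*func)(void *arg);   /* closure to run in the target's context */
    void *arg;
    unsigned ref;             /* ties the reply back to the request */
} rpc_sig_t;

typedef struct { unsigned ref; int result; } reply_t;

static reply_t run_rpc(const rpc_sig_t *rpc)   /* target side, cf. handle_rpc() */
{
    reply_t reply = { rpc->ref, rpc->func(rpc->arg) };
    return reply;
}

static int double_it(void *arg) { return *(int *) arg * 2; }

int main(void)
{
    int x = 21;
    rpc_sig_t sig = { double_it, &x, 7 };      /* requester side builds the signal */
    reply_t r = run_rpc(&sig);
    printf("{%u,%d}\n", r.ref, r.result);      /* {7,42} */
    return 0;
}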
@@ -2391,8 +2717,9 @@ destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig)
}
static int
-handle_process_info(Process *c_p, ErtsMessage *sig,
- ErtsMessage ***next_nm_sig, int is_alive)
+handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing,
+ ErtsMessage *sig, ErtsMessage ***next_nm_sig,
+ int is_alive)
{
ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
Uint reds = 0;
@@ -2416,7 +2743,11 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
* Move messages part of message queue into inner
* signal queue...
*/
+ ASSERT(tracing);
+
if (*next_nm_sig != &c_p->sig_qs.cont) {
+ if (*next_nm_sig == tracing->messages.next)
+ tracing->messages.next = &c_p->sig_qs.cont;
*c_p->sig_qs.last = c_p->sig_qs.cont;
c_p->sig_qs.last = *next_nm_sig;
@@ -2427,18 +2758,6 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
*c_p->sig_qs.last = NULL;
}
- if (!pisig->common.specific.next) {
- /*
- * No more signals in middle queue...
- *
- * Process-info 'status' needs sig-q
- * process flag to be updated in order
- * to show accurate result...
- */
- erts_atomic32_read_band_nob(&c_p->state,
- ~ERTS_PSFLG_SIG_Q);
- }
-
#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
{
Sint len;
@@ -2452,8 +2771,20 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
#endif
}
}
- if (is_alive)
+ if (is_alive) {
+ if (!pisig->common.specific.next) {
+ /*
+ * No more signals in middle queue...
+ *
+ * Process-info 'status' needs sig-q
+ * process flag to be updated in order
+ * to show accurate result...
+ */
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ }
remove_nm_sig(c_p, sig, next_nm_sig);
+ }
rp = erts_proc_lookup(pisig->requester);
ASSERT(c_p != rp);
@@ -2498,7 +2829,7 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
if (is_alive)
erts_factory_trim_and_close(&hfact, &msg, 1);
- erts_queue_message(rp, locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, locks, mp, msg);
if (!is_alive && locks)
erts_proc_unlock(rp, locks);
@@ -2512,6 +2843,155 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
return ((int) reds)*4 + 8;
}
+static void
+handle_suspend(Process *c_p, ErtsMonitor *mon, int *yieldp)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+
+ if (!(state & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ *yieldp = !0;
+ }
+ else {
+ /* Executing dirty; delay suspend... */
+ ErtsProcSigPendingSuspend *psusp;
+ ErtsMonitorSuspend *msp;
+
+ psusp = ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp) {
+ psusp = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigPendingSuspend));
+ psusp->mon = NULL;
+ psusp->sync = NULL;
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, (void *) psusp);
+ }
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+
+ msp->next = psusp->mon;
+ psusp->mon = msp;
+
+ erts_atomic32_inc_nob(&msp->md.refc);
+ }
+}
+
+static void
+sync_suspend_reply(Process *c_p, ErtsMessage *mp, erts_aint32_t state)
+{
+ /*
+ * Sender prepared the message for us. Just patch
+ * the result if necessary. The default prepared
+ * result is 'false'.
+ */
+ Process *rp;
+ ErtsSyncSuspendRequest *ssusp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) (&mp->hfrag.mem[0]
+ + mp->hfrag.used_size);
+
+ ASSERT(ERTS_SIG_IS_NON_MSG(mp));
+ ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
+ == ERTS_SIG_Q_OP_SYNC_SUSPEND);
+ ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
+ ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
+ >= sizeof(ErtsSyncSuspendRequest));
+ ASSERT(is_internal_pid(ssusp->requester));
+ ASSERT(ssusp->requester != c_p->common.id);
+ ASSERT(is_tuple_arity(ssusp->message, 2));
+ ASSERT(is_immed(tuple_val(ssusp->message)[2]));
+
+ ERL_MESSAGE_TERM(mp) = ssusp->message;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ mp->next = NULL;
+
+ rp = erts_proc_lookup(ssusp->requester);
+ if (!rp)
+ erts_cleanup_messages(mp);
+ else {
+ if ((state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SUSPENDED)) != ERTS_PSFLG_SUSPENDED) {
+ /* Not suspended -> patch result... */
+ if (state & ERTS_PSFLG_EXITING) {
+ Eterm *tp = tuple_val(ssusp->message);
+ tp[2] = ssusp->async ? am_exited : am_badarg;
+ }
+ else {
+ Eterm *tp = tuple_val(ssusp->message);
+ ASSERT(!(state & ERTS_PSFLG_SUSPENDED));
+ tp[2] = ssusp->async ? am_not_suspended : am_internal_error;
+ }
+ }
+ erts_queue_proc_message(c_p, rp, 0, mp, ssusp->message);
+ }
+}
+
+static void
+handle_sync_suspend(Process *c_p, ErtsMessage *mp)
+{
+ ErtsProcSigPendingSuspend *psusp;
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp)
+ sync_suspend_reply(c_p, mp, erts_atomic32_read_nob(&c_p->state));
+ else {
+ mp->next = psusp->sync;
+ psusp->sync = mp;
+ }
+}
+
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p)
+{
+ ErtsMonitorSuspend *msp;
+ ErtsMessage *sync;
+ ErtsProcSigPendingSuspend *psusp;
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+
+ msp = psusp->mon;
+
+ while (msp) {
+ ErtsMonitorSuspend *next_msp = msp->next;
+ msp->next = NULL;
+ if (!(state & ERTS_PSFLG_EXITING)
+ && erts_monitor_is_in_table(&msp->md.target)) {
+ erts_aint_t mstate;
+
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+
+ erts_monitor_release(&msp->md.target);
+
+ msp = next_msp;
+ }
+
+ sync = psusp->sync;
+
+ while (sync) {
+ ErtsMessage *next_sync = sync->next;
+ sync->next = NULL;
+ sync_suspend_reply(c_p, sync, state);
+ sync = next_sync;
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, psusp);
+
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, NULL);
+}
+
/*
* Called in order to handle incoming signals.
*/
@@ -2522,7 +3002,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
{
Eterm tag;
erts_aint32_t state;
- int cnt, limit, abs_lim, msg_tracing;
+ int yield, cnt, limit, abs_lim, msg_tracing;
ErtsMessage *sig, ***next_nm_sig;
ErtsSigRecvTracing tracing;
@@ -2542,6 +3022,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
limit = *redsp;
*redsp = 0;
+ yield = 0;
if (!c_p->sig_qs.cont) {
if (state == -1)
@@ -2655,6 +3136,18 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
cnt += handle_nodedown(c_p, sig, mdp, next_nm_sig);
}
break;
+ case ERTS_MON_TYPE_SUSPEND:
+ tmon = (ErtsMonitor *) sig;
+ ASSERT(erts_monitor_is_target(tmon));
+ ASSERT(!erts_monitor_is_in_table(tmon));
+ mdp = erts_monitor_to_data(tmon);
+ if (erts_monitor_is_in_table(&mdp->origin)) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p),
+ &mdp->origin);
+ omon = &mdp->origin;
+ }
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ break;
default:
ERTS_INTERNAL_ERROR("invalid monitor type");
break;
@@ -2718,9 +3211,13 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
if (mon->type == ERTS_MON_TYPE_DIST_PROC)
erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), mon);
- else
+ else {
erts_monitor_list_insert(&ERTS_P_LT_MONITORS(c_p), mon);
+ if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ handle_suspend(c_p, mon, &yield);
+ }
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ cnt += 2;
break;
}
@@ -2764,9 +3261,16 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
else {
erts_monitor_list_delete(&ERTS_P_LT_MONITORS(c_p), tmon);
- if (type == ERTS_MON_TYPE_RESOURCE) {
+ switch (type) {
+ case ERTS_MON_TYPE_RESOURCE:
erts_nif_demonitored((ErtsResource *) tmon->other.ptr);
cnt++;
+ break;
+ case ERTS_MON_TYPE_SUSPEND:
+ erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
+ break;
+ default:
+ break;
}
}
erts_monitor_release_both(mdp);
@@ -2877,7 +3381,22 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
case ERTS_SIG_Q_OP_PROCESS_INFO:
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
- handle_process_info(c_p, sig, next_nm_sig, !0);
+ handle_process_info(c_p, &tracing, sig, next_nm_sig, !0);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ handle_sync_suspend(c_p, sig);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ cnt += handle_rpc(c_p, (ErtsProcSigRPC *) sig, cnt,
+ limit, &yield);
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
break;
@@ -3038,6 +3557,15 @@ stop: {
*redsp = cnt/4 + 1;
+ if (yield) {
+ int vreds = max_reds - *redsp;
+ if (vreds > 0) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ esdp->virtual_reds += vreds;
+ }
+ *redsp = max_reds;
+ }
+
return res;
}
}
@@ -3146,6 +3674,8 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
case ERTS_MON_TYPE_PROC:
case ERTS_MON_TYPE_DIST_PROC:
case ERTS_MON_TYPE_NODE:
+ case ERTS_MON_TYPE_NODES:
+ case ERTS_MON_TYPE_SUSPEND:
erts_monitor_release((ErtsMonitor *) sig);
break;
default:
@@ -3198,9 +3728,20 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
break;
case ERTS_SIG_Q_OP_PROCESS_INFO:
- handle_process_info(c_p, sig, next_nm_sig, 0);
+ handle_process_info(c_p, NULL, sig, next_nm_sig, 0);
break;
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ handle_sync_suspend(c_p, sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC: {
+ int yield = 0;
+ handle_rpc(c_p, (ErtsProcSigRPC *) sig,
+ cnt, limit, &yield);
+ break;
+ }
+
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
destroy_trace_info((ErtsSigTraceInfo *) sig);
break;
@@ -3336,6 +3877,7 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
}
break;
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
case ERTS_SIG_Q_OP_IS_ALIVE:
size = ((ErtsMessage *) sig)->hfrag.alloc_size;
@@ -3391,6 +3933,10 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
break;
}
+ case ERTS_SIG_Q_OP_RPC:
+ size = sizeof(ErtsProcSigRPC);
+ break;
+
default:
ERTS_INTERNAL_ERROR("Unknown signal");
break;
@@ -3467,17 +4013,13 @@ erts_proc_sig_receive_helper(Process *c_p,
*/
*get_outp = 0;
*msgpp = NULL;
+
return consumed_reds;
}
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
- if (left_reds <= 0) {
- *get_outp = -1; /* yield */
- *msgpp = NULL;
-
- ASSERT(consumed_reds >= (fcalls - neg_o_reds));
- return consumed_reds;
- }
+ if (left_reds <= 0)
+ break; /* Yield */
/* handle newly arrived signals... */
}
@@ -3498,19 +4040,27 @@ erts_proc_sig_receive_helper(Process *c_p,
max_reds, !0);
consumed_reds += reds;
left_reds -= reds;
- /* we may have exited by an incoming signal... */
- if (state & ERTS_PSFLG_EXITING) {
+
+ /* we may have exited or been suspended by an incoming signal... */
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_SUSPENDED)) {
+ if (state & ERTS_PSFLG_SUSPENDED)
+ break; /* Yield */
+
/*
* Process needs to schedule out in order
* to terminate. Prepare this a bit...
*/
+ ASSERT(state & ERTS_PSFLG_EXITING);
ASSERT(c_p->flags & F_DELAY_GC);
c_p->flags &= ~F_DELAY_GC;
c_p->arity = 0;
c_p->current = NULL;
+
*get_outp = 1;
*msgpp = NULL;
+
return consumed_reds;
}
@@ -3521,17 +4071,20 @@ erts_proc_sig_receive_helper(Process *c_p,
return consumed_reds;
}
- if (left_reds <= 0) {
- *get_outp = -1; /* yield */
- *msgpp = NULL;
-
- ASSERT(consumed_reds >= (fcalls - neg_o_reds));
- return consumed_reds;
- }
+ if (left_reds <= 0)
+ break; /* yield */
ASSERT(!c_p->sig_qs.cont);
/* Go fetch again... */
}
+
+ /* Yield... */
+
+ *get_outp = -1;
+ *msgpp = NULL;
+
+ ASSERT(consumed_reds >= (fcalls - neg_o_reds));
+ return consumed_reds;
}
static int
@@ -4268,7 +4821,7 @@ erts_proc_sig_hdbg_check_in_queue(Process *p, char *what, char *file, int line)
NULL,
NULL,
ERTS_PSFLG_SIG_IN_Q);
- ASSERT(p->sig_inq.len == len);
+ ASSERT(p->sig_inq.len == len); (void)len;
}
-#endif
+#endif /* ERTS_PROC_SIG_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index d250ad820f..efa7c08664 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -33,6 +33,11 @@
* - Group leader
* - Is process alive
* - Process info request
+ * - Suspend request (monitor of suspend type)
+ * - Resume request (demonitor of suspend type)
+ * - Suspend cleanup (monitor down of suspend type)
+ * - Sync suspend
+ * - RPC request
* - Trace change
*
* The signal queue consists of three parts:
@@ -557,6 +562,102 @@ erts_proc_sig_send_process_info_request(Process *c_p,
Uint reserve_size,
Eterm ref);
+/**
+ *
+ * @brief Send a 'sync suspend' signal to a process.
+ *
+ * A response message '{Tag, Reply}' is sent to the
+ * sender when the request has been handled, where Tag is
+ * the term passed as the 'tag' argument. If the operation
+ * is asynchronous, Reply is 'suspended', 'not_suspended',
+ * or 'exited'; otherwise it is the 'reply' argument, or
+ * 'badarg' if the process terminated.
+ *
+ * This signal does *not* change the suspend state; it only
+ * reads and replies with the state. This signal is typically
+ * sent after a suspend request (monitor of suspend type)
+ * signal has been sent to the process in order to get a
+ * response when the suspend monitor has been processed.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] tag Tag to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ * @param[in] reply Reply to send if this
+ * is a synchronous operation;
+ * otherwise, THE_NON_VALUE.
+ */
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to,
+ Eterm tag, Eterm reply);
+
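+/*
+ * Hedged usage sketch (editorial illustration; not part of the patch),
+ * modelled on erts_internal_suspend_process_2() later in this diff.
+ * 'mdp' is assumed to be a freshly created ERTS_MON_TYPE_SUSPEND
+ * monitor and 'to' the identifier of the process to suspend:
+ *
+ *     if (erts_proc_sig_send_monitor(&mdp->target, to)) {
+ *         Eterm ref = erts_make_ref(c_p);
+ *         erts_proc_sig_send_sync_suspend(c_p, to, ref, am_true);
+ *         ... the caller must now enter a receive matching 'ref' ...
+ *     }
+ */
+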
+/**
+ *
+ * @brief Send an 'rpc' signal to a process.
+ *
+ * The function 'func' will be executed in the
+ * context of the receiving process. A response
+ * message '{Ref, Result}' is sent to the sender
+ * when 'func' has been called. 'Ref' is the reference
+ * returned by this function and 'Result' is the
+ * term returned by 'func'. If the return value of
+ * 'func' is not an immediate term, 'func' has to
+ * allocate a heap fragment where the result is stored
+ * and update the heap fragment pointer passed via the
+ * last argument to point to it.
+ *
+ * If this function returns a reference, 'func' will
+ * be called in the context of the receiver. However,
+ * note that this might happen when the receiver is in
+ * an exiting state. The caller of this function
+ * *unconditionally* has to enter a receive that matches
+ * on the returned reference in all clauses as its next
+ * receive; otherwise, bad things will happen!
+ *
+ * If THE_NON_VALUE is returned, the receiver did not
+ * exist. The signal was not sent, and no specific
+ * receive has to be entered by the caller.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver process.
+ *
+ * @param[in] reply Non-zero if a reply is wanted.
+ *
+ * @param[in] func Function to execute in the
+ * context of the receiver.
+ * First argument will be a
+ * pointer to the process struct
+ * of the receiver process.
+ * Second argument will be 'arg'
+ * (see below). Third argument
+ * will be a pointer to a pointer
+ * to a heap fragment for storage
+ * of result returned from 'func'
+ * (i.e. an 'out' parameter).
+ *
+ * @param[in] arg Void pointer to argument
+ * to pass as second argument
+ * in call of 'func'.
+ *
+ * @returns If the request was sent,
+ * an internal ordinary
+ * reference; otherwise,
+ * THE_NON_VALUE (non-existing
+ * receiver).
+ */
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg);
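+
+/*
+ * Hedged usage sketch (editorial illustration; not part of the patch).
+ * The names my_rpc_func and 'to' are hypothetical; the function returns
+ * an immediate term, so no heap fragment is allocated:
+ *
+ *     static Eterm
+ *     my_rpc_func(Process *rp, void *arg, int *redsp, ErlHeapFragment **bpp)
+ *     {
+ *         (void) arg; (void) redsp; (void) bpp;
+ *         return (erts_atomic32_read_nob(&rp->state) & ERTS_PSFLG_SUSPENDED
+ *                 ? am_true : am_false);
+ *     }
+ *
+ *     ... in the requesting process c_p ...
+ *
+ *     Eterm ref = erts_proc_sig_send_rpc_request(c_p, to, !0,
+ *                                                my_rpc_func, NULL);
+ *     if (is_non_value(ref))
+ *         ... receiver did not exist; no reply will arrive ...
+ *     ... otherwise the next receive must match {ref, Result} ...
+ */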
/*
* End of send operations of currently supported process signals.
@@ -733,6 +834,25 @@ Sint
erts_proc_sig_privqs_len(Process *c_p);
+/* Enqueue the chain of signals 'first' .. '*last' in the signal in-queue
+ * of 'rp' and return the (possibly updated) process state. */
+erts_aint32_t
+erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint msg_cnt,
+ erts_aint32_t in_state);
+
+/* Send the signal currently buffered in esdp->pending_signal to its
+ * destination process. */
+void
+erts_proc_sig_send_pending(ErtsSchedulerData* esdp);
+
+/* Notify (and, if necessary, schedule) 'rp' after a new signal has been
+ * enqueued in its in-queue. */
+ERTS_GLB_INLINE void erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag);
+
+void erts_make_dirty_proc_handled(Eterm pid, erts_aint32_t state,
+ erts_aint32_t prio);
+
+
typedef struct {
Uint size;
ErtsMessage *msgp;
@@ -801,6 +921,21 @@ void
erts_proc_sig_clear_seq_trace_tokens(Process *c_p);
/**
+ *
+ * @brief Handle pending suspend requests
+ *
+ * Should be called by processes when they stop
+ * execution on a dirty scheduler if they have
+ * pending suspend requests (i.e. when
+ * ERTS_PROC_GET_PENDING_SUSPEND(c_p) != NULL).
+ *
+ * @param[in] c_p Pointer to executing
+ * process
+ */
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p);
+
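+/*
+ * Hedged call-site sketch (editorial illustration; not part of the
+ * patch) of the pattern described above, used when a process is
+ * scheduled out from a dirty scheduler:
+ *
+ *     if (ERTS_PROC_GET_PENDING_SUSPEND(c_p))
+ *         erts_proc_sig_handle_pending_suspend(c_p);
+ */
+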
+/**
* @brief Initialize this functionality
*/
void erts_proc_sig_queue_init(void);
@@ -867,6 +1002,24 @@ erts_proc_sig_fetch(Process *proc)
return res;
}
+ERTS_GLB_INLINE void
+erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag)
+{
+ if (~(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SIG_IN_Q))
+ | (~state & enable_flag)) {
+ /* Schedule process... */
+ state = erts_proc_sys_schedule(rp, state, enable_flag);
+ }
+
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(rp->common.id, state, -1);
+ }
+}
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_PROC_SIG_QUEUE_H__ */
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index e8f58a196a..1478b71195 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -36,7 +36,6 @@
#include "erl_db.h"
#include "dist.h"
#include "beam_catches.h"
-#include "erl_instrument.h"
#include "erl_threads.h"
#include "erl_binary.h"
#include "beam_bp.h"
@@ -186,8 +185,6 @@ sched_get_busy_wait_params(ErtsSchedulerData *esdp)
return &sched_busy_wait_params[esdp->type];
}
-int erts_disable_proc_not_running_opt;
-
static ErtsAuxWorkData *aux_thread_aux_work_data;
static ErtsAuxWorkData *poll_thread_aux_work_data;
@@ -731,6 +728,11 @@ erts_pre_init_process(void)
= ERTS_PSD_DIST_ENTRY_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
= ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].get_locks
+ = ERTS_PSD_PENDING_SUSPEND_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].set_locks
+ = ERTS_PSD_PENDING_SUSPEND_SET_LOCKS;
#endif
}
@@ -745,7 +747,6 @@ void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
- erts_disable_proc_not_running_opt = 0;
erts_init_proc_lock(ncpu);
init_proclist_alloc();
@@ -2508,6 +2509,8 @@ handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
yield |= erts_handle_yielded_ets_all_request(awdp->esdp,
&awdp->yield.ets_all);
+ yield |= erts_handle_yielded_alcu_blockscan(awdp->esdp,
+ &awdp->yield.alcu_blockscan);
/*
* Other yielding operations...
@@ -5698,6 +5701,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
+ esdp->current_nif = NULL;
esdp->virtual_reds = 0;
esdp->cpu_id = -1;
@@ -5713,6 +5717,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->io.out = (Uint64) 0;
esdp->io.in = (Uint64) 0;
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
*daww_ptr += daww_sz;
@@ -6604,13 +6614,13 @@ change_proc_schedule_state(Process *p,
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
- && (!(a & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)
- && (!(a & ERTS_PSFLG_ACTIVE)
- || (a & ERTS_PSFLG_SUSPENDED))))) {
+ & ((a & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ & !(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
/* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
@@ -8333,6 +8343,7 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
+ erts_alcu_sched_spec_data_init(esdp);
erts_ets_sched_spec_data_init(esdp);
process_main(esdp->x_reg_array, esdp->f_reg_array);
@@ -8544,427 +8555,22 @@ erts_start_schedulers(void)
}
}
-
-
-static void
-add_pend_suspend(Process *suspendee,
- Eterm originator_pid,
- void (*handle_func)(Process *,
- ErtsProcLocks,
- int,
- Eterm))
-{
- ErtsPendingSuspend *psp = erts_alloc(ERTS_ALC_T_PEND_SUSPEND,
- sizeof(ErtsPendingSuspend));
- psp->next = NULL;
-#ifdef DEBUG
-#if defined(ARCH_64)
- psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
-#else
- psp->end = (ErtsPendingSuspend *) 0xdeaddead;
-#endif
-#endif
- psp->pid = originator_pid;
- psp->handle_func = handle_func;
-
- if (suspendee->pending_suspenders)
- suspendee->pending_suspenders->end->next = psp;
- else
- suspendee->pending_suspenders = psp;
- suspendee->pending_suspenders->end = psp;
-}
-
-static void
-handle_pending_suspend(Process *p, ErtsProcLocks p_locks)
-{
- ErtsPendingSuspend *psp;
- int is_alive = !ERTS_PROC_IS_EXITING(p);
-
- ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
-
- /*
- * New pending suspenders might appear while we are processing
- * (since we may release the status lock on p while processing).
- */
- while (p->pending_suspenders) {
- psp = p->pending_suspenders;
- p->pending_suspenders = NULL;
- while (psp) {
- ErtsPendingSuspend *free_psp;
- (*psp->handle_func)(p, p_locks, is_alive, psp->pid);
- free_psp = psp;
- psp = psp->next;
- erts_free(ERTS_ALC_T_PEND_SUSPEND, (void *) free_psp);
- }
- }
-
-}
-
-static ERTS_INLINE void
-cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
-{
- if (is_not_nil(p->suspendee)) {
- ErtsMonitor *mon;
- Eterm suspendee = p->suspendee;
- Process *rp;
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
- suspendee, ERTS_PROC_LOCK_STATUS);
- if (rp) {
- erts_resume(rp, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- }
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- p->suspendee = NIL;
-
- mon = erts_monitor_tree_lookup(p->suspend_monitors,
- suspendee);
- if (mon) {
- erts_monitor_tree_delete(&p->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
- }
- }
-}
-
-static void
-handle_pend_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ASSERT(is_nil(suspender->suspendee));
- if (suspendee_alive) {
- erts_suspend(suspendee, suspendee_locks, NULL);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspendee != suspender);
- resume_process(suspender, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static Process *
-pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks, int suspend)
-{
- Process *rp;
- int unlock_c_p_status;
-
- ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
-
- ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
-
- if (c_p->common.id == pid)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
-
- if (c_p_locks & ERTS_PROC_LOCK_STATUS)
- unlock_c_p_status = 0;
- else {
- unlock_c_p_status = 1;
- erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- }
-
- if (c_p->suspendee == pid) {
- /* Process previously suspended by c_p (below)... */
- ErtsProcLocks rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS;
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks);
- c_p->suspendee = NIL;
- ASSERT(c_p->flags & F_P2PNR_RESCHED);
- c_p->flags &= ~F_P2PNR_RESCHED;
- if (!suspend && rp)
- resume_process(rp, rp_locks);
- }
- else {
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, ERTS_PROC_LOCK_STATUS);
-
- if (!rp) {
- c_p->flags &= ~F_P2PNR_RESCHED;
- goto done;
- }
-
- ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
-
- /*
- * Suspend the other process in order to prevent
- * it from being selected for normal execution.
- * This will however not prevent it from being
- * selected for execution of a system task. If
- * it is selected for execution of a system task
- * we might be blocked for quite a while if the
- * try-lock below fails. That is, there is room
- * for improvement here...
- */
-
- if (!suspend_process(c_p, rp)) {
- /* Other process running */
-
- ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
- & erts_atomic32_read_nob(&rp->state));
-
- if (!suspend
- && (erts_atomic32_read_nob(&rp->state)
- & ERTS_PSFLG_DIRTY_RUNNING)) {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- goto done;
- }
-
- running:
-
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
-
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(c_p, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
- }
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
- }
- else {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
- if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_atomic32_read_nob(&rp->state)) {
- /* Executing system task... */
- resume_process(rp, ERTS_PROC_LOCK_STATUS);
- goto running;
- }
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- /*
- * If we are unlucky, the process just got selected for
- * execution of a system task. In this case we may be
- * blocked here for quite a while... Execution of system
- * tasks are fortunately quite rare events. We try to
- * avoid this by checking if it is in a state executing
- * system tasks (above), but it will not prevent all
- * scenarios for a long block here...
- */
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- goto done;
- }
-
- /*
- * The previous suspend has prevented the process
- * from being selected for normal execution regardless
- * of locks held or not held on it...
- */
-#ifdef DEBUG
- {
- erts_aint32_t state;
- state = erts_atomic32_read_nob(&rp->state);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
- }
-#endif
-
- if (!suspend)
- resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- }
-
- done:
-
- if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- if (unlock_c_p_status)
- erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- return rp;
-}
-
-
-/*
- * Like erts_pid2proc() but:
- *
- * * At least ERTS_PROC_LOCK_MAIN have to be held on c_p.
- * * At least ERTS_PROC_LOCK_MAIN have to be taken on pid.
- * * It also waits for proc to be in a state != running and garbing.
- * * If ERTS_PROC_LOCK_BUSY is returned, the calling process has to
- * yield (ERTS_BIF_YIELD[0-3]()). c_p might in this case have been
- * suspended.
- */
-Process *
-erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 0);
-}
-
-/*
- * erts_pid2proc_nropt() is normally the same as
- * erts_pid2proc_not_running(). However it is only
- * to be used when 'not running' is a pure optimization,
- * not a requirement.
- */
-
-Process *
-erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- if (erts_disable_proc_not_running_opt)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- else
- return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks);
-}
-
-static ERTS_INLINE int
-do_bif_suspend_process(Process *c_p,
- ErtsMonitorSuspend *smon,
- Process *suspendee)
-{
- ASSERT(suspendee);
- ASSERT(!ERTS_PROC_IS_EXITING(suspendee));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(suspendee));
- if (smon) {
- if (!smon->active) {
- if (!suspend_process(c_p, suspendee))
- return 0;
- }
- smon->active += smon->pending;
- ASSERT(smon->active);
- smon->pending = 0;
- return 1;
- }
- return 0;
-}
-
-static void
-handle_pend_bif_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ErtsMonitorSuspend *smon;
- ErtsMonitor *mon;
- mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
- suspendee->common.id);
- smon = erts_monitor_suspend(mon);
-
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive) {
- if (mon) {
- erts_monitor_tree_delete(&suspender->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(smon);
- }
- }
- else {
-#ifdef DEBUG
- int res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspender != suspendee);
- resume_process(suspender, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static void
-handle_pend_bif_async_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
-
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ErtsMonitorSuspend *smon;
- ErtsMonitor *mon;
- mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
- suspendee->common.id);
- smon = erts_monitor_suspend(mon);
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive) {
- if (mon) {
- erts_monitor_tree_delete(&suspender->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(smon);
- }
- }
- else {
-#ifdef DEBUG
- int res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- }
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-
-/*
- * The erlang:suspend_process/2 BIF
- */
-
BIF_RETTYPE
-suspend_process_2(BIF_ALIST_2)
+erts_internal_suspend_process_2(BIF_ALIST_2)
{
Eterm res;
- Process* suspendee = NULL;
- ErtsMonitorSuspend *smon;
- ErtsProcLocks xlocks = (ErtsProcLocks) 0;
- int created;
-
- /* Options and default values: */
- int asynchronous = 0;
+ Eterm reply_tag = THE_NON_VALUE;
+ Eterm reply_res = THE_NON_VALUE;
+ int suspend;
+ int sync = 0;
+ int async = 0;
int unless_suspending = 0;
-
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsMonitorData *mdp;
if (BIF_P->common.id == BIF_ARG_1)
- goto badarg; /* We are not allowed to suspend ourselves */
+ BIF_RET(am_badarg); /* We are not allowed to suspend ourselves */
if (is_not_nil(BIF_ARG_2)) {
/* Parse option list */
@@ -8978,192 +8584,128 @@ suspend_process_2(BIF_ALIST_2)
unless_suspending = 1;
break;
case am_asynchronous:
- asynchronous = 1;
+ async = 1;
break;
- default:
- goto badarg;
+ default: {
+ if (is_tuple_arity(arg, 2)) {
+ Eterm *tp = tuple_val(arg);
+ if (tp[1] == am_asynchronous) {
+ async = 1;
+ reply_tag = tp[2];
+ break;
+ }
+ }
+ BIF_RET(am_badarg);
}
+ }
arg = CDR(lp);
- }
+ }
if (is_not_nil(arg))
- goto badarg;
- }
-
- xlocks = ERTS_PROC_LOCK_STATUS;
-
- erts_proc_lock(BIF_P, xlocks);
-
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|xlocks,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee)
- goto no_suspendee;
-
- smon = erts_monitor_suspend_tree_lookup_create(&BIF_P->suspend_monitors,
- &created,
- BIF_ARG_1);
-
- if (asynchronous) {
- /* --- Asynchronous suspend begin ---------------------------------- */
-
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(BIF_P));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- if (unless_suspending)
- res = am_false;
- else if (smon->active == INT_MAX)
- goto system_limit;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
- /* We havn't got any active suspends on the suspendee */
- if (smon->pending && unless_suspending)
- res = am_false;
- else {
- if (smon->pending == INT_MAX)
- goto system_limit;
-
- smon->pending++;
-
- if (!do_bif_suspend_process(BIF_P, smon, suspendee))
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_async_suspend);
-
- res = am_true;
- }
- /* done */
- }
- /* --- Asynchronous suspend end ------------------------------------ */
- }
- else /* if (!asynchronous) */ {
- /* --- Synchronous suspend begin ----------------------------------- */
-
- ERTS_LC_ASSERT(((ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS)
- & erts_proc_lc_my_proc_locks(BIF_P))
- == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (BIF_P->suspendee == BIF_ARG_1) {
- /* We are back after a yield and the suspendee
- has been suspended on behalf of us. */
- ASSERT(smon->active >= 1);
- BIF_P->suspendee = NIL;
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else if (smon->active) {
- if (unless_suspending)
- res = am_false;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
- /* We haven't got any active suspends on the suspendee */
-
- /*
- * If we have pending suspenders and suspend ourselves waiting
- * to suspend another process, or suspend another process
- * we might deadlock. In this case we have to yield,
- * be suspended by someone else, and then do it all over again.
- */
- if (BIF_P->pending_suspenders)
- goto yield;
-
- if (!unless_suspending && smon->pending == INT_MAX)
- goto system_limit;
- if (!unless_suspending || smon->pending == 0)
- smon->pending++;
-
- if (do_bif_suspend_process(BIF_P, smon, suspendee)) {
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else {
- /* Mark suspendee pending for suspend by BIF_P */
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_sync_suspend);
-
- ASSERT(is_nil(BIF_P->suspendee));
-
- /*
- * Suspend BIF_P; when suspendee is suspended, BIF_P
- * will be resumed and this BIF will be called again.
- * This time with BIF_P->suspendee == BIF_ARG_1 (see
- * above).
- */
- suspend_process(BIF_P, BIF_P);
- goto yield;
- }
- }
- /* --- Synchronous suspend end ------------------------------------- */
+ BIF_RET(am_badarg);
}
-#ifdef DEBUG
- {
- erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state);
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || (asynchronous && smon->pending));
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || !smon->active);
+ if (!unless_suspending) {
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(BIF_P),
+ &suspend,
+ ERTS_MON_TYPE_SUSPEND,
+ BIF_P->common.id,
+ BIF_ARG_1);
+ ASSERT(mon->other.item == BIF_ARG_1);
+
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(suspend || (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 1);
+ sync = !async & !suspend & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = !!suspend; /* ensure 0|1 */
+ res = am_true;
}
-#endif
-
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(BIF_P, xlocks);
- BIF_RET(res);
-
- system_limit:
- ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
- goto do_return;
-
- no_suspendee: {
+ else {
ErtsMonitor *mon;
- BIF_P->suspendee = NIL;
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
if (mon) {
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_read_nob(&msp->state);
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 0);
+ mdp = NULL;
+ sync = !async & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = 0;
+ res = am_false;
+ }
+ else {
+ mdp = erts_monitor_create(ERTS_MON_TYPE_SUSPEND, NIL,
+ BIF_P->common.id,
+ BIF_ARG_1, NIL);
+ mon = &mdp->origin;
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE));
+ suspend = !0;
+ res = am_true;
+ }
+ }
+
+ if (suspend) {
+ erts_aint32_t state;
+ Process *rp;
+ int send_sig = 0;
+
+ /* fail state... */
+ state = (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+
+ rp = erts_try_lock_sig_free_proc(BIF_ARG_1,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ &state);
+ if (!rp)
+ goto noproc;
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ send_sig = !0;
+ else {
+ send_sig = !suspend_process(BIF_P, rp);
+ if (!send_sig) {
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(rp), &mdp->target);
+ erts_atomic_read_bor_relb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+ if (send_sig) {
+ if (erts_proc_sig_send_monitor(&mdp->target, BIF_ARG_1))
+ sync = !async;
+ else {
+ noproc:
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
+ erts_monitor_release_both(mdp);
+ if (!async)
+ res = am_badarg;
+ }
}
}
- badarg:
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto do_return;
+ if (sync) {
+ ASSERT(is_non_value(reply_tag));
+ reply_res = res;
+ reply_tag = res = erts_make_ref(BIF_P);
+ ERTS_RECV_MARK_SAVE(BIF_P);
+ ERTS_RECV_MARK_SET(BIF_P);
+ }
- yield:
- ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
-
- do_return:
- if (suspendee)
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- if (xlocks)
- erts_proc_unlock(BIF_P, xlocks);
- return res;
+ if (is_value(reply_tag))
+ erts_proc_sig_send_sync_suspend(BIF_P, BIF_ARG_1, reply_tag, reply_res);
+ BIF_RET(res);
}
-
/*
* The erlang:resume_process/1 BIF
*/
@@ -9172,90 +8714,32 @@ BIF_RETTYPE
resume_process_1(BIF_ALIST_1)
{
ErtsMonitor *mon;
- ErtsMonitorSuspend *smon;
- Process *suspendee;
- int is_active;
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
- smon = erts_monitor_suspend(mon);
-
- if (!smon) {
- /* No previous suspend or dead suspendee */
- goto error;
- }
- else if (smon->pending) {
- smon->pending--;
- ASSERT(smon->pending >= 0);
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
- is_active = smon->active;
- }
- else if (smon->active) {
- smon->active--;
- ASSERT(smon->pending == 0);
- is_active = 1;
- }
- else {
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
+ if (!mon) {
/* No previous suspend or dead suspendee */
- goto no_suspendee;
+ BIF_ERROR(BIF_P, BADARG);
}
- if (smon->active || smon->pending || !is_active) {
- /* Leave the suspendee as it is; just verify that it is still alive */
- suspendee = erts_proc_lookup(BIF_ARG_1);
- if (!suspendee)
- goto no_suspendee;
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
- }
- else {
- /* Resume */
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee) {
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
- smon = erts_monitor_suspend(mon);
- if (!mon)
- goto error;
- goto no_suspendee;
- }
+ mstate = erts_atomic_dec_read_relb(&msp->state);
- ASSERT(mon == erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1));
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) >= 0);
- ASSERT(ERTS_PSFLG_SUSPENDED
- & erts_atomic32_read_nob(&suspendee->state));
- ASSERT(BIF_P != suspendee);
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
-
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- }
-
- if (!smon->active && !smon->pending) {
- ASSERT(mon);
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(smon);
+ if ((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) == 0) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), mon);
+ erts_proc_sig_send_demonitor(mon);
}
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
-
BIF_RET(am_true);
-
- no_suspendee:
- /* cleanup */
- ASSERT(mon);
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(smon);
-
- error:
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
- BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE
@@ -9601,6 +9085,17 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
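+/*
+ * Callback for erts_proc_dec_refc_free_func(): unlock the run queue
+ * just before the process struct is freed and lock it again afterwards.
+ */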
+static void
+unlock_lock_rq(int pre_free, void *vrq)
+{
+ ErtsRunQueue *rq = vrq;
+ if (pre_free)
+ erts_runq_unlock(rq);
+ else
+ erts_runq_lock(rq);
+}
+
+
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9671,8 +9166,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
} else {
is_normal_sched = !esdp;
if (is_normal_sched) {
- esdp = p->scheduler_data;
+ esdp = p->scheduler_data;
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ if (esdp->pending_signal.sig) {
+ ASSERT(esdp->pending_signal.dbg_from == p);
+ erts_proc_sig_send_pending(esdp);
+ }
}
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
@@ -9680,12 +9180,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
- sched_out_proc:
-
- ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
reds = actual_reds = calls - esdp->virtual_reds;
+ internal_sched_out_proc:
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
@@ -9727,11 +9228,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* have to re-read state after taking lock */
state = erts_atomic32_read_nob(&p->state);
- if (p->pending_suspenders)
- handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_TRACE
- | ERTS_PROC_LOCK_STATUS));
-
esdp->reductions += reds;
{
@@ -9768,7 +9264,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
if (dec_refc)
- erts_proc_dec_refc(p);
+ erts_proc_dec_refc_free_func(p,
+ unlock_lock_rq,
+ (void *) rq);
}
ASSERT(!esdp->free_process);
@@ -10181,8 +9679,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (is_normal_sched) {
if (state & ERTS_PSFLG_RUNNING_SYS) {
if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
- int local_only = !!(p->flags & F_LOCAL_SIGS_ONLY);
- if (!local_only || (state & ERTS_PSFLG_SIG_Q)) {
+ int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
+ & !(state & ERTS_PSFLG_SUSPENDED));
+ if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
int sig_reds;
/*
* If we have dirty work scheduled we allow
@@ -10268,7 +9767,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
p->fcalls = reds;
-
+ if (reds != context_reds) {
+ actual_reds = context_reds - reds - esdp->virtual_reds;
+ ASSERT(actual_reds >= 0);
+ esdp->virtual_reds = 0;
+ p->reds += actual_reds;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+ }
+
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
ASSERT(erts_proc_read_refc(p) > 0);
@@ -10318,6 +9827,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#endif
return p;
+
+ sched_out_proc:
+ actual_reds = context_reds;
+ actual_reds -= reds;
+ actual_reds -= esdp->virtual_reds;
+ reds = actual_reds;
+ goto internal_sched_out_proc;
+
}
}
@@ -10372,7 +9889,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10934,7 +10451,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
msg = copy_struct(operation, osz, &hp, ohp);
msg = TUPLE4(hp, st->requester, target, prio, msg);
- erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (rp_locks)
erts_proc_unlock(rp, rp_locks);
@@ -11830,7 +11347,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef HIPE
hipe_init_process(&p->hipe);
- hipe_init_process_smp(&p->hipe_smp);
#endif
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
@@ -11879,7 +11395,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
ERTS_P_LINKS(p) = NULL;
ERTS_P_MONITORS(p) = NULL;
ERTS_P_LT_MONITORS(p) = NULL;
- p->suspend_monitors = NULL;
ASSERT(is_pid(parent->group_leader));
@@ -11909,6 +11424,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->sig_inq.len = 0;
p->sig_inq.nmsigs.next = NULL;
p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
+#endif
p->bif_timers = NULL;
p->mbuf = NULL;
p->msg_frag = NULL;
@@ -11933,8 +11451,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->trace_msg_q = NULL;
p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -12101,7 +11617,6 @@ void erts_init_empty_process(Process *p)
ERTS_P_MONITORS(p) = NULL;
ERTS_P_LT_MONITORS(p) = NULL;
ERTS_P_LINKS(p) = NULL; /* List of links */
- p->suspend_monitors = NULL;
p->sig_qs.first = NULL;
p->sig_qs.last = &p->sig_qs.first;
p->sig_qs.cont = NULL;
@@ -12116,6 +11631,9 @@ void erts_init_empty_process(Process *p)
p->sig_inq.len = 0;
p->sig_inq.nmsigs.next = NULL;
p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
+#endif
p->bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
@@ -12149,7 +11667,6 @@ void erts_init_empty_process(Process *p)
#ifdef HIPE
hipe_init_process(&p->hipe);
- hipe_init_process_smp(&p->hipe_smp);
#endif
INIT_HOLE_CHECK(p);
@@ -12162,8 +11679,6 @@ void erts_init_empty_process(Process *p)
erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
erts_proc_lock_init(p);
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);
@@ -12201,7 +11716,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(ERTS_P_MONITORS(p) == NULL);
ASSERT(ERTS_P_LT_MONITORS(p) == NULL);
ASSERT(ERTS_P_LINKS(p) == NULL);
- ASSERT(p->suspend_monitors == NULL);
ASSERT(p->sig_qs.first == NULL);
ASSERT(p->sig_qs.len == 0);
ASSERT(p->bif_timers == NULL);
@@ -12215,8 +11729,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->sig_inq.first == NULL);
ASSERT(p->sig_inq.len == 0);
- ASSERT(p->suspendee == NIL);
- ASSERT(p->pending_suspenders == NULL);
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -12322,8 +11834,6 @@ delete_process(Process* p)
erts_cleanup_messages(p->sig_qs.cont);
p->sig_qs.cont = NULL;
- ASSERT(!p->suspend_monitors);
-
p->fvalue = NIL;
}
@@ -12403,6 +11913,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
if (erts_monitor_is_target(mon)) {
/* We are being watched... */
switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
case ERTS_MON_TYPE_PROC:
erts_proc_sig_send_monitor_down(mon, reason);
mon = NULL;
@@ -12474,6 +11985,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
else { /* Origin monitor */
/* We are watching someone else... */
switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
case ERTS_MON_TYPE_PROC:
erts_proc_sig_send_demonitor(mon);
mon = NULL;
@@ -12626,21 +12138,6 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
erts_link_release(lnk);
}
-static void
-resume_suspend_monitor(ErtsMonitor *mon, void *vc_p)
-{
- ErtsMonitorSuspend *smon = erts_monitor_suspend(mon);
- Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
- smon->mon.other.item, ERTS_PROC_LOCK_STATUS);
- if (suspendee) {
- ASSERT(suspendee != vc_p);
- if (smon->active)
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- }
- erts_monitor_suspend_destroy(smon);
-}
-
/* this function finishes a process and propagates exit messages - called
by process_main when a process dies */
void
@@ -12672,8 +12169,6 @@ erts_do_exit_process(Process* p, Eterm reason)
set_self_exiting(p, reason, NULL, NULL, NULL);
- cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
-
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
@@ -12806,11 +12301,6 @@ erts_continue_exit_process(Process *p)
p->flags &= ~F_USING_DDLL;
}
- if (p->suspend_monitors)
- erts_monitor_tree_foreach_delete(&p->suspend_monitors,
- resume_suspend_monitor,
- p);
-
/*
* The registered name *should* be the last "erlang resource" to
* cleanup.
@@ -13018,7 +12508,13 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
erts_aint32_t *statep)
{
Process *rp = erts_proc_lookup_raw(pid);
+ erts_aint32_t fail_state = ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q;
erts_aint32_t state;
+ ErtsProcLocks tmp_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
+
+ tmp_locks |= locks;
+ if (statep)
+ fail_state |= *statep;
if (!rp) {
if (statep)
@@ -13035,28 +12531,28 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
if (state & ERTS_PSFLG_FREE)
return NULL;
- if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q))
+ if (state & fail_state)
return ERTS_PROC_LOCK_BUSY;
- if (!locks)
- return rp;
-
- if (erts_proc_trylock(rp, locks) == EBUSY)
+ if (erts_proc_trylock(rp, tmp_locks) == EBUSY)
return ERTS_PROC_LOCK_BUSY;
state = erts_atomic32_read_nob(&rp->state);
if (statep)
*statep = state;
- if (state & ERTS_PSFLG_FREE) {
- erts_proc_unlock(rp, locks);
- return NULL;
+ if ((state & fail_state)
+ || rp->sig_inq.first
+ || rp->sig_qs.cont) {
+ erts_proc_unlock(rp, tmp_locks);
+ if (state & ERTS_PSFLG_FREE)
+ return NULL;
+ else
+ return ERTS_PROC_LOCK_BUSY;
}
- if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q)) {
- erts_proc_unlock(rp, locks);
- return ERTS_PROC_LOCK_BUSY;
- }
+ if (tmp_locks != locks)
+ erts_proc_unlock(rp, tmp_locks & ~locks);
return rp;
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e232776016..a60e117bab 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -593,6 +593,7 @@ typedef struct {
ErtsDelayedAuxWorkWakeupJob *job;
} delayed_wakeup;
struct {
+ ErtsAlcuBlockscanYieldData alcu_blockscan;
ErtsEtsAllYieldData ets_all;
/* Other yielding operations... */
} yield;
@@ -637,6 +638,7 @@ struct ErtsSchedulerData_ {
ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
Uint dirty_no; /* Scheduler number for dirty schedulers */
+ struct enif_environment_t *current_nif;
Process *dirty_shadow_process;
Port *current_port;
ErtsRunQueue *run_queue;
@@ -658,6 +660,13 @@ struct ErtsSchedulerData_ {
Uint64 out;
Uint64 in;
} io;
+ struct {
+ ErtsSignal* sig;
+ Eterm to;
+#ifdef DEBUG
+ Process* dbg_from;
+#endif
+ } pending_signal;
Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
@@ -796,14 +805,15 @@ erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_ETS_OWNED_TABLES 6
#define ERTS_PSD_ETS_FIXED_TABLES 7
#define ERTS_PSD_DIST_ENTRY 8
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */
+#define ERTS_PSD_PENDING_SUSPEND 9
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 10 /* keep last... */
-#define ERTS_PSD_SIZE 10
+#define ERTS_PSD_SIZE 11
#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 9
+# define ERTS_PSD_SIZE 10
#endif
typedef struct {
@@ -840,6 +850,9 @@ typedef struct {
#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -875,20 +888,6 @@ typedef struct {
typedef struct ErtsProcSysTask_ ErtsProcSysTask;
typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
-
-typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
-struct ErtsPendingSuspend_ {
- ErtsPendingSuspend *next;
- ErtsPendingSuspend *end;
- Eterm pid;
- void (*handle_func)(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm pid);
-};
-
-
-
/* Defines to ease the change of memory architecture */
# define HEAP_START(p) (p)->heap
# define HEAP_TOP(p) (p)->htop
@@ -983,9 +982,6 @@ struct process {
Process *next; /* Pointer to next process in run queue */
- ErtsMonitor *suspend_monitors; /* Processes suspended by this process via
- erlang:suspend_process/1 */
-
ErtsSignalPrivQueues sig_qs; /* Signal queues */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
@@ -1049,12 +1045,7 @@ struct process {
ErlTraceMessageQueue *trace_msg_q;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
- Eterm suspendee;
- ErtsPendingSuspend *pending_suspenders;
erts_atomic_t run_queue;
-#ifdef HIPE
- struct hipe_process_state_smp hipe_smp;
-#endif
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
@@ -1371,7 +1362,7 @@ extern int erts_system_profile_ts_type;
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
-#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
+#define F_UNUSED (1 << 9)
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
@@ -1388,10 +1379,12 @@ extern int erts_system_profile_ts_type;
#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
#define F_HIBERNATED (1 << 25) /* Hibernated */
-#define F_LOCAL_SIGS_ONLY (1 << 26)
+#define F_LOCAL_SIGS_ONLY (1 << 26) /* Handle privq sigs only */
#define F_TRAP_EXIT (1 << 27) /* Trapping exit */
-#define F_DEFERRED_SAVED_LAST (1 << 28)
-#define F_DELAYED_PSIGQS_LEN (1 << 29)
+#define F_DEFERRED_SAVED_LAST (1 << 28) /* Deferred sig_qs.saved_last */
+#define F_DELAYED_PSIGQS_LEN (1 << 29) /* Delayed update of sig_qs.len */
+#define F_HIPE_RECV_LOCKED (1 << 30) /* HiPE message queue locked */
+#define F_HIPE_RECV_YIELD (1 << 31) /* HiPE receive yield */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -2039,6 +2032,11 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \
((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE)))
+#define ERTS_PROC_GET_PENDING_SUSPEND(P) \
+ ((void *) erts_psd_get((P), ERTS_PSD_PENDING_SUSPEND))
+#define ERTS_PROC_SET_PENDING_SUSPEND(P, PS) \
+ ((void *) erts_psd_set((P), ERTS_PSD_PENDING_SUSPEND, (void *) (PS)))
+
#ifdef HIPE
#define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
@@ -2603,16 +2601,6 @@ Process *erts_try_lock_sig_free_proc(Eterm pid,
ErtsProcLocks locks,
erts_aint32_t *statep);
-Process *erts_pid2proc_not_running(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
-Process *erts_pid2proc_nropt(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm pid,
- ErtsProcLocks pid_locks);
-extern int erts_disable_proc_not_running_opt;
-
#ifdef DEBUG
#define ERTS_ASSERT_IS_NOT_EXITING(P) \
do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 43f396c547..bd38eca4dc 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -921,6 +921,9 @@ ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg);
ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
@@ -993,6 +996,21 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
}
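+
+/*
+ * Like erts_proc_dec_refc(), but when the reference count drops to
+ * zero 'func' is called around the freeing of the process struct:
+ * once with a non-zero first argument just before erts_free_proc()
+ * and once with zero just after (see unlock_lock_rq() in
+ * erl_process.c for an example callback).
+ */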
}
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg)
+{
+ Sint referred;
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ referred = erts_ptab_atmc_dec_test_refc(&p->common);
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
+ (*func)(!0, arg);
+ erts_free_proc(p);
+ (*func)(0, arg);
+ }
+}
+
ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
Sint referred;
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 18483fca35..bddf403b0a 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1184,6 +1184,54 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val(x)))
#define is_not_map(x) (!is_map(x))
+#define MAP_HEADER(hp, sz, keys) \
+ ((hp)[0] = MAP_HEADER_FLATMAP, \
+ (hp)[1] = sz, \
+ (hp)[2] = keys)
+
+#define MAP_SZ(sz) (MAP_HEADER_FLATMAP_SZ + 2*sz + 1)
+
+#define MAP0_SZ MAP_SZ(0)
+#define MAP1_SZ MAP_SZ(1)
+#define MAP2_SZ MAP_SZ(2)
+#define MAP3_SZ MAP_SZ(3)
+#define MAP4_SZ MAP_SZ(4)
+#define MAP5_SZ MAP_SZ(5)
+#define MAP0(hp) \
+ (MAP_HEADER(hp, 0, TUPLE0(hp+MAP_HEADER_FLATMAP_SZ)), \
+ make_flatmap(hp))
+#define MAP1(hp, k1, v1) \
+ (MAP_HEADER(hp, 1, TUPLE1(hp+1+MAP_HEADER_FLATMAP_SZ, k1)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ make_flatmap(hp))
+#define MAP2(hp, k1, v1, k2, v2) \
+ (MAP_HEADER(hp, 2, TUPLE2(hp+2+MAP_HEADER_FLATMAP_SZ, k1, k2)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ make_flatmap(hp))
+#define MAP3(hp, k1, v1, k2, v2, k3, v3) \
+ (MAP_HEADER(hp, 3, TUPLE3(hp+3+MAP_HEADER_FLATMAP_SZ, k1, k2, k3)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ make_flatmap(hp))
+#define MAP4(hp, k1, v1, k2, v2, k3, v3, k4, v4) \
+ (MAP_HEADER(hp, 4, TUPLE4(hp+4+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ make_flatmap(hp))
+#define MAP5(hp, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5) \
+ (MAP_HEADER(hp, 5, TUPLE5(hp+5+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4, k5)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+4] = v5, \
+ make_flatmap(hp))
+
+
/* number tests */
#define is_integer(x) (is_small(x) || is_big(x))
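The new MAPn()/MAPn_SZ macros build small flatmaps directly on heap memory the caller has already reserved: MAPn_SZ gives the word count to allocate, and MAPn() writes the flatmap header, the key tuple and the values, returning the tagged map term. A usage sketch (p, Pid and Time are placeholders; flatmap keys must be unique and, as in the callers elsewhere in this patch, supplied in term order):

    /* Build #{pid => Pid, time => Time} on the heap of process p. */
    Eterm *hp = HAlloc(p, MAP2_SZ);
    Eterm map = MAP2(hp,
                     am_pid, Pid,     /* key/value pairs */
                     am_time, Time);
    hp += MAP2_SZ;                    /* heap top past the new map */
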
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 1e833539b3..f074dd8bdb 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2044,7 +2044,7 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
- enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_logger, msg, bp);
}
void
@@ -2110,13 +2110,13 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
- char *no_elgger = "(no error logger present)";
+ char *no_elgger = "(no logger present)";
Eterm *tp;
Eterm tag;
if (is_not_tuple(smqp->msg)) {
unexpected_elmsg:
erts_fprintf(stderr,
- "%s unexpected error logger message: %T\n",
+ "%s unexpected logger message: %T\n",
no_elgger,
smqp->msg);
}
@@ -2284,7 +2284,7 @@ sys_msg_dispatcher_func(void *unused)
}
break;
case SYS_MSG_TYPE_ERRLGR:
- receiver = am_error_logger;
+ receiver = am_logger;
break;
default:
receiver = NIL;
@@ -2313,7 +2313,7 @@ sys_msg_dispatcher_func(void *unused)
erts_proc_unlock(proc, proc_locks);
}
}
- else if (receiver == am_error_logger) {
+ else if (receiver == am_logger) {
proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
if (!proc)
goto failure;
@@ -2379,7 +2379,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
to = erts_get_system_profile();
break;
case SYS_MSG_TYPE_ERRLGR:
- to = am_error_logger;
+ to = am_logger;
break;
default:
to = NIL;
@@ -2629,6 +2629,38 @@ erts_tracer_to_term(Process *p, ErtsTracer tracer)
}
}
+Eterm
+erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer)
+{
+ Eterm res;
+ Eterm state;
+ Uint sz;
+
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+
+ state = ERTS_TRACER_STATE(tracer);
+ sz = is_immed(state) ? 0 : size_object(state);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ res = is_immed(state) ? state : copy_struct(state, sz, hpp, ohp);
+ else
+ res = THE_NON_VALUE;
+
+ if (ERTS_TRACER_MODULE(tracer) != am_erl_tracer) {
+ if (szp)
+ *szp += 3;
+ if (hpp) {
+ res = TUPLE2(*hpp, ERTS_TRACER_MODULE(tracer), res);
+ *hpp += 3;
+ }
+ }
+
+ return res;
+}
static ERTS_INLINE int
send_to_tracer_nif_raw(Process *c_p, Process *tracee,
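erts_build_tracer_to_term() appears to follow the usual two-pass size/build convention of the erts_bld_*() helpers: call it once with only szp set to accumulate the required heap size, allocate, then call it again with hpp (and an off-heap list) to build the term. A sketch under that assumption, where p and tracer are placeholders:

    Uint sz = 0;
    Eterm *hp;
    Eterm term;

    /* Pass 1: accumulate the size of the result only. */
    (void) erts_build_tracer_to_term(NULL, NULL, &sz, tracer);

    /* Pass 2: allocate on the heap of process p and build the term. */
    hp = HAlloc(p, sz);
    term = erts_build_tracer_to_term(&hp, &MSO(p), NULL, tracer);
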
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index dbf7ebd2a1..3228e19809 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -198,6 +198,8 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
ErtsPTabElementCommon *t_p);
int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+Eterm erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer);
+
ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
void erts_tracer_replace(ErtsPTabElementCommon *t_p,
const ErtsTracer new_tracer);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 256670ff22..2cf268162d 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -123,6 +123,7 @@ void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
extern int erts_nif_get_funcs(struct erl_module_nif*,
struct enif_func_t **funcs);
+extern Module *erts_nif_get_module(struct erl_module_nif*);
extern Eterm erts_nif_call_function(Process *p, Process *tracee,
struct erl_module_nif*,
struct enif_func_t *,
@@ -200,6 +201,7 @@ typedef struct {
struct erts_driver_t_ {
erts_driver_t *next;
erts_driver_t *prev;
+ Eterm name_atom;
char *name;
struct {
int major;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 66b8005975..2446b3c074 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -956,6 +956,9 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
reds_left_in = ERTS_BIF_REDS_LEFT(c_p);
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT((c_p->scheduler_data)->current_port == NULL);
+ (c_p->scheduler_data)->current_port = prt;
}
ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS);
@@ -1017,6 +1020,9 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_port_release(prt);
if (c_p) {
+ ASSERT((c_p->scheduler_data)->current_port == prt);
+ (c_p->scheduler_data)->current_port = NULL;
+
erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (reds != (CONTEXT_REDS - sp->reds_left_in)) {
@@ -2906,11 +2912,8 @@ static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
if (enable) {
- Eterm name_as_atom = erts_atom_put((byte*)dp->name, sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1, 1);
-
- erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock", name_as_atom,
- ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock",
+ dp->name_atom, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
} else {
erts_lcnt_uninstall(&dp->lock->lcnt);
}
@@ -7357,7 +7360,11 @@ no_stop_select_callback(ErlDrvEvent event, void* private)
static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
{
+ drv->name_atom = erts_atom_put((byte*)de->driver_name,
+ sys_strlen(de->driver_name),
+ ERTS_ATOM_ENC_LATIN1, 1);
drv->name = de->driver_name;
+
ASSERT(de->extended_marker == ERL_DRV_EXTENDED_MARKER);
ASSERT(de->major_version >= 2);
drv->version.major = de->major_version;
@@ -7367,13 +7374,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
drv->lock = NULL;
} else {
- Eterm driver_id = erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1, 1);
-
drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
- erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_mtx_init(drv->lock, "driver_lock", drv->name_atom,
+ ERTS_LOCK_FLAGS_CATEGORY_IO);
}
drv->entry = de;
@@ -7459,18 +7463,12 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle,
erts_tsd_set(driver_list_lock_status_key, (void *) 1);
}
- if (taint) {
- Eterm name_atom = erts_atom_put((byte*)de->driver_name,
- sys_strlen(de->driver_name),
- ERTS_ATOM_ENC_LATIN1, 0);
- if (is_atom(name_atom))
- erts_add_taint(name_atom);
- else
- err = 1;
- }
-
if (!err) {
err = init_driver(dp, de, handle);
+
+ if (taint) {
+ erts_add_taint(dp->name_atom);
+ }
}
if (err) {
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
index 26bea0efc6..9bf3aefaca 100644
--- a/erts/emulator/beam/msg_instrs.tab
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -102,6 +102,9 @@ i_loop_rec(Dest) {
if (ERTS_UNLIKELY(msgp == NULL)) {
int get_out;
SWAPOUT;
+ $SET_CP_I_ABS(I);
+ c_p->arity = 0;
+ c_p->current = NULL;
FCALLS -= erts_proc_sig_receive_helper(c_p, FCALLS, neg_o_reds,
&msgp, &get_out);
SWAPIN;
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 188e02eff8..d74052d8b2 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -140,7 +140,7 @@ Eterm*
erts_set_hole_marker(Eterm* ptr, Uint sz)
{
Eterm* p = ptr;
- int i;
+ Uint i;
for (i = 0; i < sz; i++) {
*p++ = ERTS_HOLE_MARKER;
@@ -1924,155 +1924,165 @@ make_internal_hash(Eterm term, Uint32 salt)
#undef HCONST
#undef MIX
+/* logger !
+ {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
+*/
static Eterm
-do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
- ErlHeapFragment **bp, Process **p, Uint sz)
+do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
+ Eterm **hp, ErlOffHeap **ohp,
+ ErlHeapFragment **bp, Uint sz)
{
Uint gl_sz;
gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
- sz = sz + gl_sz;
+ sz = sz + gl_sz + 6 /*outer 5-tuple*/
+ + MAP2_SZ /* error_logger map */;
+
+ *pid = erts_get_current_pid();
+
+ if (is_nil(gleader) && is_non_value(*pid)) {
+ sz += MAP2_SZ /* metadata map no gl, no pid */;
+ } else if (is_nil(gleader) || is_non_value(*pid))
+        sz += MAP3_SZ /* metadata map, no gl or no pid */;
+    else
+        sz += MAP4_SZ /* metadata map with gl and pid */;
+
+ *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL)) + ERTS_MONOTONIC_OFFSET_USEC;
+ erts_bld_sint64(NULL, &sz, *ts);
*bp = new_message_buffer(sz);
*ohp = &(*bp)->off_heap;
*hp = (*bp)->mem;
- return (is_nil(gleader)
- ? am_noproc
- : (IS_CONST(gleader)
- ? gleader
- : copy_struct(gleader,gl_sz,hp,*ohp)));
+ return copy_struct(gleader,gl_sz,hp,*ohp);
}
-static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
- Process *p, Eterm message)
+static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
+ ErtsMonotonicTime ts, Eterm pid,
+ Eterm *hp, ErlHeapFragment *bp)
{
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "%T\n", message);
-#endif
- {
- Eterm from = erts_get_current_pid();
- if (is_not_internal_pid(from))
- from = NIL;
- erts_queue_error_logger_message(from, message, bp);
+ Eterm message, md, el_tag = tag;
+ Eterm time = erts_bld_sint64(&hp, NULL, ts);
+
+ /* This mapping is needed for the backwards compatible error_logger */
+ switch (tag) {
+ case am_info: el_tag = am_info_msg; break;
+ case am_warning: el_tag = am_warning_msg; break;
+ default:
+        ASSERT(tag == am_error);
+ break;
}
+
+ md = MAP2(hp, am_emulator, am_true,
+ am_atom_put("tag", 3), el_tag);
+ hp += MAP2_SZ;
+
+ if (is_nil(gl) && is_non_value(pid)) {
+ /* no gl and no pid, probably from a port */
+ md = MAP2(hp,
+ am_error_logger, md,
+ am_time, time);
+ hp += MAP2_SZ;
+ pid = NIL;
+ } else if (is_nil(gl)) {
+ /* no gl */
+ md = MAP3(hp,
+ am_error_logger, md,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP3_SZ;
+ } else if (is_non_value(pid)) {
+        /* no pid */
+ md = MAP3(hp,
+ am_error_logger, md,
+ am_atom_put("gl", 2), gl,
+ am_time, time);
+ hp += MAP3_SZ;
+ pid = NIL;
+ } else {
+ md = MAP4(hp,
+ am_error_logger, md,
+ am_atom_put("gl", 2), gl,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP4_SZ;
+ }
+
+ message = TUPLE5(hp, am_log, tag, format, args, md);
+ erts_queue_error_logger_message(pid, message, bp);
}
-/* error_logger !
- {notify,{info_msg,gleader,{emulator,format,[args]}}} |
- {notify,{error,gleader,{emulator,format,[args]}}} |
- {notify,{warning_msg,gleader,{emulator,format,[args}]}} */
-static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
{
Uint sz;
- Eterm gl;
- Eterm list,args,format,tuple1,tuple2,tuple3;
+ Eterm list, args, format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
-
- ASSERT(is_atom(tag));
-
- if (len <= 0) {
- return -1;
- }
sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
+ 8 /* "~s~n" */;
/* gleader size is accounted and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
- tag, buf);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
list = buf_to_intlist(&hp, buf, len, NIL);
args = CONS(hp,list,NIL);
hp += 2;
format = buf_to_intlist(&hp, "~s~n", 4, NIL);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
-static int do_send_term_to_logger(Eterm tag, Eterm gleader,
- char *buf, int len, Eterm args)
+static int do_send_term_to_logger(Eterm tag, Eterm gl,
+ char *buf, size_t len, Eterm args)
{
Uint sz;
- Eterm gl;
Uint args_sz;
- Eterm format,tuple1,tuple2,tuple3;
+ Eterm format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
- ASSERT(is_atom(tag));
+ ASSERT(len > 0);
args_sz = size_object(args);
- sz = len * 2 /* format */ + args_sz
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+ sz = len * 2 /* format */ + args_sz;
/* gleader size is accounted and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
- tag, buf, args);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
format = buf_to_intlist(&hp, buf, len, NIL);
args = copy_struct(args, args_sz, &hp, ohp);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
static ERTS_INLINE int
-send_info_to_logger(Eterm gleader, char *buf, int len)
+send_info_to_logger(Eterm gleader, char *buf, size_t len)
{
- return do_send_to_logger(am_info_msg, gleader, buf, len);
+ return do_send_to_logger(am_info, gleader, buf, len);
}
static ERTS_INLINE int
-send_warning_to_logger(Eterm gleader, char *buf, int len)
+send_warning_to_logger(Eterm gleader, char *buf, size_t len)
{
- Eterm tag;
- switch (erts_error_logger_warnings) {
- case am_info: tag = am_info_msg; break;
- case am_warning: tag = am_warning_msg; break;
- default: tag = am_error; break;
- }
- return do_send_to_logger(tag, gleader, buf, len);
+ return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
}
static ERTS_INLINE int
-send_error_to_logger(Eterm gleader, char *buf, int len)
+send_error_to_logger(Eterm gleader, char *buf, size_t len)
{
return do_send_to_logger(am_error, gleader, buf, len);
}
static ERTS_INLINE int
-send_error_term_to_logger(Eterm gleader, char *buf, int len, Eterm args)
+send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
{
return do_send_term_to_logger(am_error, gleader, buf, len, args);
}
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index bc9a700204..0a65e317ed 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -490,16 +490,21 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
/* same semantics, different debug trace messages */
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
- if (p->hipe_smp.have_receive_locks)
- p->hipe_smp.have_receive_locks = 0;
+ if (p->flags & F_HIPE_RECV_LOCKED)
+ p->flags &= ~F_HIPE_RECV_LOCKED;
else
erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
p->i = hipe_beam_pc_resume;
p->arity = 0;
if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ACTIVE);
- else
+ else if (!(p->flags & F_HIPE_RECV_YIELD))
erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE);
+ else {
+ /* Yielded from receive */
+ ERTS_VBUMP_ALL_REDS(p);
+ p->flags &= ~F_HIPE_RECV_YIELD;
+ }
erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
do_schedule:
{
@@ -522,7 +527,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
p = erts_schedule(NULL, p, reds_in - p->fcalls);
ERTS_REQ_PROC_MAIN_LOCK(p);
ASSERT(!(p->flags & F_HIPE_MODE));
- p->hipe_smp.have_receive_locks = 0;
+ p->flags &= ~F_HIPE_RECV_LOCKED;
reg = p->scheduler_data->x_reg_array;
}
{
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index cf8c4139be..211ce0492a 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -144,8 +144,8 @@ BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1)
else {
int tres = erts_set_proc_timer_term(p, timeout_value);
if (tres != 0) { /* Wrong time */
- if (p->hipe_smp.have_receive_locks) {
- p->hipe_smp.have_receive_locks = 0;
+ if (p->flags & F_HIPE_RECV_LOCKED) {
+ p->flags &= ~F_HIPE_RECV_LOCKED;
erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
BIF_ERROR(p, EXC_TIMEOUT_VALUE);
@@ -546,19 +546,17 @@ Eterm hipe_check_get_msg(Process *c_p)
if (!msgp) {
int get_out;
- (void) erts_proc_sig_receive_helper(c_p, CONTEXT_REDS, 0,
+ c_p->i = NULL;
+ c_p->arity = 0;
+ c_p->current = NULL;
+ (void) erts_proc_sig_receive_helper(c_p, CONTEXT_REDS/4, 0,
&msgp, &get_out);
/* FIXME: Need to bump reductions... */
if (!msgp) {
if (get_out) {
- if (get_out < 0) {
- /*
- * FIXME: We should get out yielding
- * here...
- */
- goto next_message;
- }
- /* Go exit... */
+ if (get_out < 0)
+ c_p->flags |= F_HIPE_RECV_YIELD; /* yield... */
+ /* else: go exit... */
return THE_NON_VALUE;
}
@@ -570,7 +568,7 @@ Eterm hipe_check_get_msg(Process *c_p)
*/
/* XXX: BEAM doesn't need this */
- c_p->hipe_smp.have_receive_locks = 1;
+ c_p->flags |= F_HIPE_RECV_LOCKED;
c_p->flags &= ~F_DELAY_GC;
return THE_NON_VALUE;
}
@@ -615,8 +613,8 @@ void hipe_clear_timeout(Process *c_p)
*/
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
- if (c_p->hipe_smp.have_receive_locks) {
- c_p->hipe_smp.have_receive_locks = 0;
+ if (c_p->flags & F_HIPE_RECV_LOCKED) {
+ c_p->flags &= ~F_HIPE_RECV_LOCKED;
erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index ef14c75f6c..18354ba0a6 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -82,13 +82,4 @@ static __inline__ void hipe_delete_process(struct hipe_process_state *p)
erts_free(ERTS_ALC_T_HIPE_STK, (void*)p->nstack);
}
-struct hipe_process_state_smp {
- int have_receive_locks;
-};
-
-static __inline__ void hipe_init_process_smp(struct hipe_process_state_smp *p)
-{
- p->have_receive_locks = 0;
-}
-
#endif /* HIPE_PROCESS_H */
diff --git a/erts/emulator/nifs/unix/unix_prim_file.c b/erts/emulator/nifs/unix/unix_prim_file.c
index 1637f9cb71..2b112dda76 100644
--- a/erts/emulator/nifs/unix/unix_prim_file.c
+++ b/erts/emulator/nifs/unix/unix_prim_file.c
@@ -512,8 +512,8 @@ int efile_sync(efile_data_t *d, int data_only) {
}
int efile_advise(efile_data_t *d, Sint64 offset, Sint64 length, enum efile_advise_t advise) {
- efile_unix_t *u = (efile_unix_t*)d;
#ifdef HAVE_POSIX_FADVISE
+ efile_unix_t *u = (efile_unix_t*)d;
int p_advise;
switch(advise) {
diff --git a/erts/emulator/nifs/win32/win_prim_file.c b/erts/emulator/nifs/win32/win_prim_file.c
index 8058350b25..044bee62cf 100644
--- a/erts/emulator/nifs/win32/win_prim_file.c
+++ b/erts/emulator/nifs/win32/win_prim_file.c
@@ -24,6 +24,7 @@
#include "prim_file_nif.h"
+#include <winioctl.h>
#include <windows.h>
#include <strsafe.h>
#include <wchar.h>
@@ -671,11 +672,48 @@ static int is_executable_file(const efile_path_t *path) {
return 0;
}
+/* Returns whether the path refers to a link-like object, e.g. a junction
+ * point, symbolic link, or mounted folder. */
+static int is_name_surrogate(const efile_path_t *path) {
+ HANDLE handle;
+ int result;
+
+ handle = CreateFileW((const WCHAR*)path->data, GENERIC_READ,
+ FILE_SHARE_FLAGS, NULL, OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT |
+ FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+ result = 0;
+
+ if(handle != INVALID_HANDLE_VALUE) {
+ REPARSE_GUID_DATA_BUFFER reparse_buffer;
+        DWORD unused_length;
+ BOOL success;
+
+ success = DeviceIoControl(handle,
+ FSCTL_GET_REPARSE_POINT, NULL, 0,
+ &reparse_buffer, sizeof(reparse_buffer),
+ &unused_length, NULL);
+
+ /* ERROR_MORE_DATA is tolerated since we're guaranteed to have filled
+ * the field we want. */
+ if(success || GetLastError() == ERROR_MORE_DATA) {
+ result = IsReparseTagNameSurrogate(reparse_buffer.ReparseTag);
+ }
+
+ CloseHandle(handle);
+ }
+
+ return result;
+}
+
posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_fileinfo_t *result) {
BY_HANDLE_FILE_INFORMATION native_file_info;
DWORD attributes;
+ int is_link;
sys_memset(&native_file_info, 0, sizeof(native_file_info));
+ is_link = 0;
attributes = GetFileAttributesW((WCHAR*)path->data);
@@ -696,7 +734,11 @@ posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_
} else {
HANDLE handle;
- if(follow_links && (attributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
+ if(attributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+ is_link = is_name_surrogate(path);
+ }
+
+ if(follow_links && is_link) {
posix_errno_t posix_errno;
efile_path_t resolved_path;
@@ -737,7 +779,7 @@ posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_
}
}
- if(attributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+ if(is_link) {
result->type = EFILE_FILETYPE_SYMLINK;
/* This should be _S_IFLNK, but the old driver always set
* non-directories to _S_IFREG. */
@@ -917,6 +959,10 @@ posix_errno_t efile_read_link(ErlNifEnv *env, const efile_path_t *path, ERL_NIF_
return EINVAL;
}
+ if(!is_name_surrogate(path)) {
+ return EINVAL;
+ }
+
posix_errno = internal_read_link(path, &result_bin);
if(posix_errno == 0) {
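For context: a "name surrogate" reparse point is one whose tag has the surrogate bit (0x20000000) set, which covers symbolic links, junction points and mounted folders but not, for example, dedup or cloud-file placeholder reparse points; that is why only surrogates are now reported as EFILE_FILETYPE_SYMLINK. A small stand-alone check, assuming a Windows SDK is available:

    #include <windows.h>
    #include <stdio.h>

    int main(void)
    {
        /* IsReparseTagNameSurrogate() just tests the 0x20000000 bit. */
        printf("symlink  is surrogate: %d\n",
               !!IsReparseTagNameSurrogate(IO_REPARSE_TAG_SYMLINK));     /* 1 */
        printf("junction is surrogate: %d\n",
               !!IsReparseTagNameSurrogate(IO_REPARSE_TAG_MOUNT_POINT)); /* 1 */
        return 0;
    }
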
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 6d531fdb76..3e77dce1cd 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1736,7 +1736,7 @@ erts_check_io(ErtsPollThread *psi)
}
}
if (resource) {
- erts_resource_stop(resource, (ErlNifEvent)fd, 1);
+ erts_resource_stop(resource, (ErlNifEvent)fd, 0);
enif_release_resource(resource->data);
}
if (free_select)
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index e367d565a7..10adf80875 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -264,7 +264,7 @@ erts_os_monotonic_time(void)
ERTS_GLB_INLINE void
erts_os_times(ErtsMonotonicTime *mtimep, ErtsSystemTime *stimep)
{
- return (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep);
+ (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep);
}
#endif /* ERTS_OS_TIMES_INLINE_FUNC_PTR_CALL__ */
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 661a2ee6c9..9c6dc3ff83 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -957,7 +957,7 @@ erl_544(Config) when is_list(Config) ->
StackFun = fun(_, _, _) -> false end,
FormatFun = fun (Term, _) -> io_lib:format("~tp", [Term]) end,
Formated =
- lib:format_stacktrace(1, Stack, StackFun, FormatFun),
+ erl_error:format_stacktrace(1, Stack, StackFun, FormatFun),
true = is_list(Formated),
ok
after
diff --git a/erts/emulator/test/dgawd_handler.erl b/erts/emulator/test/dgawd_handler.erl
index 52cdd26427..29b9d6ac7b 100644
--- a/erts/emulator/test/dgawd_handler.erl
+++ b/erts/emulator/test/dgawd_handler.erl
@@ -42,10 +42,10 @@
%%====================================================================
install() ->
- gen_event:add_handler(error_logger, ?MODULE, []).
+ error_logger:add_report_handler(?MODULE, []).
restore() ->
- gen_event:delete_handler(error_logger, ?MODULE, []).
+ error_logger:delete_report_handler(?MODULE).
got_dgawd_report() ->
gen_event:call(error_logger, ?MODULE, got_dgawd_report, 10*60*1000).
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index e40d346e10..45dd922ff0 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -73,7 +73,7 @@
dist_evil_parallel_receiver/0]).
%% epmd_module exports
--export([start_link/0, register_node/2, register_node/3, port_please/2]).
+-export([start_link/0, register_node/2, register_node/3, port_please/2, address_please/3]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -797,7 +797,7 @@ show_term(Term) ->
%% Tests behaviour after net_kernel:stop (OTP-2586).
stop_dist(Config) when is_list(Config) ->
- Str = os:cmd(atom_to_list(lib:progname())
+ Str = os:cmd(ct:get_progname()
++ " -noshell -pa "
++ proplists:get_value(data_dir, Config)
++ " -s run"),
@@ -974,9 +974,9 @@ dist_auto_connect_start(Name, Value) when is_list(Name), is_atom(Value) ->
ModuleDir = filename:dirname(code:which(?MODULE)),
ValueStr = atom_to_list(Value),
Cookie = atom_to_list(erlang:get_cookie()),
- Cmd = lists:concat(
+ Cmd = lists:append(
[%"xterm -e ",
- atom_to_list(lib:progname()),
+ ct:get_progname(),
% " -noinput ",
" -detached ",
long_or_short(), " ", Name,
@@ -2086,6 +2086,11 @@ port_please(_Name, _Ip) ->
{port, Port, Version}
end.
+address_please(_Name, _Address, _AddressFamily) ->
+ %% Use localhost.
+ IP = {127,0,0,1},
+ {ok, IP}.
+
%%% Utilities
timestamp() ->
diff --git a/erts/emulator/test/dump_SUITE.erl b/erts/emulator/test/dump_SUITE.erl
index 38fa198ea6..8d18d46d92 100644
--- a/erts/emulator/test/dump_SUITE.erl
+++ b/erts/emulator/test/dump_SUITE.erl
@@ -96,12 +96,12 @@ get_dump_when_done(Dump) ->
{ok, #file_info{ size = Sz }} ->
get_dump_when_done(Dump, Sz);
{error, enoent} ->
- timer:sleep(100),
+ timer:sleep(1000),
get_dump_when_done(Dump)
end.
get_dump_when_done(Dump, Sz) ->
- timer:sleep(100),
+ timer:sleep(1000),
case file:read_file_info(Dump) of
{ok, #file_info{ size = Sz }} ->
file:read_file(Dump);
diff --git a/erts/emulator/test/lcnt_SUITE.erl b/erts/emulator/test/lcnt_SUITE.erl
index 4e52c2813c..dfffd662e2 100644
--- a/erts/emulator/test/lcnt_SUITE.erl
+++ b/erts/emulator/test/lcnt_SUITE.erl
@@ -87,8 +87,9 @@ wait_for_empty_lock_list() ->
wait_for_empty_lock_list(10).
wait_for_empty_lock_list(Tries) when Tries > 0 ->
try_flush_cleanup_ops(),
- case erts_debug:lcnt_collect() of
- [{duration, _}, {locks, []}] ->
+ [{duration, _}, {locks, Locks}] = erts_debug:lcnt_collect(),
+ case remove_untoggleable_locks(Locks) of
+ [] ->
ok;
_ ->
timer:sleep(50),
@@ -124,7 +125,7 @@ toggle_lock_counting(Config) when is_list(Config) ->
get_lock_info_for(Categories) when is_list(Categories) ->
ok = erts_debug:lcnt_control(mask, Categories),
[{duration, _}, {locks, Locks}] = erts_debug:lcnt_collect(),
- Locks;
+ remove_untoggleable_locks(Locks);
get_lock_info_for(Category) when is_atom(Category) ->
get_lock_info_for([Category]).
@@ -178,3 +179,13 @@ registered_db_tables(Config) when is_list(Config) ->
(_Lock) -> false
end, DbLocks),
ok.
+
+%% Not all locks can be toggled on or off due to technical limitations, so we
+%% need to filter them out when checking whether we successfully disabled lock
+%% counting.
+remove_untoggleable_locks([]) ->
+ [];
+remove_untoggleable_locks([{resource_monitors, _, _, _} | T]) ->
+ remove_untoggleable_locks(T);
+remove_untoggleable_locks([H | T]) ->
+ [H | remove_untoggleable_locks(T)].
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index c9e971af8a..f93c637650 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -36,7 +36,9 @@
t_map_equal/1,
t_map_compare/1,
t_map_size/1,
+ t_map_get/1,
t_is_map/1,
+ t_is_map_key/1,
%% Specific Map BIFs
t_bif_map_get/1,
@@ -124,7 +126,7 @@ all() -> [t_build_and_match_literals, t_build_and_match_literals_large,
%% erlang
t_erlang_hash, t_map_encode_decode,
t_gc_rare_map_overflow,
- t_map_size, t_is_map,
+ t_map_size, t_map_get, t_is_map,
%% non specific BIF related
t_bif_build_and_check,
@@ -680,6 +682,88 @@ t_map_size(Config) when is_list(Config) ->
end),
ok.
+t_map_get(Config) when is_list(Config) ->
+ %% small map
+ 1 = map_get(a, id(#{a=>1})),
+ 2 = map_get(b, id(#{a=>1, b=>2})),
+ "hi" = map_get("hello", id(#{a=>1, "hello"=>"hi"})),
+ "tuple hi" = map_get({1,1.0}, id(#{a=>a, {1,1.0}=>"tuple hi"})),
+
+ M0 = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
+ "v4" = map_get(<<"k2">>, M0#{<<"k2">> => "v4"}),
+
+ %% large map
+ M1 = maps:from_list([{I,I}||I<-lists:seq(1,100)] ++
+ [{a,1},{b,2},{"hello","hi"},{{1,1.0},"tuple hi"},
+ {k1,"v1"},{<<"k2">>,"v3"}]),
+ 1 = map_get(a, M1),
+ 2 = map_get(b, M1),
+ "hi" = map_get("hello", M1),
+ "tuple hi" = map_get({1,1.0}, M1),
+ "v3" = map_get(<<"k2">>, M1),
+
+ %% error cases
+ do_badmap(fun(T) ->
+ {'EXIT',{{badmap,T},[{erlang,map_get,_,_}|_]}} =
+ (catch map_get(a, T))
+ end),
+
+ {'EXIT',{{badkey,{1,1}},[{erlang,map_get,_,_}|_]}} =
+ (catch map_get({1,1}, id(#{{1,1.0}=>"tuple"}))),
+ {'EXIT',{{badkey,a},[{erlang,map_get,_,_}|_]}} = (catch map_get(a, id(#{}))),
+ {'EXIT',{{badkey,a},[{erlang,map_get,_,_}|_]}} =
+ (catch map_get(a, id(#{b=>1, c=>2}))),
+
+ %% in guards
+ M2 = id(#{a=>1}),
+ true = if map_get(a, M2) =:= 1 -> true; true -> false end,
+ false = if map_get(x, M2) =:= 1 -> true; true -> false end,
+ do_badmap(fun
+ (T) when map_get(x, T) =:= 1 -> ok;
+ (T) -> false = is_map(T)
+ end),
+ ok.
+
+t_is_map_key(Config) when is_list(Config) ->
+ %% small map
+ true = is_map_key(a, id(#{a=>1})),
+ true = is_map_key(b, id(#{a=>1, b=>2})),
+ true = is_map_key("hello", id(#{a=>1, "hello"=>"hi"})),
+ true = is_map_key({1,1.0}, id(#{a=>a, {1,1.0}=>"tuple hi"})),
+
+ M0 = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
+ true = is_map_key(<<"k2">>, M0#{<<"k2">> => "v4"}),
+
+ %% large map
+ M1 = maps:from_list([{I,I}||I<-lists:seq(1,100)] ++
+ [{a,1},{b,2},{"hello","hi"},{{1,1.0},"tuple hi"},
+ {k1,"v1"},{<<"k2">>,"v3"}]),
+ true = is_map_key(a, M1),
+ true = is_map_key(b, M1),
+ true = is_map_key("hello", M1),
+ true = is_map_key({1,1.0}, M1),
+ true = is_map_key(<<"k2">>, M1),
+
+ %% error cases
+ do_badmap(fun(T) ->
+ {'EXIT',{{badmap,T},[{erlang,is_map_key,_,_}|_]}} =
+ (catch is_map_key(a, T))
+ end),
+
+ false = is_map_key({1,1}, id(#{{1,1.0}=>"tuple"})),
+ false = is_map_key(a, id(#{})),
+ false = is_map_key(a, id(#{b=>1, c=>2})),
+
+ %% in guards
+ M2 = id(#{a=>1}),
+ true = if is_map_key(a, M2) -> true; true -> false end,
+ false = if is_map_key(x, M2) -> true; true -> false end,
+ do_badmap(fun
+                  (T) when is_map_key(x, T) -> ok;
+ (T) -> false = is_map(T)
+ end),
+ ok.
+
build_and_check_size([K|Ks],N,M0) ->
N = map_size(M0),
M1 = M0#{ K => K },
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 08a7b4560c..4415d8d1b9 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -885,6 +885,26 @@ maps(Config) when is_list(Config) ->
erlang:match_spec_test(#{<<"b">> =>"camembert","c"=>"cabécou", "wat"=>"hi", b=><<"other">>},
[{#{<<"b">> => '$1',"wat" => '$2'},[],[#{a=>'$1',b=>'$2'}]}],
table),
+
+ {ok,1,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{map_size,'$1'}]}],table),
+ {ok,'EXIT',[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[],[{map_size,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[{map_size,'$1'}],['$_']}], table),
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[{'=:=',{map_size,'$1'},1}],[true]}], table),
+
+ {ok,1,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{map_get,a,'$1'}]}], table),
+ {ok,'EXIT',[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{map_get,b,'$1'}]}], table),
+ {ok,'EXIT',[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[],[{map_get,b,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[{map_get,b,'$1'}],['$_']}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[{map_get,b,'$1'}],['$_']}], table),
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => true}, [{'$1',[{map_get,a,'$1'}],[true]}], table),
+
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{is_map_key,a,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{is_map_key,b,'$1'}]}], table),
+ {ok,'EXIT',[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[],[{is_map_key,a,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[{is_map_key,b,'$1'}],['$_']}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[{is_map_key,b,'$1'}],['$_']}], table),
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => true}, [{'$1',[{is_map_key,a,'$1'}],[true]}], table),
+
%% large maps
Ls0 = [{I,<<I:32>>}||I <- lists:seq(1,415)],
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index a9eb4b2768..100fa006e7 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -653,7 +653,7 @@ select_steal(Config) when is_list(Config) ->
check_stop_ret(select_nif(RFd, ?ERL_NIF_SELECT_STOP, RFd, null, Ref)),
?assertMatch([{fd_resource_stop, RPtr, _}], flush()),
- {1, {RPtr, 1}} = last_fd_stop_call(),
+ {1, {RPtr, _DirectCall}} = last_fd_stop_call(),
?assert(is_closed_nif(WFd)),
@@ -2663,16 +2663,23 @@ nif_term_to_binary(Config) ->
nif_binary_to_term(Config) ->
ensure_lib_loaded(Config),
- T = {#{ok => nok}, <<0:8096>>, lists:seq(1,100)},
+ BigMap = maps:from_list([{I,-I} || I <- lists:seq(1,100)]),
+ [nif_binary_to_term_do(T)
+ || T <- [{#{ok => nok}, <<0:8096>>, lists:seq(1,100)},
+ atom, 42, self(), BigMap]],
+ ok.
+
+nif_binary_to_term_do(T) ->
+ Dummy = [true|false],
Bin = term_to_binary(T),
Len = byte_size(Bin),
- {Len,T} = binary_to_term_nif(Bin, undefined, 0),
+ {Len,T,Dummy} = binary_to_term_nif(Bin, undefined, 0),
Len = binary_to_term_nif(Bin, self(), 0),
- T = receive M -> M after 1000 -> timeout end,
+ {T,Dummy} = receive M -> M after 1000 -> timeout end,
- {Len, T} = binary_to_term_nif(Bin, undefined, ?ERL_NIF_BIN2TERM_SAFE),
+ {Len,T,Dummy} = binary_to_term_nif(Bin, undefined, ?ERL_NIF_BIN2TERM_SAFE),
false = binary_to_term_nif(<<131,100,0,14,"undefined_atom">>,
- undefined, ?ERL_NIF_BIN2TERM_SAFE),
+ undefined, ?ERL_NIF_BIN2TERM_SAFE),
false = binary_to_term_nif(Bin, undefined, 1),
ok.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index a0aef60cf1..155bda6df0 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -2405,7 +2405,7 @@ static ERL_NIF_TERM term_to_binary(ErlNifEnv* env, int argc, const ERL_NIF_TERM
static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary bin;
- ERL_NIF_TERM term, ret_term;
+ ERL_NIF_TERM term, dummy, ret_term;
ErlNifPid pid;
ErlNifEnv *msg_env = env;
unsigned int opts;
@@ -2418,6 +2418,9 @@ static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM
|| !enif_get_uint(env, argv[2], &opts))
return enif_make_badarg(env);
+ /* build dummy heap term first to provoke OTP-15080 */
+ dummy = enif_make_list_cell(msg_env, atom_true, atom_false);
+
ret = enif_binary_to_term(msg_env, bin.data, bin.size, &term,
(ErlNifBinaryToTerm)opts);
if (!ret)
@@ -2425,11 +2428,12 @@ static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM
ret_term = enif_make_uint64(env, ret);
if (msg_env != env) {
- enif_send(env, &pid, msg_env, term);
+ enif_send(env, &pid, msg_env,
+ enif_make_tuple2(msg_env, term, dummy));
enif_free_env(msg_env);
return ret_term;
} else {
- return enif_make_tuple2(env, ret_term, term);
+ return enif_make_tuple3(env, ret_term, term, dummy);
}
}
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 5b39d05df8..eb9b94a316 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -965,7 +965,7 @@ env_slave(File, Env) ->
env_slave(File, Env, Body) ->
file:write_file(File, term_to_binary(Body)),
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
Dir = filename:dirname(code:which(?MODULE)),
Cmd = Program ++ " -pz " ++ Dir ++
" -noinput -run " ++ ?MODULE_STRING ++ " env_slave_main " ++
@@ -1129,7 +1129,7 @@ try_bad_args(Args) ->
cd(Config) when is_list(Config) ->
ct:timetrap({minutes, 1}),
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
DataDir = proplists:get_value(data_dir, Config),
TestDir = filename:join(DataDir, "dir"),
Cmd = Program ++ " -pz " ++ DataDir ++
@@ -1191,7 +1191,7 @@ cd(Config) when is_list(Config) ->
%% be relative the new cwd and not the original
cd_relative(Config) ->
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
DataDir = proplists:get_value(data_dir, Config),
TestDir = filename:join(DataDir, "dir"),
@@ -1214,7 +1214,7 @@ cd_relative(Config) ->
relative_cd() ->
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
ok = file:set_cwd(".."),
{ok, Cwd} = file:get_cwd(),
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 46ece531a8..585c5a1871 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -2217,36 +2217,44 @@ handle_event(Event, Pid) ->
processes_term_proc_list(Config) when is_list(Config) ->
Tester = self(),
- as_expected = processes_term_proc_list_test(false),
- {ok, Node} = start_node(Config, "+Mis true"),
- RT = spawn_link(Node, fun () ->
- receive after 1000 -> ok end,
- processes_term_proc_list_test(false),
- Tester ! {it_worked, self()}
- end),
- receive {it_worked, RT} -> ok end,
- stop_node(Node),
+
+ Run = fun(Args) ->
+ {ok, Node} = start_node(Config, Args),
+ RT = spawn_link(Node, fun () ->
+ receive after 1000 -> ok end,
+ as_expected = processes_term_proc_list_test(false),
+ Tester ! {it_worked, self()}
+ end),
+ receive {it_worked, RT} -> ok end,
+ stop_node(Node)
+ end,
+
+ %% We have to run this test case with +S1 since instrument:allocations()
+ %% will report a free()'d block as present until it's actually deallocated
+ %% by its employer.
+ Run("+MSe true +MSatags false +S1"),
+ Run("+MSe true +MSatags true +S1"),
+
ok.
-
+
-define(CHK_TERM_PROC_LIST(MC, XB),
chk_term_proc_list(?LINE, MC, XB)).
chk_term_proc_list(Line, MustChk, ExpectBlks) ->
- case {MustChk, instrument:memory_status(types)} of
- {false, false} ->
+ Allocs = instrument:allocations(#{ allocator_types => [sl_alloc] }),
+ case {MustChk, Allocs} of
+ {false, {error, not_enabled}} ->
not_enabled;
- {_, MS} ->
- {value,
- {ptab_list_deleted_el,
- DL}} = lists:keysearch(ptab_list_deleted_el, 1, MS),
- case lists:keysearch(blocks, 1, DL) of
- {value, {blocks, ExpectBlks, _, _}} ->
- ok;
- {value, {blocks, Blks, _, _}} ->
- exit({line, Line,
- mismatch, expected, ExpectBlks, actual, Blks});
- Unexpected ->
- exit(Unexpected)
+ {_, {ok, {_Shift, _Unscanned, ByOrigin}}} ->
+ ByType = maps:get(system, ByOrigin, #{}),
+ Hist = maps:get(ptab_list_deleted_el, ByType, {}),
+ case lists:sum(tuple_to_list(Hist)) of
+ ExpectBlks ->
+ ok;
+ Blks ->
+ exit({line, Line, mismatch,
+ expected, ExpectBlks,
+ actual, Blks})
end
end,
ok.
diff --git a/erts/emulator/test/sensitive_SUITE.erl b/erts/emulator/test/sensitive_SUITE.erl
index c3e303bbd1..9b23a30e88 100644
--- a/erts/emulator/test/sensitive_SUITE.erl
+++ b/erts/emulator/test/sensitive_SUITE.erl
@@ -413,7 +413,7 @@ my_process_info(Pid, Tag) ->
t_process_display(Config) when is_list(Config) ->
Dir = filename:dirname(code:which(?MODULE)),
- Cmd = atom_to_list(lib:progname()) ++ " -noinput -pa " ++ Dir ++
+ Cmd = ct:get_progname() ++ " -noinput -pa " ++ Dir ++
" -run " ++ ?MODULE_STRING ++ " remote_process_display",
io:put_chars(Cmd),
P = open_port({spawn,Cmd}, [in,stderr_to_stdout,eof]),
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index 029a6de897..3f2897242e 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -310,6 +310,13 @@ scheduler_wall_time_all(Config) when is_list(Config) ->
scheduler_wall_time_test(scheduler_wall_time_all).
scheduler_wall_time_test(Type) ->
+ case string:find(erlang:system_info(system_version),
+ "dirty-schedulers-TEST") == nomatch of
+ true -> run_scheduler_wall_time_test(Type);
+ false -> {skip, "Cannot be run with dirty-schedulers-TEST build"}
+ end.
+
+run_scheduler_wall_time_test(Type) ->
%% Should return undefined if system_flag is not turned on yet
undefined = statistics(Type),
%% Turn on statistics
diff --git a/erts/emulator/test/system_profile_SUITE.erl b/erts/emulator/test/system_profile_SUITE.erl
index c9be54f668..ae27bfe9df 100644
--- a/erts/emulator/test/system_profile_SUITE.erl
+++ b/erts/emulator/test/system_profile_SUITE.erl
@@ -95,18 +95,20 @@ do_runnable_procs({TsType, TsTypeFlag}) ->
% FIXME: Set #laps and #nodes in config file
Nodes = 10,
Laps = 10,
- Master = ring(Nodes),
+ All = ring(Nodes, [link,monitor]),
+ [Master | _] = All,
undefined = erlang:system_profile(Pid, [runnable_procs]++TsTypeFlag),
% loop a message
ok = ring_message(Master, message, Laps),
+ ok = kill_ring(Master),
+ [receive {'DOWN', _, process, P, _} -> ok end || P <- All],
Events = get_profiler_events(),
- kill_em_all = kill_ring(Master),
erlang:system_profile(undefined, []),
put(master, Master),
put(laps, Laps),
true = has_runnable_event(TsType, Events),
Pids = sort_events_by_pid(Events),
- ok = check_events(TsType, Pids),
+ ok = check_events(TsType, Pids, (Laps+1)*2+2, (Laps+1)*2),
erase(),
exit(Pid,kill),
ok.
@@ -139,7 +141,7 @@ do_runnable_ports({TsType, TsTypeFlag}, Config) ->
erlang:system_profile(undefined, []),
true = has_runnable_event(TsType, Events),
Pids = sort_events_by_pid(Events),
- ok = check_events(TsType, Pids),
+ ok = check_events(TsType, Pids, Laps*2+2, Laps*2),
erase(),
exit(Pid,kill),
ok.
@@ -171,12 +173,12 @@ dont_profile_profiler(Config) when is_list(Config) ->
Nodes = 10,
Laps = 10,
- Master = ring(Nodes),
+ [Master|_] = ring(Nodes, [link]),
undefined = erlang:system_profile(Pid, [runnable_procs]),
% loop a message
ok = ring_message(Master, message, Laps),
erlang:system_profile(undefined, []),
- kill_em_all = kill_ring(Master),
+ ok = kill_ring(Master),
Events = get_profiler_events(),
false = has_profiler_pid_event(Events, Pid),
@@ -248,27 +250,28 @@ check_block_system({TsType, TsTypeFlag}, Nodes) ->
%%% Check events
-check_events(_TsType, []) -> ok;
-check_events(TsType, [Pid | Pids]) ->
+check_events(_TsType, [], _, _) -> ok;
+check_events(TsType, [Pid | Pids], ExpMaster, ExpMember) ->
Master = get(master),
- Laps = get(laps),
CheckPids = get(pids),
{Events, N} = get_pid_events(Pid),
ok = check_event_flow(Events),
ok = check_event_ts(TsType, Events),
IsMember = lists:member(Pid, CheckPids),
- case Pid of
- Master ->
- io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2+2, N, Pid]),
- N = Laps*2 + 2,
- check_events(TsType, Pids);
- Pid when IsMember == true ->
- io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2, N, Pid]),
- N = Laps*2,
- check_events(TsType, Pids);
- Pid ->
- check_events(TsType, Pids)
- end.
+ {Title,Exp} = case Pid of
+ Master -> {master,ExpMaster};
+ Pid when IsMember == true -> {member,ExpMember};
+ _ -> {other,N}
+ end,
+ ok = case N of
+ Exp -> ok;
+ _ ->
+ io:format("Expected ~p and got ~p profile events from ~p ~p:~n~p~n",
+ [Exp, N, Title, Pid, Events]),
+ error
+ end,
+ check_events(TsType, Pids, ExpMaster, ExpMember).
+
%% timestamp consistency check for descending timestamps
@@ -296,7 +299,13 @@ check_event_ts(TsType, [{Pid, _, _, TS1}=Event | Events], {Pid,_,_,TS0}) ->
%% consistency check for active vs. inactive activity (runnable)
check_event_flow(Events) ->
- check_event_flow(Events, undefined).
+ case check_event_flow(Events, undefined) of
+ ok -> ok;
+ Error ->
+ io:format("Events = ~p\n", [Events]),
+ Error
+ end.
+
check_event_flow([], _) -> ok;
check_event_flow([Event | PidEvents], undefined) ->
check_event_flow(PidEvents, Event);
@@ -336,10 +345,11 @@ sort_events_by_pid([Event | Events],Pids) ->
%% API
% Returns master pid
-ring(N) ->
- Pids = build_ring(N, []),
+ring(N, SpawnOpt) ->
+ Pids = build_ring(N, [], SpawnOpt),
put(pids, Pids),
- setup_ring(Pids).
+ setup_ring(Pids),
+ Pids.
ring_message(Master, Message, Laps) ->
Master ! {message, Master, Laps, Message},
@@ -347,13 +357,19 @@ ring_message(Master, Message, Laps) ->
{laps_complete, Master} -> ok
end.
-kill_ring(Master) -> Master ! kill_em_all.
+kill_ring(Master) ->
+ Master ! kill_em_all,
+ ok.
%% Process ring helpers
-build_ring(0, Pids) -> Pids;
-build_ring(N, Pids) ->
- build_ring(N - 1, [spawn_link(?MODULE, ring_loop, [undefined]) | Pids]).
+build_ring(0, Pids, _) -> Pids;
+build_ring(N, Pids, SpawnOpt) ->
+ Pid = case spawn_opt(?MODULE, ring_loop, [undefined], SpawnOpt) of
+ {P,_} -> P;
+ P -> P
+ end,
+ build_ring(N-1, [Pid | Pids], SpawnOpt).
setup_ring([Master | Relayers]) ->
% Relayers may not include the master pid
@@ -382,15 +398,13 @@ ring_loop(RelayTo) ->
{message, Master, Lap, Msg}=Message ->
case {self(), Lap} of
{Master, 0} ->
- get(supervisor) ! {laps_complete, self()},
- ring_loop(RelayTo);
+ get(supervisor) ! {laps_complete, self()};
{Master, Lap} ->
- RelayTo ! {message, Master, Lap - 1, Msg},
- ring_loop(RelayTo);
+ RelayTo ! {message, Master, Lap - 1, Msg};
_ ->
- RelayTo ! Message,
- ring_loop(RelayTo)
- end
+ RelayTo ! Message
+ end,
+ ring_loop(RelayTo)
end.
%%%
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index def25dba7d..138aefb29c 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -29,7 +29,7 @@
receive_trace/1, link_receive_call_correlation/1, self_send/1,
timeout_trace/1, send_trace/1,
procs_trace/1, dist_procs_trace/1, procs_new_trace/1,
- suspend/1, mutual_suspend/1, suspend_exit/1, suspender_exit/1,
+ suspend/1, suspend_exit/1, suspender_exit/1,
suspend_system_limit/1, suspend_opts/1, suspend_waiting/1,
new_clear/1, existing_clear/1, tracer_die/1,
set_on_spawn/1, set_on_first_spawn/1, cpu_timestamp/1,
@@ -53,7 +53,7 @@ all() ->
[cpu_timestamp, receive_trace, link_receive_call_correlation,
self_send, timeout_trace,
send_trace, procs_trace, dist_procs_trace, suspend,
- mutual_suspend, suspend_exit, suspender_exit,
+ suspend_exit, suspender_exit,
suspend_system_limit, suspend_opts, suspend_waiting,
new_clear, existing_clear, tracer_die, set_on_spawn,
set_on_first_spawn, set_on_link, set_on_first_link,
@@ -1234,55 +1234,6 @@ do_suspend(Pid, N) ->
erlang:yield(),
do_suspend(Pid, N-1).
-
-
-mutual_suspend(Config) when is_list(Config) ->
- TimeoutSecs = 5*60,
- ct:timetrap({seconds, TimeoutSecs}),
- Parent = self(),
- Fun = fun () ->
- receive
- {go, Pid} ->
- do_mutual_suspend(Pid, 100000)
- end,
- Parent ! {done, self()},
- receive after infinity -> ok end
- end,
- P1 = spawn_link(Fun),
- P2 = spawn_link(Fun),
- T1 = erlang:start_timer((TimeoutSecs - 5)*1000, self(), oops),
- T2 = erlang:start_timer((TimeoutSecs - 5)*1000, self(), oops),
- P1 ! {go, P2},
- P2 ! {go, P1},
- Res1 = receive
- {done, P1} -> done;
- {timeout,T1,_} -> timeout
- end,
- Res2 = receive
- {done, P2} -> done;
- {timeout,T2,_} -> timeout
- end,
- P1S = process_info(P1, status),
- P2S = process_info(P2, status),
- io:format("P1S=~p P2S=~p", [P1S, P2S]),
- false = {status, suspended} == P1S,
- false = {status, suspended} == P2S,
- unlink(P1), exit(P1, bang),
- unlink(P2), exit(P2, bang),
- done = Res1,
- done = Res2,
- ok.
-
-do_mutual_suspend(_Pid, 0) ->
- ok;
-do_mutual_suspend(Pid, N) ->
- %% Suspend a process and test that it is suspended.
- true = erlang:suspend_process(Pid),
- {status, suspended} = process_info(Pid, status),
- %% Unsuspend the process.
- true = erlang:resume_process(Pid),
- do_mutual_suspend(Pid, N-1).
-
suspend_exit(Config) when is_list(Config) ->
ct:timetrap({minutes, 2}),
rand:seed(exsplus, {4711,17,4711}),
@@ -1513,7 +1464,8 @@ suspend_opts(Config) when is_list(Config) ->
dbl_async = AA,
synced = S,
async_once = AO} = Acc) ->
- erlang:suspend_process(Tok, [asynchronous]),
+ Tag = {make_ref(), self()},
+ erlang:suspend_process(Tok, [{asynchronous, Tag}]),
Res = case {suspend_count(Tok), N rem 4} of
{0, 2} ->
erlang:suspend_process(Tok,
@@ -1549,7 +1501,11 @@ suspend_opts(Config) when is_list(Config) ->
_ ->
Acc
end,
- erlang:resume_process(Tok),
+ receive
+ {Tag, Result} ->
+ suspended = Result,
+ erlang:resume_process(Tok)
+ end,
erlang:yield(),
Res
end,
diff --git a/erts/emulator/test/tracer_SUITE.erl b/erts/emulator/test/tracer_SUITE.erl
index e1362ef07a..070462b0f1 100644
--- a/erts/emulator/test/tracer_SUITE.erl
+++ b/erts/emulator/test/tracer_SUITE.erl
@@ -623,7 +623,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Expect(Pid1, State1, Opts),
receive M11 -> ct:fail({unexpected, M11}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [TraceFlag]} = erlang:trace_info(Pid1, flags),
{tracer, {tracer_test, State1}} = erlang:trace_info(Pid1, tracer),
erlang:trace(Pid1, false, [TraceFlag]);
@@ -640,7 +640,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Expect(Pid1T, State1, Opts#{ scheduler_id => number,
timestamp => timestamp}),
receive M11T -> ct:fail({unexpected, M11T}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [scheduler_id, TraceFlag, timestamp]}
= erlang:trace_info(Pid1T, flags),
{tracer, {tracer_test, State1}} = erlang:trace_info(Pid1T, tracer),
@@ -655,7 +655,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Tc(Pid2),
ok = trace_delivered(Pid2),
receive M2 -> ct:fail({unexpected, M2}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [TraceFlag]} = erlang:trace_info(Pid2, flags),
{tracer, {tracer_test, State2}} = erlang:trace_info(Pid2, tracer),
erlang:trace(Pid2, false, [TraceFlag]);
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index cf6d1bacca..21a3f40c97 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -79,6 +79,7 @@ static const char plusM_au_allocs[]= {
static char *plusM_au_alloc_switches[] = {
"as",
"asbcst",
+ "atags",
"acul",
"acnl",
"acfml",
diff --git a/erts/etc/common/heart.c b/erts/etc/common/heart.c
index bc353e384e..8f1e89b638 100644
--- a/erts/etc/common/heart.c
+++ b/erts/etc/common/heart.c
@@ -825,11 +825,8 @@ write_message(fd, mp)
int fd;
struct msg *mp;
{
- int len;
- char* tmp;
+ int len = ntohs(mp->len);
- tmp = (char*) &(mp->len);
- len = (*tmp * 256) + *(tmp+1);
if ((len == 0) || (len > MSG_BODY_SIZE)) {
return MSG_HDR_SIZE;
} /* cc68k wants (char *) */
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index e5ef819444..39e378193a 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1232,6 +1232,142 @@ end
# Commands for special term bunches.
#
+define etp-sig-int
+ set $etp_sig_is_message = 0
+ set $etp_sig_tag = ($arg0)->m[0]
+ if ($etp_sig_tag & 0x3) != 0 || $etp_sig_tag == etp_the_non_value
+ set $etp_sig_is_message = !0
+ # A message
+ if $etp_sig_tag != etp_the_non_value
+ etp-1 $etp_sig_tag 0
+ else
+ print "!ENCODED-DIST-MSG"
+ end
+ if ($arg0)->m[1] != $etp_nil
+ printf " @token= "
+ etp-1 ($arg0)->m[1] 0
+ end
+ printf " @from= "
+ etp-1 ($arg0)->m[2] 0
+ else
+ if ($etp_sig_tag & 0x3f) != 0x30
+ print "!INVALID-SIGNAL"
+ else
+ set $etp_sig_op = (($etp_sig_tag >> 6) & 0xff)
+ set $etp_sig_type = (($etp_sig_tag >> 14) & 0xff)
+ if $etp_sig_op == 0
+ printf "!EXIT[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 1
+ printf "!EXIT-LINKED[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 2
+ printf "!MONITOR-DOWN[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 3
+ printf "!MONITOR[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 4
+ printf "!DEMONITOR[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 5
+ printf "!LINK[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 6
+ printf "!UNLINK[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 7
+ printf "!GROUP-LEADER[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 8
+ printf "!TRACE-CHANGE-STATE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 9
+ printf "!PERSISTENT-MONITOR-MESSAGE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 10
+ printf "!IS-ALIVE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 11
+ printf "!PROCESS-INFO[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 12
+ printf "!SYNC-SUSPEND[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 13
+ printf "!RPC[%d]", $etp_sig_type
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+end
+
+
+define etp-sigq-int
+# Args: first signal in queue (ErtsMessage *), save pointer, saved_last pointer
+#
+# Non-reentrant
+#
+ set $etp_sig = ($arg0)
+ set $etp_sig_save = ($arg1)
+ set $etp_sig_save_last = ($arg2)
+ set $etp_sigq_msig_len = 0
+ set $etp_sigq_nmsig_len = 0
+
+ printf " ["
+ while $etp_sig != (void *) 0
+ set $etp_sig_next = $etp_sig->next
+ if $etp_sig != ($arg0)
+ printf " "
+ end
+ etp-sig-int $etp_sig
+ if $etp_sig_is_message
+ set $etp_sigq_msig_len++
+ else
+ set $etp_sigq_nmsig_len++
+ end
+ if $etp_sig_next
+ printf ","
+ end
+ if $etp_sig_save && *$etp_sig_save == $etp_sig
+ printf " %% <== SAVE"
+ else
+ if $etp_sig_save_last && *$etp_sig_save_last == $etp_sig
+ printf " %% <== SAVED_LAST"
+ else
+ end
+ end
+ if $etp_sig_next
+ printf "\n"
+ end
+ set $etp_sig = $etp_sig_next
+ end
+ printf "]\n\n"
+ printf " Message signals: %d\n", $etp_sigq_msig_len
+ printf " Non-message signals: %d\n\n", $etp_sigq_nmsig_len
+end
+
+define etp-sigqs
+ printf " --- Inner signal queue (message queue) ---\n"
+ etp-sigq-int ($arg0)->sig_qs.first ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+ printf " --- Middle signal queue ---\n"
+ etp-sigq-int ($arg0)->sig_qs.cont ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+ printf " --- Outer queue ---\n"
+ etp-sigq-int ($arg0)->sig_inq.first ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+end
+
define etp-msgq
# Args: ErlMessageQueue*
#
@@ -1937,7 +2073,7 @@ document etp-proc-flags
%---------------------------------------------------------------------------
end
-define etp-process-info
+define etp-process-info-int
# Args: Process*
#
printf " Pid: "
@@ -2000,6 +2136,17 @@ define etp-process-info
etp-1 ((Eterm)($etp_proc->parent))
printf "\n Pointer: (Process *) %p\n", $etp_proc
end
+ if ($arg1)
+ etp-sigqs $etp_proc
+ end
+end
+
+define etp-process-info
+ etp-process-info-int ($arg0) 0
+end
+
+define etp-process-info-x
+ etp-process-info-int ($arg0) !0
end
document etp-process-info
@@ -2010,7 +2157,7 @@ document etp-process-info
%---------------------------------------------------------------------------
end
-define etp-processes
+define etp-processes-int
if (!erts_initialized)
printf "No processes, since system isn't initialized!\n"
else
@@ -2026,7 +2173,7 @@ define etp-processes
if ($proc != ((Process *) 0) && $proc != $invalid_proc)
printf "---\n"
printf " Pix: %d\n", $proc_ix
- etp-process-info $proc
+ etp-process-info-int $proc ($arg0)
set $proc_cnt--
end
if $proc_ix == $proc_printile
@@ -2039,6 +2186,14 @@ define etp-processes
end
end
+define etp-processes
+ etp-processes-int 0
+end
+
+define etp-processes-x
+ etp-processes-int !0
+end
+
document etp-processes
%---------------------------------------------------------------------------
% etp-processes
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index 02e77bfbb2..495d306a23 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erl_tracer.beam b/erts/preloaded/ebin/erl_tracer.beam
index 7754d64a6b..cd2c0ac69d 100644
--- a/erts/preloaded/ebin/erl_tracer.beam
+++ b/erts/preloaded/ebin/erl_tracer.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 3ef5ad0646..52f4c686a9 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_code_purger.beam b/erts/preloaded/ebin/erts_code_purger.beam
index 655e1d2e06..b6c69e3e67 100644
--- a/erts/preloaded/ebin/erts_code_purger.beam
+++ b/erts/preloaded/ebin/erts_code_purger.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam b/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
index 2df6da7415..1013b8de0c 100644
--- a/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
+++ b/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index f5967780ad..73bd730eaa 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_literal_area_collector.beam b/erts/preloaded/ebin/erts_literal_area_collector.beam
index bb2676a9e8..18f1f76055 100644
--- a/erts/preloaded/ebin/erts_literal_area_collector.beam
+++ b/erts/preloaded/ebin/erts_literal_area_collector.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 9043ae302e..1b458fc5da 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index 6a03451ad5..69d809e325 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_buffer.beam b/erts/preloaded/ebin/prim_buffer.beam
index 06d9276247..e2f0d3f44d 100644
--- a/erts/preloaded/ebin/prim_buffer.beam
+++ b/erts/preloaded/ebin/prim_buffer.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_eval.beam b/erts/preloaded/ebin/prim_eval.beam
index 17c59708e7..e962fcfa17 100644
--- a/erts/preloaded/ebin/prim_eval.beam
+++ b/erts/preloaded/ebin/prim_eval.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 9cc22222db..b11e428229 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index a42f45c180..350cc343d5 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index 1ec6870178..1d50d32efe 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index 7a5f4d7527..a328711702 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index b82b9f5b46..42c1f32a8e 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -299,8 +299,12 @@ check_file_result(Func, Target, {error,Reason}) ->
end,
%% this is equal to calling error_logger:error_report/1 which
%% we don't want to do from code_server during system boot
- error_logger ! {notify,{error_report,group_leader(),
- {self(),std_error,Report}}},
+ logger ! {log,error,#{label=>{?MODULE,file_error},report=>Report},
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}},
error
end;
check_file_result(_, _, Other) ->
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index c761bbaaeb..3a42e841e2 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -135,13 +135,13 @@
-export([insert_element/3]).
-export([integer_to_binary/1, integer_to_list/1]).
-export([iolist_size/1, iolist_to_binary/1, iolist_to_iovec/1]).
--export([is_alive/0, is_builtin/3, is_process_alive/1, length/1, link/1]).
+-export([is_alive/0, is_builtin/3, is_map_key/2, is_process_alive/1, length/1, link/1]).
-export([list_to_atom/1, list_to_binary/1]).
-export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]).
-export([list_to_integer/1, list_to_integer/2]).
-export([list_to_pid/1, list_to_port/1, list_to_ref/1, list_to_tuple/1, loaded/0]).
-export([localtime/0, make_ref/0]).
--export([map_size/1, match_spec_test/3, md5/1, md5_final/1]).
+-export([map_size/1, map_get/2, match_spec_test/3, md5/1, md5_final/1]).
-export([md5_init/0, md5_update/2, module_loaded/1, monitor/2]).
-export([monitor_node/2, monitor_node/3, nif_error/1, nif_error/2]).
-export([node/0, node/1, now/0, phash/2, phash2/1, phash2/2]).
@@ -1121,6 +1121,13 @@ is_alive() ->
is_builtin(_Module, _Function, _Arity) ->
erlang:nif_error(undefined).
+%% Shadowed by erl_bif_types: erlang:is_map_key/2
+-spec is_map_key(Key, Map) -> boolean() when
+ Key :: term(),
+ Map :: map().
+is_map_key(_,_) ->
+ erlang:nif_error(undef).
+
%% is_process_alive/1
-spec is_process_alive(Pid) -> boolean() when
Pid :: pid().
@@ -1230,6 +1237,14 @@ make_ref() ->
map_size(_Map) ->
erlang:nif_error(undefined).
+%% Shadowed by erl_bif_types: erlang:map_get/2
+-spec map_get(Key, Map) -> Value when
+ Map :: map(),
+ Key :: any(),
+ Value :: any().
+map_get(_Key, _Map) ->
+ erlang:nif_error(undefined).
+
%% match_spec_test/3
-spec erlang:match_spec_test(MatchAgainst, MatchSpec, Type) -> TestResult when
MatchAgainst :: [term()] | tuple(),
@@ -1506,8 +1521,21 @@ pre_loaded() ->
-spec erlang:process_display(Pid, Type) -> true when
Pid :: pid(),
Type :: backtrace.
-process_display(_Pid, _Type) ->
- erlang:nif_error(undefined).
+process_display(Pid, Type) ->
+ case case erts_internal:process_display(Pid, Type) of
+ Ref when erlang:is_reference(Ref) ->
+ receive
+ {Ref, Res} ->
+ Res
+ end;
+ Res ->
+ Res
+ end of
+ badarg ->
+ erlang:error(badarg, [Pid, Type]);
+ Result ->
+ Result
+ end.
%% process_flag/3
-spec process_flag(Pid, Flag, Value) -> OldValue when
@@ -1515,8 +1543,15 @@ process_display(_Pid, _Type) ->
Flag :: save_calls,
Value :: non_neg_integer(),
OldValue :: non_neg_integer().
-process_flag(_Pid, _Flag, _Value) ->
- erlang:nif_error(undefined).
+process_flag(Pid, Flag, Value) ->
+ case case erts_internal:process_flag(Pid, Flag, Value) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res -> Res
+ end of
+ badarg -> erlang:error(badarg, [Pid, Flag, Value]);
+ Result -> Result
+ end.
%% process_info/1
-spec process_info(Pid) -> Info when
@@ -1670,12 +1705,26 @@ setnode(_P1, _P2) ->
erlang:nif_error(undefined).
%% setnode/3
--spec erlang:setnode(P1, P2, P3) -> dist_handle() when
- P1 :: atom(),
- P2 :: port(),
- P3 :: {term(), term(), term(), term()}.
-setnode(_P1, _P2, _P3) ->
- erlang:nif_error(undefined).
+-spec erlang:setnode(Node, DistCtrlr, Opts) -> dist_handle() when
+ Node :: atom(),
+ DistCtrlr :: port() | pid(),
+ Opts :: {integer(), integer(), atom(), atom()}.
+setnode(Node, DistCtrlr, {Flags, Ver, IC, OC} = Opts) when erlang:is_atom(IC),
+ erlang:is_atom(OC) ->
+ case case erts_internal:create_dist_channel(Node, DistCtrlr,
+ Flags, Ver) of
+ {ok, DH} -> DH;
+ {message, Ref} -> receive {Ref, Res} -> Res end;
+ Err -> Err
+ end of
+ Error when erlang:is_atom(Error) ->
+ erlang:error(Error, [Node, DistCtrlr, Opts]);
+ DHandle ->
+ DHandle
+ end;
+setnode(Node, DistCtrlr, Opts) ->
+ erlang:error(badarg, [Node, DistCtrlr, Opts]).
+
%% size/1
%% Shadowed by erl_bif_types: erlang:size/1
@@ -1734,9 +1783,32 @@ start_timer(_Time, _Dest, _Msg, _Options) ->
-spec erlang:suspend_process(Suspendee, OptList) -> boolean() when
Suspendee :: pid(),
OptList :: [Opt],
- Opt :: unless_suspending | asynchronous.
-suspend_process(_Suspendee, _OptList) ->
- erlang:nif_error(undefined).
+ Opt :: unless_suspending | asynchronous | {asynchronous, term()}.
+suspend_process(Suspendee, OptList) ->
+ case case erts_internal:suspend_process(Suspendee, OptList) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res ->
+ Res
+ end of
+ true -> true;
+ false -> false;
+ Error -> erlang:error(Error, [Suspendee, OptList])
+ end.
+
+-spec erlang:suspend_process(Suspendee) -> 'true' when
+ Suspendee :: pid().
+suspend_process(Suspendee) ->
+ case case erts_internal:suspend_process(Suspendee, []) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res ->
+ Res
+ end of
+ true -> true;
+ false -> erlang:error(internal_error, [Suspendee]);
+ Error -> erlang:error(Error, [Suspendee])
+ end.
%% system_monitor/0
-spec erlang:system_monitor() -> MonSettings when
@@ -3030,15 +3102,6 @@ send_nosuspend(Pid, Msg, Opts) ->
localtime_to_universaltime(Localtime) ->
erlang:localtime_to_universaltime(Localtime, undefined).
--spec erlang:suspend_process(Suspendee) -> 'true' when
- Suspendee :: pid().
-suspend_process(P) ->
- case catch erlang:suspend_process(P, []) of
- {'EXIT', {Reason, _}} -> erlang:error(Reason, [P]);
- {'EXIT', Reason} -> erlang:error(Reason, [P]);
- Res -> Res
- end.
-
%%
%% Port BIFs
%%
@@ -3552,11 +3615,9 @@ max(A, _) -> A.
%%
-type memory_type() :: 'total' | 'processes' | 'processes_used' | 'system'
- | 'atom' | 'atom_used' | 'binary' | 'code' | 'ets'
- | 'low' | 'maximum'.
+ | 'atom' | 'atom_used' | 'binary' | 'code' | 'ets'.
-define(CARRIER_ALLOCS, [mseg_alloc]).
--define(LOW_ALLOCS, [ll_low_alloc, std_low_alloc]).
-define(ALL_NEEDED_ALLOCS, (erlang:system_info(alloc_util_allocators)
-- ?CARRIER_ALLOCS)).
@@ -3568,9 +3629,7 @@ max(A, _) -> A.
atom_used = 0,
binary = 0,
code = 0,
- ets = 0,
- low = 0,
- maximum = 0}).
+ ets = 0}).
-spec erlang:memory() -> [{Type, Size}] when
Type :: memory_type(),
@@ -3580,14 +3639,6 @@ memory() ->
notsup ->
erlang:error(notsup);
Mem ->
- InstrTail = case Mem#memory.maximum of
- 0 -> [];
- _ -> [{maximum, Mem#memory.maximum}]
- end,
- Tail = case Mem#memory.low of
- 0 -> InstrTail;
- _ -> [{low, Mem#memory.low} | InstrTail]
- end,
[{total, Mem#memory.total},
{processes, Mem#memory.processes},
{processes_used, Mem#memory.processes_used},
@@ -3596,7 +3647,7 @@ memory() ->
{atom_used, Mem#memory.atom_used},
{binary, Mem#memory.binary},
{code, Mem#memory.code},
- {ets, Mem#memory.ets} | Tail]
+ {ets, Mem#memory.ets}]
end.
-spec erlang:memory(Type :: memory_type()) -> non_neg_integer();
@@ -3684,16 +3735,6 @@ need_mem_info(binary) ->
{false, [binary_alloc], true, false};
need_mem_info(ets) ->
{true, [ets_alloc], true, false};
-need_mem_info(low) ->
- LowAllocs = ?LOW_ALLOCS -- ?CARRIER_ALLOCS,
- {_, _, FeatureList, _} = erlang:system_info(allocator),
- AlcUAllocs = case LowAllocs -- FeatureList of
- [] -> LowAllocs;
- _ -> []
- end,
- {false, AlcUAllocs, true, true};
-need_mem_info(maximum) ->
- {true, [], true, true};
need_mem_info(_) ->
{false, [], false, true}.
@@ -3706,8 +3747,6 @@ get_memval(atom_used, #memory{atom_used = V}) -> V;
get_memval(binary, #memory{binary = V}) -> V;
get_memval(code, #memory{code = V}) -> V;
get_memval(ets, #memory{ets = V}) -> V;
-get_memval(low, #memory{low = V}) -> V;
-get_memval(maximum, #memory{maximum = V}) -> V;
get_memval(_, #memory{}) -> 0.
memory_is_supported() ->
@@ -3762,16 +3801,6 @@ fix_proc([_ | Rest], Acc) ->
fix_proc([], Acc) ->
Acc.
-is_low_alloc(_A, []) ->
- false;
-is_low_alloc(A, [A|_As]) ->
- true;
-is_low_alloc(A, [_A|As]) ->
- is_low_alloc(A, As).
-
-is_low_alloc(A) ->
- is_low_alloc(A, ?LOW_ALLOCS).
-
au_mem_data(notsup, _) ->
notsup;
au_mem_data(_, [{_, false} | _]) ->
@@ -3824,16 +3853,11 @@ au_mem_data(#memory{total = Tot,
Rest)
end;
au_mem_data(#memory{total = Tot,
- system = Sys,
- low = Low} = Mem,
- [{A, _, Data} | Rest]) ->
+ system = Sys} = Mem,
+ [{_, _, Data} | Rest]) ->
Sz = blocks_size(Data, 0),
au_mem_data(Mem#memory{total = Tot+Sz,
- system = Sys+Sz,
- low = case is_low_alloc(A) of
- true -> Low+Sz;
- false -> Low
- end},
+ system = Sys+Sz},
Rest);
au_mem_data(EMD, []) ->
EMD.
@@ -3855,10 +3879,6 @@ receive_emd(Ref) ->
receive_emd(Ref, #memory{}, erlang:system_info(schedulers)).
aa_mem_data(#memory{} = Mem,
- [{maximum, Max} | Rest]) ->
- aa_mem_data(Mem#memory{maximum = Max},
- Rest);
-aa_mem_data(#memory{} = Mem,
[{total, Tot} | Rest]) ->
aa_mem_data(Mem#memory{total = Tot,
system = 0}, % system will be adjusted later
@@ -3981,4 +4001,3 @@ gc_info(Ref, N, {OrigColls,OrigRecl}) ->
{Ref, {_,Colls, Recl}} ->
gc_info(Ref, N-1, {Colls+OrigColls,Recl+OrigRecl})
end.
-
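The erlang.erl changes above export stubs for the new is_map_key/2 and map_get/2 BIFs (the real implementations live in the emulator and are shadowed by erl_bif_types). A minimal usage sketch, not part of the patch; the module and function names are made up, and it assumes an OTP 21 runtime where both BIFs are auto-imported and allowed in guards:

    -module(map_bif_example).
    -export([price/2]).

    %% is_map_key/2 in the guard avoids the error map_get/2 raises for a
    %% missing key.
    price(Item, Prices) when is_map_key(Item, Prices) ->
        map_get(Item, Prices);
    price(_Item, _Prices) ->
        undefined.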
diff --git a/erts/preloaded/src/erts_dirty_process_signal_handler.erl b/erts/preloaded/src/erts_dirty_process_signal_handler.erl
index ab71790b9d..381f81ef14 100644
--- a/erts/preloaded/src/erts_dirty_process_signal_handler.erl
+++ b/erts/preloaded/src/erts_dirty_process_signal_handler.erl
@@ -50,10 +50,12 @@ handle_request(Pid) when is_pid(Pid) ->
handle_incoming_signals(Pid, 0);
handle_request({Requester, Target, Prio,
{SysTaskOp, ReqId, Arg} = Op} = Request) ->
- case handle_sys_task(Requester, Target, SysTaskOp, ReqId, Arg) of
- true ->
+ case handle_sys_task(Requester, Target, SysTaskOp, ReqId, Arg, 0) of
+ done ->
ok;
- false ->
+ busy ->
+ self() ! Request;
+ normal ->
%% Target has stopped executing dirty since the
%% initial request was made. Dispatch the
%% request to target and let it handle it itself...
@@ -83,15 +85,19 @@ handle_incoming_signals(Pid, N) ->
_Res -> ok
end.
-handle_sys_task(Requester, Target, check_process_code, ReqId, Module) ->
- case erts_internal:is_process_executing_dirty(Target) of
- false ->
- false;
- true ->
- _ = check_process(Requester, Target, ReqId, Module),
- true
+handle_sys_task(Requester, Target, check_process_code, ReqId, Module, N) ->
+ case erts_internal:check_dirty_process_code(Target, Module) of
+ Bool when Bool == true; Bool == false ->
+ Requester ! {check_process_code, ReqId, Bool},
+ done;
+ busy ->
+ case N > 5 of
+ true ->
+ busy;
+ false ->
+ handle_sys_task(Requester, Target, check_process_code,
+ ReqId, Module, N+1)
+ end;
+ Res ->
+ Res
end.
-
-check_process(Requester, Target, ReqId, Module) ->
- Result = erts_internal:check_dirty_process_code(Target, Module),
- Requester ! {check_process_code, ReqId, Result}.
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index da5c9c68ed..88f47e917b 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -80,6 +80,16 @@
-export([is_process_alive/1, is_process_alive/2]).
+-export([gather_alloc_histograms/1, gather_carrier_info/1]).
+
+-export([suspend_process/2]).
+
+-export([process_display/2]).
+
+-export([process_flag/3]).
+
+-export([create_dist_channel/4]).
+
%%
%% Await result of send to port
%%
@@ -301,7 +311,8 @@ get_cpc_opts([{allow_gc, AllowGC} | Options], Async) when AllowGC == true;
get_cpc_opts([], Async) ->
Async.
--spec check_dirty_process_code(Pid,Module) -> 'true' | 'false' when
+-spec check_dirty_process_code(Pid, Module) -> Result when
+ Result :: boolean() | 'normal' | 'busy',
Pid :: pid(),
Module :: module().
check_dirty_process_code(_Pid,_Module) ->
@@ -621,3 +632,62 @@ is_process_alive(Pid) ->
Res
end.
+-spec gather_alloc_histograms({Type, SchedId, HistWidth, HistStart, Ref}) -> MsgCount when
+ Type :: atom(),
+ SchedId :: non_neg_integer(),
+ HistWidth :: non_neg_integer(),
+ HistStart :: non_neg_integer(),
+ Ref :: reference(),
+ MsgCount :: non_neg_integer().
+
+gather_alloc_histograms(_) ->
+ erlang:nif_error(undef).
+
+-spec gather_carrier_info({Type, SchedId, HistWidth, HistStart, Ref}) -> MsgCount when
+ Type :: atom(),
+ SchedId :: non_neg_integer(),
+ HistWidth :: non_neg_integer(),
+ HistStart :: non_neg_integer(),
+ Ref :: reference(),
+ MsgCount :: non_neg_integer().
+
+gather_carrier_info(_) ->
+ erlang:nif_error(undef).
+
+-spec suspend_process(Suspendee, OptList) -> Result when
+ Result :: boolean() | 'badarg' | reference(),
+ Suspendee :: pid(),
+ OptList :: [Opt],
+ Opt :: unless_suspending | asynchronous | {asynchronous, term()}.
+
+suspend_process(_Suspendee, _OptList) ->
+ erlang:nif_error(undefined).
+
+%% process_display/2
+-spec process_display(Pid, Type) -> 'true' | 'badarg' | reference() when
+ Pid :: pid(),
+ Type :: backtrace.
+process_display(_Pid, _Type) ->
+ erlang:nif_error(undefined).
+
+%% process_flag/3
+-spec process_flag(Pid, Flag, Value) -> OldValue | 'badarg' | reference() when
+ Pid :: pid(),
+ Flag :: save_calls,
+ Value :: non_neg_integer(),
+ OldValue :: non_neg_integer().
+process_flag(_Pid, _Flag, _Value) ->
+ erlang:nif_error(undefined).
+
+-spec create_dist_channel(Node, DistCtrlr, Flags, Ver) -> Result when
+ Node :: atom(),
+ DistCtrlr :: port() | pid(),
+ Flags :: integer(),
+ Ver :: integer(),
+ Result :: {'ok', erlang:dist_handle()}
+ | {'message', reference()}
+ | 'badarg'
+ | 'system_limit'.
+
+create_dist_channel(_Node, _DistCtrlr, _Flags, _Ver) ->
+ erlang:nif_error(undefined).
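The suspend_process/2, process_display/2, and process_flag/3 stubs above share one contract: the BIF either returns the final result directly or hands back a reference, in which case the real result arrives later as a {Ref, Result} message once the target process has handled the signal (create_dist_channel/4 uses the {message, Ref} variant of the same idea). The erlang.erl wrappers earlier in this diff inline the receive; below is a minimal sketch of that pattern, not part of the patch, with illustrative module and helper names:

    -module(erts_internal_await_example).
    -export([suspend/2]).

    %% If the BIF handed back a reference, wait for the asynchronous reply;
    %% otherwise the returned value is already the result.
    await_result(Ref) when is_reference(Ref) ->
        receive
            {Ref, Res} -> Res
        end;
    await_result(Res) ->
        Res.

    %% Mirrors the shape of the new erlang:suspend_process/2 wrapper.
    suspend(Suspendee, OptList) ->
        case await_result(erts_internal:suspend_process(Suspendee, OptList)) of
            true  -> true;
            false -> false;
            Error -> erlang:error(Error, [Suspendee, OptList])
        end.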
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 0c74169e97..9e3de87e08 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -485,7 +485,12 @@ do_handle_msg(Msg,State) ->
X ->
case whereis(user) of
undefined ->
- catch error_logger ! {info, self(), {self(), X, []}};
+ Time = erlang:monotonic_time(microsecond),
+ catch logger ! {log, info, "init got unexpected: ~p", [X],
+ #{pid=>self(),
+ gl=>self(),
+ time=>Time,
+ error_logger=>#{tag=>info_msg}}};
User ->
User ! X,
ok
@@ -559,8 +564,11 @@ do_stop({stop,Status},State) ->
clear_system(BootPid,State) ->
Heart = get_heart(State#state.kernel),
- shutdown_pids(Heart,BootPid,State),
- unload(Heart).
+ Logger = get_logger(State#state.kernel),
+ shutdown_pids(Heart,Logger,BootPid,State),
+ unload(Heart),
+ kill_em([Logger]),
+ do_unload([logger_server]).
flush() ->
receive
@@ -580,19 +588,26 @@ stop_heart(State) ->
shutdown_kernel_pid(Pid, BootPid, self(), State)
end.
-shutdown_pids(Heart,BootPid,State) ->
+shutdown_pids(Heart,Logger,BootPid,State) ->
Timer = shutdown_timer(State#state.flags),
catch shutdown(State#state.kernel,BootPid,Timer,State),
- kill_all_pids(Heart), % Even the shutdown timer.
- kill_all_ports(Heart),
+ kill_all_pids(Heart,Logger), % Even the shutdown timer.
+ kill_all_ports(Heart), % Logger has no ports
flush_timout(Timer).
-get_heart([{heart,Pid}|_Kernel]) -> Pid;
-get_heart([_|Kernel]) -> get_heart(Kernel);
-get_heart(_) -> false.
+get_heart(Kernel) ->
+ get_kernelpid(heart,Kernel).
+get_logger(Kernel) ->
+ get_kernelpid(logger,Kernel).
-shutdown([{heart,_Pid}|Kernel],BootPid,Timer,State) ->
+get_kernelpid(Name,[{Name,Pid}|_Kernel]) -> Pid;
+get_kernelpid(Name,[_|Kernel]) -> get_kernelpid(Name,Kernel);
+get_kernelpid(_,_) -> false.
+
+
+shutdown([{Except,_Pid}|Kernel],BootPid,Timer,State)
+ when Except==heart; Except==logger ->
shutdown(Kernel, BootPid, Timer, State);
shutdown([{_Name,Pid}|Kernel],BootPid,Timer,State) ->
shutdown_kernel_pid(Pid, BootPid, Timer, State),
@@ -644,24 +659,25 @@ resend(_) ->
%%
%% Kill all existing pids in the system (except init and heart).
-kill_all_pids(Heart) ->
- case get_pids(Heart) of
+kill_all_pids(Heart,Logger) ->
+ case get_pids(Heart,Logger) of
[] ->
ok;
Pids ->
kill_em(Pids),
- kill_all_pids(Heart) % Continue until all are really killed.
+ kill_all_pids(Heart,Logger) % Continue until all are really killed.
end.
%% All except system processes.
-get_pids(Heart) ->
+get_pids(Heart,Logger) ->
Pids = [P || P <- processes(), not erts_internal:is_system_process(P)],
- delete(Heart,self(),Pids).
+ delete(Heart,Logger,self(),Pids).
-delete(Heart,Init,[Heart|Pids]) -> delete(Heart,Init,Pids);
-delete(Heart,Init,[Init|Pids]) -> delete(Heart,Init,Pids);
-delete(Heart,Init,[Pid|Pids]) -> [Pid|delete(Heart,Init,Pids)];
-delete(_,_,[]) -> [].
+delete(Heart,Logger,Init,[Heart|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Logger|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Init|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Pid|Pids]) -> [Pid|delete(Heart,Logger,Init,Pids)];
+delete(_,_,_,[]) -> [].
kill_em([Pid|Pids]) ->
exit(Pid,kill),
@@ -691,9 +707,9 @@ kill_all_ports(_,_) ->
ok.
unload(false) ->
- do_unload(sub(erlang:pre_loaded(),erlang:loaded()));
+ do_unload(sub([logger_server|erlang:pre_loaded()],erlang:loaded()));
unload(_) ->
- do_unload(sub([heart|erlang:pre_loaded()],erlang:loaded())).
+ do_unload(sub([heart,logger_server|erlang:pre_loaded()],erlang:loaded())).
do_unload([M|Mods]) ->
catch erlang:purge_module(M),
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 432a8c15cd..558a0f883f 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -557,8 +557,13 @@ list_dir_convert([RawName | Rest], SkipInvalid, Result) ->
{error, ignore} ->
list_dir_convert(Rest, SkipInvalid, Result);
{error, warning} ->
- error_logger:warning_msg(
- "Non-unicode filename ~p ignored\n", [RawName]),
+            %% this is equivalent to calling error_logger:warning_msg/2, which
+            %% we don't want to do from code_server during system boot
+ logger ! {log,warning,"Non-unicode filename ~p ignored\n", [RawName],
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>warning_msg}}},
list_dir_convert(Rest, SkipInvalid, Result);
{error, _} ->
{error, {no_translation, RawName}}
diff --git a/erts/test/erlc_SUITE.erl b/erts/test/erlc_SUITE.erl
index 394ecc8964..622c4ec06b 100644
--- a/erts/test/erlc_SUITE.erl
+++ b/erts/test/erlc_SUITE.erl
@@ -505,7 +505,7 @@ run_command(Dir, {win32, _}, Cmd) ->
{BatchFile,
Run,
["@echo off\r\n",
- "set ERLC_EMULATOR=", atom_to_list(lib:progname()), "\r\n",
+ "set ERLC_EMULATOR=", ct:get_progname(), "\r\n",
Cmd, "\r\n",
"if errorlevel 1 echo _ERROR_\r\n",
"if not errorlevel 1 echo _OK_\r\n"]};
@@ -514,7 +514,7 @@ run_command(Dir, {unix, _}, Cmd) ->
{Name,
"/bin/sh " ++ Name,
["#!/bin/sh\n",
- "ERLC_EMULATOR='", atom_to_list(lib:progname()), "'\n",
+ "ERLC_EMULATOR='", ct:get_progname(), "'\n",
"export ERLC_EMULATOR\n",
Cmd, "\n",
"case $? in\n",
diff --git a/erts/test/upgrade_SUITE.erl b/erts/test/upgrade_SUITE.erl
index 31ceb06314..73d221cfab 100644
--- a/erts/test/upgrade_SUITE.erl
+++ b/erts/test/upgrade_SUITE.erl
@@ -132,7 +132,7 @@ upgrade_test1(FromVsn,ToVsn,Config) ->
{FromRel,FromApps} = target_system(FromRelName, FromVsn,
CreateDir, InstallDir,Config),
- {ToRel,ToApps} = upgrade_system(FromRel, ToRelName, ToVsn,
+ {ToRel,ToApps} = upgrade_system(FromVsn, FromRel, ToRelName, ToVsn,
CreateDir, InstallDir),
do_upgrade(FromVsn, FromApps, ToRel, ToApps, InstallDir).
@@ -216,7 +216,7 @@ target_system(RelName0,RelVsn,CreateDir,InstallDir,Config) ->
%%% Create a release containing the current (the test node) OTP
%%% release, including relup to allow upgrade from an earlier OTP
%%% release.
-upgrade_system(FromRel, ToRelName0, ToVsn,
+upgrade_system(FromVsn, FromRel, ToRelName0, ToVsn,
CreateDir, InstallDir) ->
{RelName,Apps,_} = create_relfile(node(),CreateDir,ToRelName0,ToVsn),
@@ -226,6 +226,11 @@ upgrade_system(FromRel, ToRelName0, ToVsn,
ok = systools:make_relup(RelName,[FromRel],[FromRel],
[{path,[FromPath]},
{outdir,CreateDir}]),
+ case {FromVsn,ToVsn} of
+ {"20"++_,"21"++_} -> fix_relup_inets_ftp(filename:dirname(RelName));
+ _ -> ok
+ end,
+
SysConfig = filename:join([CreateDir, "sys.config"]),
write_file(SysConfig, "[]."),
@@ -233,6 +238,41 @@ upgrade_system(FromRel, ToRelName0, ToVsn,
{RelName,Apps}.
+%% In OTP-21, ftp and tftp were split out from inets and formed two
+%% new separate applications. When creating the relup, systools
+%% automatically adds new applications first, before upgrading
+%% existing applications. Since ftp and tftp have processes with the
+%% same name as in the old version of inets, the upgrade failed when
+%% trying to start the new applications (their processes already exist).
+%%
+%% To work around this problem, this function adds an instruction to
+%% stop inets before the new applications are started. This is a very
+%% specific adjustment, and it will be needed for any upgrade which
+%% involves conversion from inets to ftp/tftp.
+fix_relup_inets_ftp(Dir) ->
+ Filename = filename:join(Dir,"relup"),
+ {ok,[{ToVsn,Up,Down}]} = file:consult(Filename),
+ [{FromVsn,UpDescr,UpInstr}] = Up,
+ [{FromVsn,DownDescr,DownInstr}] = Down,
+
+ Fun = fun(point_of_no_return) -> false;
+ (_) -> true
+ end,
+ {UpBefore,[point_of_no_return|UpAfter]} = lists:splitwith(Fun,UpInstr),
+ {DownBefore,[point_of_no_return|DownAfter]} = lists:splitwith(Fun,DownInstr),
+ NewRelup =
+ {ToVsn,
+ [{FromVsn,UpDescr,UpBefore++[point_of_no_return,
+ {apply,{application,stop,[inets]}} |
+ UpAfter]}],
+ [{FromVsn,DownDescr,DownBefore++[point_of_no_return,
+ {apply,{application,stop,[inets]}} |
+ DownAfter]}]},
+ {ok, Fd} = file:open(Filename, [write,{encoding,utf8}]),
+ io:format(Fd, "%% ~s~n~tp.~n", [epp:encoding_to_string(utf8),NewRelup]),
+ ok = file:close(Fd).
+
+
%%%-----------------------------------------------------------------
%%% Start a new node running the release from target_system/5
%%% above. Then upgrade to the system from upgrade_system/5.
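For reference, a hypothetical relup term showing what fix_relup_inets_ftp/1 above produces; the version strings are placeholders and the instruction lists generated by systools:make_relup/4 are elided, leaving only the {apply,{application,stop,[inets]}} instruction that the function splices in directly after point_of_no_return in both the upgrade and the downgrade scripts:

    {"21.0",
     [{"20.3", [],
       [%% ...generated instructions before point_of_no_return elided...
        point_of_no_return,
        {apply,{application,stop,[inets]}}
        %% ...remaining generated upgrade instructions elided...
       ]}],
     [{"20.3", [],
       [point_of_no_return,
        {apply,{application,stop,[inets]}}
        %% ...remaining generated downgrade instructions elided...
       ]}]}.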
diff --git a/erts/vsn.mk b/erts/vsn.mk
index c3a62a5535..25acd9cc34 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 9.3
+VSN = 9.3.1
# Port number 4365 in 4.2
# Port number 4366 in 4.3