Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 22
-rw-r--r--  erts/emulator/beam/beam_emu.c | 2
-rw-r--r--  erts/emulator/beam/benchmark.c | 65
-rw-r--r--  erts/emulator/beam/benchmark.h | 39
-rw-r--r--  erts/emulator/beam/bif.tab | 3
-rw-r--r--  erts/emulator/beam/break.c | 129
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 317
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 13
-rw-r--r--  erts/emulator/beam/erl_async.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 7
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 40
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 164
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 1
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 24
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 52
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 1
-rw-r--r--  erts/emulator/beam/erl_driver.h | 5
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 7
-rw-r--r--  erts/emulator/beam/erl_gc.c | 18
-rw-r--r--  erts/emulator/beam/erl_init.c | 13
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 2
-rw-r--r--  erts/emulator/beam/erl_math.c | 18
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 2
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 2
-rw-r--r--  erts/emulator/beam/erl_process.c | 311
-rw-r--r--  erts/emulator/beam/erl_process.h | 21
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 174
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 100
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 6
-rw-r--r--  erts/emulator/beam/erl_smp.h | 222
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 94
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 64
-rw-r--r--  erts/emulator/beam/erl_threads.h | 705
-rw-r--r--  erts/emulator/beam/erl_time.h | 2
-rw-r--r--  erts/emulator/beam/erl_trace.c | 7
-rw-r--r--  erts/emulator/beam/erl_utils.h | 43
-rw-r--r--  erts/emulator/beam/erl_vm.h | 1
-rw-r--r--  erts/emulator/beam/external.h | 1
-rw-r--r--  erts/emulator/beam/global.h | 2
-rw-r--r--  erts/emulator/beam/io.c | 31
-rw-r--r--  erts/emulator/beam/sys.h | 23
-rw-r--r--  erts/emulator/beam/utils.c | 124
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 110
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 73
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 12
-rw-r--r--  erts/emulator/hipe/hipe_amd64.c | 18
-rw-r--r--  erts/emulator/hipe/hipe_arch.h | 1
-rw-r--r--  erts/emulator/hipe/hipe_arm.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_arm.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 73
-rw-r--r--  erts/emulator/hipe/hipe_arm_glue.S | 41
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 22
-rw-r--r--  erts/emulator/hipe/hipe_bif0.tab | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif1.c | 53
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.c | 229
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.h | 23
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.tab | 25
-rw-r--r--  erts/emulator/hipe/hipe_ppc.c | 4
-rw-r--r--  erts/emulator/hipe/hipe_ppc.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_sparc.c | 17
-rw-r--r--  erts/emulator/hipe/hipe_sparc.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_x86.c | 17
-rw-r--r--  erts/emulator/hipe/hipe_x86.h | 4
-rw-r--r--  erts/emulator/internal_doc/CarrierMigration.md | 134
-rw-r--r--  erts/emulator/internal_doc/SuperCarrier.md | 191
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 4
-rw-r--r--  erts/emulator/sys/unix/erl_child_setup.c | 13
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 41
-rw-r--r--  erts/emulator/sys/unix/sys.c | 285
-rw-r--r--  erts/emulator/sys/unix/sys_float.c | 4
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 12
-rw-r--r--  erts/emulator/sys/win32/sys.c | 21
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 33
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/trace_bif_SUITE.erl | 4
-rwxr-xr-x  erts/emulator/utils/make_compiler_flags | 2
80 files changed, 3011 insertions, 1397 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 53fc7bd713..a632faf57d 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -349,16 +349,6 @@ endif
EPCRE_LIB = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
DEPLIBS += $(EPCRE_LIB)
-PERFCTR_PATH=@PERFCTR_PATH@
-USE_PERFCTR=@USE_PERFCTR@
-ifdef PERFCTR_PATH
-LIBS += $(PERFCTR_PATH)/usr.lib/libperfctr.a
-else
-ifdef USE_PERFCTR
-LIBS += -lperfctr
-endif
-endif
-
LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
@@ -566,9 +556,6 @@ HIPE_ppc64_TAB=hipe/hipe_ppc64.tab $(HIPE_ARCH64_TAB)
HIPE_arm_TAB=hipe/hipe_arm.tab
HIPE_ARCH_TAB=$(HIPE_$(ARCH)_TAB)
BIFS += hipe/hipe_bif0.tab hipe/hipe_bif1.tab hipe/hipe_bif2.tab $(HIPE_ARCH_TAB)
-ifdef USE_PERFCTR
-BIFS += hipe/hipe_perfctr.tab
-endif
endif
$(TARGET)/erl_bif_table.c \
@@ -664,10 +651,6 @@ COMMON_INCLUDES += -I../include/internal -I../include/internal/$(TARGET)
INCLUDES = -I$(TTF_DIR) $(COMMON_INCLUDES)
-ifdef PERFCTR_PATH
-INCLUDES += -I$(PERFCTR_PATH)/usr.lib -I$(PERFCTR_PATH)/linux/include
-endif
-
ifeq ($(TARGET),win32)
$(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c
$(V_CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@
@@ -927,14 +910,11 @@ HIPE_OBJS= \
$(OBJDIR)/hipe_mode_switch.o \
$(OBJDIR)/hipe_native_bif.o \
$(OBJDIR)/hipe_stack.o $(HIPE_ARCH_OBJS)
-ifdef USE_PERFCTR
-HIPE_OBJS += $(OBJDIR)/hipe_perfctr.o
-endif
ifdef HIPE_ENABLED
EXTRA_BASE_OBJS += $(HIPE_OBJS)
endif
-BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
+BASE_OBJS = $(EMU_OBJS) $(RUN_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS)
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 292971a387..b89c8b3900 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -5171,7 +5171,7 @@ get_map_elements_fail:
#ifndef NO_JUMP_TABLE
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
/* Are tables correctly generated by beam_makeops? */
- ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
+ ERTS_CT_ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
#ifdef DEBUG
counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
#endif
diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c
index 8613131176..b16fe6b271 100644
--- a/erts/emulator/beam/benchmark.c
+++ b/erts/emulator/beam/benchmark.c
@@ -37,37 +37,9 @@ unsigned long long major_gc;
#ifdef BM_TIMERS
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-
-#include "libperfctr.h"
-struct vperfctr *system_clock;
-double cpu_khz;
-BM_NEW_TIMER(start);
-
-static double get_hrvtime(void)
-{
- unsigned long long ticks;
- double milli_seconds;
-
- ticks = vperfctr_read_tsc(system_clock);
- milli_seconds = (double)ticks / cpu_khz;
- return milli_seconds;
-}
-
-static void stop_hrvtime(void)
-{
- if(system_clock)
- {
- vperfctr_stop(system_clock);
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
-}
-
-#else /* not perfctr, asuming Solaris */
+/* assuming Solaris */
#include <time.h>
BM_TIMER_T system_clock;
-#endif
unsigned long local_pause_times[MAX_PAUSE_TIME];
unsigned long pause_times[MAX_PAUSE_TIME];
@@ -117,40 +89,6 @@ unsigned long long message_sizes[1000];
void init_benchmarking()
{
#ifdef BM_TIMERS
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
- /* pass `--with-perfctr=/path/to/perfctr' when configuring */
- struct perfctr_info info;
- struct vperfctr_control control;
- int i;
-
- system_clock = vperfctr_open();
- if (system_clock != NULL)
- {
- if (vperfctr_info(system_clock,&info) >= 0)
- {
- cpu_khz = (double)info.cpu_khz;
- if (info.cpu_features & PERFCTR_FEATURE_RDTSC)
- {
- memset(&control,0,sizeof control);
- control.cpu_control.tsc_on = 1;
- }
- }
- if (vperfctr_control(system_clock,&control) < 0)
- {
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
- }
-
- for (i = 0; i < 1000; i++)
- {
- BM_START_TIMER(system);
- BM_STOP_TIMER(system);
- }
-
- timer_time = system_time / 1000;
- start_time = 0;
-#else
int i;
for (i = 0; i < 1000; i++)
{
@@ -158,7 +96,6 @@ void init_benchmarking()
BM_STOP_TIMER(system);
}
timer_time = system_time / 1000;
-#endif
for (i = 0; i < MAX_PAUSE_TIME; i++) {
local_pause_times[i] = 0;
diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h
index 766edaac42..904564a96b 100644
--- a/erts/emulator/beam/benchmark.h
+++ b/erts/emulator/beam/benchmark.h
@@ -37,10 +37,7 @@
/* BM_TIMERS keeps track of the time spent in diferent parts of the
* system. It only measures accual active time, not time spent in idle
- * mode. These timers requires hardware support. For Linux, use the
- * package perfctr from user.it.uu.se/~mikpe/linux/perfctr. If this
- * package is not specified when configuring the system
- * (--with-perfctr=PATH), the Solaris hrtime_t will be used.
+ * mode. Currently, the Solaris hrtime_t will be used.
* To add new timers look below.
*/
#define BM_TIMERS
@@ -142,38 +139,7 @@ extern unsigned long long major_gc;
* meassure (send time in shared heap for instance).
*/
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-#include "libperfctr.h"
-
-#define BM_TIMER_T double
-
-extern struct vperfctr *system_clock;
-extern double cpu_khz;
-extern BM_TIMER_T start_time;
-
-#define BM_START_TIMER(t) start_time = \
- (BM_TIMER_T)vperfctr_read_tsc(system_clock) / \
- cpu_khz;
-
-#define BM_STOP_TIMER(t) do { \
- BM_TIMER_T tmp = ((BM_TIMER_T)vperfctr_read_tsc(system_clock) / cpu_khz); \
- tmp -= (start_time + timer_time); \
- t##_time += (tmp > 0 ? tmp : 0); \
-} while(0)
-
-#define BM_TIME_PRINTER(str,time) do { \
- int min,sec,milli,micro; \
- BM_TIMER_T tmp = (time) * 1000; \
- micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
- min = (uint)tmp / 60; \
- erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
-} while(0)
-
-#else /* !USE_PERFCTR (Assuming Solaris) */
+/* (Assuming Solaris) */
#define BM_TIMER_T hrtime_t
#define BM_START_TIMER(t) system_clock = sys_gethrtime()
@@ -196,7 +162,6 @@ extern BM_TIMER_T start_time;
} while(0)
extern BM_TIMER_T system_clock;
-#endif /* USE_PERFCTR */
extern BM_TIMER_T timer_time;
extern BM_TIMER_T system_time;
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 55ac778475..1d0d214e77 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -194,6 +194,7 @@ bif math:erf/1
bif math:erfc/1
bif math:exp/1
bif math:log/1
+bif math:log2/1
bif math:log10/1
bif math:sqrt/1
bif math:atan2/2
@@ -600,6 +601,8 @@ bif maps:values/1
bif erts_internal:cmp_term/2
+bif ets:take/2
+
#
# New in 17.1
#
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 5aee85174f..4ede2c9d7d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -181,7 +181,6 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
ASSERT(is_node_name_atom(mon->pid));
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
mon->pid, mon->ref);
- erts_print(to, to_arg,"}");
} else if (is_atom(mon->name)){ /* local by name */
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
erts_this_dist_entry->sysname, mon->ref);
@@ -210,25 +209,12 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "State: ");
state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- else if (state & ERTS_PSFLG_EXITING)
- erts_print(to, to_arg, "Exiting\n");
- else if (state & ERTS_PSFLG_GC) {
- garbing = 1;
- running = 1;
- erts_print(to, to_arg, "Garbing\n");
- }
- else if (state & ERTS_PSFLG_SUSPENDED)
- erts_print(to, to_arg, "Suspended\n");
- else if (state & ERTS_PSFLG_RUNNING) {
- running = 1;
- erts_print(to, to_arg, "Running\n");
- }
- else if (state & ERTS_PSFLG_ACTIVE)
- erts_print(to, to_arg, "Scheduled\n");
- else
- erts_print(to, to_arg, "Waiting\n");
+ erts_dump_process_state(to, to_arg, state);
+ if (state & ERTS_PSFLG_GC) {
+ garbing = 1;
+ running = 1;
+ } else if (state & ERTS_PSFLG_RUNNING)
+ running = 1;
/*
* If the process is registered as a global process, display the
@@ -352,6 +338,10 @@ print_process_info(int to, void *to_arg, Process *p)
#endif
erts_stack_dump(to, to_arg, p);
}
+
+ /* Display all states */
+ erts_print(to, to_arg, "Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, state);
}
static void
@@ -671,6 +661,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
#ifdef ERTS_SMP
ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
+ int bc;
#endif
int fd;
size_t envsz;
@@ -681,27 +672,39 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
char* dumpname;
int secs;
int env_erl_crash_dump_seconds_set = 1;
+ int i;
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
#ifdef ERTS_SMP
+ /* Order all managed threads to block; this has to be done
+ first to guarantee that this is the only thread to generate
+ a crash dump. */
+ bc = erts_thr_progress_fatal_error_block(&tpd_buf);
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
- * Wait for all managed threads to block. If all threads haven't blocked
- * after a minute, we go anyway and hope for the best...
- *
- * We do not release system again. We expect an exit() or abort() after
- * dump has been written.
+ * We suspend all scheduler threads so that we can dump some
+ * data about the currently running processes and scheduler data.
+ * We have to be very very careful when doing this as the schedulers
+ * could be anywhere.
*/
- erts_thr_progress_fatal_error_block(60000, &tpd_buf);
- /* Either worked or not... */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_suspend(tid);
+ }
+
+#endif
/* Allow us to pass certain places without locking... */
erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
-#else
+
+#else /* !ERTS_SMP */
erts_writing_erl_crash_dump = 1;
-#endif
+#endif /* ERTS_SMP */
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
@@ -758,9 +761,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_fprintf(stderr,"\nCrash dump is being written to: %s...", dumpname);
fd = open(dumpname,O_WRONLY | O_CREAT | O_TRUNC,0640);
- if (fd < 0)
+ if (fd < 0)
return; /* Can't create the crash dump, skip it */
-
time(&now);
erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));
@@ -774,9 +776,74 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_fdprintf(fd, "System version: ");
erts_print_system_version(fd, NULL, NULL);
erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
+
erts_fdprintf(fd, "Taints: ");
erts_print_nif_taints(fd, NULL);
erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
+
+#ifdef USE_THREADS
+ /* We want to note which thread it was that called erl_exit */
+ if (erts_get_scheduler_data()) {
+ erts_fdprintf(fd, "Calling Thread: scheduler:%d\n",
+ erts_get_scheduler_data()->no);
+ } else {
+ if (!erts_thr_getname(erts_thr_self(), dumpnamebuf, MAXPATHLEN))
+ erts_fdprintf(fd, "Calling Thread: %s\n", dumpnamebuf);
+ else
+ erts_fdprintf(fd, "Calling Thread: %p\n", erts_thr_self());
+ }
+#else
+ erts_fdprintf(fd, "Calling Thread: scheduler:1\n");
+#endif
+
+#if defined(ERTS_HAVE_TRY_CATCH)
+
+ /*
+ * erts_print_scheduler_info is not guaranteed to be safe to call
+ * here for all schedulers as we may have suspended a scheduler
+ * in the middle of updating the STACK_TOP and STACK_START
+ * variables and thus when scanning the stack we could get
+ * segmentation faults. We protect against this very unlikely
+ * scenario by using the ERTS_SYS_TRY_CATCH.
+ */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i)),
+ erts_fdprintf(fd, "** crashed **\n"));
+ }
+#endif
+
+#ifdef ERTS_SMP
+
+#if defined(ERTS_THR_HAVE_SIG_FUNCS)
+
+ /* We resume all schedulers so that we are in a known safe state
+ when we write the rest of the crash dump */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_resume(tid);
+ }
+#endif
+
+ /*
+ * Wait for all managed threads to block. If all threads haven't blocked
+ * after a minute, we go anyway and hope for the best...
+ *
+ * We do not release system again. We expect an exit() or abort() after
+ * dump has been written.
+ */
+ erts_thr_progress_fatal_error_wait(60000);
+ /* Either worked or not... */
+#endif
+
+#ifndef ERTS_HAVE_TRY_CATCH
+ /* This is safe to call here, as all schedulers are blocked */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i));
+ }
+#endif
+
info(fd, NULL); /* General system info */
if (erts_ptab_initialized(&erts_proc))
process_info(fd, NULL); /* Info about each process and port */
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 90cd227fae..f2bceff4eb 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3939,7 +3939,7 @@ static Uint
install_debug_functions(void)
{
int i;
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 55052430e1..2f277690e4 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -205,7 +205,7 @@ MBC after deallocating first block:
ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
(B)->bhdr = ((Sz) | (F)), \
(B)->u.carrier = (C))
-
+
# define IS_MBC_FIRST_ABLK(AP,B) \
((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
&& ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
@@ -378,9 +378,8 @@ do { \
#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- - sizeof(ErtsAlcCPoolData_t) \
- + ABLK_HDR_SZ) \
+ (UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ + ABLK_HDR_SZ) \
- ABLK_HDR_SZ)
#else
#define SBC_HEADER_SIZE \
@@ -929,6 +928,88 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
#ifdef ERTS_SMP
+#ifdef DEBUG
+static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* p;
+
+ ASSERT(node != sentinel);
+ for (p = sentinel->next; p != sentinel; p = p->next) {
+ if (p == node)
+ return 1;
+ }
+ return 0;
+}
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* before_me = after_me->next;
+ ASSERT(node != after_me && node != before_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* after_me = before_me->prev;
+ ASSERT(node != before_me && node != after_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+unlink_edl(ErtsDoubleLink_t* node)
+{
+ node->next->prev = node->prev;
+ node->prev->next = node->next;
+}
+
+static ERTS_INLINE void
+relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ if (node != before_me && node != before_me->prev) {
+ unlink_edl(node);
+ link_edl_before(before_me, node);
+ }
+}
+
+static ERTS_INLINE int is_abandoned(Carrier_t *crr)
+{
+ return crr->cpool.abandoned.next != NULL;
+}
+
+static ERTS_INLINE void
+link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
+{
+ ASSERT(!is_abandoned(crr));
+
+ link_edl_after(list, &crr->cpool.abandoned);
+
+ ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
+ ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+}
+
+static ERTS_INLINE void
+unlink_abandoned_carrier(Carrier_t *crr)
+{
+ ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
+ &crr->cpool.abandoned) ||
+ is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
+ &crr->cpool.abandoned));
+
+ unlink_edl(&crr->cpool.abandoned);
+
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
+}
+
static ERTS_INLINE void
clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
{
@@ -955,7 +1036,7 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
}
}
-#endif
+#endif /* ERTS_SMP */
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -1361,7 +1442,7 @@ get_pref_allctr(void *extra)
pref_ix = ERTS_ALC_GET_THR_IX();
- ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
+ ERTS_CT_ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(0 <= pref_ix && pref_ix < tspec->size);
return tspec->allctr[pref_ix];
@@ -1780,7 +1861,7 @@ handle_delayed_dealloc(Allctr_t *allctr,
* if this carrier is pulled from dc_list by cpool_fetch()
*/
ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
- ERTS_ALC_CPOOL_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
+ ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
#ifdef MBC_ABLK_OFFSET_BITS
blk->u.carrier = crr;
#else
@@ -2575,10 +2656,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
-#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
+#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
-#define ERTS_ALC_CPOOL_MAX_NO_CARRIERS 5
-#define ERTS_ALC_CPOOL_INSERT_ALLOWED_OFFSET 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
@@ -2755,9 +2835,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
(erts_aint_t) CARRIER_SZ(crr));
erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
- erts_smp_atomic_set_nob(&crr->allctr,
- ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
-
/*
* We search in 'next' direction and begin by passing
* one element before trying to insert. This in order to
@@ -2816,6 +2893,9 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
cpool_set_mod_marked(&cpd2p->prev,
(erts_aint_t) &crr->cpool,
(erts_aint_t) cpd1p);
+
+ erts_smp_atomic_set_wb(&crr->allctr,
+ ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
}
static void
@@ -2916,59 +2996,163 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- int i;
+ int i, i_stop, has_passed_sentinel;
Carrier_t *crr;
ErtsAlcCPoolData_t *cpdp;
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *sentinel;
+ ErtsDoubleLink_t* dl;
+ ErtsDoubleLink_t* first_old_traitor;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- i = 0;
+ i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
+ first_old_traitor = allctr->cpool.traitor_list.next;
+ cpool_entrance = NULL;
- /* First; check our own pending dealloc carrier list... */
- crr = allctr->cpool.dc_list.last;
- while (crr && i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
- if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- unlink_carrier(&allctr->cpool.dc_list, crr);
-#ifdef ERTS_ALC_CPOOL_DEBUG
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
- ((erts_aint_t) allctr))
- == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
-#else
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
-#endif
- return crr;
+ /*
+ * Search my own pooled_list,
+ * i.e. my abandoned carriers that were in the pool last time I checked.
+ */
+
+ dl = allctr->cpool.pooled_list.next;
+ while(dl != &allctr->cpool.pooled_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+
+ ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+ }
+ else { /* Not in pool, move to traitor_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
+ }
+ if (--i <= 0) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return NULL;
}
- crr = crr->prev;
- i++;
}
- /* ... then the pool ... */
+ /* Now search traitor_list,
+ * i.e. carriers employed by other allocators last time I checked.
+ * They might have been abandoned since then.
+ */
+
+ i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
+ 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
+ dl = first_old_traitor;
+ while(dl != &allctr->cpool.traitor_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+ ASSERT(dl != &allctr->cpool.pooled_list);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+
+ /* Move to pooled_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
+ }
+ if (--i <= i_stop) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ if (i > 0)
+ break;
+ else
+ return NULL;
+ }
+ }
/*
- * We search in 'prev' direction and begin by passing
- * one element before trying to fetch. This in order to
- * avoid contention with threads inserting elements.
+ * Finally search the shared pool and try to employ foreign carriers
*/
- cpdp = cpool_aint2cpd(cpool_read(&sentinel->prev));
- if (cpdp == sentinel)
- return NULL;
+ sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ if (cpool_entrance) {
+ /* We saw a pooled carrier above, use it as entrance into the pool
+ */
+ cpdp = cpool_entrance;
+ }
+ else {
+ /* No pooled carrier seen above. Start search at cpool sentinel,
+ * but begin by passing one element before trying to fetch.
+ * This is in order to avoid contention with threads inserting elements.
+ */
+ cpool_entrance = sentinel;
+ cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
- while (i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
+ has_passed_sentinel = 0;
+ while (1) {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == sentinel) {
+ if (cpdp == cpool_entrance) {
+ if (cpool_entrance == sentinel) {
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
+ i = 0; /* Last one to inspect */
+ }
+ else if (cpdp == sentinel) {
+ if (has_passed_sentinel) {
+ /* We have been here before. cpool_entrance must have been removed */
+ return NULL;
+ }
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == sentinel)
return NULL;
- i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; /* Last one to inspect */
+ has_passed_sentinel = 1;
}
- crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
+ crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL|ERTS_CRR_ALCTR_FLG_BUSY))
- == ERTS_CRR_ALCTR_FLG_IN_POOL)
+ if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
&& (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
erts_aint_t act;
/* Try to fetch it... */
@@ -2977,11 +3161,35 @@ cpool_fetch(Allctr_t *allctr, UWord size)
exp);
if (act == exp) {
cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ unlink_abandoned_carrier(crr);
+ }
return crr;
}
}
- i++;
+ if (--i <= 0)
+ return NULL;
}
+
+ /* Last; check our own pending dealloc carrier list... */
+ crr = allctr->cpool.dc_list.last;
+ while (crr) {
+ if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ unlink_carrier(&allctr->cpool.dc_list, crr);
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
+ ((erts_aint_t) allctr))
+ == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
+#else
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+#endif
+ return crr;
+ }
+ crr = crr->prev;
+ if (--i <= 0)
+ return NULL;
+ }
+
return NULL;
}
@@ -3078,6 +3286,9 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
return;
}
+ if (is_abandoned(crr))
+ unlink_abandoned_carrier(crr);
+
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
dealloc_carrier(allctr, crr, 1);
@@ -3124,6 +3335,8 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
limit = (csz/100)*allctr->cpool.util_limit;
crr->cpool.abandon_limit = limit;
}
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
}
static void
@@ -3154,6 +3367,9 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
STAT_MBC_CPOOL_INSERT(allctr, crr);
unlink_carrier(&allctr->mbc_list, crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
allctr->remove_mbc(allctr, crr);
@@ -3661,6 +3877,11 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
if (busy_pcrr_pp && *busy_pcrr_pp) {
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
*busy_pcrr_pp = NULL;
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
cpool_delete(allctr, allctr, crr);
}
else
@@ -5540,6 +5761,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->min_block_size = sz;
}
+ allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
+ allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
+ allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
+ allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
allctr->cpool.dc_list.first = NULL;
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
@@ -5717,7 +5942,7 @@ erts_alcu_init(AlcUInit_t *init)
erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
}
#endif
- ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
+ ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
max_mseg_carriers = init->mmc;
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 7be6b1ed9d..eee920e66c 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -268,6 +268,11 @@ typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
#ifdef ERTS_SMP
+typedef struct ErtsDoubleLink_t_ {
+ struct ErtsDoubleLink_t_ *next;
+ struct ErtsDoubleLink_t_ *prev;
+}ErtsDoubleLink_t;
+
typedef struct {
erts_atomic_t next;
erts_atomic_t prev;
@@ -277,6 +282,7 @@ typedef struct {
UWord abandon_limit;
UWord blocks;
UWord blocks_size;
+ ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
} ErtsAlcCPoolData_t;
#endif
@@ -500,7 +506,12 @@ struct Allctr_t_ {
CarrierList_t sbc_list;
#ifdef ERTS_SMP
struct {
- CarrierList_t dc_list;
+ /* pooled_list, traitor list and dc_list contain only
+ carriers _created_ by this allocator */
+ ErtsDoubleLink_t pooled_list;
+ ErtsDoubleLink_t traitor_list;
+ CarrierList_t dc_list;
+
UWord abandon_limit;
int disable_abandon;
int check_limit_count;
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index decae6b2ca..bc06d41720 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -176,7 +176,7 @@ erts_init_async(void)
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
#endif
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
- char *ptr;
+ char *ptr, thr_name[16];
size_t tot_size = 0;
int i;
@@ -227,23 +227,16 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
-#endif
+ thr_opts.name = thr_name;
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(thr_opts.name, "async_%d", i+1);
-#endif
+ erts_snprintf(thr_opts.name, 16, "async_%d", i+1);
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(thr_opts.name);
-#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index d2ee5e4224..d750e34be3 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3900,6 +3900,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
}
+ else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
+ /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
+#if defined(ERTS_HAVE_TRY_CATCH)
+ erts_get_scheduler_data()->run_queue = NULL;
+#endif
+ erl_exit(ERTS_DUMP_EXIT, "%T", BIF_ARG_2);
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 64bd598ba6..7ce950e090 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -472,7 +472,7 @@ cleanup_old_port_data(erts_aint_t data)
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
size_t size;
ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
- size = sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm) - 1);
+ size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
(void *) pdhp,
&pdhp->later_op,
@@ -508,7 +508,7 @@ erts_port_data_size(Port *prt)
}
else {
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
- return (Uint) sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm)-1);
+ return (Uint) sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
}
}
@@ -550,7 +550,7 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
hsize = size_object(BIF_ARG_2);
pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
- sizeof(ErtsPortDataHeap) + hsize*(sizeof(Eterm)-1));
+ sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm));
hp = &pdhp->heap[0];
pdhp->off_heap.first = NULL;
pdhp->off_heap.overhead = 0;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 06fbbea123..f5e582b1c5 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -651,7 +651,7 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == am_all) {
if (on) {
if (!erts_cpu_timestamp) {
-#ifdef HAVE_CLOCK_GETTIME
+#ifdef HAVE_CLOCK_GETTIME_CPU_TIME
/*
Perhaps clock_gettime was found during config
on a different machine than this. We check
@@ -678,7 +678,7 @@ Eterm trace_3(BIF_ALIST_3)
if (erts_start_now_cpu() < 0) {
goto error;
}
-#endif /* HAVE_CLOCK_GETTIME */
+#endif /* HAVE_CLOCK_GETTIME_CPU_TIME */
erts_cpu_timestamp = !0;
}
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 8f246ffa07..4806befd99 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -753,6 +753,31 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RET(ret);
}
+/*
+** take(Tab, Key)
+*/
+BIF_RETTYPE ets_take_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+#ifdef DEBUG
+ int cret;
+#endif
+ Eterm ret;
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+#ifdef DEBUG
+ cret =
+#endif
+ tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+ ASSERT(cret == DB_ERROR_NONE);
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_RET(ret);
+}
+
/*
** update_element(Tab, Key, {Pos, Value})
** update_element(Tab, Key, [{Pos, Value}])
@@ -2643,7 +2668,9 @@ BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
BIF_RETTYPE ets_info_1(BIF_ALIST_1)
{
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
- am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed};
+ am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
+ am_write_concurrency,
+ am_read_concurrency};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
@@ -3670,6 +3697,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_protected;
else if (tb->common.status & DB_PUBLIC)
ret = am_public;
+ } else if (What == am_write_concurrency) {
+ ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
+ } else if (What == am_read_concurrency) {
+ ret = tb->common.status & DB_FREQ_READ ? am_true : am_false;
} else if (What == am_name) {
ret = tb->common.the_name;
} else if (What == am_keypos) {
@@ -3752,7 +3783,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
make_small(stats.max_chain_len),
- make_small(db_kept_items_hash(&tb->hash)));
+ make_small(stats.kept_items));
}
else {
ret = am_false;
@@ -3774,6 +3805,11 @@ static void print_table(int to, void *to_arg, int show, DbTable* tb)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
+ erts_print(to, to_arg, "Type: %T\n", table_info(NULL, tb, am_type));
+ erts_print(to, to_arg, "Protection: %T\n", table_info(NULL, tb, am_protection));
+ erts_print(to, to_arg, "Compressed: %T\n", table_info(NULL, tb, am_compressed));
+ erts_print(to, to_arg, "Write Concurrency: %T\n", table_info(NULL, tb, am_write_concurrency));
+ erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
void db_info(int to, void *to_arg, int show) /* Called by break handler */
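
Note: the erl_db.c hunks above add the ets:take/2 BIF (dispatched through the new db_take table method implemented per table type in the following files) and extend ets:info with the write_concurrency and read_concurrency items. A minimal Erlang sketch of the intended semantics; the table name demo and the exact values are illustrative only:

    T = ets:new(demo, [set, public, {write_concurrency, true}]),
    true = ets:insert(T, {key, 1}),
    [{key,1}] = ets:take(T, key),   %% returns the object(s) under key and removes them in one operation
    []        = ets:take(T, key),   %% the key is gone after the first take
    WC = ets:info(T, write_concurrency),  %% new info item: true | false depending on build and options
    RC = ets:info(T, read_concurrency).   %% likewise for read_concurrency
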
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 06dac8f161..c2157457a0 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -382,7 +382,7 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
static void shrink(DbTableHash* tb, int nactive);
static void grow(DbTableHash* tb, int nactive);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash*);
+ Uint sz, DbTableHash*);
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
struct mp_info *mpi);
@@ -426,6 +426,7 @@ static int db_select_count_continue_hash(Process *p, DbTable *tbl,
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
static void db_print_hash(int to,
void *to_arg,
int show,
@@ -536,6 +537,7 @@ DbTableMethod db_hash =
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
+ db_take_hash,
db_delete_all_objects_hash,
db_free_table_hash,
db_free_table_continue_hash,
@@ -646,25 +648,6 @@ restart:
/* ToDo: Maybe try grow/shrink the table as well */
}
-/* Only used by tests
-*/
-Uint db_kept_items_hash(DbTableHash *tb)
-{
- Uint kept_items = 0;
- Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
- HashDbTerm* b;
- do {
- for (b = BUCKET(tb, ix); b != NULL; b = b->next) {
- if (b->hvalue == INVALID_HASH) {
- ++kept_items;
- }
- }
- ix = next_slot(tb, ix, &lck);
- }while (ix);
- return kept_items;
-}
-
int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
@@ -879,34 +862,49 @@ Ldone:
return ret;
}
+static Eterm
+get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
+ HashDbTerm *b1, HashDbTerm **bend)
+{
+ HashDbTerm* b2 = b1->next;
+ Eterm copy;
+ Uint sz = b1->dbterm.size + 2;
+
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ while (b2 && has_key(tb, b2, key, hval)) {
+ if (b2->hvalue != INVALID_HASH)
+ sz += b2->dbterm.size + 2;
+
+ b2 = b2->next;
+ }
+ }
+ copy = build_term_list(p, b1, b2, sz, tb);
+ CHECK_TABLES();
+ if (bend) {
+ *bend = b2;
+ }
+ return copy;
+}
+
int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
int ix;
- HashDbTerm* b1;
+ HashDbTerm* b;
erts_smp_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- HashDbTerm* b2 = b1->next;
- Eterm copy;
+ b = BUCKET(tb, ix);
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- while(b2 != NULL && has_key(tb,b2,key,hval))
- b2 = b2->next;
- }
- copy = build_term_list(p, b1, b2, tb);
- CHECK_TABLES();
- *ret = copy;
+ while(b != 0) {
+ if (has_live_key(tb, b, key, hval)) {
+ *ret = get_term_list(p, tb, key, hval, b, NULL);
goto done;
}
- b1 = b1->next;
+ b = b->next;
}
*ret = NIL;
done:
@@ -1240,7 +1238,7 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
lck = RLOCK_HASH(tb, slot);
nactive = NACTIVE(tb);
if (slot < nactive) {
- *ret = build_term_list(p, BUCKET(tb, slot), 0, tb);
+ *ret = build_term_list(p, BUCKET(tb, slot), NULL, 0, tb);
retval = DB_ERROR_NONE;
}
else if (slot == nactive) {
@@ -2069,6 +2067,46 @@ trap:
}
+static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm **bp, *b;
+ HashValue hval = MAKE_HASH(key);
+ erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+ int ix = hash_to_ix(tb, hval);
+ int nitems_diff = 0;
+
+ *ret = NIL;
+ for (bp = &BUCKET(tb, ix), b = *bp; b; bp = &b->next, b = b->next) {
+ if (has_live_key(tb, b, key, hval)) {
+ HashDbTerm *bend;
+
+ *ret = get_term_list(p, tb, key, hval, b, &bend);
+ while (b != bend) {
+ --nitems_diff;
+ if (nitems_diff == -1 && IS_FIXED(tb)) {
+ /* Pseudo remove (no need to keep several of same key) */
+ add_fixed_deletion(tb, ix);
+ bp = &b->next;
+ b->hvalue = INVALID_HASH;
+ b = b->next;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ }
+ }
+ break;
+ }
+ }
+ WUNLOCK_HASH(lck);
+ if (nitems_diff) {
+ erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ try_shrink(tb);
+ }
+ return DB_ERROR_NONE;
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2104,10 +2142,38 @@ int db_mark_all_deleted_hash(DbTable *tbl)
static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
+ DbHashStats stats;
int i;
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-
+
+#ifdef ERTS_SMP
+ i = tbl->common.is_thread_safe;
+ /* If crash dumping we set table to thread safe in order to
+ avoid taking any locks */
+ if (ERTS_IS_CRASH_DUMPING)
+ tbl->common.is_thread_safe = 1;
+
+ db_calc_stats_hash(&tbl->hash, &stats);
+
+ tbl->common.is_thread_safe = i;
+#else
+ db_calc_stats_hash(&tbl->hash, &stats);
+#endif
+
+ erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
+ erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
+ erts_print(to, to_arg, "Chain Length Min: %d\n", stats.min_chain_len);
+ erts_print(to, to_arg, "Chain Length Std Dev: %f\n",
+ stats.std_dev_chain_len);
+ erts_print(to, to_arg, "Chain Length Expected Std Dev: %f\n",
+ stats.std_dev_expected);
+
+ if (IS_FIXED(tb))
+ erts_print(to, to_arg, "Fixed: %d\n", stats.kept_items);
+ else
+ erts_print(to, to_arg, "Fixed: false\n");
+
if (show) {
for (i = 0; i < NACTIVE(tb); i++) {
HashDbTerm* list = BUCKET(tb,i);
@@ -2483,23 +2549,23 @@ static int free_seg(DbTableHash *tb, int free_records)
** Copy terms from ptr1 until ptr2
** works for ptr1 == ptr2 == 0 => []
** or ptr2 == 0
+** sz is either precalculated heap size or 0 if not known
*/
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash* tb)
+ Uint sz, DbTableHash* tb)
{
- int sz = 0;
HashDbTerm* ptr;
Eterm list = NIL;
Eterm copy;
Eterm *hp, *hend;
- ptr = ptr1;
- while(ptr != ptr2) {
-
- if (ptr->hvalue != INVALID_HASH)
- sz += ptr->dbterm.size + 2;
-
- ptr = ptr->next;
+ if (!sz) {
+ ptr = ptr1;
+ while(ptr != ptr2) {
+ if (ptr->hvalue != INVALID_HASH)
+ sz += ptr->dbterm.size + 2;
+ ptr = ptr->next;
+ }
}
hp = HAlloc(p, sz);
@@ -2833,6 +2899,7 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
erts_smp_rwmtx_t* lck;
int sum = 0;
int sq_sum = 0;
+ int kept_items = 0;
int ix;
int len;
@@ -2844,6 +2911,8 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
+ if (b->hvalue == INVALID_HASH)
+ ++kept_items;
}
sum += len;
sq_sum += len*len;
@@ -2855,7 +2924,8 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_chain_len = sqrt((sq_sum - stats->avg_chain_len*sum) / NACTIVE(tb));
/* Expected standard deviation from a good uniform hash function,
ie binomial distribution (not taking the linear hashing into acount) */
- stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->kept_items = kept_items;
}
#ifdef HARDDEBUG
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e68081a5b1..f12cd363b0 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -104,6 +104,7 @@ typedef struct {
float std_dev_expected;
int max_chain_len;
int min_chain_len;
+ int kept_items;
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index a62a83a928..720c0659c3 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -383,6 +383,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
static void db_print_tree(int to, void *to_arg,
int show, DbTable *tbl);
static int db_free_table_tree(DbTable *tbl);
@@ -431,6 +432,7 @@ DbTableMethod db_tree =
db_select_delete_continue_tree,
db_select_count_tree,
db_select_count_continue_tree,
+ db_take_tree,
db_delete_all_objects_tree,
db_free_table_tree,
db_free_table_continue_tree,
@@ -1722,6 +1724,28 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
+static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *this;
+
+ *ret = NIL;
+ this = linkout_tree(tb, key, NULL);
+ if (this) {
+ Eterm copy, *hp, *hend;
+
+ hp = HAlloc(p, this->dbterm.size + 2);
+ hend = hp + this->dbterm.size + 2;
+ copy = db_copy_object_from_ets(&tb->common,
+ &this->dbterm, &hp, &MSO(p));
+ *ret = CONS(hp, copy, NIL);
+ hp += 2;
+ HRelease(p, hend, hp);
+ free_term(tb, this);
+ }
+ return DB_ERROR_NONE;
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index b9fd3b208e..7eb80e3bb1 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -3254,34 +3254,38 @@ int db_is_variable(Eterm obj)
/* return 1 if obj contains a variable or underscore */
/* return 0 if obj is fully ground */
-int db_has_variable(Eterm obj)
-{
- switch(obj & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_LIST: {
- while (is_list(obj)) {
- if (db_has_variable(CAR(list_val(obj))))
+int db_has_variable(Eterm node) {
+ DECLARE_ESTACK(s);
+
+ ESTACK_PUSH(s,node);
+ while (!ESTACK_ISEMPTY(s)) {
+ node = ESTACK_POP(s);
+ switch(node & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ while (is_list(node)) {
+ ESTACK_PUSH(s,CAR(list_val(node)));
+ node = CDR(list_val(node));
+ }
+ ESTACK_PUSH(s,node); /* Non wellformed list or [] */
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(node)) {
+ Eterm *tuple = tuple_val(node);
+ int arity = arityval(*tuple);
+ while(arity--) {
+ ESTACK_PUSH(s,*(++tuple));
+ }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ if (node == am_Underscore || db_is_variable(node) >= 0) {
+ DESTROY_ESTACK(s);
return 1;
- obj = CDR(list_val(obj));
- }
- return(db_has_variable(obj)); /* Non wellformed list or [] */
- }
- case TAG_PRIMARY_BOXED:
- if (!is_tuple(obj)) {
- return 0;
- } else {
- Eterm *tuple = tuple_val(obj);
- int arity = arityval(*tuple++);
- while(arity--) {
- if (db_has_variable(*tuple))
- return 1;
- tuple++;
}
- return(0);
+ break;
}
- case TAG_PRIMARY_IMMED1:
- if (obj == am_Underscore || db_is_variable(obj) >= 0)
- return 1;
}
+ DESTROY_ESTACK(s);
return 0;
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 328b19dfc9..5ace93c8ed 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -165,6 +165,7 @@ typedef struct db_table_method
DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
+ int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
int (*db_delete_all_objects)(Process* p,
DbTable* db /* [in out] */ );
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index f9938fc66c..e498ac70ec 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -133,7 +133,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
-#define ERL_DRV_EXTENDED_MINOR_VERSION 1
+#define ERL_DRV_EXTENDED_MINOR_VERSION 2
/*
* The emulator will refuse to load a driver with a major version
@@ -361,6 +361,9 @@ typedef struct erl_drv_entry {
/* Called on behalf of driver_select when
it is safe to release 'event'. A typical
unix driver would call close(event) */
+ void (*emergency_close)(ErlDrvData drv_data);
+ /* Called when the port is closed abruptly,
+ specifically when erl_crash_dump is called. */
/* When adding entries here, dont forget to pad in obsolete/driver.h */
} ErlDrvEntry;
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 147249f751..31b05d22af 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -601,17 +601,14 @@ erl_drv_thread_create(char *name,
#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid;
- ethr_thr_opts ethr_opts;
+ ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
ethr_thr_opts *use_opts;
- ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
if (!opts)
use_opts = NULL;
else {
- sys_memcpy((void *) &ethr_opts,
- (void *) &def_ethr_opts,
- sizeof(ethr_thr_opts));
ethr_opts.suggested_stack_size = opts->suggested_stack_size;
+ ethr_opts.name = name;
use_opts = &ethr_opts;
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 5f78a7b532..d1a7ee113b 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -175,15 +175,15 @@ erts_init_gc(void)
int i = 0, ix;
Sint max_heap_size = 0;
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
- ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
- ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
erts_test_long_gc_sleep = 0;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 77445ef1ff..fe065e196d 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -191,6 +191,8 @@ int erts_disable_tolerant_timeofday; /* Time correction can be disabled it is
int erts_atom_table_size = ATOM_LIMIT; /* Maximum number of atoms */
+int erts_pd_initial_size = 10;
+
int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
@@ -516,6 +518,8 @@ void erts_usage(void)
H_DEFAULT_SIZE);
erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
VH_DEFAULT_SIZE);
+ erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
+ erts_pd_initial_size);
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
@@ -1405,6 +1409,7 @@ erl_start(int argc, char **argv)
*
* h|ms - min_heap_size
* h|mbs - min_bin_vheap_size
+ * h|pds - erts_pd_initial_size
*
*/
if (has_prefix("mbs", sub_param)) {
@@ -1422,6 +1427,14 @@ erl_start(int argc, char **argv)
erts_usage();
}
VERBOSE(DEBUG_SYSTEM, ("using minimum heap size %d\n", H_MIN_SIZE));
+ } else if (has_prefix("pds", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if ((erts_pd_initial_size = atoi(arg)) <= 0) {
+ erts_fprintf(stderr, "bad initial process dictionary size %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using initial process dictionary size %d\n",
+ erts_pd_initial_size));
} else {
/* backward compatibility */
arg = get_arg(argv[i]+2, argv[i+1], &i);
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index df7c443387..da85b86c87 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1226,7 +1226,7 @@ erts_instr_init(int stat, int map_stat)
mem_anchor = NULL;
/* Install instrumentation functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index 16d4fdc09c..9b864628db 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -207,6 +207,24 @@ BIF_RETTYPE math_log_1(BIF_ALIST_1)
return math_call_1(BIF_P, log, BIF_ARG_1);
}
+#ifdef HAVE_LOG2
+static double
+log2_wrapper(double x)
+{
+ return log2(x);
+}
+#else
+static double
+log2_wrapper(double x)
+{
+ return log(x) / 0.6931471805599453; /* log(2.0); */
+}
+#endif
+
+BIF_RETTYPE math_log2_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, log2_wrapper, BIF_ARG_1);
+}
BIF_RETTYPE math_log10_1(BIF_ALIST_1)
{
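
Note: the new math:log2/1 BIF calls the C library log2() when configure detects it (HAVE_LOG2) and otherwise falls back to ln(x) divided by 0.6931471805599453, which is ln(2). A small illustrative Erlang check of the identity log2(x) = ln(x)/ln(2); the fallback may differ from a native log2 in the last bits, so the comparison is approximate:

    X = 1024.0,
    A = math:log2(X),                 %% BIF added by this commit
    B = math:log(X) / math:log(2),    %% the identity the fallback relies on
    true = abs(A - B) < 1.0e-12.      %% both are (approximately) 10.0
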
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index c8bb126687..fa1bde1c87 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -627,7 +627,7 @@ erts_mtrace_install_wrapper_functions(void)
if (erts_mtrace_enabled) {
int i;
/* Install trace functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *) real_allctrs,
(void *) erts_allctrs,
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 74e38c13df..c982dc2080 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -594,7 +594,7 @@ erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision,
ErlPfEterm* term_base)
{
int res;
- ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
+ ERTS_CT_ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
res = print_term(fn, arg, (Eterm)term, &precision, (Eterm*)term_base);
if (res < 0)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 7b272885a7..ba09ee57c2 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -457,8 +457,7 @@ do { \
static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
- int yreg);
+static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
@@ -716,72 +715,24 @@ sched_wall_time_ts(void)
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
-#ifdef ARCH_64
-
static ERTS_INLINE Uint64
aschedtime_read(ErtsAtomicSchedTime *var)
{
- return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
+ return (Uint64) erts_atomic64_read_nob((erts_atomic64_t *) var);
}
static ERTS_INLINE void
aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
{
- erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
+ erts_atomic64_set_nob((erts_atomic64_t *) var, (erts_aint64_t) val);
}
static ERTS_INLINE void
aschedtime_init(ErtsAtomicSchedTime *var)
{
- erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
-}
-
-#elif defined(ARCH_32)
-
-static ERTS_INLINE Uint64
-aschedtime_read(ErtsAtomicSchedTime *var)
-{
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw.dw_sint;
-#else
- {
- Uint64 res;
- res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
- }
-#endif
+ erts_atomic64_init_nob((erts_atomic64_t *) var, (erts_aint64_t) 0);
}
-static ERTS_INLINE void
-aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
-{
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
- erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-static ERTS_INLINE void
-aschedtime_init(ErtsAtomicSchedTime *var)
-{
- erts_dw_aint_t dw;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
- erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-#else
-# error :-/
-#endif
-
#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
@@ -2366,7 +2317,6 @@ erts_active_schedulers(void)
ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
- ASSERT(as >= 0);
return as;
}
@@ -3244,11 +3194,11 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
return 0;
wrq = ERTS_RUNQ_IX(ix);
flags = ERTS_RUNQ_FLGS_GET(wrq);
+ if (activate && !(flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+ if (try_inc_no_active_runqs(ix+1))
+ (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
+ }
if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
- if (activate) {
- if (try_inc_no_active_runqs(ix+1))
- (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
- }
wake_scheduler(wrq);
return 1;
}
@@ -7883,23 +7833,17 @@ erts_start_schedulers(void)
Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
+ char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
opts.detached = 1;
-#ifdef ETHR_HAVE_THREAD_NAMES
- opts.name = malloc(80);
- if (!opts.name) {
- ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
- }
-#endif
+ opts.name = name;
#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "runq_supervisor");
-#endif
+ erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -7926,9 +7870,7 @@ erts_start_schedulers(void)
ASSERT(actual == esdp->no - 1);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "scheduler_%d", actual + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
#ifdef __OSE__
/* This should be done in the bind strategy */
@@ -7950,18 +7892,14 @@ erts_start_schedulers(void)
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -7972,9 +7910,7 @@ erts_start_schedulers(void)
ERTS_THR_MEMORY_BARRIER;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "aux");
-#endif
+ erts_snprintf(opts.name, 16, "aux");
#ifdef __OSE__
opts.coreNo = 0;
@@ -8000,9 +7936,6 @@ erts_start_schedulers(void)
erts_send_error_to_logger_nogl(dsbufp);
}
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(opts.name);
-#endif
}
#endif /* ERTS_SMP */
@@ -10484,7 +10417,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
init_arg.run_queue = rq;
init_arg.state = state;
- ASSERT(((char *) p) == ((char *) &p->common));
+ ERTS_CT_ASSERT(offsetof(Process,common) == 0);
if (!erts_ptab_new_element(&erts_proc,
&p->common,
@@ -12203,7 +12136,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
}
erts_program_counter_info(to, to_arg, p);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
}
@@ -12260,7 +12193,7 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -12289,6 +12222,214 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
/*
+ * Print scheduler information
+ */
+void
+erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
+ int i;
+ erts_aint32_t flg;
+ Process *p;
+
+ erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+
+#ifdef ERTS_SMP
+ flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
+ erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
+ for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case ERTS_SSI_FLG_SLEEPING:
+ erts_print(to, to_arg, "SLEEPING"); break;
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_print(to, to_arg, "POLL_SLEEPING"); break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_print(to, to_arg, "TSE_SLEEPING"); break;
+ case ERTS_SSI_FLG_WAITING:
+ erts_print(to, to_arg, "WAITING"); break;
+ case ERTS_SSI_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+#endif
+
+ flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
+ erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
+ for (i = 0; i < ERTS_SSI_AUX_WORK_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP:
+ erts_print(to, to_arg, "DELAYED_AW_WAKEUP"); break;
+ case ERTS_SSI_AUX_WORK_DD:
+ erts_print(to, to_arg, "DELAYED_DEALLOC"); break;
+ case ERTS_SSI_AUX_WORK_DD_THR_PRGR:
+ erts_print(to, to_arg, "DELAYED_DEALLOC_THR_PRGR"); break;
+ case ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC:
+ erts_print(to, to_arg, "FIX_ALLOC_DEALLOC"); break;
+ case ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM:
+ erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break;
+ case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP:
+ erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break;
+ case ERTS_SSI_AUX_WORK_ASYNC_READY:
+ erts_print(to, to_arg, "ASYNC_READY"); break;
+ case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN:
+ erts_print(to, to_arg, "ASYNC_READY_CLEAN"); break;
+ case ERTS_SSI_AUX_WORK_MISC_THR_PRGR:
+ erts_print(to, to_arg, "MISC_THR_PRGR"); break;
+ case ERTS_SSI_AUX_WORK_MISC:
+ erts_print(to, to_arg, "MISC"); break;
+ case ERTS_SSI_AUX_WORK_CHECK_CHILDREN:
+ erts_print(to, to_arg, "CHECK_CHILDREN"); break;
+ case ERTS_SSI_AUX_WORK_SET_TMO:
+ erts_print(to, to_arg, "SET_TMO"); break;
+ case ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK:
+ erts_print(to, to_arg, "MSEG_CACHE_CHECK"); break;
+ case ERTS_SSI_AUX_WORK_REAP_PORTS:
+ erts_print(to, to_arg, "REAP_PORTS"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+
+ erts_print(to, to_arg, "Current Port: ");
+ if (esdp->current_port)
+ erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+ erts_print(to, to_arg, "\n");
+
+ p = esdp->current_process;
+ erts_print(to, to_arg, "Current Process: ");
+ if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
+ flg = erts_smp_atomic32_read_dirty(&p->state);
+ erts_print(to, to_arg, "%T\n", p->common.id);
+
+ erts_print(to, to_arg, "Current Process State: ");
+ erts_dump_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Program counter: %p (", p->i);
+ print_function_from_pc(to, to_arg, p->i);
+ erts_print(to, to_arg, ")\n");
+ erts_print(to, to_arg, "Current Process CP: %p (", p->cp);
+ print_function_from_pc(to, to_arg, p->cp);
+ erts_print(to, to_arg, ")\n");
+
+    /* Getting this stacktrace can segfault if we are very unlucky
+       and it is called while a process is being garbage collected.
+ Therefore we only call this on other schedulers if we either
+ have protection against segfaults, or we know that the process
+ is not garbage collecting. It *should* always be safe to call
+ on a process owned by us, even if it is currently being garbage
+ collected.
+ */
+ erts_print(to, to_arg, "Current Process Limited Stack Trace:\n");
+ erts_limited_stack_trace(to, to_arg, p);
+ } else
+ erts_print(to, to_arg, "\n");
+
+ for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
+ erts_print(to, to_arg, "Run Queue ");
+ switch (i) {
+ case PRIORITY_MAX:
+ erts_print(to, to_arg, "Max ");
+ break;
+ case PRIORITY_HIGH:
+ erts_print(to, to_arg, "High ");
+ break;
+ case PRIORITY_NORMAL:
+ erts_print(to, to_arg, "Normal ");
+ break;
+ case PRIORITY_LOW:
+ erts_print(to, to_arg, "Low ");
+ break;
+ default:
+ erts_print(to, to_arg, "Unknown ");
+ break;
+ }
+ erts_print(to, to_arg, "Length: %d\n",
+ erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+ }
+ erts_print(to, to_arg, "Run Queue Port Length: %d\n",
+ erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+
+ flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags);
+ erts_print(to, to_arg, "Run Queue Flags: ");
+ for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case (1 << PRIORITY_MAX):
+ erts_print(to, to_arg, "NONEMPTY_MAX"); break;
+ case (1 << PRIORITY_HIGH):
+ erts_print(to, to_arg, "NONEMPTY_HIGH"); break;
+ case (1 << PRIORITY_NORMAL):
+ erts_print(to, to_arg, "NONEMPTY_NORMAL"); break;
+ case (1 << PRIORITY_LOW):
+ erts_print(to, to_arg, "NONEMPTY_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_LOW"); break;
+ case ERTS_RUNQ_FLG_OUT_OF_WORK:
+ erts_print(to, to_arg, "OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK:
+ erts_print(to, to_arg, "HALFTIME_OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_RUNQ_FLG_CHK_CPU_BIND:
+ erts_print(to, to_arg, "CHK_CPU_BIND"); break;
+ case ERTS_RUNQ_FLG_INACTIVE:
+ erts_print(to, to_arg, "INACTIVE"); break;
+ case ERTS_RUNQ_FLG_NONEMPTY:
+ erts_print(to, to_arg, "NONEMPTY"); break;
+ case ERTS_RUNQ_FLG_PROTECTED:
+ erts_print(to, to_arg, "PROTECTED"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+}
+
+/*
 * A nice system halt closing all open ports goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
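erts_print_scheduler_info above decodes several bit fields (sleep-info flags, aux work, run-queue flags) with the same loop shape: walk the bit positions while any bits remain set, print a symbolic name for each set bit, and emit " | " between names. A self-contained sketch of that pattern, using made-up flag names rather than the real ERTS_SSI_FLG_* values:

#include <stdio.h>

/* Made-up flag bits standing in for the ERTS_SSI_FLG_* values. */
#define FLG_SLEEPING  (1 << 0)
#define FLG_WAITING   (1 << 1)
#define FLG_SUSPENDED (1 << 2)
#define FLG_MAX       3

static void print_flags(unsigned int flg)
{
    int i;
    for (i = 0; i < FLG_MAX && flg; i++) {
        unsigned int chk = (1u << i);
        if (flg & chk) {
            switch (chk) {
            case FLG_SLEEPING:  printf("SLEEPING");  break;
            case FLG_WAITING:   printf("WAITING");   break;
            case FLG_SUSPENDED: printf("SUSPENDED"); break;
            default:            printf("UNKNOWN(%u)", chk); break;
            }
            if (flg > chk)      /* more bits are set above this one */
                printf(" | ");
            flg -= chk;         /* clear the bit just printed */
        }
    }
    printf("\n");
}

int main(void)
{
    print_flags(FLG_SLEEPING | FLG_SUSPENDED); /* prints "SLEEPING | SUSPENDED" */
    return 0;
}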
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 3d08be25ff..d12ac792af 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -170,6 +170,8 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLG_PROTECTED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 7)
+
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
| ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
@@ -252,6 +254,8 @@ typedef enum {
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLGS_MAX 5
+
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -283,6 +287,8 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
+#define ERTS_SSI_AUX_WORK_MAX 14
+
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -352,13 +358,7 @@ typedef struct {
#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
-#ifdef ARCH_64
-typedef erts_atomic_t ErtsAtomicSchedTime;
-#elif defined(ARCH_32)
-typedef erts_dw_atomic_t ErtsAtomicSchedTime;
-#else
-# error :-/
-#endif
+typedef erts_atomic64_t ErtsAtomicSchedTime;
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
typedef struct {
@@ -1081,6 +1081,9 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19)
#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20)
#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21)
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 22)
+#else
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 18)
#endif
#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
@@ -1616,7 +1619,11 @@ void erts_cleanup_empty_process(Process* p);
void erts_debug_verify_clean_empty_process(Process* p);
#endif
void erts_stack_dump(int to, void *to_arg, Process *);
+void erts_limited_stack_trace(int to, void *to_arg, Process *);
void erts_program_counter_info(int to, void *to_arg, Process *);
+void erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg);
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg);
Eterm erts_get_process_priority(Process *p);
Eterm erts_set_process_priority(Process *p, Eterm prio);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 3ce707efda..00761f2d0e 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -47,7 +47,7 @@
/* Hash constant macros */
#define MAX_HASH 1342177280UL
-#define INITIAL_SIZE 10
+#define INITIAL_SIZE (erts_pd_initial_size)
/* Hash utility macros */
#define HASH_RANGE(PDict) ((PDict)->homeSize + (PDict)->splitPosition)
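Redefining INITIAL_SIZE from a literal to the global erts_pd_initial_size leaves every user of the macro unchanged while making the value configurable at start-up via the new +hpds flag handled in erl_init.c above. A minimal standalone sketch of that technique (illustrative only, not the real process-dictionary code):

#include <stdio.h>

/* Users of INITIAL_SIZE keep compiling unchanged, but the value now
 * comes from a runtime-configurable global (set from "+hpds" in the
 * real emulator). */
int erts_pd_initial_size = 10;        /* default, overridable at start-up */
#define INITIAL_SIZE (erts_pd_initial_size)

int main(void)
{
    printf("initial size: %d\n", INITIAL_SIZE);  /* 10 */
    erts_pd_initial_size = 32;                   /* e.g. from "+hpds 32" */
    printf("initial size: %d\n", INITIAL_SIZE);  /* 32 */
    return 0;
}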
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 2f3cf23b00..36bb6b2f0e 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -43,8 +43,9 @@ static void dump_process_info(int to, void *to_arg, Process *p);
static void dump_element(int to, void *to_arg, Eterm x);
static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
static void dump_element_nl(int to, void *to_arg, Eterm x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
+static int stack_element_dump(int to, void *to_arg, Eterm* sp,
int yreg);
+static void stack_trace_dump(int to, void *to_arg, Eterm* sp);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static void heap_dump(int to, void *to_arg, Eterm x);
static void dump_binaries(int to, void *to_arg, Binary* root);
@@ -148,7 +149,7 @@ dump_process_info(int to, void *to_arg, Process *p)
if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
@@ -243,9 +244,65 @@ dump_element_nl(int to, void *to_arg, Eterm x)
erts_putc(to, to_arg, '\n');
}
+static void
+stack_trace_dump(int to, void *to_arg, Eterm *sp) {
+ Eterm x = *sp;
+ if (is_CP(x)) {
+ erts_print(to, to_arg, "%p:", sp);
+ erts_print(to, to_arg, "SReturn addr 0x%X (", cp_val(x));
+ print_function_from_pc(to, to_arg, cp_val(x));
+ erts_print(to, to_arg, ")\n");
+ }
+}
+
+void
+erts_limited_stack_trace(int to, void *to_arg, Process *p)
+{
+ Eterm* sp;
+
+
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
+ return;
+ }
+
+ if (STACK_START(p) < STACK_TOP(p)) {
+ return;
+ }
+
+ if ((STACK_START(p) - STACK_TOP(p)) < 512) {
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)STACK_START(p)))
+ for (sp = STACK_TOP(p); sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_START(p));
+ } else {
+ sp = STACK_TOP(p);
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)(STACK_TOP(p) + 25)))
+ for (; sp < (STACK_TOP(p) + 256); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_TOP(p) + 256);
+
+ erts_print(to, to_arg, "%p: skipping %d frames\n",
+ sp, STACK_START(p) - STACK_TOP(p) - 512);
+
+ if (erts_sys_is_area_readable((char*)(STACK_START(p) - 256),
+ (char*)STACK_START(p)))
+ for (sp = STACK_START(p) - 256; sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_START(p) - 256, STACK_START(p));
+ }
+
+}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -508,3 +565,114 @@ dump_externally(int to, void *to_arg, Eterm term)
erts_print(to, to_arg, "%02X", *s++);
}
}
+
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) {
+ if (psflg & ERTS_PSFLG_FREE)
+ erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
+ else if (psflg & ERTS_PSFLG_EXITING)
+ erts_print(to, to_arg, "Exiting\n");
+ else if (psflg & ERTS_PSFLG_GC) {
+ erts_print(to, to_arg, "Garbing\n");
+ }
+ else if (psflg & ERTS_PSFLG_SUSPENDED)
+ erts_print(to, to_arg, "Suspended\n");
+ else if (psflg & ERTS_PSFLG_RUNNING) {
+ erts_print(to, to_arg, "Running\n");
+ }
+ else if (psflg & ERTS_PSFLG_ACTIVE)
+ erts_print(to, to_arg, "Scheduled\n");
+ else
+ erts_print(to, to_arg, "Waiting\n");
+}
+
+void
+erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
+
+ int i;
+
+ switch (ERTS_PSFLGS_GET_ACT_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "ACT_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "ACT_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "ACT_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "ACT_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_USR_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "USR_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "USR_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "USR_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "USR_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_PRQ_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "PRQ_PRIO_MAX"); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "PRQ_PRIO_HIGH"); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "PRQ_PRIO_NORMAL"); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "PRQ_PRIO_LOW"); break;
+ }
+
+ psflg &= ~(ERTS_PSFLGS_ACT_PRIO_MASK |
+ ERTS_PSFLGS_USR_PRIO_MASK |
+ ERTS_PSFLGS_PRQ_PRIO_MASK);
+
+ if (psflg)
+ erts_print(to, to_arg, " | ");
+
+ for (i = 0; i < ERTS_PSFLG_MAX && psflg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (psflg & chk) {
+ switch (chk) {
+ case ERTS_PSFLG_IN_PRQ_MAX:
+ erts_print(to, to_arg, "IN_PRQ_MAX"); break;
+ case ERTS_PSFLG_IN_PRQ_HIGH:
+ erts_print(to, to_arg, "IN_PRQ_HIGH"); break;
+ case ERTS_PSFLG_IN_PRQ_NORMAL:
+ erts_print(to, to_arg, "IN_PRQ_NORMAL"); break;
+ case ERTS_PSFLG_IN_PRQ_LOW:
+ erts_print(to, to_arg, "IN_PRQ_LOW"); break;
+ case ERTS_PSFLG_FREE:
+ erts_print(to, to_arg, "FREE"); break;
+ case ERTS_PSFLG_EXITING:
+ erts_print(to, to_arg, "EXITING"); break;
+ case ERTS_PSFLG_PENDING_EXIT:
+ erts_print(to, to_arg, "PENDING_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE:
+ erts_print(to, to_arg, "ACTIVE"); break;
+ case ERTS_PSFLG_IN_RUNQ:
+ erts_print(to, to_arg, "IN_RUNQ"); break;
+ case ERTS_PSFLG_RUNNING:
+ erts_print(to, to_arg, "RUNNING"); break;
+ case ERTS_PSFLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_PSFLG_GC:
+ erts_print(to, to_arg, "GC"); break;
+ case ERTS_PSFLG_BOUND:
+ erts_print(to, to_arg, "BOUND"); break;
+ case ERTS_PSFLG_TRAP_EXIT:
+ erts_print(to, to_arg, "TRAP_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE_SYS:
+ erts_print(to, to_arg, "ACTIVE_SYS"); break;
+ case ERTS_PSFLG_RUNNING_SYS:
+ erts_print(to, to_arg, "RUNNING_SYS"); break;
+ case ERTS_PSFLG_PROXY:
+ erts_print(to, to_arg, "PROXY"); break;
+ case ERTS_PSFLG_DELAYED_SYS:
+ erts_print(to, to_arg, "DELAYED_SYS"); break;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ case ERTS_PSFLG_DIRTY_CPU_PROC:
+ erts_print(to, to_arg, "DIRTY_CPU_PROC"); break;
+ case ERTS_PSFLG_DIRTY_IO_PROC:
+ erts_print(to, to_arg, "DIRTY_IO_PROC"); break;
+ case ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q:
+ erts_print(to, to_arg, "DIRTY_CPU_PROC_IN_Q"); break;
+ case ERTS_PSFLG_DIRTY_IO_PROC_IN_Q:
+ erts_print(to, to_arg, "DIRTY_IO_PROC_IN_Q"); break;
+#endif
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", chk); break;
+ }
+ if (psflg > chk)
+ erts_print(to, to_arg, " | ");
+ psflg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+}
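erts_limited_stack_trace earlier in this file prints a large stack as two windows, the first 256 and last 256 words, with a note about how many entries were skipped, and only after checking that the memory is readable. A simplified sketch of just the windowing (a plain int array instead of Eterm words, a window of 4 instead of 256, and no readability check):

#include <stdio.h>

#define WINDOW 4

static void dump_entry(const int *p) { printf("  %d\n", *p); }

/* Dump everything if small, otherwise the first and last WINDOW
 * entries plus a note about what was skipped in between. */
static void limited_dump(const int *top, const int *start)
{
    const int *p;
    if ((start - top) < 2 * WINDOW) {
        for (p = top; p < start; p++)
            dump_entry(p);
    } else {
        for (p = top; p < top + WINDOW; p++)
            dump_entry(p);
        printf("  ... skipping %ld entries ...\n",
               (long)(start - top - 2 * WINDOW));
        for (p = start - WINDOW; p < start; p++)
            dump_entry(p);
    }
}

int main(void)
{
    int stack[12];
    int i;
    for (i = 0; i < 12; i++)
        stack[i] = i;
    limited_dump(stack, stack + 12); /* prints 0..3, a skip note, then 8..11 */
    return 0;
}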
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index eabf016081..02943ee683 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -280,124 +280,38 @@ struct ErtsPTabListBifData_ {
};
-#ifdef ARCH_32
-
-static ERTS_INLINE Uint64
-dw_aint_to_uint64(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-static void
-unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
static ERTS_INLINE void
last_data_init_nob(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+ erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE void
last_data_set_relb(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+ erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE Uint64
last_data_read_nob(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_read_acqb(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
{
- erts_dw_aint_t dw_new, dw_xchg;
-
- unint64_to_dw_aint(&dw_new, new);
- unint64_to_dw_aint(&dw_xchg, exp);
-
- if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- &dw_new,
- &dw_xchg))
- return exp;
- else
- return dw_aint_to_uint64(&dw_xchg);
-}
-
-#elif defined(ARCH_64)
-
-union {
- erts_smp_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-static ERTS_INLINE void
-last_data_init_nob(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
+ return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint64_t) new,
+ (erts_aint64_t) exp);
}
-static ERTS_INLINE void
-last_data_set_relb(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE Uint64
-last_data_read_nob(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_read_acqb(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
-{
- return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- (erts_aint_t) new,
- (erts_aint_t) exp);
-}
-
-#else
-# error "Not 64-bit, nor 32-bit architecture..."
-#endif
-
static ERTS_INLINE int
last_data_cmp(Uint64 ld1, Uint64 ld2)
{
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index e3e05f14af..876241159b 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -88,11 +88,7 @@ typedef struct {
} ErtsPTabListData;
typedef struct {
-#ifdef ARCH_32
- erts_smp_dw_atomic_t last_data;
-#else
- erts_smp_atomic_t last_data;
-#endif
+ erts_smp_atomic64_t last_data;
erts_smp_atomic32_t count;
erts_smp_atomic32_t aid_ix;
erts_smp_atomic32_t fid_ix;
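On 32-bit platforms the deleted erl_ptab.c helpers above emulated a 64-bit last_data by hand, splitting the value across the high and low words of a double-word atomic and recombining them on read; the new erts_smp_atomic64_* calls hide that behind a single operation. The pack/unpack arithmetic, minus the atomicity that the real code got from erts_dw_atomic operations, is just this (standalone sketch):

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit value into two 32-bit words, as the removed
 * unint64_to_dw_aint helper did. */
static void pack(uint64_t val, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)(val & 0xffffffffu);
    *hi = (uint32_t)((val >> 32) & 0xffffffffu);
}

/* Recombine the two words, as the removed dw_aint_to_uint64 did. */
static uint64_t unpack(uint32_t lo, uint32_t hi)
{
    uint64_t res = (uint64_t)hi;
    res <<= 32;
    res |= (uint64_t)lo;
    return res;
}

int main(void)
{
    uint32_t lo, hi;
    pack(0x0123456789abcdefULL, &lo, &hi);
    printf("%d\n", unpack(lo, hi) == 0x0123456789abcdefULL); /* prints 1 */
    return 0;
}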
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index c38ef47d87..6c40edeb3e 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -60,6 +60,7 @@ typedef erts_tsd_key_t erts_smp_tsd_key_t;
#define erts_smp_dw_atomic_t erts_dw_atomic_t
#define erts_smp_atomic_t erts_atomic_t
#define erts_smp_atomic32_t erts_atomic32_t
+#define erts_smp_atomic64_t erts_atomic64_t
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
@@ -95,6 +96,7 @@ typedef int erts_smp_tsd_key_t;
#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
#define erts_smp_atomic_t erts_no_atomic_t
#define erts_smp_atomic32_t erts_no_atomic32_t
+#define erts_smp_atomic64_t erts_no_atomic64_t
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
typedef struct { } erts_smp_rwlock_t;
@@ -489,6 +491,116 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty
#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty
+/* 64-bit atomics */
+
+#define erts_smp_atomic64_init_nob erts_atomic64_init_nob
+#define erts_smp_atomic64_set_nob erts_atomic64_set_nob
+#define erts_smp_atomic64_read_nob erts_atomic64_read_nob
+#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob
+#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob
+#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob
+#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob
+#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob
+#define erts_smp_atomic64_add_nob erts_atomic64_add_nob
+#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob
+#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob
+#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob
+#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob
+#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob
+
+#define erts_smp_atomic64_init_mb erts_atomic64_init_mb
+#define erts_smp_atomic64_set_mb erts_atomic64_set_mb
+#define erts_smp_atomic64_read_mb erts_atomic64_read_mb
+#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb
+#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb
+#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb
+#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb
+#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb
+#define erts_smp_atomic64_add_mb erts_atomic64_add_mb
+#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb
+#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb
+#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb
+#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb
+#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb
+
+#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb
+#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb
+#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb
+#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb
+#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb
+#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb
+#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb
+#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb
+#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb
+#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb
+#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb
+#define erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb
+#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb
+#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb
+
+#define erts_smp_atomic64_init_relb erts_atomic64_init_relb
+#define erts_smp_atomic64_set_relb erts_atomic64_set_relb
+#define erts_smp_atomic64_read_relb erts_atomic64_read_relb
+#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb
+#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb
+#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb
+#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb
+#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb
+#define erts_smp_atomic64_add_relb erts_atomic64_add_relb
+#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb
+#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb
+#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb
+#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb
+#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb
+
+#define erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb
+#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb
+#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb
+#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb
+#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb
+#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb
+#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb
+#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb
+#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb
+#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb
+#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb
+#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb
+#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb
+#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb
+
+#define erts_smp_atomic64_init_rb erts_atomic64_init_rb
+#define erts_smp_atomic64_set_rb erts_atomic64_set_rb
+#define erts_smp_atomic64_read_rb erts_atomic64_read_rb
+#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb
+#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb
+#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb
+#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb
+#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb
+#define erts_smp_atomic64_add_rb erts_atomic64_add_rb
+#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb
+#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb
+#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb
+#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb
+#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb
+
+#define erts_smp_atomic64_init_wb erts_atomic64_init_wb
+#define erts_smp_atomic64_set_wb erts_atomic64_set_wb
+#define erts_smp_atomic64_read_wb erts_atomic64_read_wb
+#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb
+#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb
+#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb
+#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb
+#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb
+#define erts_smp_atomic64_add_wb erts_atomic64_add_wb
+#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb
+#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb
+#define erts_smp_atomic64_xchg_wb erts_atomic64_xchg_wb
+#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb
+#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb
+
+#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty
+#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty
+
#else /* !ERTS_SMP */
/* Double word size atomics */
@@ -751,6 +863,116 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_set_dirty erts_no_atomic32_set
#define erts_smp_atomic32_read_dirty erts_no_atomic32_read
+/* 64-bit atomics */
+
+#define erts_smp_atomic64_init_nob erts_no_atomic64_set
+#define erts_smp_atomic64_set_nob erts_no_atomic64_set
+#define erts_smp_atomic64_read_nob erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_nob erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_mb erts_no_atomic64_set
+#define erts_smp_atomic64_set_mb erts_no_atomic64_set
+#define erts_smp_atomic64_read_mb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_mb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_mb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_acqb erts_no_atomic64_set
+#define erts_smp_atomic64_set_acqb erts_no_atomic64_set
+#define erts_smp_atomic64_read_acqb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_acqb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_relb erts_no_atomic64_set
+#define erts_smp_atomic64_set_relb erts_no_atomic64_set
+#define erts_smp_atomic64_read_relb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_relb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set
+#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set
+#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_rb erts_no_atomic64_set
+#define erts_smp_atomic64_set_rb erts_no_atomic64_set
+#define erts_smp_atomic64_read_rb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_rb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_rb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_wb erts_no_atomic64_set
+#define erts_smp_atomic64_set_wb erts_no_atomic64_set
+#define erts_smp_atomic64_read_wb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_wb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_set_dirty erts_no_atomic64_set
+#define erts_smp_atomic64_read_dirty erts_no_atomic64_read
+
#endif /* !ERTS_SMP */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
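In the non-SMP branch above, the erts_smp_atomic64_* names fall back to erts_no_atomic64_* helpers, which can be plain (non-atomic) operations because only one scheduler thread exists. A toy illustration of that fallback idea, with illustrative names rather than the real helpers:

#include <stdio.h>

/* When only one thread can touch the counter, "atomic" operations
 * degenerate to ordinary operations on a plain 64-bit integer. */
typedef long long no_atomic64_t;

static void no_atomic64_set(no_atomic64_t *var, long long i) { *var = i; }
static long long no_atomic64_read(no_atomic64_t *var) { return *var; }
static long long no_atomic64_inc_read(no_atomic64_t *var) { return ++(*var); }

/* The non-SMP mapping: smp names resolve to the plain helpers. */
#define smp_atomic64_init_nob     no_atomic64_set
#define smp_atomic64_read_nob     no_atomic64_read
#define smp_atomic64_inc_read_nob no_atomic64_inc_read

int main(void)
{
    no_atomic64_t counter;
    smp_atomic64_init_nob(&counter, 41);
    printf("%lld\n", smp_atomic64_inc_read_nob(&counter)); /* 42 */
    printf("%lld\n", smp_atomic64_read_nob(&counter));     /* 42 */
    return 0;
}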
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 545a0343d0..c2365c5cf7 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -115,70 +115,24 @@
#undef read_nob
#define read_nob erts_thr_prgr_read_nob__
-#ifdef ARCH_64
-
static ERTS_INLINE void
set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_mb(atmc, val);
+ erts_atomic64_set_mb(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_nob(atmc, val);
+ erts_atomic64_set_nob(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_init_nob(atmc, val);
-}
-
-#else
-
-#undef dw_aint_to_val
-#define dw_aint_to_val erts_thr_prgr_dw_aint_to_val__
-
-static void
-val_to_dw_aint(erts_dw_aint_t *dw_aint, ErtsThrPrgrVal val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw_aint->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw_aint->sint[ERTS_DW_AINT_LOW_WORD]
- = (erts_aint_t) (val & 0xffffffff);
- dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]
- = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
+ erts_atomic64_init_nob(atmc, (erts_aint64_t) val);
}
-static ERTS_INLINE void
-set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_mb(atmc, &dw_aint);
-}
-
-static ERTS_INLINE void
-set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_nob(atmc, &dw_aint);
-}
-
-static ERTS_INLINE void
-init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_init_nob(atmc, &dw_aint);
-}
-
-#endif
-
/* #define ERTS_THR_PROGRESS_STATE_DEBUG */
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
@@ -1381,25 +1335,10 @@ erts_thr_progress_block(void)
thr_progress_block(tmp_thr_prgr_data(NULL), 1);
}
-void
-erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp)
+int
+erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp)
{
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
- erts_aint32_t bc;
- SWord time_left = timeout;
- SysTimeval to;
-
- /*
- * Counting poll intervals may give us a too long timeout
- * if cpu is busy. If we got tolerant time of day we use it
- * to prevent this.
- */
- if (!erts_disable_tolerant_timeofday) {
- erts_get_timeval(&to);
- to.tv_sec += timeout / 1000;
- to.tv_sec += timeout % 1000;
- }
if (!tpd) {
/*
@@ -1412,9 +1351,26 @@ erts_thr_progress_fatal_error_block(SWord timeout,
init_tmp_thr_prgr_data(tpd);
}
- bc = thr_progress_block(tpd, 0);
- if (bc == 0)
- return; /* Succefully blocked all managed threads */
+    /* Returns number of threads that have not yet been blocked */
+ return thr_progress_block(tpd, 0);
+}
+
+void
+erts_thr_progress_fatal_error_wait(SWord timeout) {
+ erts_aint32_t bc;
+ SWord time_left = timeout;
+ SysTimeval to;
+
+ /*
+     * Counting poll intervals may give us too long a timeout
+     * if the cpu is busy. If we have tolerant time of day, we
+     * use it to prevent this.
+ */
+ if (!erts_disable_tolerant_timeofday) {
+ erts_get_timeval(&to);
+ to.tv_sec += timeout / 1000;
+ to.tv_sec += timeout % 1000;
+ }
while (1) {
if (erts_milli_sleep(ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL) == 0)
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 5f392944c2..cf11c4e114 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -83,8 +83,8 @@ typedef struct {
ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
-void erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp);
+int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp);
+void erts_thr_progress_fatal_error_wait(SWord timeout);
#endif /* ERTS_SMP */
@@ -115,11 +115,7 @@ struct ErtsThrPrgrLaterOp_ {
extern erts_tsd_key_t erts_thr_prgr_data_key__;
-#ifdef ARCH_64
-# define ERTS_THR_PRGR_ATOMIC erts_atomic_t
-#else /* ARCH_32 */
-# define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-#endif
+#define ERTS_THR_PRGR_ATOMIC erts_atomic64_t
typedef struct {
void *arg;
@@ -158,10 +154,6 @@ void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
void erts_thr_progress_dbg_print_state(void);
-#ifdef ARCH_32
-#define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint);
-#endif
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -184,68 +176,24 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_64
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_nob(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_acqb(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_mb(atmc);
-}
-
-#else /* ARCH_32 */
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (ErtsThrPrgrVal) dw_aint->dw_sint;
-#else
- ErtsThrPrgrVal res;
- res = (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_nob(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_nob(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_acqb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_acqb(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_mb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_mb(atmc);
}
-#endif
-
ERTS_GLB_INLINE int
erts_thr_progress_is_managed_thread(void)
{
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 80026104db..1fd800d524 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -344,6 +344,16 @@ typedef ethr_ts_event erts_tse_t;
#define erts_aint32_t ethr_sint32_t
#define erts_atomic32_t ethr_atomic32_t
+#if defined(ARCH_32)
+# define erts_atomic64_t ethr_dw_atomic_t
+# define erts_aint64_t ethr_sint64_t
+#elif defined(ARCH_64)
+# define erts_atomic64_t ethr_atomic_t
+# define erts_aint64_t ethr_sint_t
+#else
+# error "Not supported architecture"
+#endif
+
#define ERTS_DW_AINT_HIGH_WORD ETHR_DW_SINT_HIGH_WORD
#define ERTS_DW_AINT_LOW_WORD ETHR_DW_SINT_LOW_WORD
@@ -414,10 +424,12 @@ typedef int erts_tse_t;
typedef struct { SWord sint[2]; } erts_dw_aint_t;
typedef SWord erts_aint_t;
typedef Sint32 erts_aint32_t;
+typedef Sint64 erts_aint64_t;
#define erts_dw_atomic_t erts_dw_aint_t
#define erts_atomic_t erts_aint_t
#define erts_atomic32_t erts_aint32_t
+#define erts_atomic64_t erts_aint64_t
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -446,6 +458,7 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#define erts_no_dw_atomic_t erts_dw_aint_t
#define erts_no_atomic_t erts_aint_t
#define erts_no_atomic32_t erts_aint32_t
+#define erts_no_atomic64_t erts_aint64_t
#define ERTS_AINT_NULL ((erts_aint_t) NULL)
@@ -463,6 +476,7 @@ ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid);
ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
+ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
int enable_lcnt);
@@ -570,6 +584,29 @@ ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
erts_aint32_t mask,
erts_aint32_t set);
+ERTS_GLB_INLINE void erts_no_atomic64_set(erts_no_atomic64_t *var,
+ erts_aint64_t i);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read(erts_no_atomic64_t *var);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_inc_read(erts_no_atomic64_t *incp);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_dec_read(erts_no_atomic64_t *decp);
+ERTS_GLB_INLINE void erts_no_atomic64_inc(erts_no_atomic64_t *incp);
+ERTS_GLB_INLINE void erts_no_atomic64_dec(erts_no_atomic64_t *decp);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_add_read(erts_no_atomic64_t *addp,
+ erts_aint64_t i);
+ERTS_GLB_INLINE void erts_no_atomic64_add(erts_no_atomic64_t *addp,
+ erts_aint64_t i);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bor(erts_no_atomic64_t *var,
+ erts_aint64_t mask);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_band(erts_no_atomic64_t *var,
+ erts_aint64_t mask);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new,
+ erts_aint64_t expected);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
+ erts_aint64_t mask,
+ erts_aint64_t set);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
@@ -620,11 +657,17 @@ ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
ERTS_GLB_INLINE int erts_thr_get_main_status(void);
ERTS_GLB_INLINE void erts_thr_yield(void);
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
sigset_t *oset);
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
+
+#ifdef USE_THREADS
+ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig);
+#endif
+
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
#ifdef USE_THREADS
@@ -1200,6 +1243,441 @@ erts_atomic32_read_dirty(erts_atomic32_t *var)
#endif
+/* 64-bit atomics */
+
+#if defined(ARCH_64)
+
+#define erts_atomic64_init_nob ethr_atomic_init
+#define erts_atomic64_set_nob ethr_atomic_set
+#define erts_atomic64_read_nob ethr_atomic_read
+#define erts_atomic64_inc_read_nob ethr_atomic_inc_read
+#define erts_atomic64_dec_read_nob ethr_atomic_dec_read
+#define erts_atomic64_inc_nob ethr_atomic_inc
+#define erts_atomic64_dec_nob ethr_atomic_dec
+#define erts_atomic64_add_read_nob ethr_atomic_add_read
+#define erts_atomic64_add_nob ethr_atomic_add
+#define erts_atomic64_read_bor_nob ethr_atomic_read_bor
+#define erts_atomic64_read_band_nob ethr_atomic_read_band
+#define erts_atomic64_xchg_nob ethr_atomic_xchg
+#define erts_atomic64_cmpxchg_nob ethr_atomic_cmpxchg
+#define erts_atomic64_read_bset_nob erts_atomic_read_bset_nob
+
+#define erts_atomic64_init_mb ethr_atomic_init_mb
+#define erts_atomic64_set_mb ethr_atomic_set_mb
+#define erts_atomic64_read_mb ethr_atomic_read_mb
+#define erts_atomic64_inc_read_mb ethr_atomic_inc_read_mb
+#define erts_atomic64_dec_read_mb ethr_atomic_dec_read_mb
+#define erts_atomic64_inc_mb ethr_atomic_inc_mb
+#define erts_atomic64_dec_mb ethr_atomic_dec_mb
+#define erts_atomic64_add_read_mb ethr_atomic_add_read_mb
+#define erts_atomic64_add_mb ethr_atomic_add_mb
+#define erts_atomic64_read_bor_mb ethr_atomic_read_bor_mb
+#define erts_atomic64_read_band_mb ethr_atomic_read_band_mb
+#define erts_atomic64_xchg_mb ethr_atomic_xchg_mb
+#define erts_atomic64_cmpxchg_mb ethr_atomic_cmpxchg_mb
+#define erts_atomic64_read_bset_mb erts_atomic_read_bset_mb
+
+#define erts_atomic64_init_acqb ethr_atomic_init_acqb
+#define erts_atomic64_set_acqb ethr_atomic_set_acqb
+#define erts_atomic64_read_acqb ethr_atomic_read_acqb
+#define erts_atomic64_inc_read_acqb ethr_atomic_inc_read_acqb
+#define erts_atomic64_dec_read_acqb ethr_atomic_dec_read_acqb
+#define erts_atomic64_inc_acqb ethr_atomic_inc_acqb
+#define erts_atomic64_dec_acqb ethr_atomic_dec_acqb
+#define erts_atomic64_add_read_acqb ethr_atomic_add_read_acqb
+#define erts_atomic64_add_acqb ethr_atomic_add_acqb
+#define erts_atomic64_read_bor_acqb ethr_atomic_read_bor_acqb
+#define erts_atomic64_read_band_acqb ethr_atomic_read_band_acqb
+#define erts_atomic64_xchg_acqb ethr_atomic_xchg_acqb
+#define erts_atomic64_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+#define erts_atomic64_read_bset_acqb erts_atomic_read_bset_acqb
+
+#define erts_atomic64_init_relb ethr_atomic_init_relb
+#define erts_atomic64_set_relb ethr_atomic_set_relb
+#define erts_atomic64_read_relb ethr_atomic_read_relb
+#define erts_atomic64_inc_read_relb ethr_atomic_inc_read_relb
+#define erts_atomic64_dec_read_relb ethr_atomic_dec_read_relb
+#define erts_atomic64_inc_relb ethr_atomic_inc_relb
+#define erts_atomic64_dec_relb ethr_atomic_dec_relb
+#define erts_atomic64_add_read_relb ethr_atomic_add_read_relb
+#define erts_atomic64_add_relb ethr_atomic_add_relb
+#define erts_atomic64_read_bor_relb ethr_atomic_read_bor_relb
+#define erts_atomic64_read_band_relb ethr_atomic_read_band_relb
+#define erts_atomic64_xchg_relb ethr_atomic_xchg_relb
+#define erts_atomic64_cmpxchg_relb ethr_atomic_cmpxchg_relb
+#define erts_atomic64_read_bset_relb erts_atomic_read_bset_relb
+
+#define erts_atomic64_init_ddrb ethr_atomic_init_ddrb
+#define erts_atomic64_set_ddrb ethr_atomic_set_ddrb
+#define erts_atomic64_read_ddrb ethr_atomic_read_ddrb
+#define erts_atomic64_inc_read_ddrb ethr_atomic_inc_read_ddrb
+#define erts_atomic64_dec_read_ddrb ethr_atomic_dec_read_ddrb
+#define erts_atomic64_inc_ddrb ethr_atomic_inc_ddrb
+#define erts_atomic64_dec_ddrb ethr_atomic_dec_ddrb
+#define erts_atomic64_add_read_ddrb ethr_atomic_add_read_ddrb
+#define erts_atomic64_add_ddrb ethr_atomic_add_ddrb
+#define erts_atomic64_read_bor_ddrb ethr_atomic_read_bor_ddrb
+#define erts_atomic64_read_band_ddrb ethr_atomic_read_band_ddrb
+#define erts_atomic64_xchg_ddrb ethr_atomic_xchg_ddrb
+#define erts_atomic64_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+#define erts_atomic64_read_bset_ddrb erts_atomic_read_bset_ddrb
+
+#define erts_atomic64_init_rb ethr_atomic_init_rb
+#define erts_atomic64_set_rb ethr_atomic_set_rb
+#define erts_atomic64_read_rb ethr_atomic_read_rb
+#define erts_atomic64_inc_read_rb ethr_atomic_inc_read_rb
+#define erts_atomic64_dec_read_rb ethr_atomic_dec_read_rb
+#define erts_atomic64_inc_rb ethr_atomic_inc_rb
+#define erts_atomic64_dec_rb ethr_atomic_dec_rb
+#define erts_atomic64_add_read_rb ethr_atomic_add_read_rb
+#define erts_atomic64_add_rb ethr_atomic_add_rb
+#define erts_atomic64_read_bor_rb ethr_atomic_read_bor_rb
+#define erts_atomic64_read_band_rb ethr_atomic_read_band_rb
+#define erts_atomic64_xchg_rb ethr_atomic_xchg_rb
+#define erts_atomic64_cmpxchg_rb ethr_atomic_cmpxchg_rb
+#define erts_atomic64_read_bset_rb erts_atomic_read_bset_rb
+
+#define erts_atomic64_init_wb ethr_atomic_init_wb
+#define erts_atomic64_set_wb ethr_atomic_set_wb
+#define erts_atomic64_read_wb ethr_atomic_read_wb
+#define erts_atomic64_inc_read_wb ethr_atomic_inc_read_wb
+#define erts_atomic64_dec_read_wb ethr_atomic_dec_read_wb
+#define erts_atomic64_inc_wb ethr_atomic_inc_wb
+#define erts_atomic64_dec_wb ethr_atomic_dec_wb
+#define erts_atomic64_add_read_wb ethr_atomic_add_read_wb
+#define erts_atomic64_add_wb ethr_atomic_add_wb
+#define erts_atomic64_read_bor_wb ethr_atomic_read_bor_wb
+#define erts_atomic64_read_band_wb ethr_atomic_read_band_wb
+#define erts_atomic64_xchg_wb ethr_atomic_xchg_wb
+#define erts_atomic64_cmpxchg_wb ethr_atomic_cmpxchg_wb
+#define erts_atomic64_read_bset_wb erts_atomic_read_bset_wb
+
+#define erts_atomic64_set_dirty erts_atomic_set_dirty
+#define erts_atomic64_read_dirty erts_atomic_read_dirty
+
+#elif defined(ARCH_32)
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+#define ERTS_ATOMIC64_OPS_DECL__(BARRIER) \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set)
+
+ERTS_ATOMIC64_OPS_DECL__(nob);
+ERTS_ATOMIC64_OPS_DECL__(mb);
+ERTS_ATOMIC64_OPS_DECL__(acqb);
+ERTS_ATOMIC64_OPS_DECL__(relb);
+ERTS_ATOMIC64_OPS_DECL__(ddrb);
+ERTS_ATOMIC64_OPS_DECL__(rb);
+ERTS_ATOMIC64_OPS_DECL__(wb);
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val);
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * The ethr_dw_atomic_*_nob() wrappers below exist so that
+ * ERTS_ATOMIC64_OPS_IMPL__() can map erts barrier suffixes
+ * directly onto ethread barrier suffixes...
+ */
+static ERTS_INLINE void
+ethr_dw_atomic_init_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_init(var, val);
+}
+
+static ERTS_INLINE void
+ethr_dw_atomic_set_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_set(var, val);
+}
+
+static ERTS_INLINE void
+ethr_dw_atomic_read_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_read(var, val);
+}
+
+static ERTS_INLINE int
+ethr_dw_atomic_cmpxchg_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *new,
+ ethr_dw_sint_t *xchg)
+{
+ return ethr_dw_atomic_cmpxchg(var, new, xchg);
+}
+
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
+
+#ifdef ETHR_SU_DW_NAINT_T__
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((erts_aint64_t) DW.dw_sint)
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ (DW.dw_sint = (ETHR_SU_DW_NAINT_T__) AINT64)
+#else /* !ETHR_SU_DW_NAINT_T__ */
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((((erts_aint64_t) DW.sint[ETHR_DW_SINT_HIGH_WORD]) << 32) \
+ | (((erts_aint64_t) DW.sint[ETHR_DW_SINT_LOW_WORD]) \
+ & ((erts_aint64_t) 0xffffffff)))
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ do { \
+ DW.sint[ETHR_DW_SINT_LOW_WORD] = \
+ (ethr_sint_t) (AINT64 & 0xffffffff); \
+ DW.sint[ETHR_DW_SINT_HIGH_WORD] = \
+ (ethr_sint_t) ((AINT64 >> 32) & 0xffffffff); \
+ } while (0)
+#endif /* !ETHR_SU_DW_NAINT_T__ */
+
+#define ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(CmpXchgOp, \
+ AVarP, XchgVar, NewVar, \
+ ModificationCode) \
+do { \
+ ethr_dw_sint_t dw_xchg__, dw_new__; \
+ ethr_dw_atomic_read(AVarP, &dw_xchg__); \
+ do { \
+ XchgVar = ERTS_DW_SINT_TO_AINT64__(dw_xchg__); \
+ { \
+ ModificationCode; \
+ } \
+ ERTS_AINT64_TO_DW_SINT__(dw_new__, NewVar); \
+ } while (!CmpXchgOp((AVarP), &dw_new__, &dw_xchg__)); \
+} while (0)
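On a 32-bit build the macro above emulates each 64-bit read-modify-write operation with a double-word compare-and-swap retry loop. As a rough sketch (illustrative only, not part of the patch), ERTS_ATOMIC64_OPS_IMPL__(nob) below would expand erts_atomic64_inc_read_nob() into approximately:

    ERTS_GLB_INLINE erts_aint64_t
    erts_atomic64_inc_read_nob(erts_atomic64_t *var)
    {
        erts_aint64_t xchg, new;
        ethr_dw_sint_t dw_xchg__, dw_new__;
        ethr_dw_atomic_read(var, &dw_xchg__);            /* initial snapshot */
        do {
            xchg = ERTS_DW_SINT_TO_AINT64__(dw_xchg__);  /* 2x32 -> 64 bit */
            new = xchg + 1;                              /* the modification */
            ERTS_AINT64_TO_DW_SINT__(dw_new__, new);     /* 64 -> 2x32 bit */
        } while (!ethr_dw_atomic_cmpxchg_nob(var, &dw_new__, &dw_xchg__));
        return new;   /* a failed cmpxchg refreshes dw_xchg__ with the observed value */
    }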
+
+#define ERTS_ATOMIC64_OPS_IMPL__(BARRIER) \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_init_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_set_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ ethr_dw_sint_t dw; \
+ ethr_dw_atomic_read_ ## BARRIER(var, &dw); \
+ return ERTS_DW_SINT_TO_AINT64__(dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg | val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg & val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp) \
+{ \
+ ethr_dw_sint_t dw_xchg, dw_new; \
+ ERTS_AINT64_TO_DW_SINT__(dw_xchg, exp); \
+ ERTS_AINT64_TO_DW_SINT__(dw_new, new); \
+ if (ethr_dw_atomic_cmpxchg_ ## BARRIER(var, &dw_new, &dw_xchg)) \
+ return exp; \
+ return ERTS_DW_SINT_TO_AINT64__(dw_xchg); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ { \
+ new = xchg & ~mask; \
+ new |= mask & set; \
+ }); \
+ return xchg; \
+}
+
+ERTS_ATOMIC64_OPS_IMPL__(nob)
+ERTS_ATOMIC64_OPS_IMPL__(mb)
+ERTS_ATOMIC64_OPS_IMPL__(acqb)
+ERTS_ATOMIC64_OPS_IMPL__(relb)
+ERTS_ATOMIC64_OPS_IMPL__(ddrb)
+ERTS_ATOMIC64_OPS_IMPL__(rb)
+ERTS_ATOMIC64_OPS_IMPL__(wb)
+
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val)
+{
+ ethr_sint_t *sint = ethr_dw_atomic_addr(var);
+ ethr_dw_sint_t dw;
+ ERTS_AINT64_TO_DW_SINT__(dw, val);
+ sint[0] = dw.sint[0];
+ sint[1] = dw.sint[1];
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var)
+{
+ ethr_sint_t *sint;
+ ethr_dw_sint_t dw;
+ sint = ethr_dw_atomic_addr(var);
+ dw.sint[0] = sint[0];
+ dw.sint[1] = sint[1];
+ return ERTS_DW_SINT_TO_AINT64__(dw);
+}
+
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ARCH_32 */
+
#else /* !USE_THREADS */
/* Double word size atomics */
@@ -1462,6 +1940,116 @@ erts_atomic32_read_dirty(erts_atomic32_t *var)
#define erts_atomic32_set_dirty erts_no_atomic32_set
#define erts_atomic32_read_dirty erts_no_atomic32_read
+/* 64-bit atomics */
+
+#define erts_atomic64_init_nob erts_no_atomic64_set
+#define erts_atomic64_set_nob erts_no_atomic64_set
+#define erts_atomic64_read_nob erts_no_atomic64_read
+#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read
+#define erts_atomic64_inc_nob erts_no_atomic64_inc
+#define erts_atomic64_dec_nob erts_no_atomic64_dec
+#define erts_atomic64_add_read_nob erts_no_atomic64_add_read
+#define erts_atomic64_add_nob erts_no_atomic64_add
+#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_nob erts_no_atomic64_read_band
+#define erts_atomic64_xchg_nob erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_mb erts_no_atomic64_set
+#define erts_atomic64_set_mb erts_no_atomic64_set
+#define erts_atomic64_read_mb erts_no_atomic64_read
+#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_mb erts_no_atomic64_inc
+#define erts_atomic64_dec_mb erts_no_atomic64_dec
+#define erts_atomic64_add_read_mb erts_no_atomic64_add_read
+#define erts_atomic64_add_mb erts_no_atomic64_add
+#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_mb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_mb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_acqb erts_no_atomic64_set
+#define erts_atomic64_set_acqb erts_no_atomic64_set
+#define erts_atomic64_read_acqb erts_no_atomic64_read
+#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_acqb erts_no_atomic64_inc
+#define erts_atomic64_dec_acqb erts_no_atomic64_dec
+#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read
+#define erts_atomic64_add_acqb erts_no_atomic64_add
+#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_relb erts_no_atomic64_set
+#define erts_atomic64_set_relb erts_no_atomic64_set
+#define erts_atomic64_read_relb erts_no_atomic64_read
+#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_relb erts_no_atomic64_inc
+#define erts_atomic64_dec_relb erts_no_atomic64_dec
+#define erts_atomic64_add_read_relb erts_no_atomic64_add_read
+#define erts_atomic64_add_relb erts_no_atomic64_add
+#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_relb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_relb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_ddrb erts_no_atomic64_set
+#define erts_atomic64_set_ddrb erts_no_atomic64_set
+#define erts_atomic64_read_ddrb erts_no_atomic64_read
+#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_ddrb erts_no_atomic64_inc
+#define erts_atomic64_dec_ddrb erts_no_atomic64_dec
+#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read
+#define erts_atomic64_add_ddrb erts_no_atomic64_add
+#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_rb erts_no_atomic64_set
+#define erts_atomic64_set_rb erts_no_atomic64_set
+#define erts_atomic64_read_rb erts_no_atomic64_read
+#define erts_atomic64_inc_read_rb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_rb erts_no_atomic64_inc
+#define erts_atomic64_dec_rb erts_no_atomic64_dec
+#define erts_atomic64_add_read_rb erts_no_atomic64_add_read
+#define erts_atomic64_add_rb erts_no_atomic64_add
+#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_rb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_rb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_wb erts_no_atomic64_set
+#define erts_atomic64_set_wb erts_no_atomic64_set
+#define erts_atomic64_read_wb erts_no_atomic64_read
+#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_wb erts_no_atomic64_inc
+#define erts_atomic64_dec_wb erts_no_atomic64_dec
+#define erts_atomic64_add_read_wb erts_no_atomic64_add_read
+#define erts_atomic64_add_wb erts_no_atomic64_add
+#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_wb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_wb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset
+
+#define erts_atomic64_set_dirty erts_no_atomic64_set
+#define erts_atomic64_read_dirty erts_no_atomic64_read
+
#endif /* !USE_THREADS */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1548,6 +2136,16 @@ erts_thr_self(void)
#endif
}
+ERTS_GLB_INLINE int
+erts_thr_getname(erts_tid_t tid, char *buf, size_t len)
+{
+#ifdef USE_THREADS
+ return ethr_getname(tid, buf, len);
+#else
+ return -1;
+#endif
+}
+
ERTS_GLB_INLINE int
erts_equal_tids(erts_tid_t x, erts_tid_t y)
@@ -2383,6 +2981,104 @@ erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
return old;
}
+/* atomic64 */
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_set(erts_no_atomic64_t *var, erts_aint64_t i)
+{
+ *var = i;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read(erts_no_atomic64_t *var)
+{
+ return *var;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_inc_read(erts_no_atomic64_t *incp)
+{
+ return ++(*incp);
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_dec_read(erts_no_atomic64_t *decp)
+{
+ return --(*decp);
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_inc(erts_no_atomic64_t *incp)
+{
+ ++(*incp);
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_dec(erts_no_atomic64_t *decp)
+{
+ --(*decp);
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_add_read(erts_no_atomic64_t *addp, erts_aint64_t i)
+{
+ return *addp += i;
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_add(erts_no_atomic64_t *addp, erts_aint64_t i)
+{
+ *addp += i;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_bor(erts_no_atomic64_t *var, erts_aint64_t mask)
+{
+ erts_aint64_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_band(erts_no_atomic64_t *var, erts_aint64_t mask)
+{
+ erts_aint64_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, erts_aint64_t new)
+{
+ erts_aint64_t old = *xchgp;
+ *xchgp = new;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new,
+ erts_aint64_t expected)
+{
+ erts_aint64_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
+ erts_aint64_t mask,
+ erts_aint64_t set)
+{
+ erts_aint64_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* spinlock */
ERTS_GLB_INLINE void
@@ -2838,6 +3534,15 @@ ERTS_GLB_INLINE void erts_thr_yield(void)
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
+erts_thr_kill(erts_tid_t tid, int sig) {
+#ifdef USE_THREADS
+ int res = ethr_kill((ethr_tid)tid, sig);
+ if (res)
+ erts_thr_fatal_error(res, "killing thread");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
#ifdef USE_THREADS
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 4bbdcaa3e3..7ed1a395ad 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -107,7 +107,7 @@ ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
/* time_sup */
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
# ifndef HAVE_ERTS_NOW_CPU
# define HAVE_ERTS_NOW_CPU
# ifdef HAVE_GETHRVTIME
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ea5c850a30..2f9969b0e7 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2225,7 +2225,7 @@ trace_gc(Process *p, Eterm what)
Eterm* limit;
#endif
- ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
UseTmpHeap(LOCAL_HEAP_SIZE,p);
@@ -3492,16 +3492,13 @@ init_sys_msg_dispatcher(void)
thr_opts.coreNo = 0;
#endif
thr_opts.detached = 1;
+ thr_opts.name = "sys_msg_dispatcher";
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = "sys_msg_dispatcher";
-#endif
-
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 0807649ea1..c32f8fd61c 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -32,11 +32,7 @@ typedef struct {
#endif
union {
Uint64 not_atomic;
-#ifdef ARCH_64
- erts_atomic_t atomic;
-#else
- erts_dw_atomic_t atomic;
-#endif
+ erts_atomic64_t atomic;
} counter;
} erts_interval_t;
@@ -50,9 +46,6 @@ Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-#ifdef ARCH_32
-ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
-#endif
ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
@@ -62,46 +55,16 @@ ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_32
-
-ERTS_GLB_INLINE Uint64
-erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-#endif
-
ERTS_GLB_INLINE Uint64
erts_current_interval_nob__(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
+ return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
erts_current_interval_acqb__(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
+ return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 78d98229d8..6e9216bef3 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -172,6 +172,7 @@ extern int H_MIN_SIZE; /* minimum (heap + stack) */
extern int BIN_VH_MIN_SIZE; /* minimum virtual (bin) heap */
extern int erts_atom_table_size;/* Atom table size */
+extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#define ORIG_CREATION 0
#define INTERNAL_CREATION 255
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index f120e96e3b..50fcfa04d6 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -157,7 +157,6 @@ void erts_init_atom_cache_map(ErtsAtomCacheMap *);
void erts_reset_atom_cache_map(ErtsAtomCacheMap *);
void erts_destroy_atom_cache_map(ErtsAtomCacheMap *);
void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32);
-Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index ec8c1e3ccb..5330f389e0 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -161,6 +161,7 @@ struct erts_driver_t_ {
void (*ready_async)(ErlDrvData drv_data, ErlDrvThreadData thread_data); /* Might be NULL */
void (*process_exit)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
void (*stop_select)(ErlDrvEvent event, void*); /* Might be NULL */
+ void (*emergency_close)(ErlDrvData drv_data); /* Might be NULL */
};
extern erts_driver_t *driver_list;
@@ -883,6 +884,7 @@ Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
+void erts_emergency_close_ports(void);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_enable_io_lock_count(int enable);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 4d262ff022..dc4c6fc350 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -391,7 +391,7 @@ static Port *create_port(char *name,
/* Set default tracing */
erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
- ASSERT(((char *) prt) == ((char *) &prt->common));
+ ERTS_CT_ASSERT(offsetof(Port,common) == 0);
#if !ERTS_PORT_INIT_INSTR_NEED_ID
/*
@@ -6698,7 +6698,7 @@ static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
RefThing *refp;
ASSERT(is_internal_ref(ref));
- ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
+ ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
refp = ref_thing_ptr(ref);
memset(mon,0,sizeof(ErlDrvMonitor));
memcpy(mon,refp,sizeof(RefThing));
@@ -7342,6 +7342,8 @@ no_stop_select_callback(ErlDrvEvent event, void* private)
erts_send_error_to_logger_nogl(dsbufp);
}
+#define IS_DRIVER_VERSION_GE(DE,MAJOR,MINOR) \
+ ((DE)->major_version >= (MAJOR) && (DE)->minor_version >= (MINOR))
static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
@@ -7389,6 +7391,7 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
drv->ready_async = de->ready_async;
drv->process_exit = de->process_exit;
+ drv->emergency_close = IS_DRIVER_VERSION_GE(de,3,2) ? de->emergency_close : NULL;
if (de->stop_select)
drv->stop_select = de->stop_select;
else
@@ -7407,6 +7410,8 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
}
}
+#undef IS_DRIVER_VERSION_GE
+
void
erts_destroy_driver(erts_driver_t *drv)
{
@@ -7550,7 +7555,7 @@ Port *erts_get_heart_port(void)
if (!port)
continue;
/* only examine undead or alive ports */
- if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
continue;
/* immediate atom compare */
reg = port->common.u.alive.reg;
@@ -7561,3 +7566,23 @@ Port *erts_get_heart_port(void)
return NULL;
}
+
+void erts_emergency_close_ports(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ Port *port = erts_pix2port(ix);
+
+ if (!port)
+ continue;
+ /* only examine undead or alive ports */
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ continue;
+
+ /* emergency close socket */
+ if (port->drv_ptr->emergency_close) {
+ port->drv_ptr->emergency_close((ErlDrvData) port->drv_data);
+ }
+ }
+}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index c29d4b3777..828f5b427a 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -189,6 +189,22 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
#endif
/*
+ * Compile time assert
+ * (the actual compiler error msg can be a bit confusing)
+ */
+#if ERTS_AT_LEAST_GCC_VSN__(3,1,1)
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = __builtin_choose_expr((e),0,(void)0) }; \
+ } while(0)
+#else
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = 1/(e) }; \
+ } while (0)
+#endif
+
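A minimal usage sketch of ERTS_CT_ASSERT (a hypothetical example, not taken from the patch): a true condition compiles away, while a false one breaks the build inside the enum initializer.

    static void ct_assert_demo(void)             /* hypothetical helper */
    {
        ERTS_CT_ASSERT(sizeof(Uint32) == 4);     /* true: valid enum constant,
                                                    no code is generated */
    #if 0
        ERTS_CT_ASSERT(sizeof(Uint32) == 8);     /* false: 1/(e) divides by zero
                                                    (or __builtin_choose_expr picks
                                                    (void)0), so the build fails
                                                    here instead of at run time */
    #endif
    }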
+/*
* Microsoft C/C++: We certainly want to use stdarg.h and prototypes.
* But MSC doesn't define __STDC__, unless we compile with the -Za
* flag (strict ANSI C, no Microsoft extension). Compiling with -Za
@@ -756,6 +772,8 @@ typedef struct {
} ErtsCheckIoDebugInfo;
int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
+int erts_sys_is_area_readable(char *start, char *stop);
+
/* xxxP */
#define SYS_DEFAULT_FLOAT_DECIMALS 20
void init_sys_float(void);
@@ -784,6 +802,11 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
+extern void sys_thr_resume(erts_tid_t tid);
+extern void sys_thr_suspend(erts_tid_t tid);
+#endif
+
/* utils.c */
/* Options to sys_alloc_opt */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index e03cd22070..b341c4d949 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -4214,19 +4214,7 @@ void erts_silence_warn_unused_result(long unused)
void
erts_interval_init(erts_interval_t *icp)
{
-#ifdef ARCH_64
- erts_atomic_init_nob(&icp->counter.atomic, 0);
-#else
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = 0;
-#else
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = 0;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = 0;
-#endif
- erts_dw_atomic_init_nob(&icp->counter.atomic, &dw);
-
-#endif
+ erts_atomic64_init_nob(&icp->counter.atomic, 0);
#ifdef DEBUG
icp->smp_api = 0;
#endif
@@ -4248,55 +4236,13 @@ erts_smp_interval_init(erts_interval_t *icp)
static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_relb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_relb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
}
@@ -4304,38 +4250,10 @@ static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+ curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
@@ -4343,38 +4261,10 @@ static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+ curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
- return (Uint64) erts_atomic_inc_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_acqb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
}
Uint64
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index db8a251fdd..5196eb51c6 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -268,14 +268,13 @@ static BOOL (WINAPI *fpSetHandleInformation)(HANDLE,DWORD,DWORD);
#define sock_htonl(x) htonl((x))
#define sock_send(s,buf,len,flag) send((s),(buf),(len),(flag))
#define sock_sendv(s, vec, size, np, flag) \
- WSASend((s),(WSABUF*)(vec),\
- (size),(np),(flag),NULL,NULL)
+ WSASend((s),(WSABUF*)(vec),(size),(np),(flag),NULL,NULL)
#define sock_recv(s,buf,len,flag) recv((s),(buf),(len),(flag))
#define sock_recvfrom(s,buf,blen,flag,addr,alen) \
- recvfrom((s),(buf),(blen),(flag),(addr),(alen))
+ recvfrom((s),(buf),(blen),(flag),(addr),(alen))
#define sock_sendto(s,buf,blen,flag,addr,alen) \
- sendto((s),(buf),(blen),(flag),(addr),(alen))
+ sendto((s),(buf),(blen),(flag),(addr),(alen))
#define sock_hostname(buf, len) gethostname((buf), (len))
#define sock_getservbyname(name,proto) getservbyname((name),(proto))
@@ -360,9 +359,9 @@ static ssize_t writev_fallback(int fd, const struct iovec *iov, int iovcnt, int
#define sock_accept(s, addr, len) accept((s), (addr), (len))
#define sock_send(s,buf,len,flag) inet_send((s),(buf),(len),(flag))
#define sock_sendto(s,buf,blen,flag,addr,alen) \
- sendto((s),(buf),(blen),(flag),(addr),(alen))
+ sendto((s),(buf),(blen),(flag),(addr),(alen))
#define sock_sendv(s, vec, size, np, flag) \
- (*(np) = writev_fallback((s), (struct iovec*)(vec), (size), (*(np))))
+ (*(np) = writev_fallback((s), (struct iovec*)(vec), (size), (*(np))))
#define sock_sendmsg(s,msghdr,flag) sendmsg((s),(msghdr),(flag))
#define sock_open(af, type, proto) socket((af), (type), (proto))
@@ -1178,6 +1177,7 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData, unsigned int,
static void tcp_inet_timeout(ErlDrvData);
static void tcp_inet_process_exit(ErlDrvData, ErlDrvMonitor *);
static void inet_stop_select(ErlDrvEvent, void*);
+static void inet_emergency_close(ErlDrvData);
#ifdef __WIN32__
static void tcp_inet_event(ErlDrvData, ErlDrvEvent);
static void find_dynamic_functions(void);
@@ -1288,7 +1288,8 @@ static struct erl_drv_entry tcp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING|ERL_DRV_FLAG_SOFT_BUSY,
NULL,
tcp_inet_process_exit,
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
@@ -1341,7 +1342,8 @@ static struct erl_drv_entry udp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING,
NULL,
NULL,
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
#endif
@@ -1375,7 +1377,8 @@ static struct erl_drv_entry sctp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING,
NULL,
NULL, /* process_exit */
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
#endif
@@ -1421,7 +1424,7 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event);
static int packet_inet_output(udp_descriptor* udesc, HANDLE event);
#endif
-/* convert descriptor poiner to inet_descriptor pointer */
+/* convert descriptor pointer to inet_descriptor pointer */
#define INETP(d) (&(d)->inet)
#ifdef __OSE__
@@ -2890,6 +2893,9 @@ static ErlDrvTermData am_sctp_rtoinfo, /* Option names */
/* For #sctp_paddrinfo{}: */
am_active, am_inactive,
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ am_unconfirmed,
+# endif
/* For #sctp_status{}: */
# if HAVE_DECL_SCTP_EMPTY
@@ -3919,7 +3925,10 @@ static void inet_init_sctp(void) {
/* For #sctp_paddrinfo{}: */
INIT_ATOM(active);
INIT_ATOM(inactive);
-
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ INIT_ATOM(unconfirmed);
+# endif
+
/* For #sctp_status{}: */
# if HAVE_DECL_SCTP_EMPTY
INIT_ATOM(empty);
@@ -3948,9 +3957,9 @@ static int inet_init()
if (0 != erl_drv_tsd_key_create("inet_buffer_stack_key", &buffer_stack_key))
goto error;
- ASSERT(sizeof(struct in_addr) == 4);
+ ERTS_CT_ASSERT(sizeof(struct in_addr) == 4);
# if defined(HAVE_IN6) && defined(AF_INET6)
- ASSERT(sizeof(struct in6_addr) == 16);
+ ERTS_CT_ASSERT(sizeof(struct in6_addr) == 16);
# endif
INIT_ATOM(ok);
@@ -3996,7 +4005,7 @@ static int inet_init()
#ifdef HAVE_SCTP
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
- ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
+ ERTS_CT_ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
# if defined(HAVE_SCTP_BINDX)
p_sctp_bindx = sctp_bindx;
# if defined(HAVE_SCTP_PEELOFF)
@@ -4721,6 +4730,36 @@ static char* sockaddr_to_buf(struct sockaddr* addr, char* ptr, char* end)
return NULL;
}
+/* sockaddr_bufsz_need
+ * Returns the number of bytes that sockaddr_to_buf() needs
+ * to store the address information.
+ */
+
+static size_t sockaddr_bufsz_need(struct sockaddr* addr)
+{
+ if (addr->sa_family == AF_INET || addr->sa_family == 0) {
+ return 1 + sizeof(struct in_addr);
+ }
+#if defined(HAVE_IN6) && defined(AF_INET6)
+ else if (addr->sa_family == AF_INET6) {
+ return 1 + sizeof(struct in6_addr);
+ }
+#endif
+#if defined(AF_LINK)
+ if (addr->sa_family == AF_LINK) {
+ struct sockaddr_dl *sdl_p = (struct sockaddr_dl*) addr;
+ return 2 + sdl_p->sdl_alen;
+ }
+#endif
+#if defined(AF_PACKET) && defined(HAVE_NETPACKET_PACKET_H)
+ else if(addr->sa_family == AF_PACKET) {
+ struct sockaddr_ll *sll_p = (struct sockaddr_ll*) addr;
+ return 2 + sll_p->sll_halen;
+ }
+#endif
+ return 0;
+}
+
static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr)
{
buf_check(ptr,end,1);
@@ -5799,6 +5838,11 @@ done:
}
#elif defined(HAVE_GETIFADDRS)
+#ifdef DEBUG
+#define GETIFADDRS_BUFSZ (1)
+#else
+#define GETIFADDRS_BUFSZ (512)
+#endif
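One plausible reading of the debug value above, offered as an assumption rather than something the patch spells out:

    /* GETIFADDRS_BUFSZ is 1 in debug builds, presumably so that the
     * BUF_ENSURE()/SOCKADDR_TO_BUF() reallocation paths below are
     * exercised on every call rather than only for very large results. */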
static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
char **rbuf_pp, ErlDrvSizeT rsize)
@@ -5809,15 +5853,15 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
char *buf_p;
char *buf_alloc_p;
- buf_size = 512;
- buf_alloc_p = ALLOC(buf_size);
+ buf_size = GETIFADDRS_BUFSZ;
+ buf_alloc_p = ALLOC(GETIFADDRS_BUFSZ);
buf_p = buf_alloc_p;
# define BUF_ENSURE(Size) \
do { \
int NEED_, GOT_ = buf_p - buf_alloc_p; \
NEED_ = GOT_ + (Size); \
if (NEED_ > buf_size) { \
- buf_size = NEED_ + 512; \
+ buf_size = NEED_ + GETIFADDRS_BUFSZ; \
buf_alloc_p = REALLOC(buf_alloc_p, buf_size); \
buf_p = buf_alloc_p + GOT_; \
} \
@@ -5830,7 +5874,7 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
while (! (P_ = sockaddr_to_buf((sa), buf_p, \
buf_alloc_p+buf_size))) { \
int GOT_ = buf_p - buf_alloc_p; \
- buf_size += 512; \
+ buf_size += GETIFADDRS_BUFSZ; \
buf_alloc_p = REALLOC(buf_alloc_p, buf_size); \
buf_p = buf_alloc_p + GOT_; \
} \
@@ -5887,10 +5931,11 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
|| ifa_p->ifa_addr->sa_family == AF_PACKET
#endif
) {
- char *bp = buf_p;
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
- if (buf_p - bp < 4) buf_p = bp; /* Empty hwaddr */
+ size_t need = sockaddr_bufsz_need(ifa_p->ifa_addr);
+ if (need > 3) {
+ BUF_ENSURE(1 + need);
+ SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
+ }
}
#endif
}
@@ -5905,6 +5950,7 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
return buf_size;
# undef BUF_ENSURE
}
+#undef GETIFADDRS_BUFSZ
#else
@@ -7338,8 +7384,13 @@ static int load_paddrinfo (ErlDrvTermData * spec, int i,
case SCTP_INACTIVE:
i = LOAD_ATOM (spec, i, am_inactive);
break;
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ case SCTP_UNCONFIRMED:
+ i = LOAD_ATOM (spec, i, am_unconfirmed);
+ break;
+# endif
default:
- ASSERT(0); /* NB: SCTP_UNCONFIRMED modifier not yet supported */
+ i = LOAD_ATOM (spec, i, am_undefined);
}
i = LOAD_INT (spec, i, pai->spinfo_cwnd);
i = LOAD_INT (spec, i, pai->spinfo_srtt);
@@ -8204,6 +8255,19 @@ static void inet_stop(inet_descriptor* desc)
FREE(desc);
}
+static void inet_emergency_close(ErlDrvData data)
+{
+ /* valid for any (UDP, TCP or SCTP) descriptor */
+ tcp_descriptor* tcp_desc = (tcp_descriptor*)data;
+ inet_descriptor* desc = INETP(tcp_desc);
+ DEBUGF(("inet_emergency_close(%ld) {s=%d\r\n",
+ (long)desc->port, desc->s));
+ if (desc->s != INVALID_SOCKET) {
+ sock_close(desc->s);
+ }
+}
+
+
static void set_default_msgq_limits(ErlDrvPort port)
{
ErlDrvSizeT q_high = INET_HIGH_MSGQ_WATERMARK;
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index 3143e4511d..f7b2d91d23 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -62,8 +62,17 @@
#define CRC32_COMBINE 23
#define ADLER32_COMBINE 24
+#define INFLATE_CHUNK 25
+
+
#define DEFAULT_BUFSZ 4000
+/* This flag is returned in the same places as the zlib return codes
+ * (Z_OK, Z_STREAM_END, Z_NEED_DICT), so it is set to a relatively
+ * large value to avoid clashing with future zlib codes.
+ */
+#define INFLATE_HAS_MORE 100
+
static int zlib_init(void);
static ErlDrvData zlib_start(ErlDrvPort port, char* buf);
static void zlib_stop(ErlDrvData e);
@@ -295,6 +304,58 @@ static int zlib_inflate(ZLibData* d, int flush)
return res;
}
+static int zlib_inflate_chunk(ZLibData* d)
+{
+ int res = Z_OK;
+
+ if ((d->bin == NULL) && (zlib_output_init(d) < 0)) {
+ errno = ENOMEM;
+ return Z_ERRNO;
+ }
+
+ while ((driver_sizeq(d->port) > 0) && (d->s.avail_out > 0) &&
+ (res != Z_STREAM_END)) {
+ int vlen;
+ SysIOVec* iov = driver_peekq(d->port, &vlen);
+ int len;
+
+ d->s.next_in = iov[0].iov_base;
+ d->s.avail_in = iov[0].iov_len;
+ while((d->s.avail_in > 0) && (d->s.avail_out > 0) && (res != Z_STREAM_END)) {
+ res = inflate(&d->s, Z_NO_FLUSH);
+ if (res == Z_NEED_DICT) {
+ /* Essential to eat the header bytes that zlib has looked at */
+ len = iov[0].iov_len - d->s.avail_in;
+ driver_deq(d->port, len);
+ return res;
+ }
+ if (res == Z_BUF_ERROR) {
+ /* More output looked possible, but none was produced */
+ res = Z_OK;
+ }
+ else if (res < 0) {
+ return res;
+ }
+ }
+ len = iov[0].iov_len - d->s.avail_in;
+ driver_deq(d->port, len);
+ }
+
+ /* We get here when all input has been consumed, end of stream has
+ * been reached, or the output buffer is full */
+ if (d->want_crc) {
+ d->crc = crc32(d->crc, (unsigned char*) d->bin->orig_bytes,
+ d->binsz - d->s.avail_out);
+ }
+ zlib_output(d);
+ if ((res == Z_OK) && (d->s.avail_in > 0))
+ res = INFLATE_HAS_MORE;
+ else if (res == Z_STREAM_END) {
+ d->inflate_eos_seen = 1;
+ }
+ return res;
+}
+
static int zlib_deflate(ZLibData* d, int flush)
{
int res = Z_OK;
@@ -568,6 +629,18 @@ static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *bu
return zlib_return(res, rbuf, rlen);
}
+ case INFLATE_CHUNK:
+ if (d->state != ST_INFLATE) goto badarg;
+ if (len != 0) goto badarg;
+ res = zlib_inflate_chunk(d);
+ if (res == INFLATE_HAS_MORE) {
+ return zlib_value2(4, 0, rbuf, rlen);
+ } else if (res == Z_NEED_DICT) {
+ return zlib_value2(3, d->s.adler, rbuf, rlen);
+ } else {
+ return zlib_return(res, rbuf, rlen);
+ }
+
case GET_QSIZE:
return zlib_value(driver_sizeq(d->port), rbuf, rlen);
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index be2fee1f25..a5960716f2 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -338,8 +338,8 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
}
#endif
DEBUGLOG(("utf8_mode is %s\n",(utf8_mode) ? "on" : "off"));
- sys_sigset(SIGCONT, cont);
- sys_sigset(SIGWINCH, winch);
+ sys_signal(SIGCONT, cont);
+ sys_signal(SIGWINCH, winch);
driver_select(port, (ErlDrvEvent)(UWord)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 1);
ttysl_port = port;
@@ -423,8 +423,8 @@ static void ttysl_stop(ErlDrvData ttysl_data)
tty_reset(ttysl_fd);
driver_select(ttysl_port, (ErlDrvEvent)(UWord)ttysl_fd,
ERL_DRV_WRITE|ERL_DRV_READ|ERL_DRV_USE, 0);
- sys_sigset(SIGCONT, SIG_DFL);
- sys_sigset(SIGWINCH, SIG_DFL);
+ sys_signal(SIGCONT, SIG_DFL);
+ sys_signal(SIGWINCH, SIG_DFL);
}
ttysl_port = (ErlDrvPort)-1;
ttysl_fd = -1;
@@ -1458,11 +1458,11 @@ static RETSIGTYPE suspend(int sig)
exit(1);
}
- sys_sigset(sig, SIG_DFL); /* Set signal handler to default */
+ sys_signal(sig, SIG_DFL); /* Set signal handler to default */
sys_sigrelease(sig); /* Allow 'sig' to come through */
kill(getpid(), sig); /* Send ourselves the signal */
sys_sigblock(sig); /* Reset to old mask */
- sys_sigset(sig, suspend); /* Reset signal handler */
+ sys_signal(sig, suspend); /* Reset signal handler */
if (tty_set(ttysl_fd) < 0) {
fprintf(stderr,"Can't set tty raw \n");
diff --git a/erts/emulator/hipe/hipe_amd64.c b/erts/emulator/hipe/hipe_amd64.c
index 16c597e7b4..63646825b2 100644
--- a/erts/emulator/hipe/hipe_amd64.c
+++ b/erts/emulator/hipe/hipe_amd64.c
@@ -125,7 +125,7 @@ static void atexit_alloc_code_stats(void)
#define MAP_ANONYMOUS MAP_ANON
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -174,10 +174,9 @@ static void morecore(unsigned int alloc_bytes)
abort();
}
#endif
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -197,6 +196,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -206,8 +207,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -224,7 +225,6 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
-
/* Make stub for native code calling exported beam function.
*/
void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
@@ -253,6 +253,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
((P_CALLEE_EXP + 4) >= 128 ? 3 : 0) +
(P_ARITY >= 128 ? 3 : 0);
codep = code = alloc_code(codeSize);
+ if (!code)
+ return NULL;
/* movl $callee_exp, P_CALLEE_EXP(%ebp); 3 or 6 bytes, plus 4 */
codep[0] = 0xc7;
diff --git a/erts/emulator/hipe/hipe_arch.h b/erts/emulator/hipe/hipe_arch.h
index 04ed980126..b45209b3f7 100644
--- a/erts/emulator/hipe/hipe_arch.h
+++ b/erts/emulator/hipe/hipe_arch.h
@@ -29,6 +29,7 @@ extern void hipe_patch_load_fe(Uint *address, Uint value);
extern int hipe_patch_insn(void *address, Uint value, Eterm type);
extern int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline);
+extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
extern void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity);
#if defined(__sparc__)
diff --git a/erts/emulator/hipe/hipe_arm.c b/erts/emulator/hipe/hipe_arm.c
index 165eb543c8..c0c6305c68 100644
--- a/erts/emulator/hipe/hipe_arm.c
+++ b/erts/emulator/hipe/hipe_arm.c
@@ -283,6 +283,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
*/
code = alloc_stub(4, &tramp_callemu);
+ if (!code)
+ return NULL;
callemu_offset = ((int)&nbif_callemu - ((int)&code[2] + 8)) >> 2;
if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF)) {
callemu_offset = ((int)tramp_callemu - ((int)&code[2] + 8)) >> 2;
diff --git a/erts/emulator/hipe/hipe_arm.h b/erts/emulator/hipe/hipe_arm.h
index 19f2a986cf..b9cd1a750c 100644
--- a/erts/emulator/hipe/hipe_arm.h
+++ b/erts/emulator/hipe/hipe_arm.h
@@ -40,8 +40,4 @@ static __inline__ int hipe_word32_address_ok(void *address)
extern void hipe_arm_inc_stack(void);
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_ARM_H */
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index 57e51bb8b1..884240be9c 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -26,6 +26,7 @@ include(`hipe/hipe_arm_asm.m4')
.text
.p2align 2
+ .arm
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define CALL_BIF(F) ldr r14, =F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
@@ -392,7 +393,14 @@ $1:
mov r0, P
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(0)
+#else
QUICK_CALL_RET($2,0)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -408,7 +416,14 @@ $1:
NBIF_ARG(r1,1,0)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(1)
+#else
QUICK_CALL_RET($2,1)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -425,7 +440,14 @@ $1:
NBIF_ARG(r2,2,1)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(2)
+#else
QUICK_CALL_RET($2,2)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -443,7 +465,14 @@ $1:
NBIF_ARG(r3,3,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(3)
+#else
QUICK_CALL_RET($2,3)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -467,7 +496,14 @@ $1:
NBIF_ARG(r3,5,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(5)
+#else
QUICK_CALL_RET($2,5)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -489,9 +525,16 @@ define(noproc_primop_interface_0,
#`define' HAVE_$1
.global $1
$1:
- /* XXX: this case is always trivial; how to suppress the branch? */
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(0)
+#else
+ /* XXX: this case is always trivial; how to suppress the branch? */
QUICK_CALL_RET($2,0)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -506,7 +549,14 @@ $1:
NBIF_ARG(r0,1,0)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(1)
+#else
QUICK_CALL_RET($2,1)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -522,7 +572,14 @@ $1:
NBIF_ARG(r1,2,1)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(2)
+#else
QUICK_CALL_RET($2,2)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -539,7 +596,14 @@ $1:
NBIF_ARG(r2,3,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(3)
+#else
QUICK_CALL_RET($2,3)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -559,7 +623,14 @@ $1:
str r4, [sp, #0]
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(5)
+#else
QUICK_CALL_RET($2,5)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
diff --git a/erts/emulator/hipe/hipe_arm_glue.S b/erts/emulator/hipe/hipe_arm_glue.S
index 069cb4512e..e7ff267606 100644
--- a/erts/emulator/hipe/hipe_arm_glue.S
+++ b/erts/emulator/hipe/hipe_arm_glue.S
@@ -24,6 +24,7 @@
.text
.p2align 2
+ .arm
/*
* Enter Erlang from C.
@@ -69,6 +70,7 @@
* Emulated code recursively calls native code.
*/
.global hipe_arm_call_to_native
+ .type hipe_arm_call_to_native, %function
hipe_arm_call_to_native:
ENTER_FROM_C
/* get argument registers */
@@ -84,6 +86,7 @@ hipe_arm_call_to_native:
* This is where native code returns to emulated code.
*/
.global nbif_return
+ .type nbif_return, %function
nbif_return:
str r0, [P, #P_ARG0] /* save retval */
mov r0, #HIPE_MODE_SWITCH_RES_RETURN
@@ -94,6 +97,7 @@ nbif_return:
* Emulated code returns to its native code caller.
*/
.global hipe_arm_return_to_native
+ .type hipe_arm_return_to_native, %function
hipe_arm_return_to_native:
ENTER_FROM_C
/* get return value */
@@ -110,6 +114,7 @@ hipe_arm_return_to_native:
* Emulated code tailcalls native code.
*/
.global hipe_arm_tailcall_to_native
+ .type hipe_arm_tailcall_to_native, %function
hipe_arm_tailcall_to_native:
ENTER_FROM_C
/* get argument registers */
@@ -124,6 +129,7 @@ hipe_arm_tailcall_to_native:
* Emulated code throws an exception to its native code caller.
*/
.global hipe_arm_throw_to_native
+ .type hipe_arm_throw_to_native, %function
hipe_arm_throw_to_native:
ENTER_FROM_C
/* invoke the handler */
@@ -141,6 +147,7 @@ hipe_arm_throw_to_native:
* XXX: Different stubs for different number of register parameters?
*/
.global nbif_callemu
+ .type nbif_callemu, %function
nbif_callemu:
str r8, [P, #P_CALLEE_EXP]
str r0, [P, #P_ARITY]
@@ -152,6 +159,7 @@ nbif_callemu:
* nbif_apply
*/
.global nbif_apply
+ .type nbif_apply, %function
nbif_apply:
STORE_ARG_REGS
mov r0, #HIPE_MODE_SWITCH_RES_APPLY
@@ -168,6 +176,7 @@ nbif_apply:
*/
#if NR_ARG_REGS >= 6
.global nbif_ccallemu6
+ .type nbif_ccallemu6, %function
nbif_ccallemu6:
str ARG5, [P, #P_ARG5]
#if NR_ARG_REGS > 6
@@ -180,6 +189,7 @@ nbif_ccallemu6:
#if NR_ARG_REGS >= 5
.global nbif_ccallemu5
+ .type nbif_ccallemu5, %function
nbif_ccallemu5:
str ARG4, [P, #P_ARG4]
#if NR_ARG_REGS > 5
@@ -192,6 +202,7 @@ nbif_ccallemu5:
#if NR_ARG_REGS >= 4
.global nbif_ccallemu4
+ .type nbif_ccallemu4, %function
nbif_ccallemu4:
str ARG3, [P, #P_ARG3]
#if NR_ARG_REGS > 4
@@ -204,6 +215,7 @@ nbif_ccallemu4:
#if NR_ARG_REGS >= 3
.global nbif_ccallemu3
+ .type nbif_ccallemu3, %function
nbif_ccallemu3:
str ARG2, [P, #P_ARG2]
#if NR_ARG_REGS > 3
@@ -216,6 +228,7 @@ nbif_ccallemu3:
#if NR_ARG_REGS >= 2
.global nbif_ccallemu2
+ .type nbif_ccallemu2, %function
nbif_ccallemu2:
str ARG1, [P, #P_ARG1]
#if NR_ARG_REGS > 2
@@ -228,6 +241,7 @@ nbif_ccallemu2:
#if NR_ARG_REGS >= 1
.global nbif_ccallemu1
+ .type nbif_ccallemu1, %function
nbif_ccallemu1:
str ARG0, [P, #P_ARG0]
#if NR_ARG_REGS > 1
@@ -239,6 +253,7 @@ nbif_ccallemu1:
#endif
.global nbif_ccallemu0
+ .type nbif_ccallemu0, %function
nbif_ccallemu0:
/* We use r1 not ARG0 here because ARG0 is not
defined when NR_ARG_REGS == 0. */
@@ -253,6 +268,7 @@ nbif_ccallemu0:
* This is where native code suspends.
*/
.global nbif_suspend_0
+ .type nbif_suspend_0, %function
nbif_suspend_0:
mov r0, #HIPE_MODE_SWITCH_RES_SUSPEND
b .suspend_exit
@@ -261,6 +277,7 @@ nbif_suspend_0:
* Suspend from a receive (waiting for a message)
*/
.global nbif_suspend_msg
+ .type nbif_suspend_msg, %function
nbif_suspend_msg:
mov r0, #HIPE_MODE_SWITCH_RES_WAIT
b .suspend_exit
@@ -271,6 +288,7 @@ nbif_suspend_msg:
* else { return 0; }
*/
.global nbif_suspend_msg_timeout
+ .type nbif_suspend_msg_timeout, %function
nbif_suspend_msg_timeout:
ldr r1, [P, #P_FLAGS]
mov r0, #HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT
@@ -285,23 +303,31 @@ nbif_suspend_msg_timeout:
* This is the default exception handler for native code.
*/
.global nbif_fail
+ .type nbif_fail, %function
nbif_fail:
mov r0, #HIPE_MODE_SWITCH_RES_THROW
b .flush_exit /* no need to save RA */
.global nbif_0_gc_after_bif
- .global nbif_1_gc_after_bif
- .global nbif_2_gc_after_bif
- .global nbif_3_gc_after_bif
+ .type nbif_0_gc_after_bif, %function
nbif_0_gc_after_bif:
mov r1, #0
b .gc_after_bif
+
+ .global nbif_1_gc_after_bif
+ .type nbif_1_gc_after_bif, %function
nbif_1_gc_after_bif:
mov r1, #1
b .gc_after_bif
+
+ .global nbif_2_gc_after_bif
+ .type nbif_2_gc_after_bif, %function
nbif_2_gc_after_bif:
mov r1, #2
b .gc_after_bif
+
+ .global nbif_3_gc_after_bif
+ .type nbif_3_gc_after_bif, %function
nbif_3_gc_after_bif:
mov r1, #3
/*FALLTHROUGH*/
@@ -329,18 +355,25 @@ nbif_3_gc_after_bif:
* TEMP_LR contains a copy of LR
*/
.global nbif_0_simple_exception
+ .type nbif_0_simple_exception, %function
nbif_0_simple_exception:
mov r1, #0
b .nbif_simple_exception
+
.global nbif_1_simple_exception
+ .type nbif_1_simple_exception, %function
nbif_1_simple_exception:
mov r1, #1
b .nbif_simple_exception
+
.global nbif_2_simple_exception
+ .type nbif_2_simple_exception, %function
nbif_2_simple_exception:
mov r1, #2
b .nbif_simple_exception
+
.global nbif_3_simple_exception
+ .type nbif_3_simple_exception, %function
nbif_3_simple_exception:
mov r1, #3
/*FALLTHROUGH*/
@@ -384,6 +417,7 @@ nbif_3_simple_exception:
* the gray/white stack boundary
*/
.global nbif_stack_trap_ra
+ .type nbif_stack_trap_ra, %function
nbif_stack_trap_ra: /* a return address, not a function */
# This only handles a single return value.
# If we have more, we need to save them in the PCB.
@@ -400,6 +434,7 @@ nbif_stack_trap_ra: /* a return address, not a function */
* Caller saved its LR in TEMP_LR (== TEMP1) before calling us.
*/
.global hipe_arm_inc_stack
+ .type hipe_arm_inc_stack, %function
hipe_arm_inc_stack:
STORE_ARG_REGS
mov TEMP_ARG0, lr
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 9eb0b88ced..9e5830f345 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -397,15 +397,17 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
ASSERT(bitoffs == 0);
ASSERT(bitsize == 0);
trampolines = NIL;
-#ifdef HIPE_ALLOC_CODE
- address = HIPE_ALLOC_CODE(nrbytes, BIF_ARG_2, &trampolines, BIF_P);
- if (!address)
- BIF_ERROR(BIF_P, BADARG);
-#else
- if (is_not_nil(BIF_ARG_2))
- BIF_ERROR(BIF_P, BADARG);
- address = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
-#endif
+ address = hipe_alloc_code(nrbytes, BIF_ARG_2, &trampolines, BIF_P);
+ if (!address) {
+ Uint nrcallees;
+
+ if (is_tuple(BIF_ARG_2))
+ nrcallees = arityval(tuple_val(BIF_ARG_2)[0]);
+ else
+ nrcallees = 0;
+ erl_exit(1, "%s: failed to allocate %lu bytes and %lu trampolines\r\n",
+ __func__, (unsigned long)nrbytes, (unsigned long)nrcallees);
+ }
memcpy(address, bytes, nrbytes);
hipe_flush_icache_range(address, nrbytes);
hp = HAlloc(BIF_P, 3);
@@ -1280,6 +1282,8 @@ static void *hipe_make_stub(Eterm m, Eterm f, unsigned int arity, int is_remote)
export_entry = erts_export_get_or_make_stub(m, f, arity);
StubAddress = hipe_make_native_stub(export_entry, arity);
+ if (!StubAddress)
+ erl_exit(1, "hipe_make_stub: code allocation failed\r\n");
return StubAddress;
}
diff --git a/erts/emulator/hipe/hipe_bif0.tab b/erts/emulator/hipe/hipe_bif0.tab
index 2514b1c3a5..d715a0914b 100644
--- a/erts/emulator/hipe/hipe_bif0.tab
+++ b/erts/emulator/hipe/hipe_bif0.tab
@@ -49,7 +49,6 @@ bif hipe_bifs:constants_size/0
bif hipe_bifs:merge_term/1
bif hipe_bifs:fun_to_address/1
-#bif hipe_bifs:get_emu_address/1
bif hipe_bifs:set_native_address/3
#bif hipe_bifs:address_to_fun/1
@@ -72,7 +71,6 @@ bif hipe_bifs:term_to_word/1
bif hipe_bifs:get_fe/2
bif hipe_bifs:set_native_address_in_fe/2
-#bif hipe_bifs:make_native_stub/2
bif hipe_bifs:find_na_or_make_stub/2
bif hipe_bifs:check_crc/1
diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c
index 56767ef04b..ecb34df412 100644
--- a/erts/emulator/hipe/hipe_bif1.c
+++ b/erts/emulator/hipe/hipe_bif1.c
@@ -574,22 +574,6 @@ BIF_RETTYPE hipe_bifs_pause_times_0(BIF_ALIST_0)
/* XXX: these macros have free variables */
#ifdef BM_TIMERS
-#if USE_PERFCTR
-#define MAKE_TIME(_timer_) { \
- BM_TIMER_T tmp = _timer_##_time; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
- min = (uint)tmp / 60; }
-
-#define MAKE_MICRO_TIME(_timer_) { \
- BM_TIMER_T tmp = _timer_##_time * 1000; \
- micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- sec = (uint)tmp / 1000; }
-
-#else
#define MAKE_TIME(_timer_) { \
BM_TIMER_T tmp = _timer_##_time / 1000000; \
milli = tmp % 1000; \
@@ -604,7 +588,6 @@ BIF_RETTYPE hipe_bifs_pause_times_0(BIF_ALIST_0)
milli = tmp % 1000; \
sec = tmp / 1000; }
-#endif
#else
#define MAKE_TIME(_timer_)
#define MAKE_MICRO_TIME(_timer_)
@@ -852,9 +835,6 @@ BIF_RETTYPE hipe_bifs_misc_timer_clear_0(BIF_ALIST_0)
/*
* HiPE hrvtime().
* These implementations are currently available:
- * + On Linux with the perfctr extension we can use the process'
- * virtualised time-stamp counter. To enable this mode you must
- * pass `--with-perfctr=/path/to/perfctr' when configuring.
 * + The fallback, which is the same as {X,_} = statistics(runtime).
*/
@@ -866,37 +846,6 @@ static double fallback_get_hrvtime(void)
return (double)ms_user;
}
-#if USE_PERFCTR
-
-#include "hipe_perfctr.h"
-static int hrvtime_started; /* 0: closed, +1: perfctr, -1: fallback */
-#define hrvtime_is_started() (hrvtime_started != 0)
-
-static void start_hrvtime(void)
-{
- if (hipe_perfctr_hrvtime_open() >= 0)
- hrvtime_started = 1;
- else
- hrvtime_started = -1;
-}
-
-static void stop_hrvtime(void)
-{
- if (hrvtime_started > 0)
- hipe_perfctr_hrvtime_close();
- hrvtime_started = 0;
-}
-
-static double get_hrvtime(void)
-{
- if (hrvtime_started > 0)
- return hipe_perfctr_hrvtime_get();
- else
- return fallback_get_hrvtime();
-}
-
-#else /* !USE_PERFCTR */
-
/*
* Fallback, if nothing better exists.
* This is the same as {X,_} = statistics(runtime), which uses
@@ -908,8 +857,6 @@ static double get_hrvtime(void)
#define stop_hrvtime() do{}while(0)
#define get_hrvtime() fallback_get_hrvtime()
-#endif /* !USE_PERFCTR */
-
BIF_RETTYPE hipe_bifs_get_hrvtime_0(BIF_ALIST_0)
{
Eterm *hp;
diff --git a/erts/emulator/hipe/hipe_perfctr.c b/erts/emulator/hipe/hipe_perfctr.c
deleted file mode 100644
index 371b3fb097..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-#include "sys.h"
-#include "error.h"
-#include "global.h"
-#include "bif.h"
-#include "big.h"
-#include "erl_binary.h"
-#include "hipe_perfctr.h"
-#include "libperfctr.h"
-
-static struct vperfctr *vperfctr;
-static unsigned int have_rdtsc;
-static double tsc_to_ms;
-static unsigned int tsc_on; /* control calls must set tsc_on if have_rdtsc is true */
-static unsigned int nractrs;
-static unsigned int users;
-#define USER_BIFS (1<<0)
-#define USER_HRVTIME (1<<1)
-
-static int hipe_perfctr_open(unsigned int user)
-{
- struct perfctr_info info;
-
- if (!vperfctr) {
- vperfctr = vperfctr_open();
- if (!vperfctr)
- return -1;
- if (vperfctr_info(vperfctr, &info) >= 0) {
- tsc_to_ms = (double)(info.tsc_to_cpu_mult ? : 1) / (double)info.cpu_khz;
- have_rdtsc = (info.cpu_features & PERFCTR_FEATURE_RDTSC) ? 1 : 0;
- }
- tsc_on = 0;
- nractrs = 0;
- }
- users |= user;
- return 0;
-}
-
-static void hipe_perfctr_reset(void)
-{
- struct vperfctr_control control;
-
- memset(&control, 0, sizeof control);
- if (have_rdtsc)
- control.cpu_control.tsc_on = 1;
- nractrs = 0;
- if (vperfctr_control(vperfctr, &control) >= 0)
- tsc_on = 1;
-}
-
-static void hipe_perfctr_close(unsigned int user)
-{
- if (!vperfctr)
- return;
- users &= ~user;
- switch (users) {
- case 0:
- vperfctr_unlink(vperfctr);
- vperfctr_close(vperfctr);
- vperfctr = NULL;
- tsc_on = 0;
- nractrs = 0;
- break;
- case USER_HRVTIME:
- hipe_perfctr_reset();
- }
-}
-
-/*
- * Interface for HiPE's hrvtime code.
- */
-
-int hipe_perfctr_hrvtime_open(void)
-{
- if (hipe_perfctr_open(USER_HRVTIME) < 0)
- return -1;
- if (have_rdtsc) {
- if (!tsc_on)
- hipe_perfctr_reset(); /* note: updates tsc_on */
- if (tsc_on)
- return 0;
- }
- hipe_perfctr_hrvtime_close();
- return -1;
-}
-
-void hipe_perfctr_hrvtime_close(void)
-{
- hipe_perfctr_close(USER_HRVTIME);
-}
-
-double hipe_perfctr_hrvtime_get(void)
-{
- return (double)vperfctr_read_tsc(vperfctr) * tsc_to_ms;
-}
-
-/*
- * BIF interface for user-programmable performance counters.
- */
-
-BIF_RETTYPE hipe_bifs_vperfctr_open_0(BIF_ALIST_0)
-{
- if (hipe_perfctr_open(USER_BIFS) < 0)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- BIF_RET(am_true);
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_close_0(BIF_ALIST_0)
-{
- hipe_perfctr_close(USER_BIFS);
- BIF_RET(NIL);
-}
-
-static Eterm ull_to_integer(unsigned long long x, Process *p)
-{
- unsigned long long tmpx;
- unsigned int ds, i;
- size_t sz;
- Eterm *hp;
- ErtsDigit *xp;
-
- if (x <= (unsigned long long)MAX_SMALL)
- return make_small(x);
-
- /* Calculate number of digits. */
- ds = 0;
- tmpx = x;
- do {
- ++ds;
- tmpx = (tmpx >> (D_EXP / 2)) >> (D_EXP / 2);
- } while (tmpx != 0);
-
- sz = BIG_NEED_SIZE(ds); /* number of words including arity */
- hp = HAlloc(p, sz);
- *hp = make_pos_bignum_header(sz-1);
-
- xp = (ErtsDigit*)(hp+1);
- i = 0;
- do {
- xp[i++] = (ErtsDigit)x;
- x = (x >> (D_EXP / 2)) >> (D_EXP / 2);
- } while (i < ds);
- while (i & (BIG_DIGITS_PER_WORD-1))
- xp[i++] = 0;
-
- return make_big(hp);
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_info_0(BIF_ALIST_0)
-{
- struct perfctr_info info;
-
- if (!vperfctr || vperfctr_info(vperfctr, &info) < 0)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- BIF_RET(new_binary(BIF_P, (void*)&info, sizeof info));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_read_tsc_0(BIF_ALIST_0)
-{
- unsigned long long val;
-
- if (!vperfctr || !tsc_on)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- val = vperfctr_read_tsc(vperfctr);
- BIF_RET(ull_to_integer(val, BIF_P));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_read_pmc_1(BIF_ALIST_1)
-{
- Uint pmc;
- unsigned long long val;
-
- if (!vperfctr ||
- is_not_small(BIF_ARG_1) ||
- (pmc = unsigned_val(BIF_ARG_1), pmc >= nractrs))
- BIF_RET(am_false); /* for consistency with the arity 0 BIFs */
- val = vperfctr_read_pmc(vperfctr, pmc);
- BIF_RET(ull_to_integer(val, BIF_P));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_control_1(BIF_ALIST_1)
-{
- void *bytes;
- struct vperfctr_control control;
- Uint bitoffs;
- Uint bitsize;
-
- if (!vperfctr)
- BIF_ERROR(BIF_P, BADARG);
- if (is_not_binary(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
- if (binary_size(BIF_ARG_1) != sizeof control)
- BIF_ERROR(BIF_P, BADARG);
- ERTS_GET_BINARY_BYTES(BIF_ARG_1, bytes, bitoffs, bitsize);
- ASSERT(bitoffs == 0);
- ASSERT(bitsize == 0);
- memcpy(&control, bytes, sizeof control);
- if (have_rdtsc)
- control.cpu_control.tsc_on = 1;
- if (vperfctr_control(vperfctr, &control) < 0) {
- hipe_perfctr_reset();
- BIF_ERROR(BIF_P, BADARG);
- }
- tsc_on = control.cpu_control.tsc_on;
- nractrs = control.cpu_control.nractrs;
- BIF_RET(NIL);
-}
diff --git a/erts/emulator/hipe/hipe_perfctr.h b/erts/emulator/hipe/hipe_perfctr.h
deleted file mode 100644
index 8fbf9ecf35..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-extern int hipe_perfctr_hrvtime_open(void);
-extern void hipe_perfctr_hrvtime_close(void);
-extern double hipe_perfctr_hrvtime_get(void);
diff --git a/erts/emulator/hipe/hipe_perfctr.tab b/erts/emulator/hipe/hipe_perfctr.tab
deleted file mode 100644
index eaecea4651..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.tab
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2004-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-
-bif hipe_bifs:vperfctr_open/0
-bif hipe_bifs:vperfctr_close/0
-bif hipe_bifs:vperfctr_info/0
-bif hipe_bifs:vperfctr_read_tsc/0
-bif hipe_bifs:vperfctr_read_pmc/1
-bif hipe_bifs:vperfctr_control/1
diff --git a/erts/emulator/hipe/hipe_ppc.c b/erts/emulator/hipe/hipe_ppc.c
index 4dc26cdbc8..1eaa9f6855 100644
--- a/erts/emulator/hipe/hipe_ppc.c
+++ b/erts/emulator/hipe/hipe_ppc.c
@@ -293,6 +293,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
abort();
code = alloc_stub(7);
+ if (!code)
+ return NULL;
/* addis r12,0,callee_exp@highest */
code[0] = 0x3d800000 | (((unsigned long)callee_exp >> 48) & 0xffff);
@@ -381,6 +383,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
abort();
code = alloc_stub(4);
+ if (!code)
+ return NULL;
/* addi r12,0,callee_exp@l */
code[0] = 0x39800000 | ((unsigned long)callee_exp & 0xFFFF);
diff --git a/erts/emulator/hipe/hipe_ppc.h b/erts/emulator/hipe/hipe_ppc.h
index 66000c1846..e9d3e6564b 100644
--- a/erts/emulator/hipe/hipe_ppc.h
+++ b/erts/emulator/hipe/hipe_ppc.h
@@ -64,10 +64,6 @@ AEXTERN(void,hipe_ppc_inc_stack,(void));
extern void hipe_ppc_inc_stack(void); /* we don't have the AEXTERN() fallback :-( */
#endif
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#if !defined(__powerpc64__)
extern const unsigned int fconv_constant[];
#endif
diff --git a/erts/emulator/hipe/hipe_sparc.c b/erts/emulator/hipe/hipe_sparc.c
index 2052aa8498..fea3b623a9 100644
--- a/erts/emulator/hipe/hipe_sparc.c
+++ b/erts/emulator/hipe/hipe_sparc.c
@@ -130,7 +130,7 @@ static void atexit_alloc_code_stats(void)
#define ALLOC_CODE_STATS(X) do{}while(0)
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -158,10 +158,9 @@ static void morecore(unsigned int alloc_bytes)
#endif
,
-1, 0);
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -177,6 +176,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -186,8 +187,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -211,6 +212,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
int i;
code = alloc_code(5*sizeof(int));
+ if (!code)
+ return NULL;
/* sethi %hi(Address), %i4 */
code[0] = 0x39000000 | (((unsigned int)callee_exp >> 10) & 0x3FFFFF);
diff --git a/erts/emulator/hipe/hipe_sparc.h b/erts/emulator/hipe/hipe_sparc.h
index 1134b86004..2d92ca3ca8 100644
--- a/erts/emulator/hipe/hipe_sparc.h
+++ b/erts/emulator/hipe/hipe_sparc.h
@@ -47,8 +47,4 @@ static __inline__ int hipe_word32_address_ok(void *address)
extern void hipe_sparc_inc_stack(void);
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_SPARC_H */
diff --git a/erts/emulator/hipe/hipe_x86.c b/erts/emulator/hipe/hipe_x86.c
index 314f6b597c..998905ea63 100644
--- a/erts/emulator/hipe/hipe_x86.c
+++ b/erts/emulator/hipe/hipe_x86.c
@@ -108,7 +108,7 @@ static void atexit_alloc_code_stats(void)
#define MAP_ANONYMOUS MAP_ANON
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -136,10 +136,9 @@ static void morecore(unsigned int alloc_bytes)
#endif
,
-1, 0);
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -155,6 +154,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -164,8 +165,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -207,6 +208,8 @@ void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
(P_CALLEE_EXP >= 128 ? 3 : 0) +
(P_ARITY >= 128 ? 3 : 0);
codep = code = alloc_code(codeSize);
+ if (!code)
+ return NULL;
/* movl $beamAddress, P_CALLEE_EXP(%ebp); 3 or 6 bytes, plus 4 */
codep[0] = 0xc7;
diff --git a/erts/emulator/hipe/hipe_x86.h b/erts/emulator/hipe/hipe_x86.h
index 97f09e38cd..f29117d0c4 100644
--- a/erts/emulator/hipe/hipe_x86.h
+++ b/erts/emulator/hipe/hipe_x86.h
@@ -53,8 +53,4 @@ extern void nbif_inc_stack_0(void);
extern void nbif_handle_fp_exception(void);
#endif
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_X86_H */
diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md
index b93c11c6ec..2a9594db25 100644
--- a/erts/emulator/internal_doc/CarrierMigration.md
+++ b/erts/emulator/internal_doc/CarrierMigration.md
@@ -16,12 +16,12 @@ When a carrier is empty, i.e. contains only one large free block, it
is deallocated. Since multiblock carriers can contain both allocated
blocks and free blocks at the same time, an allocator instance might
be stuck with a large amount of poorly utilized carriers if the memory
-load decrease. After a peak in memory usage it is expected that not
-all memory can be returned since the blocks still allocated is likely
+load decreases. After a peak in memory usage it is expected that not
+all memory can be returned since the blocks still allocated are likely
to be dispersed over multiple carriers. Such poorly utilized carriers
-can usually be reused if the memory load increase again. However,
+can usually be reused if the memory load increases again. However,
since each scheduler thread manages its own set of allocator
-instances, and memory load is not necessarily connected to CPU load we
+instances, and memory load is not necessarily correlated to CPU load, we
might get into a situation where there are lots of poorly utilized
multiblock carriers on some allocator instances while we need to
allocate new multiblock carriers on other allocator instances. In
@@ -50,13 +50,13 @@ the allocator instance manages. Free blocks in one specific carrier
can be referred to from potentially every other carrier that is
managed, and the amount of such references can be huge. That is, the
work of removing the free blocks of such a carrier from the search
-tree will be huge. One way of solving this could be to not migrate
+tree will be huge. One way of solving this could be not to migrate
carriers that contain lots of free blocks, but this would prevent us
-from migrating carriers that potentially needs to be migrated in order
+from migrating carriers that potentially need to be migrated in order
to solve the problem we set out to solve.
By using one data structure of free blocks in each carrier and an
-allocator instance wide data structure of carriers managed by the
+allocator instance-wide data structure of carriers managed by the
allocator instance, the work needed in order to remove and add
carriers can be kept to a minimum. When migration of carriers is
enabled on a specific allocator type, we require that an allocation
@@ -76,9 +76,9 @@ through a pool of carriers. In order for a carrier migration to
complete, one scheduler needs to move the carrier into the pool, and
another scheduler needs to take the carrier out of the pool.
-The pool is implemented as a lock free, circular, double linked,
+The pool is implemented as a lock-free, circular, doubly linked
list. The list contains a sentinel which is used as the starting point
-when inserting to, or fetching from the pool. Carriers in the pool are
+when inserting to, or fetching from, the pool. Carriers in the pool are
elements in this list.
The list can be modified by all scheduler threads
@@ -108,19 +108,19 @@ all search operations need to read the content of the sentinel. If we
were to modify the sentinel, the cache line containing the sentinel
would unnecessarily be bounced between processors.
-The `prev`, and `next` fields in the elements of the list contains the
+The `prev` and `next` fields in the elements of the list contain the
value of the pointer, a modification marker, and a deleted
marker. Memory operations on these fields are done using atomic memory
operations. When a thread has set the modification marker in a field,
no-one except the thread that set the marker is allowed to modify the
-field. If multiple modification markers needs to be set, we always
+field. If multiple modification markers need to be set, we always
begin with `next` fields followed by `prev` fields in the order
following the actual pointers. This guarantees that no deadlocks will
occur.
When a carrier is being removed from a pool, we mark it with a thread
progress value that needs to be reached before we are allowed to
-modify the `next`, and `prev` fields. That is, until we reach this
+modify the `next` and `prev` fields. That is, until we reach this
thread progress we are not allowed to insert the carrier into the pool
again, and we are not allowed to deallocate the carrier. This ensures
that threads inspecting the pool always will be able to traverse the
@@ -130,12 +130,12 @@ threads may have references to it via the pool.
### Migration ###
-There exist one pool for each allocator type enabling migration of
+There exists one pool for each allocator type enabling migration of
carriers between scheduler specific allocator instances of the same
allocator type.
Each allocator instance keeps track of the current utilization of its
-multiblock carriers. When the utilization falls below the "abandon
+multiblock carriers. When the total utilization falls below the "abandon
carrier utilization limit" it starts to inspect the utilization of the
current carrier when deallocations are made. If also the utilization
of the carrier falls below the "abandon carrier utilization limit" it
@@ -146,28 +146,53 @@ Since the carrier has been unlinked from the data structure of
available free blocks, no more allocations will be made in the
carrier. The allocator instance putting the carrier into the pool,
however, still has the responsibility of performing deallocations in
-it while it remains in the pool.
+it while it remains in the pool. The allocator instance with this
+deallocation responsibility is here called the **employer**.
-Each carrier has a flag field containing information about allocator
-instance owning the carrier, a flag indicating if the carrier is in
+Each carrier has a flag field containing information about the
+employing allocator instance, a flag indicating if the carrier is in
the pool or not, and a flag indicating if it is busy or not. When the
-carrier is in the pool, the owning allocator instance needs to mark it
+carrier is in the pool, the employing allocator instance needs to mark it
as busy while operating on it. If another thread inspects it in order
-to try to fetch it from the pool, it will abort the fetch if it is
-busy. When fetching the carrier from the pool, ownership will changed
-and further deallocations in the carrier will be redirected to the new
-owner using the delayed dealloc functionality.
+to try to fetch it from the pool, it will skip it if it is busy. When
+fetching the carrier from the pool, employment will change and further
+deallocations in the carrier will be redirected to the new
+employer using the delayed dealloc functionality.
If a carrier in the pool becomes empty, it will be withdrawn from the
pool. All carriers that become empty are also always passed to its
-originating allocator instance for deallocation using the delayed
+**owning** allocator instance for deallocation using the delayed
dealloc functionality. Since carriers this way always will be
-deallocated by the allocator instance that allocated the carrier the
+deallocated by the owner that allocated the carrier, the
underlying functionality of allocating and deallocating carriers can
remain simple and doesn't have to bother about multiple threads. In a
NUMA system we will also not mix carriers originating from multiple
NUMA nodes.
+In short:
+
+* The allocator instance that created a carrier **owns** it.
+* An empty carrier is always deallocated by its **owner**.
+* **Ownership** never changes.
+* The allocator instance that uses a carrier **employs** it.
+* An **employer** can abandon a carrier into the pool.
+* Pooled carriers are not allocated from.
+* Deallocation in a pooled carrier is still performed by its **employer**.
+* **Employment** can only change when a carrier is fetched from the pool.
+
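+The carrier flag field described above can be thought of as one machine
+word that packs a pointer to the employing allocator instance together
+with the in-pool and busy flags in its low bits. A minimal sketch of how
+a fetching thread could claim such a carrier is shown below; it is
+illustrative only and does not use the actual alloc_util names or layout:
+
+    #include <stdatomic.h>
+    #include <stdint.h>
+
+    /* Illustrative sketch; all names are made up. Allocator instances are
+       assumed to be at least 4-byte aligned, so the two low bits are free. */
+    #define CRR_FLG_IN_POOL ((uintptr_t)1) /* carrier is in the pool        */
+    #define CRR_FLG_BUSY    ((uintptr_t)2) /* employer is operating on it   */
+    #define CRR_FLG_MASK    ((uintptr_t)3)
+
+    typedef struct allctr allctr_t;        /* allocator instance (opaque)   */
+
+    typedef struct {
+        _Atomic uintptr_t allctr;          /* employer pointer | flags      */
+        /* ... carrier and block data ... */
+    } carrier_t;
+
+    /* Try to fetch a pooled carrier. Busy or already claimed carriers are
+       skipped. On success the carrier leaves the pool and the caller
+       becomes the new employer; the previous employer is returned. */
+    static inline allctr_t *try_fetch_from_pool(carrier_t *crr, allctr_t *me)
+    {
+        uintptr_t exp = atomic_load(&crr->allctr);
+        if (!(exp & CRR_FLG_IN_POOL) || (exp & CRR_FLG_BUSY))
+            return NULL;                   /* skip this carrier             */
+        if (atomic_compare_exchange_strong(&crr->allctr, &exp, (uintptr_t)me))
+            return (allctr_t *)(exp & ~CRR_FLG_MASK);
+        return NULL;                       /* lost the race; skip           */
+    }
+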
+### Searching the pool ###
+
+To preserve real-time characteristics, searching the pool is
+bounded: we only inspect a limited number of carriers. If none of
+those carriers has a free block large enough to satisfy the allocation
+request, the search fails. A carrier in the pool can also be busy
+if another thread is currently doing block deallocation work on the
+carrier. A busy carrier is also skipped by the search, as it cannot
+satisfy the request. The pool is lock-free and we do not want to
+block while waiting for the other thread to finish.
+
+#### Before OTP 17.4 ####
+
When an allocator instance needs more carrier space, it always begins
by inspecting its own carriers that are waiting for thread progress
before they can be deallocated. If no such carrier could be found, it
@@ -176,10 +201,69 @@ it will allocate a new carrier. Regardless of where the allocator
instance gets the carrier from it the just links in the carrier into
its data structure of free blocks.
+#### After OTP 17.4 ####
+
+The old search algorithm had a problem, as the search always started at
+the same position in the pool, the sentinel. This could lead to
+contention between concurrently searching processes. Even worse, it
+could lead to a "bad" state in which searches fail at a high rate,
+so that new carriers are allocated instead. These new carriers
+may later be inserted into the pool due to bad utilization. If carriers
+are inserted into the pool faster than they are successfully fetched
+from it, memory will eventually be exhausted.
+
+This "bad" state consists of a cluster of small and/or highly
+fragmented carriers located at the sentinel in the pool. The largest free
+block in such a "bad" carrier is rather small, making it unable to satisfy
+most allocation requests. As the search always started at the
+sentinel, any such "bad" carriers that had been left in the pool would
+eventually cluster together at the sentinel. All searches first
+have to skip past this cluster of "bad" carriers to reach a "good"
+carrier. When the cluster gets to the same size as the search limit,
+all searches will essentially fail.
+
+To counter the "bad cluster" problem and also ease the contention, the
+search will now always start by first looking at the allocator's **own**
+carriers, that is, carriers that were initially created by the
+allocator itself and have later been abandoned to the pool. If none of
+our own abandoned carriers will do, the search continues into the
+pool, as before, to look for carriers created by other
+allocators. However, if we have at least one abandoned carrier of our
+own that could not satisfy the request, we can use that as an entry point
+into the pool.
+
+The result is that we prefer carriers created by the thread itself,
+which is good for NUMA performance, and we get more entry points when
+searching the pool, which eases contention and clustering.
+
+To do the first search among own carriers, every allocator instance
+has two new lists: `pooled_list` and `traitor_list`. These lists are only
+accessed by the allocator itself and they only contain the allocator's
+own carriers. When an owned carrier is abandoned and put in the
+pool, it is also linked into `pooled_list`. When we search our
+`pooled_list` and find a carrier that is no longer in the pool, we
+move that carrier from `pooled_list` to `traitor_list` as it is now
+employed by another allocator. If searching `pooled_list` fails, we
+also do a limited search of `traitor_list`. When an abandoned carrier
+is found in `traitor_list`, it is either employed, or moved back to
+`pooled_list` if it could not satisfy the allocation request.
+
+When searching `pooled_list` and `traitor_list` we always start at the
+point where the last search ended. This is to avoid clustering
+problems and to increase the probability of finding a "good" carrier. As
+`pooled_list` and `traitor_list` are only accessed by the owning
+allocator instance, they need no thread synchronization at all.
+
+Furthermore, the search for own carriers that are scheduled
+for deallocation is now done as the last search option. The idea is
+that it is better to reuse a poorly utilized carrier than to
+resurrect an empty carrier that was just about to be released back to
+the OS.
+
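+Put together, a condensed sketch of the carrier search order after OTP
+17.4 could look as follows. All types and helper functions here are
+hypothetical stand-ins, not the real alloc_util API:
+
+    #include <stddef.h>
+
+    typedef struct carrier carrier_t;
+    typedef struct { carrier_t *first; } clist_t;
+    typedef struct { clist_t pooled_list, traitor_list; /* ... */ } allctr_t;
+
+    #define MAX_INSPECT 10 /* illustrative search limit */
+
+    carrier_t *search_list(clist_t *l, size_t want, int limit);
+    carrier_t *search_pool(allctr_t *a, size_t want, int limit);
+    carrier_t *reuse_dealloc_scheduled_carrier(allctr_t *a, size_t want);
+    carrier_t *create_carrier(allctr_t *a, size_t want);
+
+    static carrier_t *get_carrier(allctr_t *a, size_t want)
+    {
+        carrier_t *crr;
+
+        /* 1. Our own abandoned carriers that are still in the pool. */
+        if ((crr = search_list(&a->pooled_list, want, MAX_INSPECT)))
+            return crr;
+        /* 2. Our own carriers employed elsewhere; one of them may have
+              been abandoned again. */
+        if ((crr = search_list(&a->traitor_list, want, MAX_INSPECT)))
+            return crr;
+        /* 3. The shared pool, entered at one of our own abandoned
+              carriers if step 1 found one, otherwise at the sentinel. */
+        if ((crr = search_pool(a, want, MAX_INSPECT)))
+            return crr;
+        /* 4. Own carriers waiting for thread progress before deallocation. */
+        if ((crr = reuse_dealloc_scheduled_carrier(a, want)))
+            return crr;
+        /* 5. Finally, allocate a brand new carrier. */
+        return create_carrier(a, want);
+    }
+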
### Result ###
The use of this strategy of abandoning carriers with poor utilization
-and reusing these in allocator instances with an increased carrier
+and reusing them in allocator instances with an increased carrier
demand is extremely effective and completely eliminates the problems
that otherwise sometimes occurred when CPU load dropped while memory
load did not.
diff --git a/erts/emulator/internal_doc/SuperCarrier.md b/erts/emulator/internal_doc/SuperCarrier.md
new file mode 100644
index 0000000000..0ad6af41de
--- /dev/null
+++ b/erts/emulator/internal_doc/SuperCarrier.md
@@ -0,0 +1,191 @@
+Super Carrier
+=============
+
+A super carrier is a large memory area, allocated at VM start, from which
+normal carriers can be allocated during runtime.
+
+The super carrier feature was introduced in OTP R16B03. It is
+enabled with the command line option +MMscs <size in MB>
+and can be configured further with other options.
+
+Problem
+-------
+
+The initial motivation for this feature was customers asking for a way
+to pre-allocate physical memory at VM start for the VM to use.
+
+Other motivations were limitations experienced in the OS
+implementation of mmap:
+
+* Increasingly bad performance of mmap/munmap as the number of mmap'ed areas grows.
+* Fragmentation problems between mmap'ed areas.
+
+A third problem was management of low memory in the halfword
+emulator. The implementation used a naive linear search structure to
+hold free segments, which led to poor performance when
+fragmentation increased.
+
+
+Solution
+--------
+
+Allocate one large contiguous area of address space at VM start and
+then use that area to satisfy our dynamic memory needs during
+runtime. In other words: implement our own mmap.
+
+### Use cases ###
+
+If the command line option +MMscrpm (Reserve Physical Memory) is set to
+false, only virtual address space is allocated for the super carrier from the
+start. The super carrier then acts as an "alternative mmap" implementation
+without changing the consumption of physical memory pages. Physical
+pages will be reserved on demand when an allocation is done from the super
+carrier and be unreserved when the memory is released back to the
+super carrier.
+
+If +MMscrpm is set to true, which is the default, the initial allocation
+will reserve physical memory for the entire super carrier. This can be
+used by users who want to ensure a certain *minimum* amount of
+physical memory for the VM.
+
+However, what reservation of physical memory actually means depends
+heavily on the operating system and how it is configured. For
+example, different memory overcommit settings on Linux drastically
+change the behaviour.
+
+A third feature is to have the super carrier limit the *maximum*
+amount of memory used by the VM. If +MMsco (Super Carrier Only) is set
+to true, which is the default, allocations will only be done from the
+super carrier. When the super carrier gets full, the VM will fail with
+an out-of-memory error.
+If +MMsco is false, allocations will use mmap directly when the super
+carrier is full.
+
+
+
+### Implementation ###
+
+The entire super carrier implementation is kept in erl_mmap.c. The
+name suggests that it can be viewed as our own mmap implementation.
+
+A super carrier needs to satisfy two slightly different kinds of
+allocation requests: multiblock carriers (MBCs) and single block
+carriers (SBCs). They are both rather large blocks of contiguous
+memory, but MBCs and SBCs have different demands on alignment and
+size.
+
+SBCs can have arbitrary sizes and only need a minimum 8-byte
+alignment.
+
+MBCs are more restricted. They can only have a number of fixed
+sizes that are powers of 2. The start address needs to have a very
+large alignment (currently 256 KB, called "super alignment"). This is a
+design choice that allows very low overhead per allocated block in the
+MBC.
+
+To reduce fragmentation within the super carrier, it is good to keep SBCs
+and MBCs apart. MBCs with their uniform alignment and sizes can be
+packed very efficiently together. SBCs, without any demand for alignment, can
+also be allocated quite efficiently together. But mixing them can lead
+to a lot of wasted memory when we need to create large holes of
+padding to reach the next alignment limit.
+
+The super carrier thus contains two areas: one area for MBCs, growing from
+the bottom up, and one area for SBCs, growing from the top
+down, like a process with a heap and a stack growing towards each
+other.
+
+
+### Data structures ###
+
+The MBC area is called **sa** as in super aligned and the SBC area is
+called **sua** as in super un-aligned.
+
+Note that the "super" in super alignment and the "super" in super
+carrier have nothing to do with each other. We could have chosen
+other names to avoid confusion, such as "meta" carrier or "giant"
+alignment.
+
+ +-------+ <---- sua.top
+ | sua |
+ | |
+ |-------| <---- sua.bot
+ | |
+ | |
+ | |
+ |-------| <---- sa.top
+ | |
+ | sa |
+ | |
+ +-------+ <---- sa.bot
+
+
+When a carrier is deallocated, a free memory segment will be created
+inside the corresponding area, unless the carrier was at the very top
+(in `sa`) or bottom (in `sua`), in which case the area will just shrink
+down or up.
+
+We need to keep track of all the free segments in order to reuse them
+for new carrier allocations. One initial idea was to use the same
+mechanism that is used to keep track of free blocks within MBCs
+(alloc_util and the different strategies). However, that would not be
+as straight forward as one can think and can also waste quite a lot of
+memory as it uses prepended block headers. The granularity of the
+super carrier is one memory page (usually 4kb). We want to allocate
+and free entire pages and we don't want to waste an entire page just
+to hold the block header of the following pages.
+
+Instead we store the meta information about all the free segments in a
+dedicated area apart from the `sa` and `sua` areas. Every free segment is
+represented by a descriptor struct (`ErtsFreeSegDesc`).
+
+ typedef struct {
+ RBTNode snode; /* node in 'stree' */
+ RBTNode anode; /* node in 'atree' */
+ char* start;
+ char* end;
+ }ErtsFreeSegDesc;
+
+To find the smallest free segment that will satisfy a carrier allocation
+(best fit), the free segments are organized in a tree sorted by
+size (`stree`). We search this tree at allocation. If no free segment of
+sufficient size is found, the area (`sa` or `sua`) is expanded instead.
+If two or more free segments of equal size exist, the one at the lowest
+address is chosen for `sa`, and the one at the highest address for `sua`.
+
+At carrier deallocation, we want to coalesce with any adjacent free
+segments, to form one large free segment. To do that, all free
+segments are also organized in a tree sorted in address order (`atree`).
+
+So, in total we keep four trees of free segment descriptors for the super
+carrier: two for `sa` and two for `sua`. They all use the same
+red-black tree implementation, which supports the different sorting
+orders used.
+
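+As a rough illustration, the coalescing done at carrier deallocation
+could be sketched as follows. The helpers are hypothetical stand-ins
+for the red-black tree operations in erl_mmap.c, operating on the
+`ErtsFreeSegDesc` struct shown above:
+
+    /* Hypothetical helpers over the address tree (atree) and the size
+       tree (stree); not the actual erl_mmap.c functions. */
+    ErtsFreeSegDesc *atree_neighbour_below(char *addr);
+    ErtsFreeSegDesc *atree_neighbour_above(char *addr);
+    void tree_delete(ErtsFreeSegDesc *d);   /* remove from both trees    */
+    void tree_insert(ErtsFreeSegDesc *d);   /* insert into both trees    */
+    ErtsFreeSegDesc *alloc_desc(void);      /* from the descriptor area  */
+    void free_desc(ErtsFreeSegDesc *d);
+
+    static void insert_free_seg(char *start, char *end)
+    {
+        ErtsFreeSegDesc *below = atree_neighbour_below(start);
+        ErtsFreeSegDesc *above = atree_neighbour_above(end);
+        ErtsFreeSegDesc *desc;
+
+        if (below && below->end == start) { /* merge with the segment below */
+            start = below->start;
+            tree_delete(below);
+            free_desc(below);
+        }
+        if (above && above->start == end) { /* merge with the segment above */
+            end = above->end;
+            tree_delete(above);
+            free_desc(above);
+        }
+        desc = alloc_desc();
+        desc->start = start;
+        desc->end = end;
+        tree_insert(desc); /* into atree (address order) and stree (size order) */
+    }
+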
+When allocating a new MBC we first search for a free segment in `sa`,
+then try to raise `sa.top`, and then as a fallback search for a
+free segment in `sua`. When an MBC is allocated in `sua`, a larger segment
+is allocated which is then trimmed to obtain the right
+alignment. The allocation search for an SBC is done in the reverse order. When
+an SBC is allocated in `sa`, its size is aligned up to the super aligned
+size.
+
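+A condensed sketch of this MBC allocation order is shown below; the
+function names are again illustrative, not the erl_mmap.c API, and the
+SBC path would run the corresponding steps in the reverse order:
+
+    #include <stddef.h>
+
+    #define SUPERALIGNED_SIZE (256*1024) /* the "super alignment" above */
+
+    /* Hypothetical helpers; see the area descriptions above. */
+    void *best_fit_in_sa(size_t size);
+    void *raise_sa_top(size_t size);
+    void *best_fit_in_sua(size_t size);
+    void *trim_to_superaligned(void *p, size_t size);
+
+    static void *super_carrier_alloc_mbc(size_t size)
+    {
+        void *p;
+
+        if ((p = best_fit_in_sa(size)))     /* 1. free segment in sa      */
+            return p;
+        if ((p = raise_sa_top(size)))       /* 2. grow sa upwards         */
+            return p;
+        /* 3. Fall back to sua: over-allocate and trim to super alignment. */
+        if ((p = best_fit_in_sua(size + SUPERALIGNED_SIZE)))
+            return trim_to_superaligned(p, size);
+        return NULL;                        /* super carrier exhausted    */
+    }
+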
+### The free descriptor area ###
+
+As mentioned above, the descriptors for the free segments are
+allocated in a separate area. This area has a constant configurable
+size (+MMscrfsd) that defaults to 65536 descriptors. This should be
+more than enough in most cases. If the descriptor area fills up,
+new descriptor areas will be allocated first directly from the OS, and
+then from `sua` and `sa` in the super carrier, and lastly from the memory
+segment itself which is being deallocated. Allocating free descriptor
+areas from the super carrier is only a last resort, and should be
+avoided, as it creates fragmentation.
+
+### Halfword emulator ###
+
+The halfword emulator uses the super carrier implementation to manage
+the low memory mappings that are needed for all term storage. The
+super carrier cannot be configured here by command line options. One
+could imagine a second configurable instance of the super carrier used
+for high memory allocation, but that has not been implemented.
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 81cb5dc4bb..0051b45b31 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -268,6 +268,8 @@ free_drv_select_data(ErtsDrvSelectDataState *dsp)
erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, dsp);
}
+#if ERTS_CIO_HAVE_DRV_EVENT
+
static ERTS_INLINE ErtsDrvEventDataState *
alloc_drv_event_data(void)
{
@@ -290,6 +292,8 @@ free_drv_event_data(ErtsDrvEventDataState *dep)
erts_free(ERTS_ALC_T_DRV_EV_D_STATE, dep);
}
+#endif /* ERTS_CIO_HAVE_DRV_EVENT */
+
static ERTS_INLINE void
remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
{
diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c
index 94eb6b1547..5ad92dad02 100644
--- a/erts/emulator/sys/unix/erl_child_setup.c
+++ b/erts/emulator/sys/unix/erl_child_setup.c
@@ -101,7 +101,9 @@ main(int argc, char *argv[])
if (sscanf(argv[CS_ARGV_FD_CR_IX], "%d:%d", &from, &to) != 2)
return 1;
-#if defined(__ANDROID__)
+#if defined(HAVE_CLOSEFROM)
+ closefrom(from);
+#elif defined(__ANDROID__)
for (i = from; i <= to; i++) {
if (i!=__system_properties_fd)
(void) close(i);
@@ -109,13 +111,6 @@ main(int argc, char *argv[])
#else
for (i = from; i <= to; i++)
(void) close(i);
-#endif /* __ANDROID__ */
-
-#if defined(HAVE_CLOSEFROM)
- closefrom(from);
-#else
- for (i = from; i <= to; i++)
- (void) close(i);
#endif
if (!(argv[CS_ARGV_WD_IX][0] == '.' && argv[CS_ARGV_WD_IX][1] == '\0')
@@ -147,8 +142,6 @@ main(int argc, char *argv[])
return 1;
}
-
-
#if defined(__ANDROID__)
int __system_properties_fd(void)
{
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index f7a6298d5b..f0050db114 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -45,7 +45,7 @@
#include <fcntl.h>
#include "erl_errno.h"
#include <signal.h>
-
+#include <setjmp.h>
#if HAVE_SYS_SOCKETIO_H
# include <sys/socketio.h>
@@ -188,7 +188,7 @@ typedef hrtime_t SysHrTime;
#endif /* GETHRTIME_WITH_CLOCK_GETTIME */
#endif /* HAVE_GETHRTIME */
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
typedef long long SysCpuTime;
typedef struct timespec SysTimespec;
@@ -200,7 +200,7 @@ typedef struct timespec SysTimespec;
int sys_start_hrvtime(void);
int sys_stop_hrvtime(void);
-#elif defined(HAVE_CLOCK_GETTIME)
+#elif defined(HAVE_CLOCK_GETTIME_CPU_TIME)
#define sys_clock_gettime(cid,tp) clock_gettime((cid),&(tp))
#define sys_get_proc_cputime(t,tp) sys_clock_gettime(CLOCK_PROCESS_CPUTIME_ID,(tp))
@@ -211,13 +211,8 @@ int sys_stop_hrvtime(void);
#define SYS_CLOCK_RESOLUTION 1
/* These are defined in sys.c */
-#if defined(SIG_SIGSET) /* Old SysV */
-RETSIGTYPE (*sys_sigset())();
-#elif defined(SIG_SIGNAL) /* Old BSD */
-RETSIGTYPE (*sys_sigset())();
-#else
-RETSIGTYPE (*sys_sigset(int, RETSIGTYPE (*func)(int)))(int);
-#endif
+typedef void (*SIGFUNC)(int);
+extern SIGFUNC sys_signal(int, SIGFUNC);
extern void sys_sigrelease(int);
extern void sys_sigblock(int);
extern void sys_stop_cat(void);
@@ -229,7 +224,7 @@ extern void sys_stop_cat(void);
#ifdef USE_ISINF_ISNAN /* simulate finite() */
# define isfinite(f) (!isinf(f) && !isnan(f))
# define HAVE_ISFINITE
-#elif defined(__GNUC__) && defined(HAVE_FINITE)
+#elif (defined(__GNUC__) && !defined(__llvm__)) && defined(HAVE_FINITE)
/* We use finite in gcc as it emits assembler instead of
the function call that isfinite emits. The assembler is
significantly faster. */
@@ -354,4 +349,28 @@ extern int exit_async(void);
#define ERTS_EXIT_AFTER_DUMP _exit
+#if !defined(__APPLE__) && !defined(__MACH__)
+/* Some OS X versions do not allow (ab)using signal handlers like this */
+#define ERTS_HAVE_TRY_CATCH 1
+
+/* We try to simulate a try catch in C with the help of signal handlers.
+ * Only use this as a very last resort, as it is not very portable and
+ * quite unstable. It is also not thread safe, so make sure that only
+ * one thread can call this at a time!
+ */
+extern void erts_sys_sigsegv_handler(int);
+extern jmp_buf erts_sys_sigsegv_jmp;
+#define ERTS_SYS_TRY_CATCH(EXPR,CATCH) \
+ do { \
+ SIGFUNC prev_handler = sys_signal(SIGSEGV, \
+ erts_sys_sigsegv_handler); \
+ if (!setjmp(erts_sys_sigsegv_jmp)) { \
+ EXPR; \
+ } else { \
+ CATCH; \
+ } \
+ sys_signal(SIGSEGV,prev_handler); \
+ } while(0)
+#endif
+
#endif /* #ifndef _ERL_UNIX_SYS_H */
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 5de0c281c4..0d9c743c0c 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -86,6 +86,14 @@ static erts_smp_rwmtx_t environ_rwmtx;
#define DISABLE_VFORK 0
#endif
+#if defined IOV_MAX
+#define MAXIOV IOV_MAX
+#elif defined UIO_MAXIOV
+#define MAXIOV UIO_MAXIOV
+#else
+#define MAXIOV 16
+#endif
+
#ifdef USE_THREADS
# ifdef ENABLE_CHILD_WAITER_THREAD
# define CHLDWTHR ENABLE_CHILD_WAITER_THREAD
@@ -216,10 +224,14 @@ static erts_smp_atomic_t sys_misc_mem_sz;
#if defined(ERTS_SMP)
static void smp_sig_notify(char c);
static int sig_notify_fds[2] = {-1, -1};
-#elif defined(USE_THREADS)
-static int async_fd[2];
+
+static int sig_suspend_fds[2] = {-1, -1};
+#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
+
#endif
+jmp_buf erts_sys_sigsegv_jmp;
+
#if CHLDWTHR || defined(ERTS_SMP)
erts_mtx_t chld_stat_mtx;
#endif
@@ -260,6 +272,8 @@ static void note_child_death(int, int);
static void* child_waiter(void *);
#endif
+static int crashdump_companion_cube_fd = -1;
+
/********************* General functions ****************************/
/* This is used by both the drivers and general I/O, must be set early */
@@ -589,6 +603,14 @@ erts_sys_pre_init(void)
close(fd);
}
+ /* We need a file descriptor to close in the crashdump creation.
+ * We close this one to be sure we can get a fd for our real file ...
+ * so, we create one here ... a stone to carry all the way home.
+ */
+
+ crashdump_companion_cube_fd = open("/dev/null", O_RDONLY);
+
+ /* don't lose it, there will be cake */
}
void
@@ -646,39 +668,7 @@ erl_sys_init(void)
/* signal handling */
-#ifdef SIG_SIGSET /* Old SysV */
-RETSIGTYPE (*sys_sigset(sig, func))()
-int sig;
-RETSIGTYPE (*func)();
-{
- return(sigset(sig, func));
-}
-void sys_sigblock(int sig)
-{
- sighold(sig);
-}
-void sys_sigrelease(int sig)
-{
- sigrelse(sig);
-}
-#else /* !SIG_SIGSET */
-#ifdef SIG_SIGNAL /* Old BSD */
-RETSIGTYPE (*sys_sigset(sig, func))(int, int)
-int sig;
-RETSIGTYPE (*func)();
-{
- return(signal(sig, func));
-}
-sys_sigblock(int sig)
-{
- sigblock(sig);
-}
-sys_sigrelease(int sig)
-{
- sigsetmask(sigblock(0) & ~sigmask(sig));
-}
-#else /* !SIG_SIGNAL */ /* The True Way - POSIX!:-) */
-RETSIGTYPE (*sys_sigset(int sig, RETSIGTYPE (*func)(int)))(int)
+SIGFUNC sys_signal(int sig, SIGFUNC func)
{
struct sigaction act, oact;
@@ -711,36 +701,47 @@ void sys_sigrelease(int sig)
sigaddset(&mask, sig);
sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
}
-#endif /* !SIG_SIGNAL */
-#endif /* !SIG_SIGSET */
-#if (0) /* not used? -- gordon */
-static void (*break_func)();
-static RETSIGTYPE break_handler(int sig)
-{
-#ifdef QNX
- /* Turn off SIGCHLD during break processing */
- sys_sigblock(SIGCHLD);
-#endif
- (*break_func)();
-#ifdef QNX
- sys_sigrelease(SIGCHLD);
-#endif
+void erts_sys_sigsegv_handler(int signo) {
+ if (signo == SIGSEGV) {
+ longjmp(erts_sys_sigsegv_jmp, 1);
+ }
+}
+
+/*
+ * Function returns 1 if we can read from all values in between
+ * start and stop.
+ */
+int
+erts_sys_is_area_readable(char *start, char *stop) {
+ int fds[2];
+ if (!pipe(fds)) {
+ /* We let write try to figure out if the pointers are readable */
+ int res = write(fds[1], start, (char*)stop - (char*)start);
+ if (res == -1) {
+ close(fds[0]);
+ close(fds[1]);
+ return 0;
+ }
+ close(fds[0]);
+ close(fds[1]);
+ return 1;
+ }
+ return 0;
+
}
-#endif /* 0 */
static ERTS_INLINE int
prepare_crash_dump(int secs)
{
#define NUFBUF (3)
- int i, max;
+ int i;
char env[21]; /* enough to hold any 64-bit integer */
size_t envsz;
DeclareTmpHeapNoproc(heap,NUFBUF);
Port *heart_port;
Eterm *hp = heap;
Eterm list = NIL;
- int heart_fd[2] = {-1,-1};
int has_heart = 0;
UseTmpHeapNoproc(NUFBUF);
@@ -763,43 +764,22 @@ prepare_crash_dump(int secs)
alarm((unsigned int)secs);
}
- if (heart_port) {
- /* hearts input fd
- * We "know" drv_data is the in_fd since the port is started with read|write
- */
- heart_fd[0] = (int)heart_port->drv_data;
- heart_fd[1] = (int)driver_data[heart_fd[0]].ofd;
- has_heart = 1;
+ /* close all viable sockets via emergency close callbacks.
+ * Specifically we want to close epmd sockets.
+ */
- list = CONS(hp, make_small(8), list); hp += 2;
+ erts_emergency_close_ports();
+ if (heart_port) {
+ has_heart = 1;
+ list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
heart_port->common.id, list, NULL);
}
- /* Make sure we unregister at epmd (unknown fd) and get at least
- one free filedescriptor (for erl_crash.dump) */
-
- max = max_files;
- if (max < 1024)
- max = 1024;
- for (i = 3; i < max; i++) {
-#if defined(ERTS_SMP)
- /* We don't want to close the signal notification pipe... */
- if (i == sig_notify_fds[0] || i == sig_notify_fds[1])
- continue;
-#elif defined(USE_THREADS)
- /* We don't want to close the async notification pipe... */
- if (i == async_fd[0] || i == async_fd[1])
- continue;
-#endif
- /* We don't want to close our heart yet ... */
- if (i == heart_fd[0] || i == heart_fd[1])
- continue;
-
- close(i);
- }
+ /* Make sure we have a fd for our crashdump file. */
+ close(crashdump_companion_cube_fd);
envsz = sizeof(env);
i = erts_sys_getenv__("ERL_CRASH_DUMP_NICE", env, &envsz);
@@ -877,9 +857,23 @@ sigusr1_exit(void)
#ifdef ETHR_UNUSABLE_SIGUSRX
#warning "Unusable SIGUSR1 & SIGUSR2. Disabling use of these signals"
-#endif
-#ifndef ETHR_UNUSABLE_SIGUSRX
+#else
+
+#ifdef ERTS_SMP
+void
+sys_thr_suspend(erts_tid_t tid) {
+ erts_thr_kill(tid, ERTS_SYS_SUSPEND_SIGNAL);
+}
+
+void
+sys_thr_resume(erts_tid_t tid) {
+ int i = 0, res;
+ do {
+ res = write(sig_suspend_fds[1],&i,sizeof(i));
+ } while (res < 0 && errno == EAGAIN);
+}
+#endif
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE user_signal1(void)
@@ -894,20 +888,20 @@ static RETSIGTYPE user_signal1(int signum)
#endif
}
-#ifdef QUANTIFY
+#ifdef ERTS_SMP
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE user_signal2(void)
+static RETSIGTYPE suspend_signal(void)
#else
-static RETSIGTYPE user_signal2(int signum)
+static RETSIGTYPE suspend_signal(int signum)
#endif
{
-#ifdef ERTS_SMP
- smp_sig_notify('2');
-#else
- quantify_save_data();
-#endif
+ int res;
+ int buf[1];
+ do {
+ res = read(sig_suspend_fds[0], buf, sizeof(int));
+ } while (res < 0 && errno == EINTR);
}
-#endif
+#endif /* #ifdef ERTS_SMP */
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
@@ -932,9 +926,9 @@ static RETSIGTYPE do_quit(int signum)
/* Disable break */
void erts_set_ignore_break(void) {
- sys_sigset(SIGINT, SIG_IGN);
- sys_sigset(SIGQUIT, SIG_IGN);
- sys_sigset(SIGTSTP, SIG_IGN);
+ sys_signal(SIGINT, SIG_IGN);
+ sys_signal(SIGQUIT, SIG_IGN);
+ sys_signal(SIGTSTP, SIG_IGN);
}
/* Don't use ctrl-c for break handler but let it be
@@ -957,14 +951,14 @@ void erts_replace_intr(void) {
void init_break_handler(void)
{
- sys_sigset(SIGINT, request_break);
+ sys_signal(SIGINT, request_break);
#ifndef ETHR_UNUSABLE_SIGUSRX
- sys_sigset(SIGUSR1, user_signal1);
-#ifdef QUANTIFY
- sys_sigset(SIGUSR2, user_signal2);
-#endif
+ sys_signal(SIGUSR1, user_signal1);
+#ifdef ERTS_SMP
+ sys_signal(ERTS_SYS_SUSPEND_SIGNAL, suspend_signal);
+#endif /* #ifdef ERTS_SMP */
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
- sys_sigset(SIGQUIT, do_quit);
+ sys_signal(SIGQUIT, do_quit);
}
int sys_max_files(void)
@@ -981,8 +975,13 @@ static void block_signals(void)
sys_sigblock(SIGINT);
#ifndef ETHR_UNUSABLE_SIGUSRX
sys_sigblock(SIGUSR1);
+#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+#endif /* #ifndef ERTS_SMP */
+
+#if defined(ERTS_SMP) && !defined(ETHR_UNUSABLE_SIGUSRX)
+ sys_sigblock(ERTS_SYS_SUSPEND_SIGNAL);
#endif
-#endif
+
}
static void unblock_signals(void)
@@ -996,8 +995,14 @@ static void unblock_signals(void)
#ifndef ETHR_UNUSABLE_SIGUSRX
sys_sigrelease(SIGUSR1);
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+#endif /* #ifndef ERTS_SMP */
+
+#if defined(ERTS_SMP) && !defined(ETHR_UNUSABLE_SIGUSRX)
+ sys_sigrelease(ERTS_SYS_SUSPEND_SIGNAL);
#endif
+
}
+
/************************** Time stuff **************************/
#ifdef HAVE_GETHRTIME
#ifdef GETHRTIME_WITH_CLOCK_GETTIME
@@ -1327,9 +1332,10 @@ static int spawn_init()
thr_opts.detached = 0;
thr_opts.suggested_stack_size = 0; /* Smallest possible */
+ thr_opts.name = "child_waiter";
#endif
- sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
+ sys_signal(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
erts_smp_atomic_add_nob(&sys_misc_mem_sz,
@@ -1342,7 +1348,7 @@ static int spawn_init()
sys_sigblock(SIGCHLD);
#endif
- sys_sigset(SIGCHLD, onchld); /* Reap children */
+ sys_signal(SIGCHLD, onchld); /* Reap children */
#if CHLDWTHR
erts_thr_create(&child_waiter_tid, child_waiter, NULL, &thr_opts);
@@ -1623,9 +1629,13 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
goto child_error;
}
+#if defined(HAVE_CLOSEFROM)
+ closefrom(opts->use_stdio ? 3 : 5);
+#else
for (i = opts->use_stdio ? 3 : 5; i < max_files; i++)
(void) close(i);
-
+#endif
+
if (opts->wd && chdir(opts->wd) < 0)
goto child_error;
@@ -2524,32 +2534,28 @@ fd_async(void *async_data)
SysIOVec *iov0;
SysIOVec *iov;
int iovlen;
- int iovcnt;
- int p;
+ int err;
/* much of this code is stolen from efile_drv:invoke_writev */
driver_pdl_lock(dd->blocking->pdl);
iov0 = driver_peekq(dd->port_num, &iovlen);
- /* Calculate iovcnt */
- for (p = 0, iovcnt = 0; iovcnt < iovlen;
- p += iov0[iovcnt++].iov_len)
- ;
+ iovlen = iovlen < MAXIOV ? iovlen : MAXIOV;
iov = erts_alloc_fnf(ERTS_ALC_T_SYS_WRITE_BUF,
- sizeof(SysIOVec)*iovcnt);
+ sizeof(SysIOVec)*iovlen);
if (!iov) {
res = -1;
- errno = ENOMEM;
- erts_free(ERTS_ALC_T_SYS_WRITE_BUF, iov);
+ err = ENOMEM;
driver_pdl_unlock(dd->blocking->pdl);
} else {
- memcpy(iov,iov0,iovcnt*sizeof(SysIOVec));
+ memcpy(iov,iov0,iovlen*sizeof(SysIOVec));
driver_pdl_unlock(dd->blocking->pdl);
res = writev(dd->ofd, iov, iovlen);
+ err = errno;
erts_free(ERTS_ALC_T_SYS_WRITE_BUF, iov);
}
dd->blocking->res = res;
- dd->blocking->err = errno;
+ dd->blocking->err = err;
}
void fd_ready_async(ErlDrvData drv_data,
@@ -3206,13 +3212,6 @@ signal_dispatcher_thread_func(void *unused)
case '1': /* SIGUSR1 */
sigusr1_exit();
break;
-#ifdef QUANTIFY
- case '2': /* SIGUSR2 */
- quantify_save_data(); /* Might take a substantial amount of
- time, but this is a test/debug
- build */
- break;
-#endif
default:
erl_exit(ERTS_ABORT_EXIT,
"signal-dispatcher thread received unknown "
@@ -3230,6 +3229,7 @@ init_smp_sig_notify(void)
{
erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
thr_opts.detached = 1;
+ thr_opts.name = "sys_sig_dispatcher";
if (pipe(sig_notify_fds) < 0) {
erl_exit(ERTS_ABORT_EXIT,
@@ -3244,6 +3244,17 @@ init_smp_sig_notify(void)
NULL,
&thr_opts);
}
+
+static void
+init_smp_sig_suspend(void) {
+ if (pipe(sig_suspend_fds) < 0) {
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to create sig_suspend pipe: %s (%d)\n",
+ erl_errno_id(errno),
+ errno);
+ }
+}
+
#ifdef __DARWIN__
int erts_darwin_main_thread_pipe[2];
@@ -3271,9 +3282,11 @@ erts_sys_main_thread(void)
#endif
smp_sig_notify(0); /* Notify initialized */
- while (1) {
- /* Wait for a signal to arrive... */
+
+ /* Wait for a signal to arrive... */
+
#ifdef __DARWIN__
+ while (1) {
/*
* The wx driver needs to be able to steal the main thread for Cocoa to
* work properly.
@@ -3288,12 +3301,24 @@ erts_sys_main_thread(void)
void* (*func)(void*);
void* arg;
void *resp;
- read(erts_darwin_main_thread_pipe[0],&func,sizeof(void* (*)(void*)));
- read(erts_darwin_main_thread_pipe[0],&arg, sizeof(void*));
+ res = read(erts_darwin_main_thread_pipe[0],&func,sizeof(void* (*)(void*)));
+ if (res != sizeof(void* (*)(void*)))
+ break;
+ res = read(erts_darwin_main_thread_pipe[0],&arg,sizeof(void*));
+ if (res != sizeof(void*))
+ break;
resp = (*func)(arg);
write(erts_darwin_main_thread_result_pipe[1],&resp,sizeof(void *));
}
-#else
+
+ if (res == -1 && errno != EINTR)
+ break;
+ }
+ /* Something broke with the main thread pipe, so we ignore it for now.
+ Most probably erts has closed this pipe and is about to exit. */
+#endif /* #ifdef __DARWIN__ */
+
+ while (1) {
#ifdef DEBUG
int res =
#else
@@ -3302,7 +3327,6 @@ erts_sys_main_thread(void)
select(0, NULL, NULL, NULL, NULL);
ASSERT(res < 0);
ASSERT(errno == EINTR);
-#endif
}
}
@@ -3394,6 +3418,7 @@ erl_sys_args(int* argc, char** argv)
#ifdef ERTS_SMP
init_smp_sig_notify();
+ init_smp_sig_suspend();
#endif
/* Handled arguments have been marked with NULL. Slide arguments
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index cafeab547e..2ffa649767 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -32,7 +32,7 @@ void
erts_sys_init_float(void)
{
# ifdef SIGFPE
- sys_sigset(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
+ sys_signal(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
# endif
}
@@ -667,7 +667,7 @@ static void fpe_sig_handler(int sig)
static void erts_thread_catch_fp_exceptions(void)
{
- sys_sigset(SIGFPE, fpe_sig_handler);
+ sys_signal(SIGFPE, fpe_sig_handler);
unmask_fpe();
}
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 838f0c61eb..fde32c8684 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -236,4 +236,16 @@ typedef long ssize_t;
int init_async(int);
int exit_async(void);
#endif
+
+#define ERTS_HAVE_TRY_CATCH 1
+
+#define ERTS_SYS_TRY_CATCH(EXPR,CATCH) \
+ __try { \
+ EXPR; \
+ } \
+ __except(GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) \
+ { \
+ CATCH; \
+ }
+
#endif
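ERTS_SYS_TRY_CATCH wraps Windows structured exception handling so that only access violations run the CATCH branch; every other exception keeps propagating. A hypothetical caller might look like the sketch below (try_read_byte is illustrative, not part of the patch, and builds only with MSVC where __try/__except is available).

/* Probe a single byte: returns 1 and stores the byte on success,
 * returns 0 if dereferencing p raised an access violation. */
static int try_read_byte(const char *p, char *out)
{
    int ok = 1;
    ERTS_SYS_TRY_CATCH(*out = *p,   /* EXPR: may fault on an unmapped page */
                       ok = 0);     /* CATCH: runs only for access violations */
    return ok;
}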
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 164ef95629..5d51659b4e 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -247,6 +247,27 @@ void erl_sys_args(int* argc, char** argv)
#endif
}
+/*
+ * Returns 1 if every byte in the range [start, stop) can be read
+ * without faulting, 0 otherwise.
+ */
+int
+erts_sys_is_area_readable(char *start, char *stop) {
+ volatile char tmp;
+ __try
+ {
+ while(start < stop) {
+ tmp = *start;
+ start++;
+ }
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ return 0;
+ }
+ return 1;
+}
+
int erts_sys_prepare_crash_dump(int secs)
{
Port *heart_port;
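erts_sys_is_area_readable() walks the range byte by byte under __try, so a fault anywhere in the area comes back as 0 instead of killing the emulator, which is what a crash-dump writer needs when handed possibly corrupt pointers. A small usage sketch follows, assuming the function is linked in from win32/sys.c; the surrounding main() is illustrative only.

#include <stdio.h>

int erts_sys_is_area_readable(char *start, char *stop);  /* from win32/sys.c */

int main(void)
{
    char buf[64] = "mapped stack memory";

    /* A live stack buffer is readable... */
    printf("stack buffer: %d\n",
           erts_sys_is_area_readable(buf, buf + sizeof(buf)));

    /* ...while the null page is not; the SEH handler turns the access
     * violation into a plain 0 return instead of a crash. */
    printf("null page:    %d\n",
           erts_sys_is_area_readable((char *)16, (char *)32));
    return 0;
}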
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index fbc229bc53..fc9bdae0a0 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -20,6 +20,7 @@
-module(bif_SUITE).
-include_lib("test_server/include/test_server.hrl").
+-include_lib("kernel/include/file.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
@@ -681,8 +682,38 @@ erlang_halt(Config) when is_list(Config) ->
{badrpc,nodedown} = rpc:call(N2, erlang, halt, [0]),
{ok,N3} = slave:start(H, halt_node3),
{badrpc,nodedown} = rpc:call(N3, erlang, halt, [0,[]]),
- ok.
+ %% This test triggers a segfault while the crash dump is being written,
+ %% to make sure that the dump is still terminated with its end marker.
+ {ok,N4} = slave:start(H, halt_node4),
+ CrashDump = filename:join(proplists:get_value(priv_dir,Config),
+ "segfault_erl_crash.dump"),
+ true = rpc:call(N4, os, putenv, ["ERL_CRASH_DUMP",CrashDump]),
+ false = rpc:call(N4, erts_debug, set_internal_state,
+ [available_internal_state, true]),
+ {badrpc,nodedown} = rpc:call(N4, erts_debug, set_internal_state,
+ [broken_halt, "Validate correct crash dump"]),
+ ok = wait_until_stable_size(CrashDump,-1),
+ {ok, Bin} = file:read_file(CrashDump),
+ case {string:str(binary_to_list(Bin),"\n=end\n"),
+ string:str(binary_to_list(Bin),"\r\n=end\r\n")} of
+ {0,0} -> ct:fail("Could not find end marker in crash dump");
+ _ -> ok
+ end.
+
+wait_until_stable_size(_File,-10) ->
+ {error,enoent};
+wait_until_stable_size(File,PrevSz) ->
+ timer:sleep(250),
+ case file:read_file_info(File) of
+ {error,enoent} ->
+ wait_until_stable_size(File,PrevSz-1);
+ {ok,#file_info{size = PrevSz }} when PrevSz /= -1 ->
+ io:format("Crashdump file size was: ~p (~s)~n",[PrevSz,File]),
+ ok;
+ {ok,#file_info{size = NewSz }} ->
+ wait_until_stable_size(File,NewSz)
+ end.
%% Helpers
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 1bb4cb3637..6bbf93b7d7 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -2349,8 +2349,10 @@ port_setget_data(Config) when is_list(Config) ->
Port = erlang:open_port({spawn_driver, "echo_drv"}, []),
NSched = erlang:system_info(schedulers_online),
+ HeapData = {1,2,3,<<"A heap binary">>,fun()->"This is fun"end,
+ list_to_binary(lists:seq(1,100))},
PRs = lists:map(fun(I) ->
- spawn_opt(fun() -> port_setget_data_hammer(Port,1) end,
+ spawn_opt(fun() -> port_setget_data_hammer(Port,HeapData,false,1) end,
[monitor, {scheduler, I rem NSched}])
end,
lists:seq(1,10)),
@@ -2368,13 +2370,17 @@ port_setget_data(Config) when is_list(Config) ->
PRs),
ok.
-port_setget_data_hammer(Port, N) ->
+port_setget_data_hammer(Port, HeapData, IsSet0, N) ->
Rand = random:uniform(3),
- try case Rand of
- 1 -> true = erlang:port_set_data(Port, atom);
- 2 -> true = erlang:port_set_data(Port, {1,2,3});
- 3 -> erlang:port_get_data(Port)
- end
+ IsSet1 = try case Rand of
+ 1 -> true = erlang:port_set_data(Port, atom), true;
+ 2 -> true = erlang:port_set_data(Port, HeapData), true;
+ 3 -> case erlang:port_get_data(Port) of
+ atom -> true;
+ HeapData -> true;
+ undefined -> false=IsSet0
+ end
+ end
catch
error:badarg ->
true = get(prepare_for_close),
@@ -2387,7 +2393,7 @@ port_setget_data_hammer(Port, N) ->
after 0 ->
ok
end,
- port_setget_data_hammer(Port, N+1).
+ port_setget_data_hammer(Port, HeapData, IsSet1, N+1).
wait_until(Fun) ->
diff --git a/erts/emulator/test/trace_bif_SUITE.erl b/erts/emulator/test/trace_bif_SUITE.erl
index 2c78aa394f..063e348836 100644
--- a/erts/emulator/test/trace_bif_SUITE.erl
+++ b/erts/emulator/test/trace_bif_SUITE.erl
@@ -260,7 +260,9 @@ bif_process() ->
apply(erlang, Name, Args),
bif_process();
{do_time_bif} ->
- _ = time(), %Assignment tells compiler to keep call.
+ %% Match the return value to ensure that the time() call
+ %% is not optimized away.
+ {_,_,_} = time(),
bif_process();
{do_statistics_bif} ->
statistics(runtime),
diff --git a/erts/emulator/utils/make_compiler_flags b/erts/emulator/utils/make_compiler_flags
index cebe8cd0c5..ca1bc47113 100755
--- a/erts/emulator/utils/make_compiler_flags
+++ b/erts/emulator/utils/make_compiler_flags
@@ -70,7 +70,7 @@ my($prog) = $prog[$#prog];
print "/* Warning: Do not edit this file.\n";
print " Auto-generated by '$prog'.*/\n";
-foreach(keys %constants) {
+foreach (sort(keys %constants)) {
print "const char* erts_build_flags_$_ = \"$constants{$_}\";\n"
}