Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 25
-rw-r--r--  erts/emulator/beam/atom.names | 2
-rw-r--r--  erts/emulator/beam/beam_bp.c | 5
-rw-r--r--  erts/emulator/beam/beam_emu.c | 10
-rw-r--r--  erts/emulator/beam/beam_load.c | 6
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 2
-rw-r--r--  erts/emulator/beam/bif.c | 32
-rw-r--r--  erts/emulator/beam/bif.h | 118
-rw-r--r--  erts/emulator/beam/bif.tab | 13
-rw-r--r--  erts/emulator/beam/big.c | 36
-rw-r--r--  erts/emulator/beam/big.h | 2
-rw-r--r--  erts/emulator/beam/binary.c | 1157
-rw-r--r--  erts/emulator/beam/break.c | 1
-rw-r--r--  erts/emulator/beam/copy.c | 3
-rw-r--r--  erts/emulator/beam/dist.c | 64
-rw-r--r--  erts/emulator/beam/dist.h | 6
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 6
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 332
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 13
-rw-r--r--  erts/emulator/beam/erl_async.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 27
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 6
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/erl_bif_info.c | 118
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 23
-rw-r--r--  erts/emulator/beam/erl_binary.h | 33
-rw-r--r--  erts/emulator/beam/erl_db.c | 62
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 4
-rw-r--r--  erts/emulator/beam/erl_driver.h | 23
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 1
-rw-r--r--  erts/emulator/beam/erl_gc.c | 14
-rw-r--r--  erts/emulator/beam/erl_init.c | 26
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 23
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 161
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 20
-rw-r--r--  erts/emulator/beam/erl_map.c | 32
-rw-r--r--  erts/emulator/beam/erl_message.c | 59
-rw-r--r--  erts/emulator/beam/erl_nif.c | 548
-rw-r--r--  erts/emulator/beam/erl_nif.h | 31
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 45
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 102
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 4
-rw-r--r--  erts/emulator/beam/erl_process.c | 293
-rw-r--r--  erts/emulator/beam/erl_process.h | 42
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 100
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 6
-rw-r--r--  erts/emulator/beam/erl_smp.h | 222
-rw-r--r--  erts/emulator/beam/erl_term.h | 12
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 52
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 60
-rw-r--r--  erts/emulator/beam/erl_threads.h | 679
-rw-r--r--  erts/emulator/beam/erl_trace.c | 23
-rw-r--r--  erts/emulator/beam/erl_trace.h | 1
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 2
-rw-r--r--  erts/emulator/beam/erl_utils.h | 43
-rw-r--r--  erts/emulator/beam/external.c | 201
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/global.h | 55
-rw-r--r--  erts/emulator/beam/io.c | 139
-rw-r--r--  erts/emulator/beam/ops.tab | 3
-rw-r--r--  erts/emulator/beam/sys.h | 22
-rw-r--r--  erts/emulator/beam/utils.c | 618
-rw-r--r--  erts/emulator/drivers/common/gzio.c | 3
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 746
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 3
-rw-r--r--  erts/emulator/drivers/unix/multi_drv.c | 2
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 7
-rw-r--r--  erts/emulator/drivers/win32/win_efile.c | 769
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 5
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 73
-rw-r--r--  erts/emulator/hipe/hipe_arm_glue.S | 41
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 7
-rw-r--r--  erts/emulator/hipe/hipe_bif2.tab | 1
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 13
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 20
-rw-r--r--  erts/emulator/hipe/hipe_risc_glue.h | 8
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.h | 8
-rw-r--r--  erts/emulator/hipe/hipe_x86_signal.c | 6
-rw-r--r--  erts/emulator/internal_doc/CarrierMigration.md | 134
-rw-r--r--  erts/emulator/internal_doc/SuperCarrier.md | 191
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 701
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 45
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 2
-rw-r--r--  erts/emulator/sys/common/erl_sys_common_misc.c | 8
-rw-r--r--  erts/emulator/sys/ose/beam.lmconf (renamed from erts/emulator/sys/ose/default.lmconf) | 3
-rw-r--r--  erts/emulator/sys/ose/erl_main.c | 2
-rw-r--r--  erts/emulator/sys/ose/erl_poll.c | 8
-rw-r--r--  erts/emulator/sys/ose/sys.c | 353
-rw-r--r--  erts/emulator/sys/unix/erl_child_setup.c | 39
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 27
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys_ddll.c | 4
-rw-r--r--  erts/emulator/sys/unix/sys.c | 77
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 2
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 24
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/sys/win32/sys.c | 0
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/a_SUITE.erl | 14
-rw-r--r--  erts/emulator/test/async_ports_SUITE.erl | 118
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/Makefile.src | 15
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/cport.c | 81
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 276
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 103
-rw-r--r--  erts/emulator/test/driver_SUITE_data/smaller_major_vsn_drv.c | 4
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c | 9
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c | 11
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c | 9
-rw-r--r--  erts/emulator/test/erts_debug_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/float_SUITE_data/fp_drv.c | 17
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 23
-rw-r--r--  erts/emulator/test/map_SUITE.erl | 19
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 14
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 79
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 123
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_mod.c | 3
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl | 32
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 66
-rw-r--r--  erts/emulator/test/system_info_SUITE.erl | 1
-rw-r--r--  erts/emulator/test/trace_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/trace_call_time_SUITE.erl | 96
-rw-r--r--  erts/emulator/test/tuple_SUITE.erl | 95
-rw-r--r--  erts/emulator/test/z_SUITE.erl | 28
-rwxr-xr-x  erts/emulator/utils/gen_git_version | 6
-rwxr-xr-x  erts/emulator/utils/make_compiler_flags | 2
125 files changed, 8135 insertions, 2258 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 523130d01a..7145824f91 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -378,7 +378,9 @@ LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
endif # erts_internal_r
+ifneq ($(TARGET),arm-unknown-linux-androideabi)
LIBS += @LIBRT@
+endif
LIBS += @LIBCARBON@
@@ -414,13 +416,6 @@ else
UNIX_ONLY_BUILDS =
endif
-ifeq ($(TARGET), win32)
-TMPVAR := $(shell LANG=C $(PERL) utils/make_compiler_flags -o $(TTF_DIR)/erl_compile_flags.h -v CONFIG_H "N/A" -v CFLAGS "$(CFLAGS)" -v LDFLAGS "$(LDFLAGS)")
-else
-# We force this to be run every time this makefile is executed
-TMPVAR := $(shell LANG=C $(PERL) utils/make_compiler_flags -o $(TTF_DIR)/erl_compile_flags.h -f CONFIG_H "$(ERL_TOP)/erts/$(TARGET)/config.h" -v CFLAGS "$(CFLAGS)" -v LDFLAGS "$(LDFLAGS)")
-endif
-
.PHONY: all
ifdef VOID_EMULATOR
all:
@@ -499,6 +494,15 @@ release_docs_spec:
_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
+
+# has to be run after _create_dirs
+ifeq ($(TARGET), win32)
+TMPVAR := $(shell LANG=C $(PERL) utils/make_compiler_flags -o $(TTF_DIR)/erl_compile_flags.h -v CONFIG_H "N/A" -v CFLAGS "$(CFLAGS)" -v LDFLAGS "$(LDFLAGS)")
+else
+# We force this to be run every time this makefile is executed
+TMPVAR := $(shell LANG=C $(PERL) utils/make_compiler_flags -o $(TTF_DIR)/erl_compile_flags.h -f CONFIG_H "$(ERL_TOP)/erts/$(TARGET)/config.h" -v CFLAGS "$(CFLAGS)" -v LDFLAGS "$(LDFLAGS)")
+endif
+
GENERATE =
HIPE_ASM =
@@ -1031,11 +1035,10 @@ else
ifeq ($(findstring ose,$(TARGET)),ose)
$(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) $(LCF)
$(call build-ose-load-module, $@, $(INIT_OBJS) $(OBJS), $(STATIC_NIF_LIBS) \
- $(STATIC_DRIVER_LIBS) $(LIBS), $(LMCONF))
+ $(STATIC_DRIVER_LIBS) $(LIBS), $(BEAM_LMCONF))
else
$(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
- echo $(DEPLIBS)
$(ld_verbose)$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \
$(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) \
$(STATIC_NIF_LIBS) $(STATIC_DRIVER_LIBS) $(LIBS)
@@ -1087,7 +1090,9 @@ BEAM_SRC=$(wildcard beam/*.c)
DRV_COMMON_SRC=$(wildcard drivers/common/*.c)
DRV_OSTYPE_SRC=$(wildcard drivers/$(ERLANG_OSTYPE)/*.c)
ALL_SYS_SRC=$(wildcard sys/$(ERLANG_OSTYPE)/*.c) $(wildcard sys/common/*.c)
-TARGET_SRC=$(wildcard $(TARGET)/*.c) $(wildcard $(TTF_DIR)/*.c)
+# We use $(shell ls) here instead of wildcard as $(wildcard ) resolved at
+# loadtime of the makefile and at that time these files are not generated yet.
+TARGET_SRC=$(shell ls $(TARGET)/*.c) $(shell ls $(TTF_DIR)/*.c)
# I do not want the -MG flag on windows, it does not work properly for a
# windows build.
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index d28e519ae1..5d06a32941 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -116,6 +116,7 @@ atom binary_longest_prefix_trap
atom binary_longest_suffix_trap
atom binary_match_trap
atom binary_matches_trap
+atom binary_to_list_continue
atom binary_to_term_trap
atom block
atom blocked
@@ -315,6 +316,7 @@ atom line_length
atom linked_in_driver
atom links
atom list
+atom list_to_binary_continue
atom little
atom loaded
atom load_cancelled
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 49a34ab4ad..4e711c89e0 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -642,7 +642,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -730,7 +730,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ IS_TRACED_FL(p, F_TRACE_CALLS) &&
+ erts_is_tracer_proc_valid(p)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 0cec9ea3ec..8bfb7d2ad2 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1301,7 +1301,7 @@ void process_main(void)
(Eterm)fptr[1], (Uint)fptr[2],
NULL, fun_buf);
} else {
- erts_snprintf(fun_buf, sizeof(fun_buf),
+ erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
"<unknown/%p>", next);
}
}
@@ -3503,6 +3503,7 @@ get_map_elements_fail:
* I[0]: &&call_nif
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
*/
BifFunction vbf;
@@ -3523,13 +3524,6 @@ get_map_elements_fail:
reg[0] = r(0);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
erts_post_nif(&env);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (is_non_value(nif_bif_result) && c_p->freason == TRAP) {
- Export* ep = (Export*) c_p->psd->data[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT];
- ep->code[0] = I[-3];
- ep->code[1] = I[-2];
- }
-#endif
}
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index e96177cfd9..cfc6146b0a 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -2363,7 +2363,11 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
+#ifdef ERTS_DIRTY_SCHEDULERS
+ enum { MIN_FUNC_SZ = 4 };
+#else
+ enum { MIN_FUNC_SZ = 3 };
+#endif
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 0f2d5d0c2a..cb6470638f 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -282,7 +282,7 @@ find_range(BeamInstr* pc)
while (low < high) {
if (pc < mid->start) {
high = mid;
- } else if (pc > RANGE_END(mid)) {
+ } else if (pc >= RANGE_END(mid)) {
low = mid + 1;
} else {
erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 06a1230ca0..42dd160e38 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_pid_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
}
return remote_send(p, dep, to, to, msg, suspend);
@@ -1886,8 +1888,13 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm id = erts_whereis_name_to_id(p, to);
rp = erts_proc_lookup(id);
- if (rp)
+ if (rp) {
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
goto send_message;
+ }
pt = erts_port_lookup(id,
(erts_port_synchronous_ops
@@ -1907,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1917,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
} else if (is_internal_port(to)) {
int ret_val;
@@ -2763,6 +2772,7 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
Eterm *integer, Eterm *rest)
{
Sint i = 0;
+ Uint ui = 0;
int skip = 0;
int neg = 0;
int n = 0;
@@ -2816,8 +2826,8 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
unsigned_val(CAR(list_val(lst))) > '9') {
break;
}
- i = i * 10;
- i = i + unsigned_val(CAR(list_val(lst))) - '0';
+ ui = ui * 10;
+ ui = ui + unsigned_val(CAR(list_val(lst))) - '0';
n++;
lst = CDR(list_val(lst));
if (is_nil(lst)) {
@@ -2841,7 +2851,8 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
*/
if (n <= SMALL_DIGITS) { /* It must be small */
- if (neg) i = -i;
+ if (neg) i = -(Sint)ui;
+ else i = (Sint)ui;
res = make_small(i);
} else {
lg2 = (n+1)*230/69+1;
@@ -2882,9 +2893,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2894,8 +2902,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
}
}
- if (is_big(res)) {
- hp += (big_arity(res)+1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
}
HRelease(p,hp_end,hp);
}
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 51b77a95ed..72c55ccb55 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -124,12 +124,85 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ return THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \
do { \
(Proc)->freason = (Reason); \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
@@ -392,6 +465,51 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifndef HIPE
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY)
+
+#else
+
+#include "erl_fun.h"
+#include "hipe_mode_switch.h"
+
+/*
+ * Hipe wrappers used by native code for BIFs that disable GC while trapping.
+ * Also add usage of the wrapper in ../hipe/hipe_bif_list.m4
+ *
+ * Problem:
+ * When native code calls a BIF that traps, hipe_mode_switch will push a
+ * "trap frame" on the Erlang stack in order to find its way back from beam_emu
+ * back to native caller when finally done. If GC is disabled and stack/heap
+ * is full there is no place to push the "trap frame".
+ *
+ * Solution:
+ * We reserve space on stack for the "trap frame" here before the BIF is called.
+ * If the BIF does not trap, the space is reclaimed here before returning.
+ * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame"
+ * already is reserved and use it.
+ */
+
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args); \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args) \
+{ \
+ BIF_RETTYPE res; \
+ hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
+ res = BIF_NAME ## _ ## ARITY (c_p, args); \
+ if (is_value(res) || c_p->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(c_p); \
+ } \
+ return res; \
+}
+
+#endif
+
+
#include "erl_bif_table.h"
#endif
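[Editorial note: for readability, here is roughly what the wrapper macro added above expands to for a unary BIF, hand-expanding HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1) as defined in this hunk; the real expansion is produced by the preprocessor.]

BIF_RETTYPE hipe_wrapper_binary_to_list_1(Process* c_p, Eterm* args);
BIF_RETTYPE hipe_wrapper_binary_to_list_1(Process* c_p, Eterm* args)
{
    BIF_RETTYPE res;
    /* Reserve stack room for a possible trap frame before calling the BIF. */
    hipe_reserve_beam_trap_frame(c_p, args, 1);
    res = binary_to_list_1(c_p, args);
    if (is_value(res) || c_p->freason != TRAP) {
        /* The BIF did not trap: hand the reserved slots back. */
        hipe_unreserve_beam_trap_frame(c_p);
    }
    return res;
}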
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 2d888862bf..e68b8e6274 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -45,6 +45,7 @@ bif erlang:apply/3
bif erlang:atom_to_list/1
bif erlang:binary_to_list/1
bif erlang:binary_to_list/3
+bif erlang:binary_to_term/1
bif erlang:crc32/1
bif erlang:crc32/2
bif erlang:crc32_combine/3
@@ -151,12 +152,11 @@ bif erts_internal:port_command/3
bif erts_internal:port_control/3
bif erts_internal:port_close/1
bif erts_internal:port_connect/2
-bif erts_internal:binary_to_term/1
-bif erts_internal:binary_to_term/2
bif erts_internal:request_system_task/3
bif erts_internal:check_process_code/2
+bif erts_internal:map_to_tuple_keys/1
# inet_db support
bif erlang:port_set_data/2
@@ -480,6 +480,11 @@ bif erlang:call_on_load_function/1
bif erlang:finish_after_on_load/2
#
+# New Bifs in R13B04
+#
+bif erlang:binary_to_term/2
+
+#
# The binary match bifs (New in R14A - EEP9)
#
@@ -596,6 +601,10 @@ bif maps:values/1
bif erts_internal:cmp_term/2
#
+# New in 17.1.
+#
+bif erlang:fun_info_mfa/1
+#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 41a041eba6..de7d370938 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -274,7 +274,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
- _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ /* If needed to avoid undefined behaviour */ \
+ if (_s) _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ else _un32 = _a1; \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
_un0 = _un10 & LO_MASK; \
@@ -1506,13 +1508,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
@@ -1540,21 +1544,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,7 +1571,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
@@ -2667,9 +2674,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2679,8 +2683,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
}
}
- if (is_big(res)) {
- hp += (big_arity(res) + 1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res) + 1);
+ }
}
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_done;
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index d80111822e..da31876d75 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
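[Editorial note: the -(Uint)x and -(Uint64)(X) casts introduced in big.c and big.h above all follow the same C idiom: negating the most negative signed value is undefined behaviour, whereas converting to the unsigned type first and then negating yields the intended magnitude modulo 2^64. A standalone sketch of the idiom; the helper name magnitude64 is illustrative and not part of the patch.]

#include <stdint.h>

/* Safe magnitude of a signed 64-bit value: -x is undefined when
 * x == INT64_MIN, but -(uint64_t)x wraps to the correct magnitude. */
static uint64_t magnitude64(int64_t x)
{
    return x >= 0 ? (uint64_t)x : -(uint64_t)x;
}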
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index c7926f18af..f50d484576 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -31,12 +31,11 @@
#include "erl_binary.h"
#include "erl_bits.h"
-#ifdef DEBUG
-static int list_to_bitstr_buf(Eterm obj, char* buf, Uint len);
-#else
-static int list_to_bitstr_buf(Eterm obj, char* buf);
-#endif
-static int bitstr_list_len(Eterm obj, Uint* num_bytes);
+static Export binary_to_list_continue_export;
+static Export list_to_binary_continue_export;
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1);
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1);
void
erts_init_binary(void)
@@ -49,6 +48,15 @@ erts_init_binary(void)
"Internal error: Address of orig_bytes[0] of a Binary"
" is *not* 8-byte aligned\n");
}
+
+ erts_init_trap_export(&binary_to_list_continue_export,
+ am_erts_internal, am_binary_to_list_continue, 1,
+ &binary_to_list_continue);
+
+ erts_init_trap_export(&list_to_binary_continue_export,
+ am_erts_internal, am_list_to_binary_continue, 1,
+ &list_to_binary_continue);
+
}
/*
@@ -333,6 +341,132 @@ BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1)
BIF_RET(res);
}
+#define ERTS_B2L_BYTES_PER_REDUCTION 256
+
+typedef struct {
+ Eterm res;
+ Eterm *hp;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+ byte *bytes;
+ Uint size;
+ Uint bitoffs;
+} ErtsB2LState;
+
+static void b2l_state_destructor(Binary *mbp)
+{
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+}
+
+static BIF_RETTYPE
+binary_to_list_chunk(Process *c_p,
+ Eterm mb_eterm,
+ ErtsB2LState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ BIF_RETTYPE ret;
+ int bump_reds;
+ Uint size;
+ byte *bytes;
+
+ size = (reds_left + 1)*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (size > sp->size)
+ size = sp->size;
+ bytes = sp->bytes + (sp->size - size);
+
+ bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+
+ ASSERT(is_list(sp->res) || is_nil(sp->res));
+
+ sp->res = erts_bin_bytes_to_list(sp->res,
+ sp->hp,
+ bytes,
+ size,
+ sp->bitoffs);
+ sp->size -= size;
+ sp->hp += 2*size;
+
+ if (sp->size > 0) {
+
+ if (!gc_disabled)
+ erts_set_gc_state(c_p, 0);
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+ ASSERT(is_value(mb_eterm));
+ ERTS_BIF_PREP_TRAP1(ret,
+ &binary_to_list_continue_export,
+ c_p,
+ mb_eterm);
+ }
+ else {
+
+ ASSERT(sp->hp == sp->hp_end);
+ ASSERT(sp->size == 0);
+
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, sp->res);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, sp->res);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ }
+
+ return ret;
+}
+
+static ERTS_INLINE BIF_RETTYPE
+binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes, Uint size, Uint bitoffs)
+{
+ int reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ if (size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION) {
+ Eterm res;
+ BIF_RETTYPE ret;
+ int bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+ res = erts_bin_bytes_to_list(tail, hp, bytes, size, bitoffs);
+ ERTS_BIF_PREP_RET(ret, res);
+ return ret;
+ }
+ else {
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsB2LState),
+ b2l_state_destructor);
+ ErtsB2LState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm mb;
+
+ sp->res = tail;
+ sp->hp = hp;
+#ifdef DEBUG
+ sp->hp_end = sp->hp + 2*size;
+#endif
+ sp->bytes = bytes;
+ sp->size = size;
+ sp->bitoffs = bitoffs;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
+ }
+}
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return binary_to_list_chunk(BIF_P,
+ BIF_ARG_1,
+ (ErtsB2LState*) ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1)
+
BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -354,14 +488,15 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
} else {
Eterm* hp = HAlloc(BIF_P, 2 * size);
byte* bytes = binary_bytes(real_bin)+offset;
-
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes, size, bitoffs));
+ return binary_to_list(BIF_P, hp, NIL, bytes, size, bitoffs);
}
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 3)
+
BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
{
byte* bytes;
@@ -387,12 +522,13 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
}
i = stop-start+1;
hp = HAlloc(BIF_P, 2*i);
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs));
-
+ return binary_to_list(BIF_P, hp, NIL, bytes+start-1, i, bitoffs);
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(bitstring_to_list, 1)
+
BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -431,124 +567,441 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
previous = CONS(hp, make_binary(last), previous);
hp += 2;
}
- BIF_RET(erts_bin_bytes_to_list(previous, hp, bytes, size, bitoffs));
+
+ return binary_to_list(BIF_P, hp, previous, bytes, size, bitoffs);
}
/* Turn a possibly deep list of ints (and binaries) into */
/* One large binary object */
-/*
- * This bif also exists in the binary module, under the name
- * binary:list_to_bin/1, why it's divided into interface and
- * implementation. Also the backend for iolist_to_binary_1.
- */
+typedef enum {
+ ERTS_L2B_OK,
+ ERTS_L2B_YIELD,
+ ERTS_L2B_TYPE_ERROR,
+ ERTS_L2B_OVERFLOW_ERROR
+} ErtsL2BResult;
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
-{
+#define ERTS_L2B_STATE_INITER(C_P, ARG, BIF, SZFunc, TBufFunc) \
+ {ERTS_IOLIST2BUF_STATE_INITER((C_P), (ARG)), \
+ (ARG), THE_NON_VALUE, (BIF), (SZFunc), (TBufFunc)}
+
+#define ERTS_L2B_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsL2BState))
+
+typedef struct ErtsL2BState_ ErtsL2BState;
+
+struct ErtsL2BState_ {
+ ErtsIOList2BufState buf;
+ Eterm arg;
Eterm bin;
- Eterm h,t;
- ErlDrvSizeT size;
- byte* bytes;
-#ifdef DEBUG
- ErlDrvSizeT offset;
-#endif
+ Export *bif;
+ int (*iolist_to_buf_size)(ErtsIOListState *);
+ ErlDrvSizeT (*iolist_to_buf)(ErtsIOList2BufState *);
+};
+
+static ERTS_INLINE ErtsL2BResult
+list_to_binary_engine(ErtsL2BState *sp)
+{
+ ErlDrvSizeT res;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ /*
+ * have_size == 0 while sp->iolist_to_buf_size()
+ * has not finished the calculation.
+ */
+
+ if (!sp->buf.iolist.have_size) {
+ switch (sp->iolist_to_buf_size(&sp->buf.iolist)) {
+ case ERTS_IOLIST_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TYPE:
+ return ERTS_L2B_TYPE_ERROR;
+ case ERTS_IOLIST_OK:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(sp->buf.iolist.have_size);
+
+ /*
+ * Size calculated... Setup state for
+ * sp->iolist_to_buf_*()
+ */
+
+ sp->bin = new_binary(c_p,
+ (byte *) NULL,
+ sp->buf.iolist.size);
+
+ if (sp->buf.iolist.size == 0)
+ return ERTS_L2B_OK;
+
+ sp->buf.buf = (char *) binary_bytes(sp->bin);
+ sp->buf.len = sp->buf.iolist.size;
+ sp->buf.iolist.obj = sp->arg;
- if (is_nil(arg)) {
- BIF_RET(new_binary(p,(byte*)"",0));
+ if (sp->buf.iolist.reds_left <= 0) {
+ BUMP_ALL_REDS(c_p);
+ return ERTS_L2B_YIELD;
+ }
}
- if (is_not_list(arg)) {
- goto error;
+
+ ASSERT(sp->buf.iolist.size != 0);
+ ASSERT(is_value(sp->bin));
+ ASSERT(sp->buf.buf);
+
+ res = sp->iolist_to_buf(&sp->buf);
+
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res)) {
+ ASSERT(res == 0);
+ return ERTS_L2B_OK;
}
- /* check for [binary()] case */
- h = CAR(list_val(arg));
- t = CDR(list_val(arg));
- if (is_binary(h) && is_nil(t) && !(
- HEADER_SUB_BIN == *(binary_val(h)) && (
- ((ErlSubBin *)binary_val(h))->bitoffs != 0 ||
- ((ErlSubBin *)binary_val(h))->bitsize != 0
- ))) {
- return h;
- }
- switch (erts_iolist_size(arg, &size)) {
- case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT);
- case ERTS_IOLIST_TYPE: goto error;
- default: ;
- }
- bin = new_binary(p, (byte *)NULL, size);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset =
-#endif
- erts_iolist_to_buf(arg, (char*) bytes, size);
- ASSERT(offset == 0);
- BIF_RET(bin);
+ switch (res) {
+ case ERTS_IOLIST_TO_BUF_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_TO_BUF_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TO_BUF_TYPE_ERROR:
+ return ERTS_L2B_TYPE_ERROR;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from iolist_to_buf_yielding()");
+ return ERTS_L2B_TYPE_ERROR;
+ }
+}
+
+static void
+l2b_state_destructor(Binary *mbp)
+{
+ ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+}
+
+static ERTS_INLINE Eterm
+l2b_final_touch(Process *c_p, ErtsL2BState *sp)
+{
+ Eterm *hp;
+ ErlSubBin* sbin;
+ if (sp->buf.offset == 0)
+ return sp->bin;
+
+ hp = HAlloc(c_p, ERL_SUB_BIN_SIZE);
+ ASSERT(sp->buf.offset > 0);
+ sbin = (ErlSubBin *) hp;
+ sbin->thing_word = HEADER_SUB_BIN;
+ sbin->size = sp->buf.iolist.size-1;
+ sbin->offs = 0;
+ sbin->orig = sp->bin;
+ sbin->bitoffs = 0;
+ sbin->bitsize = sp->buf.offset;
+ sbin->is_writable = 0;
+ return make_binary(sbin);
+}
+
+static BIF_RETTYPE
+list_to_binary_chunk(Eterm mb_eterm,
+ ErtsL2BState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ Eterm err = BADARG;
+ BIF_RETTYPE ret;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ sp->buf.iolist.reds_left = reds_left;
- error:
- BIF_ERROR(p, BADARG);
+ switch (list_to_binary_engine(sp)) {
+
+ case ERTS_L2B_OK: {
+ Eterm result = l2b_final_touch(c_p, sp);
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, result);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, result);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+ }
+ case ERTS_L2B_YIELD:
+ if (!gc_disabled) {
+ /* first yield... */
+ Eterm *hp;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsL2BState),
+ l2b_state_destructor);
+ ErtsL2BState *new_sp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_L2B_STATE_MOVE(new_sp, sp);
+ sp = new_sp;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+
+ ASSERT(is_value(mb_eterm));
+
+ erts_set_gc_state(c_p, 0);
+ }
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+
+ ERTS_BIF_PREP_TRAP1(ret,
+ &list_to_binary_continue_export,
+ c_p,
+ mb_eterm);
+ break;
+
+ case ERTS_L2B_OVERFLOW_ERROR:
+ err = SYSTEM_LIMIT;
+ /* fall through */
+
+ case ERTS_L2B_TYPE_ERROR:
+ if (!gc_disabled)
+ ERTS_BIF_PREP_ERROR(ret, c_p, err);
+ else {
+ if (erts_set_gc_state(c_p, 1))
+ ERTS_VBUMP_ALL_REDS(c_p);
+
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret,
+ c_p,
+ err,
+ sp->bif,
+ sp->arg);
+ }
+
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from list_to_binary_engine()");
+ ERTS_BIF_PREP_ERROR(ret,c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
+ return ret;
}
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return list_to_binary_chunk(BIF_ARG_1,
+ ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+BIF_RETTYPE erts_list_to_binary_bif(Process *c_p, Eterm arg, Export *bif)
+{
+ BIF_RETTYPE ret;
+
+ if (is_nil(arg))
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) "", 0));
+ else if (is_not_list(arg))
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ else {
+ /* check for [binary()] case */
+ Eterm h = CAR(list_val(arg));
+ Eterm t = CDR(list_val(arg));
+ if (is_binary(h)
+ && is_nil(t)
+ && !(HEADER_SUB_BIN == *(binary_val(h))
+ && (((ErlSubBin *)binary_val(h))->bitoffs != 0
+ || ((ErlSubBin *)binary_val(h))->bitsize != 0))) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(c_p,
+ arg,
+ bif,
+ erts_iolist_size_yielding,
+ erts_iolist_to_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(c_p);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding iolist_to_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (erts_iolist_size_yielding(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+ Eterm bin;
+ char *buf;
+
+ if (size == 0) {
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) NULL, 0));
+ break; /* done */
+ }
+
+ bin = new_binary(c_p, (byte *) NULL, size);
+ buf = (char *) binary_bytes(bin);
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = erts_iolist_to_buf(arg, buf, size);
+ if (res == 0) {
+ BUMP_REDS(c_p, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since size has been computed list_to_binary_chunk() expects
+ * state prepared for iolist_to_buf.
+ */
+ state.bin = bin;
+ state.buf.buf = buf;
+ state.buf.len = size;
+ state.buf.iolist.obj = arg;
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_binary, 1)
+
BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_list_to_binary_1]);
}
-/* Turn a possibly deep list of ints (and binaries) into */
-/* One large binary object */
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
BIF_RET(BIF_ARG_1);
}
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
+static int bitstr_list_len(ErtsIOListState *);
+static ErlDrvSizeT list_to_bitstr_buf_yielding(ErtsIOList2BufState *);
+static ErlDrvSizeT list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *);
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_bitstring, 1)
+
BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
{
- Eterm bin;
- Uint sz;
- int offset;
- byte* bytes;
- ErlSubBin* sb1;
- Eterm* hp;
-
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
- }
- if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
- }
- switch (bitstr_list_len(BIF_ARG_1, &sz)) {
- case ERTS_IOLIST_TYPE:
- goto error;
- case ERTS_IOLIST_OVERFLOW:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- bin = new_binary(BIF_P, (byte *)NULL, sz);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, sz);
-#else
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes);
-#endif
- ASSERT(offset >= 0);
- if (offset > 0) {
- hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = sz-1;
- sb1->offs = 0;
- sb1->orig = bin;
- sb1->bitoffs = 0;
- sb1->bitsize = offset;
- sb1->is_writable = 0;
- bin = make_binary(sb1);
+ BIF_RETTYPE ret;
+
+ if (is_nil(BIF_ARG_1))
+ ERTS_BIF_PREP_RET(ret, new_binary(BIF_P, (byte *) "", 0));
+ else if (is_not_list(BIF_ARG_1))
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+ /* check for [bitstring()] case */
+ Eterm h = CAR(list_val(BIF_ARG_1));
+ Eterm t = CDR(list_val(BIF_ARG_1));
+ if (is_binary(h) && is_nil(t)) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(BIF_P,
+ BIF_ARG_1,
+ bif_export[BIF_list_to_bitstring_1],
+ bitstr_list_len,
+ list_to_bitstr_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding list_to_bitstr_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (bitstr_list_len(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+
+ state.bin = new_binary(BIF_P, (byte *) NULL, size);
+ state.buf.buf = (char *) binary_bytes(state.bin);
+ state.buf.len = size;
+ state.buf.iolist.obj = BIF_ARG_1;
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = list_to_bitstr_buf_not_yielding(&state.buf);
+ if (res == 0) {
+ Eterm res_bin = l2b_final_touch(BIF_P, &state);
+ BUMP_REDS(BIF_P, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, res_bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since size has been computed list_to_binary_chunk() expects
+ * the state prepared for list_to_bitstr_buf.
+ */
+
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+ }
}
-
- BIF_RET(bin);
+
+ return ret;
}
BIF_RETTYPE split_binary_2(BIF_ALIST_2)
@@ -605,123 +1058,353 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
* Local functions.
*/
+static int
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
/*
* The input list is assumed to be type-correct and the buffer is
* assumed to be of sufficient size. Those assumptions are verified in
* the DEBUG-built emulator.
*/
-static int
+static ErlDrvSizeT
+list_to_bitstr_buf(int yield_support, ErtsIOList2BufState *state)
+{
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
#ifdef DEBUG
-list_to_bitstr_buf(Eterm obj, char* buf, Uint len)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG \
+ len -= size + (offset>7);
#else
-list_to_bitstr_buf(Eterm obj, char* buf)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG
#endif
-{
- Eterm* objp;
- int offset = 0;
+#define LIST_TO_BITSTR_BUF_BCOPY(CONSP) \
+ do { \
+ byte* bptr; \
+ Uint bitsize; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ size_t size = binary_size(obj); \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ ASSERT(size <= len); \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ num_bits = 8*size+bitsize; \
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); \
+ offset += bitsize; \
+ buf += size + (offset>7); \
+ LIST_TO_BITSTR_BUF_BCOPY_DBG; \
+ offset = offset & 7; \
+ } while(0)
+
+#ifdef DEBUG
+ ErlDrvSizeT len;
+#endif
+ Eterm obj;
+ char *buf;
+ Eterm *objp = NULL;
+ int offset;
+ int init_yield_count = 0, yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- ASSERT(len > 0);
- if (offset == 0) {
- *buf++ = unsigned_val(obj);
- } else {
- *buf = (char)((unsigned_val(obj) >> offset) |
- ((*buf >> (8-offset)) << (8-offset)));
- buf++;
- *buf = (unsigned_val(obj) << (8-offset));
- }
+
+ obj = state->iolist.obj;
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len--;
+ len = state->len;
#endif
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ if (list_to_bitstr_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ /* Yield again... */
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ }
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len -= size + (offset>7);
+ len = state->len;
#endif
- offset = offset & 7;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else {
- ASSERT(is_nil(obj));
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size+(offset>7);
+ objp = state->objp;
+ state->objp = NULL;
+
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ ASSERT(len > 0);
+ if (offset == 0) {
+ *buf++ = unsigned_val(obj);
+ } else {
+ *buf = (char)((unsigned_val(obj) >> offset) |
+ ((*buf >> (8-offset)) << (8-offset)));
+ buf++;
+ *buf = (unsigned_val(obj) << (8-offset));
+ }
#ifdef DEBUG
- len -= size+(offset>7);
+ len--;
#endif
- offset = offset & 7;
- } else {
- ASSERT(is_nil(obj));
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
+ }
+
+ L_tail:
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
-#ifdef DEBUG
- len -= size + (offset>7);
-#endif
- offset = offset & 7;
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
} else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
ASSERT(is_nil(obj));
}
}
DESTROY_ESTACK(s);
- return offset;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+ state->buf = buf;
+ state->offset = offset;
+ return 0;
+
+L_bcopy_yield:
+
+ state->buf = buf;
+ state->offset = offset;
+#ifdef DEBUG
+ state->len = len;
+#endif
+
+ if (list_to_bitstr_buf_bcopy(state, obj, &yield_count) == 0)
+ ERTS_INTERNAL_ERROR("Missing yield");
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->offset = offset;
+ ESTACK_SAVE(s, &state->iolist.estack);
+#ifdef DEBUG
+ state->len = len;
+#endif
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
+
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(1, state);
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(0, state);
}
static int
-bitstr_list_len(Eterm obj, Uint* num_bytes)
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ int res;
+ char *buf = state->buf;
+ char *next_buf;
+ int offset = state->offset;
+ int next_offset;
+#ifdef DEBUG
+ ErlDrvSizeT len = state->len;
+ ErlDrvSizeT next_len;
+#endif
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ Uint bitsize;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ bitsize = state->bcopy.bitsize;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+
+ ASSERT(size <= len);
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ }
+
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ next_offset = offset + bitsize;
+ next_buf = buf + size+(next_offset>7);
+#ifdef DEBUG
+ next_len = len - size+(next_offset>7);
+#endif
+ next_offset &= 7;
+ num_bits = 8*size+bitsize;
+ res = 0;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.bitsize = bitsize;
+ state->bcopy.size = size - max_size;
+ next_buf = buf + max_size;
+#ifdef DEBUG
+ next_len = len - max_size;
+#endif
+ next_offset = offset;
+ num_bits = 8*max_size;
+ size = max_size;
+ res = 1;
+ }
+
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+
+ state->offset = next_offset;
+ state->buf = next_buf;
+#ifdef DEBUG
+ state->len = next_len;
+#endif
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+static int
+bitstr_list_len(ErtsIOListState *state)
{
Eterm* objp;
- Uint len = 0;
- Uint offs = 0;
+ Eterm obj;
+ Uint len, offs;
+ int res, init_yield_count, yield_count;
DECLARE_ESTACK(s);
+
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+
+ len = (Uint) state->size;
+ offs = state->offs;
+ obj = state->obj;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore estack... */
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -748,46 +1431,55 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- len++;
- if (len == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (--yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ if (len == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
+ } else {
+ if (--yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj)) {
SAFE_ADD(len, binary_size(obj));
SAFE_ADD_BITSIZE(offs, obj);
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
#undef SAFE_ADD_BITSIZE
- DESTROY_ESTACK(s);
-
/*
* Make sure that the number of bits in the bitstring will fit
* in an Uint to ensure that the binary can be matched using
@@ -800,15 +1492,42 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
if (len << 3 < len) {
goto L_overflow_error;
}
- *num_bytes = len;
- return ERTS_IOLIST_OK;
+ state->size = len;
- L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_OK;
+
+ L_return: {
+ int yc = init_yield_count - yield_count;
+ int reds;
+
+ DESTROY_ESTACK(s);
+ CLEAR_SAVED_ESTACK(&state->estack);
+
+ reds = (yc - 1)/ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) len;
+ state->have_size = 1;
+ return res;
+ }
L_overflow_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+ res = ERTS_IOLIST_OVERFLOW;
+ len = 0;
+ goto L_return;
+
+ L_type_error:
+ res = ERTS_IOLIST_TYPE;
+ len = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = len;
+ state->offs = offs;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
}
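[Editorial note on the chunking constants above: with ERTS_B2L_BYTES_PER_REDUCTION at 256 bytes and a full time slice of CONTEXT_REDS reductions (2000 in this release, an assumption worth checking against erl_vm.h), binary_to_list/1 converts at most roughly 2000 * 256 bytes, about 500 KB, per scheduling slot. A 5 MB binary therefore traps and resumes on the order of ten times before the list is complete, with the intermediate state kept in the magic binary while garbage collection stays disabled.]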
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 7d4f52ee23..08265b590d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p)
p->current[1],
p->current[2]);
}
+ erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix);
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 3a987e213b..50548850eb 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -48,7 +48,8 @@ copy_object(Eterm obj, Process* to)
if (DTRACE_ENABLED(copy_object)) {
DTRACE_CHARBUF(proc_name, 64);
- erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
+ erts_snprintf(proc_name, sizeof(DTRACE_CHARBUF_NAME(proc_name)),
+ "%T", to->common.id);
DTRACE2(copy_object, proc_name, size);
}
#endif
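[Editorial note: the sizeof(DTRACE_CHARBUF_NAME(...)) rewrites in copy.c above and in dist.c below all fix the same bug. Assuming DTRACE_CHARBUF declares both a fixed array and a pointer to it (treat this exact macro shape as an assumption about erlang_dtrace.h), sizeof on the pointer name yields 4 or 8 instead of the buffer size, silently truncating the formatted string. A compilable sketch of the difference:]

#include <stdio.h>

/* Assumed shape of the DTrace buffer helpers; illustrative only. */
#define DTRACE_CHARBUF(name, size) \
    char name##_BUFFER[size], *name = name##_BUFFER
#define DTRACE_CHARBUF_NAME(name) name##_BUFFER

int main(void)
{
    DTRACE_CHARBUF(proc_name, 64);
    (void)proc_name;
    printf("%zu\n", sizeof(proc_name));                       /* 4 or 8: pointer size */
    printf("%zu\n", sizeof(DTRACE_CHARBUF_NAME(proc_name)));  /* 64: the whole buffer */
    return 0;
}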
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 6ecf3f0722..ec07ddcd9c 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -851,9 +851,12 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
#ifdef USE_VM_PROBES
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote);
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", remote);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
@@ -908,9 +911,11 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
#ifdef USE_VM_PROBES
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(receiver_name),
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
@@ -971,11 +976,14 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
#ifdef USE_VM_PROBES
*node_name = *sender_name = *remote_name = '\0';
if (DTRACE_ENABLED(process_exit_signal_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(remote_name, sizeof(remote_name),
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(remote_name, sizeof(DTRACE_CHARBUF_NAME(remote_name)),
"{%T,%s}", remote, node_name);
- erts_snprintf(reason_str, sizeof(reason), "%T", reason);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)),
+ "%T", reason);
if (token != NIL && token != am_have_dt_utag) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
@@ -1797,8 +1805,9 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", cid);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", dep->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
@@ -1855,9 +1864,11 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
DTRACE_CHARBUF(remote_str, 64);
DTRACE_CHARBUF(pid_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", cid);
- erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
+ "%T", c_p->common.id);
DTRACE4(dist_port_busy, erts_this_node_sysname,
port_str, remote_str, pid_str);
}
@@ -1890,8 +1901,9 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
remote_str, size);
@@ -1944,8 +1956,9 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
remote_str, size);
@@ -2280,8 +2293,9 @@ erts_dist_port_not_busy(Port *prt)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
@@ -3246,10 +3260,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
DTRACE_CHARBUF(type_str, 12);
DTRACE_CHARBUF(reason_str, 64);
- erts_snprintf(what_str, sizeof(what_str), "%T", what);
- erts_snprintf(node_str, sizeof(node_str), "%T", node);
- erts_snprintf(type_str, sizeof(type_str), "%T", type);
- erts_snprintf(reason_str, sizeof(reason_str), "%T", reason);
+ erts_snprintf(what_str, sizeof(DTRACE_CHARBUF_NAME(what_str)), "%T", what);
+ erts_snprintf(node_str, sizeof(DTRACE_CHARBUF_NAME(node_str)), "%T", node);
+ erts_snprintf(type_str, sizeof(DTRACE_CHARBUF_NAME(type_str)), "%T", type);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)), "%T", reason);
DTRACE5(dist_monitor, erts_this_node_sysname,
what_str, node_str, type_str, reason_str);
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 0519a9225e..f32b999198 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,6 +40,7 @@
#define DFLAG_SMALL_ATOM_TAGS 0x4000
#define DFLAG_INTERNAL_TAGS 0x8000
#define DFLAG_UTF8_ATOMS 0x10000
+#define DFLAG_MAP_TAG 0x20000
/* All flags that should be enabled when term_to_binary/1 is used. */
#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
@@ -47,7 +48,8 @@
| DFLAG_NEW_FLOATS \
| DFLAG_EXTENDED_PIDS_PORTS \
| DFLAG_EXPORT_PTR_TAG \
- | DFLAG_BIT_BINARIES)
+ | DFLAG_BIT_BINARIES \
+ | DFLAG_MAP_TAG)
/* opcodes used in distribution messages */
#define DOP_LINK 1
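
DFLAG_MAP_TAG extends the distribution capability bitmask, and TERM_TO_BINARY_DFLAGS now includes it so term_to_binary/1 may emit the map tag. A small sketch of how such single-bit capability flags are combined and tested; the flag values are copied from the hunk above, while the negotiation helper itself is only an assumed illustration:

#include <stdio.h>

/* Flag values as defined in dist.h above. */
#define DFLAG_UTF8_ATOMS 0x10000
#define DFLAG_MAP_TAG    0x20000

typedef unsigned int DistFlags;

/* A node may use an encoding only if both ends advertised the bit. */
static int can_use(DistFlags ours, DistFlags theirs, DistFlags feature)
{
    return (ours & theirs & feature) == feature;
}

int main(void)
{
    DistFlags ours   = DFLAG_UTF8_ATOMS | DFLAG_MAP_TAG;
    DistFlags theirs = DFLAG_UTF8_ATOMS;               /* older peer: no maps */

    printf("maps: %d, utf8 atoms: %d\n",
           can_use(ours, theirs, DFLAG_MAP_TAG),
           can_use(ours, theirs, DFLAG_UTF8_ATOMS));   /* prints 0, 1 */
    return 0;
}
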
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 05ac24e04d..90cd227fae 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
size = va_arg(argp, Uint);
va_end(argp);
erl_exit(1,
- "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
- allctr_str, op, size, t_str);
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n",
+ allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX());
break;
}
case ERTS_ALC_E_NOALLCTR:
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 942eaa47d0..d3109b9432 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -492,7 +492,7 @@ static TYPE * \
NAME##_alloc(void) \
{ \
ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
- if (!esdp) \
+ if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp)) \
return NULL; \
return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
(int) esdp->no - 1); \
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 17ac6316b7..21434eb117 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -267,7 +267,6 @@ type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
-type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
@@ -357,12 +356,14 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD_LOW CODE nif_trap_export_entry
type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request
+type PORT_DATA_HEAP STANDARD_LOW SYSTEM port_data_heap
+else # "fullword"
@@ -375,12 +376,14 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
+type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+endif
@@ -395,6 +398,7 @@ type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
+type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 45f0cc4312..e3172dc4fb 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -205,7 +205,7 @@ MBC after deallocating first block:
ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
(B)->bhdr = ((Sz) | (F)), \
(B)->u.carrier = (C))
-
+
# define IS_MBC_FIRST_ABLK(AP,B) \
((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
&& ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
@@ -378,9 +378,8 @@ do { \
#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- - sizeof(ErtsAlcCPoolData_t) \
- + ABLK_HDR_SZ) \
+ (UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ + ABLK_HDR_SZ) \
- ABLK_HDR_SZ)
#else
#define SBC_HEADER_SIZE \
@@ -929,6 +928,88 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
#ifdef ERTS_SMP
+#ifdef DEBUG
+static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* p;
+
+ ASSERT(node != sentinel);
+ for (p = sentinel->next; p != sentinel; p = p->next) {
+ if (p == node)
+ return 1;
+ }
+ return 0;
+}
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* before_me = after_me->next;
+ ASSERT(node != after_me && node != before_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* after_me = before_me->prev;
+ ASSERT(node != before_me && node != after_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+unlink_edl(ErtsDoubleLink_t* node)
+{
+ node->next->prev = node->prev;
+ node->prev->next = node->next;
+}
+
+static ERTS_INLINE void
+relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ if (node != before_me && node != before_me->prev) {
+ unlink_edl(node);
+ link_edl_before(before_me, node);
+ }
+}
+
+static ERTS_INLINE int is_abandoned(Carrier_t *crr)
+{
+ return crr->cpool.abandoned.next != NULL;
+}
+
+static ERTS_INLINE void
+link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
+{
+ ASSERT(!is_abandoned(crr));
+
+ link_edl_after(list, &crr->cpool.abandoned);
+
+ ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
+ ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+}
+
+static ERTS_INLINE void
+unlink_abandoned_carrier(Carrier_t *crr)
+{
+ ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
+ &crr->cpool.abandoned) ||
+ is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
+ &crr->cpool.abandoned));
+
+ unlink_edl(&crr->cpool.abandoned);
+
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
+}
+
static ERTS_INLINE void
clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
{
@@ -955,7 +1036,7 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
}
}
-#endif
+#endif /* ERTS_SMP */
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -1775,6 +1856,18 @@ handle_delayed_dealloc(Allctr_t *allctr,
* data has been overwritten by the queue.
*/
Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk);
+
+ /* Restore the word overwritten by the dd-queue, as it will be read
+ * if this carrier is pulled from dc_list by cpool_fetch().

+ */
+ ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
+ ERTS_ALC_CPOOL_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
+#ifdef MBC_ABLK_OFFSET_BITS
+ blk->u.carrier = crr;
+#else
+ blk->carrier = crr;
+#endif
+
ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
@@ -2563,10 +2656,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
-#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
+#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
-#define ERTS_ALC_CPOOL_MAX_NO_CARRIERS 5
-#define ERTS_ALC_CPOOL_INSERT_ALLOWED_OFFSET 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
@@ -2743,9 +2835,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
(erts_aint_t) CARRIER_SZ(crr));
erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
- erts_smp_atomic_set_nob(&crr->allctr,
- ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
-
/*
* We search in 'next' direction and begin by passing
* one element before trying to insert. This in order to
@@ -2804,6 +2893,9 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
cpool_set_mod_marked(&cpd2p->prev,
(erts_aint_t) &crr->cpool,
(erts_aint_t) cpd1p);
+
+ erts_smp_atomic_set_wb(&crr->allctr,
+ ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
}
static void
@@ -2904,59 +2996,163 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- int i;
+ int i, i_stop, has_passed_sentinel;
Carrier_t *crr;
ErtsAlcCPoolData_t *cpdp;
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *sentinel;
+ ErtsDoubleLink_t* dl;
+ ErtsDoubleLink_t* first_old_traitor;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- i = 0;
+ i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
+ first_old_traitor = allctr->cpool.traitor_list.next;
+ cpool_entrance = NULL;
- /* First; check our own pending dealloc carrier list... */
- crr = allctr->cpool.dc_list.last;
- while (crr && i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
- if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- unlink_carrier(&allctr->cpool.dc_list, crr);
-#ifdef ERTS_ALC_CPOOL_DEBUG
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
- ((erts_aint_t) allctr))
- == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
-#else
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
-#endif
- return crr;
+ /*
+ * Search my own pooled_list,
+ * i.e. my abandoned carriers that were in the pool the last time I checked.
+ */
+
+ dl = allctr->cpool.pooled_list.next;
+ while(dl != &allctr->cpool.pooled_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+
+ ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+ }
+ else { /* Not in pool, move to traitor_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
+ }
+ if (--i <= 0) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return NULL;
}
- crr = crr->prev;
- i++;
}
- /* ... then the pool ... */
+ /* Now search traitor_list.
+ * i.e. carriers employed by other allocators the last time I checked.
+ * They might have been abandoned since then.
+ */
+
+ i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
+ 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
+ dl = first_old_traitor;
+ while(dl != &allctr->cpool.traitor_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+ ASSERT(dl != &allctr->cpool.pooled_list);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+
+ /* Move to pooled_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
+ }
+ if (--i <= i_stop) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ if (i > 0)
+ break;
+ else
+ return NULL;
+ }
+ }
/*
- * We search in 'prev' direction and begin by passing
- * one element before trying to fetch. This in order to
- * avoid contention with threads inserting elements.
+ * Finally, search the shared pool and try to employ foreign carriers.
*/
- cpdp = cpool_aint2cpd(cpool_read(&sentinel->prev));
- if (cpdp == sentinel)
- return NULL;
+ sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ if (cpool_entrance) {
+ /* We saw a pooled carrier above; use it as the entrance into the pool
+ */
+ cpdp = cpool_entrance;
+ }
+ else {
+ /* No pooled carrier seen above. Start the search at the cpool sentinel,
+ * but begin by passing one element before trying to fetch,
+ * in order to avoid contention with threads inserting elements.
+ */
+ cpool_entrance = sentinel;
+ cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
- while (i < ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) {
+ has_passed_sentinel = 0;
+ while (1) {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == sentinel) {
+ if (cpdp == cpool_entrance) {
+ if (cpool_entrance == sentinel) {
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
+ i = 0; /* Last one to inspect */
+ }
+ else if (cpdp == sentinel) {
+ if (has_passed_sentinel) {
+ /* We have been here before; cpool_entrance must have been removed */
+ return NULL;
+ }
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == sentinel)
return NULL;
- i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; /* Last one to inspect */
+ has_passed_sentinel = 1;
}
- crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
+ crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL|ERTS_CRR_ALCTR_FLG_BUSY))
- == ERTS_CRR_ALCTR_FLG_IN_POOL)
+ if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
&& (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
erts_aint_t act;
/* Try to fetch it... */
@@ -2965,11 +3161,35 @@ cpool_fetch(Allctr_t *allctr, UWord size)
exp);
if (act == exp) {
cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ unlink_abandoned_carrier(crr);
+ }
return crr;
}
}
- i++;
+ if (--i <= 0)
+ return NULL;
}
+
+ /* Last; check our own pending dealloc carrier list... */
+ crr = allctr->cpool.dc_list.last;
+ while (crr) {
+ if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ unlink_carrier(&allctr->cpool.dc_list, crr);
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
+ ((erts_aint_t) allctr))
+ == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
+#else
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+#endif
+ return crr;
+ }
+ crr = crr->prev;
+ if (--i <= 0)
+ return NULL;
+ }
+
return NULL;
}
@@ -3066,6 +3286,9 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
return;
}
+ if (is_abandoned(crr))
+ unlink_abandoned_carrier(crr);
+
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
dealloc_carrier(allctr, crr, 1);
@@ -3112,6 +3335,8 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
limit = (csz/100)*allctr->cpool.util_limit;
crr->cpool.abandon_limit = limit;
}
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
}
static void
@@ -3142,6 +3367,9 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
STAT_MBC_CPOOL_INSERT(allctr, crr);
unlink_carrier(&allctr->mbc_list, crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
allctr->remove_mbc(allctr, crr);
@@ -3274,6 +3502,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
+ if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
+ /* Do an overly conservative _overflow_ check here so we don't
+ * have to deal with it from here on. We could be more accurate,
+ * but the need to allocate over 99% of the address space should
+ * never arise on any machine, whether 32-bit or 64-bit.
+ */
+ return NULL;
+ }
+
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
#ifdef ERTS_SMP
@@ -3640,6 +3877,11 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
if (busy_pcrr_pp && *busy_pcrr_pp) {
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
*busy_pcrr_pp = NULL;
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
cpool_delete(allctr, allctr, crr);
}
else
@@ -5519,6 +5761,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->min_block_size = sz;
}
+ allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
+ allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
+ allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
+ allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
allctr->cpool.dc_list.first = NULL;
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
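
cpool_fetch above recovers the owning Carrier_t from an embedded ErtsDoubleLink_t node with ((char *) dl) - offsetof(Carrier_t, cpool.abandoned). That is the usual intrusive-list "container of" trick. A minimal self-contained sketch of it; the struct names here are simplified stand-ins, not the emulator's types:

#include <stddef.h>
#include <stdio.h>

typedef struct DL { struct DL *next, *prev; } DL;

/* Simplified stand-in for a carrier with an embedded list node. */
typedef struct {
    unsigned long size;
    struct { DL abandoned; } cpool;   /* nested, as in Carrier_t */
} Carrier;

/* Recover the enclosing Carrier from a pointer to its embedded node. */
static Carrier *carrier_of(DL *node)
{
    return (Carrier *) ((char *) node - offsetof(Carrier, cpool.abandoned));
}

int main(void)
{
    Carrier c = { 4096, { { NULL, NULL } } };
    DL *node = &c.cpool.abandoned;                     /* what a list walk hands back */
    printf("size = %lu\n", carrier_of(node)->size);    /* prints 4096 */
    return 0;
}
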
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 7be6b1ed9d..eee920e66c 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -268,6 +268,11 @@ typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
#ifdef ERTS_SMP
+typedef struct ErtsDoubleLink_t_ {
+ struct ErtsDoubleLink_t_ *next;
+ struct ErtsDoubleLink_t_ *prev;
+} ErtsDoubleLink_t;
+
typedef struct {
erts_atomic_t next;
erts_atomic_t prev;
@@ -277,6 +282,7 @@ typedef struct {
UWord abandon_limit;
UWord blocks;
UWord blocks_size;
+ ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
} ErtsAlcCPoolData_t;
#endif
@@ -500,7 +506,12 @@ struct Allctr_t_ {
CarrierList_t sbc_list;
#ifdef ERTS_SMP
struct {
- CarrierList_t dc_list;
+ /* pooled_list, traitor_list and dc_list contain only
+ carriers _created_ by this allocator */
+ ErtsDoubleLink_t pooled_list;
+ ErtsDoubleLink_t traitor_list;
+ CarrierList_t dc_list;
+
UWord abandon_limit;
int disable_abandon;
int check_limit_count;
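
pooled_list and traitor_list are circular doubly linked lists whose heads double as sentinels: an empty list points at itself, so linking and unlinking never need NULL checks. A compact sketch of that discipline in the spirit of link_edl_after/unlink_edl from the .c hunk above; this is an illustration, not the emulator's own code:

#include <stdio.h>

typedef struct DL { struct DL *next, *prev; } DL;

static void dl_init(DL *sentinel)              /* empty list: points at itself */
{
    sentinel->next = sentinel->prev = sentinel;
}

static void dl_link_after(DL *after_me, DL *node)
{
    node->next = after_me->next;
    node->prev = after_me;
    after_me->next->prev = node;
    after_me->next = node;
}

static void dl_unlink(DL *node)                /* no NULL checks needed */
{
    node->next->prev = node->prev;
    node->prev->next = node->next;
}

int main(void)
{
    DL list, a, b;
    dl_init(&list);
    dl_link_after(&list, &a);                  /* list: a */
    dl_link_after(&a, &b);                     /* list: a, b */
    dl_unlink(&a);                             /* list: b */
    printf("%d\n", list.next == &b && b.next == &list);   /* prints 1 */
    return 0;
}
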
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index b3dc327704..decae6b2ca 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -292,7 +292,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
if (DTRACE_ENABLED(aio_pool_add)) {
DTRACE_CHARBUF(port_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", a->port);
/* DTRACE TODO: Get the queue length from erts_thr_q_enqueue() ? */
len = -1;
DTRACE2(aio_pool_add, port_str, len);
@@ -327,7 +328,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
if (DTRACE_ENABLED(aio_pool_get)) {
DTRACE_CHARBUF(port_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", a->port);
/* DTRACE TODO: Get the length from erts_thr_q_dequeue() ? */
len = -1;
DTRACE2(aio_pool_get, port_str, len);
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index ff775691b3..3bf78adce7 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2294,18 +2294,11 @@ BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
BIF_ERROR(BIF_P,BADARG);
}
-/*
- * Ok, erlang:list_to_binary does not interrupt, and we really don't want
- * an alternative implementation for the exact same thing, why we
- * have descided to use the old non-restarting implementation for now.
- * In reality, there are seldom many iterations involved in doing this, so the
- * problem of long-running bifs is not really that big in this case.
- * So, for now we use the old implementation also in the module binary.
- */
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_binary_list_to_bin_1]);
}
typedef struct {
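
The repeated change in this file replaces a fragile check for "negating len overflowed" with an unsigned negation: for the most negative Sint, -len overflows as a signed operation, whereas -(Uint)len is well defined, and the result still appearing negative when cast back exposes exactly that one value. A standalone sketch of the pattern, using intptr_t/uintptr_t as stand-ins for Sint/Uint (the same idea also shows up later, where erl_init.c avoids abs(n) for negative exit codes):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the emulator's Sint/Uint word types. */
typedef intptr_t  Sint;
typedef uintptr_t Uint;

/* Negate a negative length, reporting failure for the one value
 * (the most negative Sint) whose magnitude does not fit in Sint. */
static int negate_len(Sint len, Uint *out)
{
    Uint tmp = -(Uint)len;        /* well defined: unsigned arithmetic wraps */
    if ((Sint)tmp < 0)            /* only the most negative Sint maps back below zero */
        return 0;
    *out = tmp;
    return 1;
}

int main(void)
{
    Uint v;
    printf("%d\n", negate_len((Sint)-42, &v) ? (int)v : -1);   /* prints 42 */
    printf("%d\n", negate_len(INTPTR_MIN, &v));                /* prints 0  */
    return 0;
}
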
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 1728b200f7..56cd2ba04f 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1548,8 +1548,10 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
switch (dp->extended_marker) {
case ERL_DRV_EXTENDED_MARKER:
- if (ERL_DRV_EXTENDED_MAJOR_VERSION != dp->major_version
- || ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version) {
+ if (dp->major_version < ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ || (ERL_DRV_EXTENDED_MAJOR_VERSION < dp->major_version
+ || (ERL_DRV_EXTENDED_MAJOR_VERSION == dp->major_version
+ && ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version))) {
/* Incompatible driver version */
res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
goto error;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 2adba9b240..b90362d82c 100755..100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -27,6 +27,7 @@
#include "erl_process.h"
#include "error.h"
#include "erl_driver.h"
+#include "erl_nif.h"
#include "bif.h"
#include "big.h"
#include "erl_version.h"
@@ -90,7 +91,7 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
" [smp:%beu:%beu]"
#endif
#ifdef USE_THREADS
-#ifdef ERTS_DIRTY_SCHEDULERS
+#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
#endif
" [async-threads:%d]"
@@ -2459,6 +2460,13 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ERL_DRV_EXTENDED_MINOR_VERSION);
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
+ } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
+ char buf[42];
+ int n = erts_snprintf(buf, 42, "%d.%d",
+ ERL_NIF_MAJOR_VERSION,
+ ERL_NIF_MINOR_VERSION);
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
#ifdef ERTS_SMP
BIF_RET(am_true);
@@ -2691,6 +2699,14 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
+ BIF_RET(erts_disable_tolerant_timeofday
+ ? am_disabled
+ : am_enabled);
+ }
+ else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
+ BIF_RET(erts_eager_check_io ? am_true : am_false);
+ }
BIF_ERROR(BIF_P, BADARG);
}
@@ -3050,6 +3066,25 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ }
+ BIF_ERROR(p, BADARG);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
@@ -3280,17 +3315,38 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(make_small((Uint) words));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
- /* Used by (emulator) */
- int res;
+ /* Used by driver_SUITE (emulator) */
+ Uint sz, *szp;
+ Eterm res, *hp, **hpp;
+ int no_errors;
+ ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
- res = erts_check_io_debug();
+ no_errors = erts_check_io_debug(&ciodi);
erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
- res = 0;
+ no_errors = 0;
#endif
- ASSERT(res >= 0);
- BIF_RET(erts_make_integer((Uint) res, BIF_P));
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ res = erts_bld_tuple(hpp, szp, 4,
+ erts_bld_uint(hpp, szp,
+ (Uint) no_errors),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_used_fds),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_driver_select_structs),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_driver_event_structs));
+ if (hpp)
+ break;
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
+ BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
@@ -3851,16 +3907,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
+ unsigned int i;
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
+ Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
+ * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }]
*/
-
+
tries = (Uint) ethr_atomic_read(&stats->tries);
colls = (Uint) ethr_atomic_read(&stats->colls);
@@ -3869,23 +3928,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
timer_ns = stats->timer.ns;
timer_n = stats->timer_n;
- af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
+ af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
uil = erts_bld_uint( hpp, szp, line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
-
+ uit = erts_bld_uint( hpp, szp, tries);
+ uic = erts_bld_uint( hpp, szp, colls);
+
uits = erts_bld_uint( hpp, szp, timer_s);
uitns = erts_bld_uint( hpp, szp, timer_ns);
uitn = erts_bld_uint( hpp, szp, timer_n);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
-
- t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
+ vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ }
+ thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
+
+ t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3906,13 +3969,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
ASSERT(ltype);
- type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
+ type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
if (lock->flag & ERTS_LCNT_LT_ALLOC) {
/* use allocator types names as id's for allocator locks */
ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
@@ -3923,16 +3986,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
id = lock->id;
}
} else {
- id = lock->id;
+ id = lock->id;
}
-
+
for (i = 0; i < lock->n_stats; i++) {
stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
}
-
- t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3952,12 +4014,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da
dtns = erts_bld_uint( hpp, szp, data->duration.ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
- adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+ adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
- aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+ aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 77627a6897..7ce950e090 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -472,7 +472,7 @@ cleanup_old_port_data(erts_aint_t data)
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
size_t size;
ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
- size = sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm) - 1);
+ size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
(void *) pdhp,
&pdhp->later_op,
@@ -493,8 +493,8 @@ void
erts_cleanup_port_data(Port *prt)
{
ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
- cleanup_old_port_data(erts_smp_atomic_read_nob(&prt->data));
- erts_smp_atomic_set_nob(&prt->data, (erts_aint_t) THE_NON_VALUE);
+ cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data,
+ (erts_aint_t) NULL));
}
Uint
@@ -508,7 +508,7 @@ erts_port_data_size(Port *prt)
}
else {
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
- return (Uint) sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm)-1);
+ return (Uint) sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
}
}
@@ -550,10 +550,11 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
hsize = size_object(BIF_ARG_2);
pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
- sizeof(ErtsPortDataHeap) + hsize*(sizeof(Eterm)-1));
+ sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm));
hp = &pdhp->heap[0];
pdhp->off_heap.first = NULL;
pdhp->off_heap.overhead = 0;
+ pdhp->hsize = hsize;
pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap);
data = (erts_aint_t) pdhp;
ASSERT((data & 0x3) == 0);
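
The sizing fix above (and the matching ones earlier in this file) changes hsize*(sizeof(Eterm)-1) to (hsize-1)*sizeof(Eterm), which suggests the struct ends in a one-element Eterm array: one word is already part of the struct, so only the remaining hsize-1 words need to be added, and the old expression under-allocated for multi-word data. A small sketch of that trailing-array sizing idiom with a hypothetical struct standing in for ErtsPortDataHeap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long Eterm;

/* Hypothetical header in the spirit of ErtsPortDataHeap: fixed fields plus a
 * one-element array that is over-allocated to the real heap size. */
typedef struct {
    size_t hsize;
    Eterm  heap[1];
} DataHeap;

static DataHeap *data_heap_alloc(size_t hsize)   /* assumes hsize >= 1 */
{
    /* One Eterm is already inside the struct; add the remaining hsize-1. */
    DataHeap *dh = malloc(sizeof(DataHeap) + (hsize - 1) * sizeof(Eterm));
    if (dh)
        dh->hsize = hsize;
    return dh;
}

int main(void)
{
    DataHeap *dh = data_heap_alloc(8);
    if (!dh)
        return 1;
    memset(dh->heap, 0, dh->hsize * sizeof(Eterm));   /* all 8 words are valid */
    printf("allocated %zu heap words\n", dh->hsize);
    free(dh);
    return 0;
}
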
@@ -561,8 +562,14 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ if (data == (erts_aint_t)NULL) {
+ /* Port terminated by racing thread */
+ data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ ASSERT(data != (erts_aint_t)NULL);
+ cleanup_old_port_data(data);
+ BIF_ERROR(BIF_P, BADARG);
+ }
cleanup_old_port_data(data);
-
BIF_RET(am_true);
}
@@ -581,6 +588,8 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
data = erts_smp_atomic_read_ddrb(&prt->data);
+ if (data == (erts_aint_t)NULL)
+ BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */
if ((data & 0x3) != 0) {
res = (Eterm) (UWord) data;
@@ -882,7 +891,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
dtrace_proc_str(p, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", port->common.id);
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
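
The port_set_data_2/port_get_data_1 hunks treat a NULL data word as "port already terminated": erts_cleanup_port_data atomically swaps NULL in, and a writer that later swaps its own heap in and gets NULL back knows it raced with termination, so it must pull its own copy back out and free it. A sketch of that swap-with-sentinel handshake using C11 atomics; the emulator uses its own erts_smp_atomic_* wrappers, and the no_data marker below is a hypothetical stand-in for the initial value:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static char no_data;                                  /* "nothing stored yet" marker */
static _Atomic(void *) port_data = &no_data;          /* NULL means "port terminated" */

static void drop(void *p) { if (p && p != &no_data) free(p); }

static void cleanup_port_data(void)
{
    drop(atomic_exchange(&port_data, NULL));          /* claim whatever was published */
}

/* Returns 0 if the publish lost the race against cleanup. */
static int set_port_data(void *new_data)
{
    void *old = atomic_exchange(&port_data, new_data);
    if (old == NULL) {
        /* Terminator already ran: pull our own value back out and drop it. */
        drop(atomic_exchange(&port_data, NULL));
        return 0;
    }
    drop(old);
    return 1;
}

int main(void)
{
    printf("%d\n", set_port_data(malloc(16)));        /* prints 1: normal publish   */
    cleanup_port_data();                              /* port goes away             */
    printf("%d\n", set_port_data(malloc(16)));        /* prints 0: lost the race    */
    return 0;
}
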
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 819b19e566..06dfeb1260 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -166,7 +166,7 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size,
* Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1
*/
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg);
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif);
BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
@@ -236,6 +236,8 @@ erts_bin_drv_alloc_fnf(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -246,6 +248,8 @@ erts_bin_drv_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -257,6 +261,8 @@ erts_bin_nrml_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -267,11 +273,12 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ return NULL;
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -281,17 +288,14 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ erts_realloc_enomem(type, bp, size);
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
if (!nbp)
- erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV
- ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY),
- bp,
- bsize);
+ erts_realloc_enomem(type, bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -312,6 +316,7 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
{
Uint bsize = ERTS_MAGIC_BIN_SIZE(size);
Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ ASSERT(bsize > size);
if (!bptr)
erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
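
Every allocation helper above now guards the header-plus-payload size computation: if adding the fixed overhead made bsize wrap around, bsize ends up smaller than size, and the helper bails out instead of handing back an undersized binary. A minimal sketch of that wraparound test; HEADER_SZ is a hypothetical stand-in for what ERTS_SIZEOF_Binary plus CHICKEN_PAD contribute, and the overly conservative pre-check in erl_alloc_util.c earlier serves the same purpose one level down:

#include <stdint.h>
#include <stdio.h>

#define HEADER_SZ 32u   /* hypothetical fixed per-binary overhead */

/* Compute header + payload, detecting unsigned wraparound: if the sum is
 * smaller than the payload alone, the addition overflowed. */
static int checked_total_size(size_t payload, size_t *total)
{
    size_t t = HEADER_SZ + payload;
    if (t < payload)
        return 0;            /* overflow: caller should fail the allocation */
    *total = t;
    return 1;
}

int main(void)
{
    size_t t;
    printf("%d\n", checked_total_size(1024, &t));          /* prints 1 */
    printf("%d\n", checked_total_size(SIZE_MAX - 4, &t));  /* prints 0 */
    return 0;
}
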
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index a5d67571e2..8f246ffa07 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -259,10 +259,11 @@ static void schedule_free_dbtable(DbTable* tb)
/*
* NON-SMP case: Caller is *not* allowed to access the *tb
* structure after this function has returned!
- * SMP case: Caller is allowed to access the *tb structure
- * until the bif has returned (we typically
- * need to unlock the table lock after this
- * function has returned).
+ * SMP case: Caller is allowed to access the *common* part of the *tb
+ * structure until the bif has returned (we typically need to
+ * unlock the table lock after this function has returned).
+ * Caller is *not* allowed to access the specialized part
+ * (hash or tree) of *tb after this function has returned.
*/
ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
@@ -3279,34 +3280,37 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
}
erts_smp_rwmtx_runlock(mmtl);
if (tb) {
- int reds;
- DbFixation** pp;
+ int reds = 0;
db_lock(tb, LCK_WRITE_REC);
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
+ if (!(tb->common.status & DB_DELETE)) {
+ DbFixation** pp;
+
+ #ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+ #endif
+ reds = 10;
+
+ for (pp = &tb->common.fixations; *pp != NULL;
+ pp = &(*pp)->next) {
+ if ((*pp)->pid == pid) {
+ DbFixation* fix = *pp;
+ erts_aint_t diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.ref,diff,0);
+ *pp = fix->next;
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ tb, fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ break;
+ }
+ }
+ #ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+ #endif
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ db_unfix_table_hash(&(tb->hash));
+ reds += 40;
}
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
}
db_unlock(tb, LCK_WRITE_REC);
BUMP_REDS(c_p, reds);
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 25029ba90f..a62a83a928 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -485,7 +485,7 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
*ret = am_EOT;
return DB_ERROR_NONE;
}
- /* Walk down to the tree to the left */
+ /* Walk down the tree to the left */
if ((stack = get_static_stack(tb)) != NULL) {
stack->pos = stack->slot = 0;
}
@@ -531,7 +531,7 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
*ret = am_EOT;
return DB_ERROR_NONE;
}
- /* Walk down to the tree to the left */
+ /* Walk down the tree to the right */
if ((stack = get_static_stack(tb)) != NULL) {
stack->pos = stack->slot = 0;
}
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 5517c26ba4..e498ac70ec 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -133,7 +133,23 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 2
+
+/*
+ * The emulator will refuse to load a driver with a major version
+ * lower than ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD. The load
+ * may, however, fail if the user has not removed the use of deprecated
+ * symbols.
+ *
+ * The ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD has to allow
+ * loading of drivers built at least two major OTP releases
+ * ago.
+ *
+ * Bump of major version to 3 happened in OTP 17. That is, in
+ * OTP 19 we can increase ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ * to 3.
+ */
+#define ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD 2
/*
* The emulator will refuse to load a driver with different major
@@ -182,7 +198,7 @@ typedef long long ErlDrvSInt64;
#error No 64-bit integer type
#endif
-#if defined(__WIN32__)
+#if defined(__WIN32__) || defined(_WIN32)
typedef ErlDrvUInt ErlDrvSizeT;
typedef ErlDrvSInt ErlDrvSSizeT;
#else
@@ -345,6 +361,9 @@ typedef struct erl_drv_entry {
/* Called on behalf of driver_select when
it is safe to release 'event'. A typical
unix driver would call close(event) */
+ void (*emergency_close)(ErlDrvData drv_data);
+ /* Called when the port is closed abruptly,
+ specifically when erl_crash_dump is called. */
/* When adding entries here, dont forget to pad in obsolete/driver.h */
} ErlDrvEntry;
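
The new ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD and the erl_bif_ddll.c check earlier in this patch together accept a driver when its major version is at least the minimum, not newer than the emulator's major, and, within the same major, its minor is not newer than the emulator's. A small predicate expressing that rule as a sketch; the constants are copied from the headers above, while the function itself is only an illustration of the inverted condition in do_load_driver_entry():

#include <stdio.h>

#define ERL_DRV_EXTENDED_MAJOR_VERSION              3
#define ERL_DRV_EXTENDED_MINOR_VERSION              2
#define ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD  2

/* Returns 1 when a driver built against (major, minor) may be loaded. */
static int driver_version_ok(int major, int minor)
{
    if (major < ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD)
        return 0;                            /* too old to support at all */
    if (major > ERL_DRV_EXTENDED_MAJOR_VERSION)
        return 0;                            /* built for a newer emulator */
    if (major == ERL_DRV_EXTENDED_MAJOR_VERSION
        && minor > ERL_DRV_EXTENDED_MINOR_VERSION)
        return 0;                            /* same major, newer minor */
    return 1;
}

int main(void)
{
    printf("%d %d %d %d\n",
           driver_version_ok(1, 5),          /* 0: below required major      */
           driver_version_ok(2, 9),          /* 1: old but still supported   */
           driver_version_ok(3, 2),          /* 1: exactly current           */
           driver_version_ok(3, 3));         /* 0: minor newer than emulator */
    return 0;
}
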
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 3f829ea7ea..4e8c6dc68b 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -35,6 +35,7 @@ typedef struct {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
typedef struct {
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index aa15d2cc57..0db42d4325 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF has saved arguments, they need to be added
+ */
+ if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
+ Eterm* argv;
+ int argc;
+ if (erts_setup_nif_gc(p, &argv, &argc)) {
+ roots[n].v = argv;
+ roots[n].sz = argc;
+ n++;
+ }
+ }
+
ASSERT(n <= rootset->size);
mp = p->msg.first;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index d54658f1ea..61f8385efc 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -324,7 +324,6 @@ erl_init(int ncpu,
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
erts_init_trace();
- erts_init_binary();
erts_init_bits();
erts_code_ix_init();
erts_init_fun_table();
@@ -337,6 +336,7 @@ erl_init(int ncpu,
erts_ddll_init();
init_emulator();
erts_ptab_init(); /* Must be after init_emulator() */
+ erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -548,6 +548,8 @@ void erts_usage(void)
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-secio bool enable/disable eager check I/O scheduling,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
erts_fprintf(stderr, "-sub bool enable/disable scheduler utilization balancing,\n");
#else
@@ -1674,6 +1676,22 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("ecio", sub_param)) {
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+#ifndef __OSE__
+ if (sys_strcmp("true", arg) == 0)
+ erts_eager_check_io = 1;
+ else
+#endif
+ if (sys_strcmp("false", arg) == 0)
+ erts_eager_check_io = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad schedule eager check I/O value '%s'\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp(arg, "true") == 0)
@@ -2066,8 +2084,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
system_cleanup(flush_async);
save_statistics();
-
- an = abs(n);
+ if (n < 0)
+ an = -(unsigned int)n;
+ else
+ an = n;
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 7e3a90779d..b105ece6f1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
{ "atom_tab", NULL },
{ "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
@@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -227,8 +227,7 @@ rw_op_str(Uint16 flags)
case ERTS_LC_FLG_LO_READ:
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Only write flag present");
default:
break;
}
@@ -270,9 +269,9 @@ union erts_lc_free_block_t_ {
static ethr_tsd_key locks_key;
-static erts_lc_locked_locks_t *erts_locked_locks;
+static erts_lc_locked_locks_t *erts_locked_locks = NULL;
-static erts_lc_free_block_t *free_blocks;
+static erts_lc_free_block_t *free_blocks = NULL;
#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
@@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -325,8 +323,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -366,11 +363,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -1330,7 +1327,7 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 6f44bf097b..cf6996ea06 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) {
ethr_mutex_unlock(&lcnt_data_lock);
}
+const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+static ERTS_INLINE int lcnt_log2(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
static char* lcnt_lock_type(Uint16 flag) {
switch(flag & ERTS_LCNT_LT_ALL) {
@@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
stats->timer_n = 0;
stats->file = (char *)str_undefined;
stats->line = 0;
+ sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
}
static void lcnt_time(erts_lcnt_time_t *time) {
-#ifdef HAVE_GETHRTIME
+#if 0 || defined(HAVE_GETHRTIME)
SysHrTime hr_time;
hr_time = sys_gethrtime();
time->s = (unsigned long)(hr_time / 1000000000LL);
time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
-#else
- SysTimeval tv;
- sys_gettimeofday(&tv);
- time->s = tv.tv_sec;
- time->ns = tv.tv_usec*1000LL;
+#else
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ time->s = tv.tv_sec;
+ time->ns = tv.tv_usec*1000LL;
#endif
}
@@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_
dns += 1000000000LL;
}
+ ASSERT(ds >= 0);
+
d->s = ds;
d->ns = dns;
}
-/* difference d must be positive */
+/* difference d must be non-negative */
static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- unsigned long ngns = 0;
-
t->s += d->s;
t->ns += d->ns;
- ngns = t->ns / 1000000000LL;
+ t->s += t->ns / 1000000000LL;
t->ns = t->ns % 1000000000LL;
-
- t->s += ngns;
}
static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
erts_lcnt_thread_data_t *eltd;
eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ if (!eltd) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
@@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) {
return "--";
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- erts_aint_t colls, tries, w_state, r_state;
- erts_lcnt_lock_stats_t *stats = NULL;
-
+static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
+ erts_aint_t w_state, r_state;
char *type;
- int i;
-
+
+ if (strcmp(lock->name, "run_queue") != 0) return;
type = lcnt_lock_type(lock->flag);
r_state = ethr_atomic_read(&lock->r_state);
w_state = ethr_atomic_read(&lock->w_state);
-
if (lock->flag & flag) {
- erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- lock->id,
- extra);
+ erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
+ action,
+ lock->name,
+ r_state,
+ w_state,
+ type,
+ lock->id);
}
}
-
-static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- if (strcmp(lock->name, "proc_main") == 0) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
- }
-}
-
#endif
static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
unsigned int i;
erts_lcnt_lock_stats_t *stats = NULL;
-
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
+ for (i = 0; i < lock->n_stats; i++) {
+ if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
+ return &(lock->stats[i]);
+ }
+ }
+ if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ stats = &lock->stats[lock->n_stats];
+ lock->n_stats++;
+ stats->file = file;
+ stats->line = line;
+ return stats;
+ }
}
return &lock->stats[0];
+}
+static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
+ int idx;
+ unsigned long r;
+
+ if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+ if (r) idx = lcnt_log2(r);
+ else idx = 0;
+ }
+ hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
+static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
+ erts_lcnt_time_t *time_wait) {
ethr_atomic_inc(&stats->tries);
@@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic
if (time_wait) {
lcnt_time_add(&(stats->timer), time_wait);
stats->timer_n++;
+ lcnt_update_stats_hist(&stats->hist,time_wait);
}
}
@@ -248,6 +275,9 @@ void erts_lcnt_init() {
/* init lcnt structure */
erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ if (!erts_lcnt_data) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
erts_lcnt_data->current_locks = erts_lcnt_list_init();
erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
@@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
erts_lcnt_lock_list_t *list;
list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ if (!list) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
list->head = NULL;
list->tail = NULL;
list->n = 0;
@@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
/* interface to erl_threads.h */
/* only lock on init and destroy, all others should use atomics */
void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+ erts_lcnt_init_lock_x(lock, name, flag, NIL);
}
+
void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
if (!name) {
@@ -360,7 +394,6 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
}
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
-
lcnt_unlock();
}
@@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* copy structure and insert the copy */
deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ if (!deleted_lock) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
deleted_lock->next = NULL;
@@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
} else {
eltd->lock_in_conflict = 0;
@@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc( &lock->w_state);
+ ethr_atomic_inc(&lock->w_state);
eltd = lcnt_get_thread_data();
@@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
     * 'atomically'. All other locks will block the thread if w_state > 0
* i.e. locked.
*/
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
-
} else {
eltd->lock_in_conflict = 0;
}
@@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
/* should check if this thread was "waiting" */
-
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- ethr_atomic_dec( &lock->w_state);
+ ethr_atomic_dec(&lock->w_state);
}
/* erts_lcnt_lock_post
@@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
+ ethr_atomic_inc(&lock->flowstate);
}
#endif
@@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
ASSERT(eltd);
/* if lock was in conflict, time it */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- stats = lcnt_get_lock_stats(lock, file, line);
- } else {
- stats = &lock->stats[0];
- }
-
+ stats = lcnt_get_lock_stats(lock, file, line);
if (eltd->timer_set) {
lcnt_time(&timer);
lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
-
eltd->timer_set--;
ASSERT(eltd->timer_set >= 0);
} else {
@@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
/* flowstate */
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
- ethr_atomic_dec( &lock->flowstate);
+ ethr_atomic_dec(&lock->flowstate);
/* write state */
w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0)
+ ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
}
@@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
ethr_atomic_inc( &lock->flowstate);
#endif
ethr_atomic_inc(&lock->w_state);
-
lcnt_update_stats(&(lock->stats[0]), 0, NULL);
-
} else {
ethr_atomic_inc(&lock->stats[0].tries);
ethr_atomic_inc(&lock->stats[0].colls);
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 75f7cd028b..ffbb93da1b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -35,6 +35,10 @@
* | | | - collisions (including trylock busy)
* | | | - timer (time spent in waiting for lock)
* | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
*
* Each instance of a lock is the unique lock, i.e. set and id in that set.
* For each lock there is a set of statistics with where and what impact
@@ -68,8 +72,17 @@
#include "ethread.h"
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+/* histogram */
+#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
+#if 0 || defined(HAVE_GETHRTIME)
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
+#else
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
+#endif
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
@@ -104,6 +117,10 @@ typedef struct {
extern erts_lcnt_time_t timer_start;
+typedef struct {
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nanosecond occurrences */
+} erts_lcnt_hist_t;
+
typedef struct erts_lcnt_lock_stats_s {
/* "tries" and "colls" needs to be atomic since
* trylock busy does not aquire a lock and there
@@ -118,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s {
unsigned long timer_n; /* #times waited for lock */
erts_lcnt_time_t timer; /* total wait time for lock */
+ erts_lcnt_hist_t hist;
} erts_lcnt_lock_stats_t;
 /* rw locks use both states, other locks only use w_state */
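The histogram added above buckets each lock wait time by log2 of its nanosecond value, shifted right by ERTS_LCNT_HISTOGRAM_RSHIFT and capped at ERTS_LCNT_HISTOGRAM_MAX_NS. The standalone sketch below is not part of the patch: it mirrors that bucketing with a plain loop-based log2 instead of the table-based lcnt_log2(), and the HIST_* names are local stand-ins for the gettimeofday configuration shown above.

    #include <stdio.h>

    #define HIST_SLOTS  20                    /* stand-in for ERTS_LCNT_HISTOGRAM_SLOT_SIZE */
    #define HIST_RSHIFT 10                    /* stand-in for ERTS_LCNT_HISTOGRAM_RSHIFT */
    #define HIST_MAX_NS ((1UL << 28) - 1)     /* stand-in for ERTS_LCNT_HISTOGRAM_MAX_NS */

    /* Map a wait time (s, ns) to a histogram slot the way lcnt_update_stats_hist() does. */
    static int slot_for(unsigned long s, unsigned long ns) {
        unsigned long r;
        int idx = 0;
        if (s > 0 || ns > HIST_MAX_NS)
            return HIST_SLOTS - 1;            /* anything too large lands in the last slot */
        r = ns >> HIST_RSHIFT;
        while (r >>= 1)                       /* floor(log2(r)); 0 when r is 0 or 1 */
            idx++;
        return idx;
    }

    int main(void) {
        unsigned long waits_ns[] = { 500, 2000, 150000, 90000000, 400000000 };
        unsigned long hist[HIST_SLOTS] = { 0 };
        size_t i;
        for (i = 0; i < sizeof(waits_ns)/sizeof(waits_ns[0]); i++)
            hist[slot_for(0, waits_ns[i])]++;
        for (i = 0; i < HIST_SLOTS; i++)
            if (hist[i])
                printf("slot %2lu: %lu waits\n", (unsigned long)i, hist[i]);
        return 0;
    }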
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 2fff7f9390..5e740aacdd 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -58,6 +58,8 @@
* - maps:size/1
* - maps:without/2
*
+ * DEBUG: for sharing calculation
+ * - erts_internal:map_to_tuple_keys/1
*/
/* erlang:map_size/1
@@ -647,22 +649,24 @@ int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) {
*mhp++ = tup;
if (is_immed(key)) {
- while(n--) {
+ while (1) {
if (*ks == key) {
goto found_key;
- } else {
+ } else if (--n) {
*mhp++ = *vs++;
*thp++ = *ks++;
- }
+ } else
+ break;
}
} else {
- while(n--) {
+ while(1) {
if (EQ(*ks, key)) {
goto found_key;
- } else {
+ } else if (--n) {
*mhp++ = *vs++;
*thp++ = *ks++;
- }
+ } else
+ break;
}
}
@@ -676,7 +680,7 @@ int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) {
found_key:
/* Copy rest of keys and values */
- if (n) {
+ if (--n) {
sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
sys_memcpy(thp, ks+1, n*sizeof(Eterm));
}
@@ -817,3 +821,17 @@ int erts_validate_and_sort_map(map_t* mp)
}
return 1;
}
+
+/*
+ * erts_internal:map_to_tuple_keys/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_to_tuple_keys_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ BIF_RET(mp->keys);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
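For reference, the rewritten erts_maps_remove() loop above walks the small map's parallel key and value arrays, copying entries until the key matches and then copying the remainder. A minimal sketch of that copy-all-but-one pattern on plain C arrays (illustrative only, not emulator code):

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 and fills out_ks/out_vs (length n-1) if key was found, else 0. */
    static int remove_key(const int *ks, const int *vs, size_t n, int key,
                          int *out_ks, int *out_vs) {
        size_t i;
        for (i = 0; i < n; i++) {
            if (ks[i] == key) {
                /* copy the prefix before the key ... */
                memcpy(out_ks, ks, i * sizeof(int));
                memcpy(out_vs, vs, i * sizeof(int));
                /* ... and the suffix after it, like the found_key path above */
                memcpy(out_ks + i, ks + i + 1, (n - i - 1) * sizeof(int));
                memcpy(out_vs + i, vs + i + 1, (n - i - 1) * sizeof(int));
                return 1;
            }
        }
        return 0; /* key absent: the BIF leaves the original map unchanged */
    }

    int main(void) {
        int ks[] = { 1, 2, 3, 4 }, vs[] = { 10, 20, 30, 40 };
        int nks[3], nvs[3];
        if (remove_key(ks, vs, 4, 3, nks, nvs)) {
            size_t i;
            for (i = 0; i < 3; i++)
                printf("%d => %d\n", nks[i], nvs[i]);
        }
        return 0;
    }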
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 6a9030fd99..8870fac7d9 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr,
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr,
+#ifdef ERTS_SMP
+ *rcvr_locks
+#else
+ 0
+#endif
+ );
}
}
@@ -542,7 +548,13 @@ queue_message(Process *c_p,
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver);
+ erts_proc_notify_new_message(receiver,
+#ifdef ERTS_SMP
+ *receiver_locks
+#else
+ 0
+#endif
+ );
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
@@ -896,8 +908,10 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send)) {
- erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", receiver->common.id);
}
#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -1030,7 +1044,6 @@ erts_send_message(Process* sender,
}
BM_SWAP_TIMER(send,system);
} else {
-#ifdef ERTS_SMP
ErlOffHeap *ohp;
Eterm *hp;
erts_aint32_t state;
@@ -1062,42 +1075,6 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
-#else
- ErlMessage* mp = message_alloc();
- Eterm *hp;
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
-
- if (receiver->stop - receiver->htop <= msize) {
- BM_SWAP_TIMER(send,system);
- erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
- BM_SWAP_TIMER(system,send);
- }
- hp = receiver->htop;
- receiver->htop = hp + msize;
- BM_SWAP_TIMER(send,copy);
- message = copy_struct(message, msize, &hp, &receiver->off_heap);
- BM_MESSAGE_COPIED(msize);
- BM_SWAP_TIMER(copy,send);
- DTRACE6(message_send, sender_name, receiver_name,
- (uint32_t)msize, tok_label, tok_lastcnt, tok_serial);
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
-#endif
- mp->next = NULL;
- mp->data.attached = NULL;
- LINK_MESSAGE(receiver, mp);
- res = receiver->msg.len;
- erts_proc_notify_new_message(receiver);
-
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
- BM_SWAP_TIMER(send,system);
-#endif /* #ifndef ERTS_SMP */
}
return res;
}
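Both call sites above repeat the same #ifdef ERTS_SMP block just to pass the caller's lock set to erts_proc_notify_new_message(). A hypothetical wrapper macro (not in the patch; the stub types and stub function below exist only so the sketch compiles on its own) shows one way the conditional could be kept in a single place:

    #include <stdio.h>

    /* Stand-ins so the sketch is self-contained; the real emulator types differ. */
    typedef struct { int id; } Process;
    typedef unsigned int ErtsProcLocks;
    static void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) {
        printf("notify process %d (locks %u)\n", p->id, locks);
    }

    #ifdef ERTS_SMP
    #define NOTIFY_NEW_MESSAGE(p, locks) erts_proc_notify_new_message((p), (locks))
    #else
    /* The locks argument is not evaluated here, matching the literal 0 passed in the patch. */
    #define NOTIFY_NEW_MESSAGE(p, locks) erts_proc_notify_new_message((p), 0)
    #endif

    int main(void) {
        Process rcvr = { 42 };
        ErtsProcLocks held = 1u;
        NOTIFY_NEW_MESSAGE(&rcvr, held);
        return 0;
    }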
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 40860e141c..adc3520ebb 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -472,6 +472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
+
+ if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
+ if (sb->is_writable) {
+ ProcBin* pb = (ProcBin*) binary_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ sb->is_writable = 0;
+ }
+ }
+ }
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
@@ -1513,59 +1525,264 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(env->proc) == 0;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. The saved_mfa, saved_argc, alloced_argv_sz and argv
+ * members are used to track the MFA and arguments of the top NIF in case a
+ * chain of one or more enif_schedule_nif() calls results in an exception,
+ * since in that case the original MFA and registers have to be restored
+ * before returning to Erlang to ensure stacktrace information associated
+ * with the exception is correct.
+ */
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m;
+ NativeFunPtr fp;
+ Eterm saved_mfa[3];
+ int saved_argc;
+ int alloced_argv_sz;
+ Eterm argv[1];
+} NifExport;
-static void
-alloc_proc_psd(Process* proc, Export **ep)
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. This function is declared in erl_process.h.
+ */
+int
+erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ int gc = (ep && ep->saved_argc > 0);
+
+ if (gc) {
+ *objv = ep->argv;
+ *nobj = ep->saved_argc;
+ }
+ return gc;
+}
+
+/*
+ * Allocate a NifExport and set it in proc specific data
+ */
+static NifExport*
+allocate_nif_sched_data(Process* proc, int argc)
+{
+ NifExport* ep;
+ size_t argv_extra, total;
int i;
- if (!*ep) {
- *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(Export));
- sys_memset((void*) *ep, 0, sizeof(Export));
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- (*ep)->addressv[i] = &(*ep)->code[3];
- }
- (*ep)->code[3] = (BeamInstr) em_call_nif;
+
+ argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0;
+ total = sizeof(NifExport) + argv_extra;
+ ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
+ sys_memset((void*) ep, 0, total);
+ ep->alloced_argv_sz = argc;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->exp.addressv[i] = &ep->exp.code[3];
}
- (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, *ep);
+ ep->exp.code[3] = (BeamInstr) em_call_nif;
+ (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep);
+ return ep;
+}
+
+static ERTS_INLINE void
+destroy_nif_export(NifExport *nif_export)
+{
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
}
+void
+erts_destroy_nif_export(void *nif_export)
+{
+ destroy_nif_export((NifExport *) nif_export);
+}
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save MFA and registers only if the need_save
+ * parameter is true.
+ */
static ERL_NIF_TERM
-execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array;
- ERL_NIF_TERM result = (ERL_NIF_TERM) reg[0];
- typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM);
- FinalizerFP fp;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- enif_get_ulong(env, reg[1], (unsigned long *) &fp);
-#endif
- return (*fp)(env, result);
+ Process* proc = env->proc;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ NifExport* ep;
+ int i;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ if (!ep)
+ ep = allocate_nif_sched_data(proc, argc);
+ else if (need_save && ep->alloced_argv_sz < argc) {
+ NifExport* new_ep = allocate_nif_sched_data(proc, argc);
+ destroy_nif_export(ep);
+ ep = new_ep;
+ }
+ ERTS_VBUMP_ALL_REDS(proc);
+ for (i = 0; i < argc; i++) {
+ if (need_save)
+ ep->argv[i] = reg[i];
+ reg[i] = (Eterm) argv[i];
+ }
+ if (need_save) {
+ ep->saved_mfa[0] = proc->current[0];
+ ep->saved_mfa[1] = proc->current[1];
+ ep->saved_mfa[2] = proc->current[2];
+ ep->saved_argc = argc;
+ }
+ proc->i = (BeamInstr*) ep->exp.addressv[0];
+ ep->exp.code[0] = (BeamInstr) proc->current[0];
+ ep->exp.code[1] = (BeamInstr) proc->current[1];
+ ep->exp.code[2] = argc;
+ ep->exp.code[4] = (BeamInstr) direct_fp;
+ ep->m = env->mod_nif;
+ ep->fp = indirect_fp;
+ proc->freason = TRAP;
+ proc->arity = argc;
+ return THE_NON_VALUE;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+/*
+ * Restore saved MFA and registers. Registers are restored only when the
+ * exception flag is true.
+ */
+static void
+restore_nif_mfa(Process* proc, NifExport* ep, int exception)
+{
+ int i;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ proc->current[0] = ep->saved_mfa[0];
+ proc->current[1] = ep->saved_mfa[1];
+ proc->current[2] = ep->saved_mfa[2];
+ if (exception)
+ for (i = 0; i < ep->saved_argc; i++)
+ reg[i] = ep->argv[i];
+ ep->saved_argc = 0;
+ ep->saved_mfa[0] = THE_NON_VALUE;
+}
-ERL_NIF_TERM
-enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
- int argc, const ERL_NIF_TERM argv[])
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
+static ERL_NIF_TERM
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 0);
+ return argv[0];
+}
+
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 1);
+ return enif_make_badarg(env);
+}
+
+/*
+ * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
+ * then check the result and schedule the appropriate finalizer function
+ * where needed. Also restore the original NIF MFA when appropriate.
+ */
+static ERL_NIF_TERM
+execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+
+ /*
+ * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
+ close_lib(env->mod_nif);
+ /*
+ * If no more NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF calling
+ * context. Reuse fp essentially as a boolean for this, passing it to
+ * init_nif_sched_data below. Both dirty_nif_exception and
+ * dirty_nif_finalizer then check ep->fp to decide whether or not to
+ * restore the original calling context.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ fp = NULL;
+ if (is_non_value(result)) {
+ if (proc->freason != TRAP) {
+ ASSERT(proc->freason == BADARG);
+ return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
+ } else {
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, 1);
+ return result;
+ }
+ }
+ else
+ return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+}
+
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
+ * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
+ * type (CPU or I/O) is indicated in flags parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
erts_aint32_t state, n, a;
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- Export* ep = NULL;
- int i;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ int need_save;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- return enif_make_badarg(env);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
@@ -1577,7 +1794,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
*/
n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
n |= ERTS_PSFLG_DIRTY_IO_PROC;
@@ -1585,82 +1802,106 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
if (a == state)
break;
}
- if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->code[2] = argc;
- for (i = 0; i < argc; i++) {
- reg[i] = (Eterm) argv[i];
- }
- proc->i = (BeamInstr*) ep->addressv[0];
- ep->code[4] = (BeamInstr) fp;
- proc->freason = TRAP;
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+ return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+}
- return THE_NON_VALUE;
-#else
- return (*fp)(env, argc, argv);
-#endif
+static ERL_NIF_TERM
+schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ /*
+ * If no NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF MFA.
+ */
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP);
+ return result;
}
ERL_NIF_TERM
-enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- Export* ep;
+ NifExport* ep;
+ ERL_NIF_TERM fun_name_atom, result;
+ int need_save;
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- |ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
- |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->code[2] = 2;
- reg[0] = (Eterm) result;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp);
-#endif
- proc->i = (BeamInstr*) ep->addressv[0];
- ep->code[4] = (BeamInstr) execute_dirty_nif_finalizer;
- proc->freason = TRAP;
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
- return THE_NON_VALUE;
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+
+ if (flags) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ NativeFunPtr sched_fun;
+ int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
+ if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
+ sched_fun = schedule_dirty_io_nif;
+ else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ sched_fun = schedule_dirty_cpu_nif;
+ else
+ return enif_make_badarg(env);
+ result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return (*fp)(env, result);
+ return enif_make_badarg(env);
#endif
-}
+ }
+ else
+ result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-/* A simple finalizer that just returns its result argument */
-ERL_NIF_TERM
-enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)
-{
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->exp.code[1] = (BeamInstr) fun_name_atom;
return result;
}
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
}
-int
-enif_have_dirty_schedulers()
-{
-#ifdef USE_THREADS
- return 1;
-#else
- return 0;
-#endif
-}
-
#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
/* Maps */
@@ -1961,6 +2202,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+/*
+ * The function below is for looping through ErlNifFunc arrays, helping
+ * provide backwards compatibility across the version 2.7 change that added
+ * the "flags" field to ErlNifFunc.
+ */
+static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+{
+ ASSERT(incrp);
+ if (!*incrp) {
+ if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ *incrp = sizeof(ErlNifFunc);
+ else {
+ /*
+ * ErlNifFuncV1 below is what ErlNifFunc was before the
+ * addition of the flags field for 2.7, and is needed to handle
+ * backward compatibility.
+ */
+ typedef struct {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }ErlNifFuncV1;
+ *incrp = sizeof(ErlNifFuncV1);
+ }
+ }
+ return (ErlNifFunc*) ((char*)func + *incrp);
+}
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
@@ -2049,8 +2319,10 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
}
- else if (entry->major != ERL_NIF_MAJOR_VERSION
- || entry->minor > ERL_NIF_MINOR_VERSION
+ else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ || (ERL_NIF_MAJOR_VERSION < entry->major
+ || (ERL_NIF_MAJOR_VERSION == entry->major
+ && ERL_NIF_MINOR_VERSION < entry->minor))
|| (entry->major==2 && entry->minor == 5)) { /* experimental maps */
ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
@@ -2068,22 +2340,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
+
+ int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (maybe_dirty_nifs && f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+#else
+ ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
+ mod_atom, f->name, f->arity);
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (code_pp[1] - code_pp[0] < (5+4))
+#else
+ else if (code_pp[1] - code_pp[0] < (5+3))
+#endif
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
+ f = next_func(entry, &incr, f);
}
}
@@ -2109,7 +2407,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
 * is deprecated and was only meant as a development feature not to
* be used in production systems. (See warning below)
*/
- int k;
+ int k, old_incr = 0;
+ ErlNifFunc* old_func;
lib->priv_data = mod->curr.nif->priv_data;
ASSERT(mod->curr.nif->entry != NULL);
@@ -2118,13 +2417,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
/* Check that no NIF is removed */
+ old_func = mod->curr.nif->entry->funcs;
for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ if (old_func->arity == f->arity
+ && sys_strcmp(old_func->name, f->name) == 0) {
break;
}
+ f = next_func(entry, &incr, f);
}
if (i == entry->num_of_funcs) {
ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
@@ -2132,7 +2434,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- }
+ old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ }
erts_pre_nif(&env, BIF_P, lib);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
@@ -2179,13 +2482,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
+
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
@@ -2193,10 +2500,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
GenericBp* g = (GenericBp *) code_ptr[1];
ASSERT(code_ptr[5+0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ }
+ if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ code_ptr[5+3] = (BeamInstr) f->fptr;
+ code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) schedule_dirty_io_nif :
+ (BeamInstr) schedule_dirty_cpu_nif;
+#endif
+ }
+ else
+ code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
+ f = next_func(entry, &incr, f);
}
}
else {
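A usage sketch of the new enif_schedule_nif() entry point added above (hypothetical NIF library: the module name, function name and chunk size are invented). A long-running NIF yields by rescheduling itself with flags == 0, and the NifExport machinery above takes care of saving and restoring the original MFA:

    #include "erl_nif.h"

    #define CHUNK 4096

    static ERL_NIF_TERM count_up(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        unsigned long current, limit;

        if (argc != 2
            || !enif_get_ulong(env, argv[0], &current)
            || !enif_get_ulong(env, argv[1], &limit))
            return enif_make_badarg(env);

        if (limit - current > CHUNK) {
            ERL_NIF_TERM new_argv[2];
            new_argv[0] = enif_make_ulong(env, current + CHUNK);
            new_argv[1] = argv[1];
            /* Report the slice we used, then trap back into ourselves on a
             * regular scheduler (flags == 0). */
            enif_consume_timeslice(env, 100);
            return enif_schedule_nif(env, "count_up", 0, count_up, 2, new_argv);
        }
        return enif_make_ulong(env, limit);
    }

    static ErlNifFunc nif_funcs[] = {
        {"count_up", 2, count_up, 0}   /* 4th field is the new 2.7 flags member */
    };

    ERL_NIF_INIT(example_nif, nif_funcs, NULL, NULL, NULL, NULL)

A dirty variant would instead pass ERL_NIF_DIRTY_JOB_CPU_BOUND or ERL_NIF_DIRTY_JOB_IO_BOUND as the flags argument, which the code above only accepts when the emulator has dirty scheduler support.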
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index c12ba4d554..849024453c 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -42,9 +42,25 @@
** 2.5: R17 Maps API additions
** 2.6: R17 with maps
** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 6
+#define ERL_NIF_MINOR_VERSION 7
+
+/*
+ * The emulator will refuse to load a nif-lib with a major version
+ * lower than ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD. The load
+ * may, however, fail if the user has not removed use of deprecated
+ * symbols.
+ *
+ * The ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD has to allow
+ * loading of nif-libs built at least two major OTP releases
+ * ago.
+ */
+#define ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD 2
#include <stdlib.h>
@@ -113,8 +129,10 @@ typedef struct
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
+
typedef struct enif_entry_t
{
int major;
@@ -127,8 +145,11 @@ typedef struct enif_entry_t
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
const char* vm_variant;
+ unsigned options;
}ErlNifEntry;
+/* Field bits for ErlNifEntry options */
+#define ERL_NIF_DIRTY_NIF_OPTION 1
typedef struct
@@ -231,6 +252,11 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# endif
#endif
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
+#else
+# define ERL_NIF_ENTRY_OPTIONS 0
+#endif
#ifdef __cplusplus
}
@@ -256,7 +282,8 @@ ERL_NIF_INIT_DECL(NAME) \
sizeof(FUNCS) / sizeof(*FUNCS), \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
- ERL_NIF_VM_VARIANT \
+ ERL_NIF_VM_VARIANT, \
+ ERL_NIF_ENTRY_OPTIONS \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
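A sketch of how a NIF library is expected to use the new ErlNifFunc flags field (hypothetical module and function names). When the emulator is built with dirty scheduler support, the loader patches such an entry to go through the dirty scheduling wrappers in erl_nif.c; without that support, a non-zero flags value makes loading fail, hence the fallback to 0 here:

    #include "erl_nif.h"

    static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* ... CPU-heavy computation that should not occupy a normal scheduler ... */
        return enif_make_atom(env, "done");
    }

    static ErlNifFunc nif_funcs[] = {
    #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
        {"heavy_work", 1, heavy_work, ERL_NIF_DIRTY_JOB_CPU_BOUND},
    #else
        {"heavy_work", 1, heavy_work, 0},   /* flags must be 0 or loading fails */
    #endif
    };

    ERL_NIF_INIT(dirty_example, nif_funcs, NULL, NULL, NULL, NULL)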
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index d7c554e60b..630cefae93 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -22,7 +22,7 @@
#endif
/*
-** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
** to keep compatibility on Windows!!!
**
** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
@@ -141,14 +141,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM)));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM));
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void));
-#endif
-
ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env));
@@ -163,12 +155,22 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMap
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value));
-
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
/*
-** Add new entries here to keep compatibility on Windows!!!
+** ADD NEW ENTRIES HERE (before this comment) !!!
*/
+
+
+/*
+ * Conditional EXPERIMENTAL stuff always last.
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
#endif
+#endif /* ERL_NIF_API_FUNC_DECL */
/*
** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
@@ -282,21 +284,12 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
#endif
-
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif)
-# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer)
-# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer)
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers)
-#endif
-
# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map)
# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size)
# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map)
@@ -311,11 +304,21 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next)
# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev)
# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
/*
-** Add new entries here
+** ADD NEW ENTRIES HERE (before this comment)
*/
+
+/*
+ * Conditional EXPERIMENTAL stuff always last
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
#endif
+#endif /* ERL_NIF_API_FUNC_MACRO */
#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
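The "add new entries at the bottom" warnings exist because, on Windows, the NIF API is reached through a callback struct whose member offsets are part of the binary interface. A toy illustration (not ERTS code) of why appending is safe while inserting in the middle is not:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct {
        int (*old_fn_a)(void);
        int (*old_fn_b)(void);
        /* New entries go here, after everything an old nif-lib may reference.
         * Inserting above old_fn_b would shift its offset and break already
         * built libraries that were compiled against the old layout. */
        int (*new_fn)(void);
    } callback_table;

    int main(void) {
        printf("old_fn_a at offset %zu\n", offsetof(callback_table, old_fn_a));
        printf("old_fn_b at offset %zu\n", offsetof(callback_table, old_fn_b));
        printf("new_fn   at offset %zu\n", offsetof(callback_table, new_fn));
        return 0;
    }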
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index fb6048b41f..2aa0a27197 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -32,6 +32,7 @@
#include "global.h"
#include "erl_port_task.h"
#include "dist.h"
+#include "erl_check_io.h"
#include "dtrace-wrapper.h"
#include <stdarg.h>
@@ -68,6 +69,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
+ ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PT_STATE_SCHEDULED 0
@@ -543,6 +551,16 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
+reset_executed_io_task_handle(ErtsPortTask *ptp)
+{
+ if (ptp->u.alive.handle) {
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ erts_io_notify_port_task_executed(ptp->u.alive.handle);
+ reset_port_task_handle(ptp->u.alive.handle);
+ }
+}
+
+static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->u.alive.handle = pthp;
@@ -798,12 +816,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
static ERTS_INLINE void
abort_nosuspend_task(Port *pp,
ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
@@ -991,6 +1010,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -1007,6 +1027,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1018,12 +1042,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
@@ -1345,7 +1381,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1369,11 +1405,9 @@ erts_port_task_schedule(Eterm id,
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
- if (pthp && erts_port_task_is_scheduled(pthp)) {
- ASSERT(0);
- erts_port_task_abort(pthp);
- }
+ ERTS_LC_ASSERT(!pthp || !erts_port_task_is_scheduled(pthp));
ASSERT(is_internal_port(id));
@@ -1457,6 +1491,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1481,6 +1519,13 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
@@ -1489,8 +1534,10 @@ erts_port_task_schedule(Eterm id,
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
+ /* Emigrate port ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = erts_port_runq(pp);
@@ -1500,10 +1547,6 @@ erts_port_task_schedule(Eterm id,
#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
erts_smp_runq_unlock(runq);
@@ -1511,6 +1554,9 @@ erts_port_task_schedule(Eterm id,
done:
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
@@ -1525,7 +1571,7 @@ abort_nosuspend:
erts_port_dec_refc(pp);
#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
@@ -1609,6 +1655,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto done;
}
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+
erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1659,8 +1707,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto aborted_port_task;
}
- reset_handle(ptp);
-
if (erts_system_monitor_long_schedule != 0) {
start_time = erts_timestamp_millis();
}
@@ -1671,6 +1717,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
switch (ptp->type) {
case ERTS_PORT_TASK_TIMEOUT:
+ reset_handle(ptp);
reds = ERTS_PORT_REDS_TIMEOUT;
if (!(state & ERTS_PORT_SFLGS_DEAD)) {
DTRACE_DRIVER(driver_timeout, pp);
@@ -1685,6 +1732,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_OUTPUT:
@@ -1693,6 +1741,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
DTRACE_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_EVENT:
@@ -1702,10 +1751,12 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
(*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event,
ptp->u.alive.td.io.event_data);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ reset_handle(ptp);
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
if (!pp->sched.taskq.bpq)
reds = ptp->u.alive.td.psig.callback(pp,
@@ -1723,6 +1774,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
break;
}
case ERTS_PORT_TASK_DIST_CMD:
+ reset_handle(ptp);
reds = erts_dist_command(pp, CONTEXT_REDS - pp->reds);
break;
default:
@@ -1765,10 +1817,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
@@ -1791,11 +1839,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
+ if (active) {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
@@ -1804,6 +1848,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
@@ -1811,7 +1857,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
+ /* Emigrate port... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
@@ -2035,9 +2081,9 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
DTRACE_CHARBUF(pid_str, 16);
ErtsProcList* plp2 = plp;
- erts_snprintf(port_str, sizeof(port_str), "%T", pp->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", pp->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 1d30465ec9..406cd3c492 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_EXEC_IMM \
| ERTS_PTS_FLG_FORCE_SCHED \
| ERTS_PTS_FLG_EXITING)
@@ -154,7 +156,7 @@ erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
ERTS_GLB_INLINE int
erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
{
- return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
+ return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL;
}
ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 37e1d07107..ea63d20dfa 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -148,6 +148,12 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
+#ifdef __OSE__
+/* Eager check I/O not supported on OSE yet. */
+int erts_eager_check_io = 0;
+#else
+int erts_eager_check_io = 0;
+#endif
int erts_sched_compact_load;
int erts_sched_balance_util = 0;
Uint erts_no_schedulers;
@@ -590,12 +596,10 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
= ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -712,72 +716,24 @@ sched_wall_time_ts(void)
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
-#ifdef ARCH_64
-
-static ERTS_INLINE Uint64
-aschedtime_read(ErtsAtomicSchedTime *var)
-{
- return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
-}
-
-static ERTS_INLINE void
-aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
-{
- erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
-}
-
-static ERTS_INLINE void
-aschedtime_init(ErtsAtomicSchedTime *var)
-{
- erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
-}
-
-#elif defined(ARCH_32)
-
static ERTS_INLINE Uint64
aschedtime_read(ErtsAtomicSchedTime *var)
{
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw.dw_sint;
-#else
- {
- Uint64 res;
- res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
- }
-#endif
+ return (Uint64) erts_atomic64_read_nob((erts_atomic64_t *) var);
}
static ERTS_INLINE void
aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
{
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
- erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
+ erts_atomic64_set_nob((erts_atomic64_t *) var, (erts_aint64_t) val);
}
static ERTS_INLINE void
aschedtime_init(ErtsAtomicSchedTime *var)
{
- erts_dw_aint_t dw;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
- erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
+ erts_atomic64_init_nob((erts_atomic64_t *) var, (erts_aint64_t) 0);
}
-#else
-# error :-/
-#endif
-
#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
@@ -2211,6 +2167,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
@@ -2380,29 +2339,47 @@ try_set_sys_scheduling(void)
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(ErtsSchedulerData *esdp)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp, int non_blocking)
{
+ if (non_blocking && erts_eager_check_io) {
#ifdef ERTS_SMP
- while (!erts_port_task_have_outstanding_io_tasks()
- && try_set_sys_scheduling()) {
#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- if (esdp->no != 1) {
- /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
- then we make sure to wake scheduler 1 */
- ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
- clear_sys_scheduling();
- wake_scheduler(rq);
- return 0;
- }
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+ wake_scheduler(rq);
+ return 0;
+ }
#endif
- if (!erts_port_task_have_outstanding_io_tasks())
+ return try_set_sys_scheduling();
+#else
return 1;
- clear_sys_scheduling();
+#endif
}
- return 0;
+ else {
+#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+ clear_sys_scheduling();
+ wake_scheduler(rq);
+ return 0;
+ }
+#endif
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
#else
- return !erts_port_task_have_outstanding_io_tasks();
+ return !erts_port_task_have_outstanding_io_tasks();
#endif
+ }
}
#ifdef ERTS_SMP
@@ -2672,6 +2649,13 @@ aux_thread(void *unused)
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "aux_thread";
+ erts_lc_set_thread_name(buf);
+ }
+#endif
+
ssi->event = erts_tse_fetch();
callbacks.arg = (void *) ssi;
@@ -2772,7 +2756,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* be waiting in erl_sys_schedule()
*/
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp, 0)) {
sched_waiting(esdp->no, rq);
@@ -2936,7 +2920,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule(esdp)) {
+ if (!prepare_for_sys_schedule(esdp, 0)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -2958,7 +2942,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule(esdp)) {
+ if (!prepare_for_sys_schedule(esdp, 0)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -3212,11 +3196,11 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
return 0;
wrq = ERTS_RUNQ_IX(ix);
flags = ERTS_RUNQ_FLGS_GET(wrq);
+ if (activate && !(flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+ if (try_inc_no_active_runqs(ix+1))
+ (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
+ }
if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
- if (activate) {
- if (try_inc_no_active_runqs(ix+1))
- (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
- }
wake_scheduler(wrq);
return 1;
}
@@ -3748,17 +3732,25 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
} else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
@@ -5867,6 +5859,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))) {
@@ -5980,7 +5975,8 @@ change_proc_schedule_state(Process *p,
erts_aint32_t clear_state_flags,
erts_aint32_t set_state_flags,
erts_aint32_t *statep,
- erts_aint32_t *enq_prio_p)
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
/*
* NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
@@ -5989,6 +5985,11 @@ change_proc_schedule_state(Process *p,
*/
erts_aint32_t a = *statep, n;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -5998,6 +5999,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ if (lock_status)
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
while (1) {
erts_aint32_t e;
n = e = a;
@@ -6033,7 +6037,9 @@ change_proc_schedule_state(Process *p,
break;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
@@ -6046,15 +6052,18 @@ change_proc_schedule_state(Process *p,
profile_runnable_proc(p, am_active);
}
+ if (lock_status)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
*statep = a;
return enqueue;
}
static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
{
erts_aint32_t enq_prio = -1;
erts_aint32_t state = in_state;
@@ -6062,7 +6071,8 @@ schedule_process(Process *p, erts_aint32_t in_state)
0,
ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -6070,16 +6080,27 @@ schedule_process(Process *p, erts_aint32_t in_state)
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state);
+ schedule_process(p, state, locks);
}
static void
schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
{
+ /*
+ * Expects status lock to be locked when called, and
+ * returns with status lock unlocked...
+ */
erts_aint32_t a = state, n, enq_prio = -1;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6088,7 +6109,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
n = e = a;
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
+ goto cleanup; /* We don't want to schedule free processes... */
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6101,7 +6122,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
goto cleanup;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
@@ -6111,6 +6132,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
profile_runnable_proc(p, am_active);
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ prof_runnable_procs = 0;
}
if (enqueue != ERTS_ENQUEUE_NOT) {
@@ -6125,8 +6148,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
cleanup:
+
+ if (prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (proxy)
free_proxy_proc(proxy);
+
+ ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
}
static ERTS_INLINE int
@@ -6144,7 +6173,7 @@ suspend_process(Process *c_p, Process *p)
if (c_p == p) {
state = erts_smp_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- ASSERT(state & ERTS_PSFLG_RUNNING);
+ ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
@@ -6193,7 +6222,7 @@ suspend_process(Process *c_p, Process *p)
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
erts_aint32_t state, enq_prio = -1;
int enqueue;
@@ -6210,7 +6239,8 @@ resume_process(Process *p)
ERTS_PSFLG_SUSPENDED,
0,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -7811,6 +7841,9 @@ erts_start_schedulers(void)
#ifdef ETHR_HAVE_THREAD_NAMES
opts.name = malloc(80);
+ if (!opts.name) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#endif
#ifdef ERTS_SMP
@@ -8023,7 +8056,8 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8058,7 +8092,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
@@ -8216,7 +8250,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
@@ -8576,7 +8611,8 @@ resume_process_1(BIF_ALIST_1)
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
@@ -8706,7 +8742,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
@@ -8725,7 +8761,7 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -9140,7 +9176,7 @@ Process *schedule(Process *p, int calls)
}
else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
(fcalls > input_reductions &&
- prepare_for_sys_schedule(esdp))) {
+ prepare_for_sys_schedule(esdp, !0))) {
/*
* Schedule system-level activities.
*/
@@ -9148,8 +9184,6 @@ Process *schedule(Process *p, int calls)
erts_smp_atomic32_set_relb(&function_calls, 0);
fcalls = 0;
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
#if 0 /* Not needed since we wont wait in sys schedule */
erts_sys_schedule_interrupt(0);
#endif
@@ -9181,7 +9215,9 @@ Process *schedule(Process *p, int calls)
if (RUNQ_READ_LEN(&rq->ports.info.len)) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if ((have_outstanding_io && fcalls > 2*input_reductions)
+ if ((!erts_eager_check_io
+ && have_outstanding_io
+ && fcalls > 2*input_reductions)
|| rq->halt_in_progress) {
/*
* If we have performed more than 2*INPUT_REDUCTIONS since
@@ -9207,7 +9243,6 @@ Process *schedule(Process *p, int calls)
*/
pick_next_process: {
erts_aint32_t psflg_band_mask;
- erts_aint32_t running_flag;
int prio_q;
int qmask;
@@ -9269,12 +9304,6 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
}
-
- if (state & ERTS_PSFLG_ACTIVE_SYS)
- running_flag = ERTS_PSFLG_RUNNING_SYS;
- else
- running_flag = ERTS_PSFLG_RUNNING;
-
while (1) {
erts_aint32_t exp, new, tmp;
tmp = new = exp = state;
@@ -9284,8 +9313,12 @@ Process *schedule(Process *p, int calls)
tmp = state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS);
- if (tmp != ERTS_PSFLG_SUSPENDED)
- new |= running_flag;
+ if (tmp != ERTS_PSFLG_SUSPENDED) {
+ if (state & ERTS_PSFLG_ACTIVE_SYS)
+ new |= ERTS_PSFLG_RUNNING_SYS;
+ else
+ new |= ERTS_PSFLG_RUNNING;
+ }
}
state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
@@ -9964,8 +9997,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
rp_state = n;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
+ /*
+ * schedule_process_sys_task() unlocks status
+ * lock on process.
+ */
schedule_process_sys_task(rp, rp_state, NULL);
if (free_stqs)
@@ -10710,7 +10745,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state);
+ schedule_process(p, state, 0);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -11031,7 +11066,8 @@ set_proc_exiting(Process *p,
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -11072,7 +11108,8 @@ set_proc_self_exiting(Process *c_p)
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
ASSERT(!enqueue);
return state;
@@ -11313,7 +11350,7 @@ send_exit_signal(Process *c_p, /* current process if and only
dtrace_pid_str(from, sender_str);
dtrace_proc_str(rp, receiver_str);
- erts_snprintf(reason_buf, sizeof(reason_buf) - 1, "%T", reason);
+ erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
}
#endif
@@ -11717,8 +11754,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
@@ -11810,6 +11848,7 @@ erts_continue_exit_process(Process *p)
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
erts_aint32_t state;
+ void *nif_export;
#ifdef DEBUG
int yield_allowed = 1;
@@ -11960,6 +11999,7 @@ erts_continue_exit_process(Process *p)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -12007,6 +12047,9 @@ erts_continue_exit_process(Process *p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
delete_process(p);
#ifdef ERTS_SMP
@@ -12051,7 +12094,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state);
+ schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
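
Aside (illustrative only, not part of the patch): the aux_work_timeout_early_init() hunk above over-allocates with malloc() and then rounds the pointer up to the next cache-line boundary, aborting via ERTS_INTERNAL_ERROR() when malloc() fails. Below is a minimal standalone sketch of that rounding, assuming a 64-byte cache line; the names CACHE_LINE_SIZE and cache_aligned_alloc are illustrative and not taken from the ERTS sources.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE 64
#define CACHE_LINE_MASK ((uintptr_t) (CACHE_LINE_SIZE - 1))

static void *cache_aligned_alloc(size_t size)
{
    /* Over-allocate so there is always room to round up to a boundary. */
    uintptr_t p = (uintptr_t) malloc(size + CACHE_LINE_SIZE - 1);
    if (!p)
        return NULL;                     /* ERTS aborts here instead */
    if (p & CACHE_LINE_MASK)
        p = (p & ~CACHE_LINE_MASK) + CACHE_LINE_SIZE;
    return (void *) p;                   /* aligned; the original pointer is
                                            never freed, as in the patch */
}

int main(void)
{
    void *blk = cache_aligned_alloc(128);
    printf("aligned: %d\n", (int) (((uintptr_t) blk & CACHE_LINE_MASK) == 0));
    return 0;
}
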
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index ed6dadbffa..f50b217d4a 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -104,6 +104,7 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
+extern int erts_eager_check_io;
extern int erts_sched_compact_load;
extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
@@ -350,13 +351,7 @@ typedef struct {
#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
-#ifdef ARCH_64
-typedef erts_atomic_t ErtsAtomicSchedTime;
-#elif defined(ARCH_32)
-typedef erts_dw_atomic_t ErtsAtomicSchedTime;
-#else
-# error :-/
-#endif
+typedef erts_atomic64_t ErtsAtomicSchedTime;
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
typedef struct {
@@ -734,13 +729,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DIST_ENTRY 3
#define ERTS_PSD_CALL_TIME_BP 4
#define ERTS_PSD_DELAYED_GC_TASK_QS 5
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6
+#define ERTS_PSD_NIF_TRAP_EXPORT 6
#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
-#endif
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -767,10 +758,8 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#endif
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -1367,6 +1356,9 @@ Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
+int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
+void erts_destroy_nif_export(void *); /* see erl_nif.c */
+
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1704,17 +1696,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
-void erts_schedule_process(Process *, erts_aint32_t);
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
#endif
@@ -1817,12 +1809,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \
- ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT))
-#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \
- ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE)))
-#endif
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, NTE) \
+ erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
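
Aside (illustrative only): the erl_process.h hunks above generalize the dirty-scheduler-only PSD slot into ERTS_PSD_NIF_TRAP_EXPORT. The sketch below shows the underlying pattern, a fixed array of void* slots per process with get/set accessors where set returns the previous value; the names psd_t, psd_get and psd_set are hypothetical stand-ins, not the ERTS erts_psd_get()/erts_psd_set() API.

#include <stddef.h>
#include <stdio.h>

#define PSD_NIF_TRAP_EXPORT 6
#define PSD_SIZE 7

typedef struct { void *data[PSD_SIZE]; } psd_t;

static void *psd_get(psd_t *psd, int ix)
{
    return psd->data[ix];
}

static void *psd_set(psd_t *psd, int ix, void *val)
{
    void *old = psd->data[ix];   /* hand back the previous slot value */
    psd->data[ix] = val;
    return old;
}

int main(void)
{
    psd_t psd = { { NULL } };
    int nif_export_stub;
    psd_set(&psd, PSD_NIF_TRAP_EXPORT, &nif_export_stub);
    printf("slot taken: %d\n",
           psd_get(&psd, PSD_NIF_TRAP_EXPORT) == &nif_export_stub);
    return 0;
}
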
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index eabf016081..02943ee683 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -280,124 +280,38 @@ struct ErtsPTabListBifData_ {
};
-#ifdef ARCH_32
-
-static ERTS_INLINE Uint64
-dw_aint_to_uint64(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-static void
-unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
static ERTS_INLINE void
last_data_init_nob(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+ erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE void
last_data_set_relb(ErtsPTab *ptab, Uint64 val)
{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+ erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE Uint64
last_data_read_nob(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_read_acqb(ErtsPTab *ptab)
{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
- return dw_aint_to_uint64(&dw);
+ return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
{
- erts_dw_aint_t dw_new, dw_xchg;
-
- unint64_to_dw_aint(&dw_new, new);
- unint64_to_dw_aint(&dw_xchg, exp);
-
- if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- &dw_new,
- &dw_xchg))
- return exp;
- else
- return dw_aint_to_uint64(&dw_xchg);
-}
-
-#elif defined(ARCH_64)
-
-union {
- erts_smp_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-static ERTS_INLINE void
-last_data_init_nob(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
+ return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint64_t) new,
+ (erts_aint64_t) exp);
}
-static ERTS_INLINE void
-last_data_set_relb(ErtsPTab *ptab, Uint64 val)
-{
- erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE Uint64
-last_data_read_nob(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_read_acqb(ErtsPTab *ptab)
-{
- return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
-}
-
-static ERTS_INLINE Uint64
-last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
-{
- return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
- (erts_aint_t) new,
- (erts_aint_t) exp);
-}
-
-#else
-# error "Not 64-bit, nor 32-bit architecture..."
-#endif
-
static ERTS_INLINE int
last_data_cmp(Uint64 ld1, Uint64 ld2)
{
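
Aside (illustrative only): the erl_ptab.c hunks above collapse the separate 32-bit (double-word) and 64-bit code paths for last_data into one set of erts_smp_atomic64_* calls. The sketch below expresses the same accessor shape with C11 atomics, assuming <stdatomic.h>; last_data_cmpxchg is a hypothetical stand-in for last_data_cmpxchg_relb().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_data;

static uint64_t last_data_cmpxchg(uint64_t new_val, uint64_t expected)
{
    /* Returns the previous value; the caller compares it against `expected`
     * to find out whether the exchange actually happened. */
    atomic_compare_exchange_strong(&last_data, &expected, new_val);
    return expected;
}

int main(void)
{
    atomic_store(&last_data, 41);
    uint64_t prev = last_data_cmpxchg(42, 41);
    printf("prev=%llu now=%llu\n",
           (unsigned long long) prev,
           (unsigned long long) atomic_load(&last_data));
    return 0;
}
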
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index e3e05f14af..876241159b 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -88,11 +88,7 @@ typedef struct {
} ErtsPTabListData;
typedef struct {
-#ifdef ARCH_32
- erts_smp_dw_atomic_t last_data;
-#else
- erts_smp_atomic_t last_data;
-#endif
+ erts_smp_atomic64_t last_data;
erts_smp_atomic32_t count;
erts_smp_atomic32_t aid_ix;
erts_smp_atomic32_t fid_ix;
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index c38ef47d87..6c40edeb3e 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -60,6 +60,7 @@ typedef erts_tsd_key_t erts_smp_tsd_key_t;
#define erts_smp_dw_atomic_t erts_dw_atomic_t
#define erts_smp_atomic_t erts_atomic_t
#define erts_smp_atomic32_t erts_atomic32_t
+#define erts_smp_atomic64_t erts_atomic64_t
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
@@ -95,6 +96,7 @@ typedef int erts_smp_tsd_key_t;
#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
#define erts_smp_atomic_t erts_no_atomic_t
#define erts_smp_atomic32_t erts_no_atomic32_t
+#define erts_smp_atomic64_t erts_no_atomic64_t
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
typedef struct { } erts_smp_rwlock_t;
@@ -489,6 +491,116 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty
#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty
+/* 64-bit atomics */
+
+#define erts_smp_atomic64_init_nob erts_atomic64_init_nob
+#define erts_smp_atomic64_set_nob erts_atomic64_set_nob
+#define erts_smp_atomic64_read_nob erts_atomic64_read_nob
+#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob
+#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob
+#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob
+#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob
+#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob
+#define erts_smp_atomic64_add_nob erts_atomic64_add_nob
+#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob
+#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob
+#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob
+#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob
+#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob
+
+#define erts_smp_atomic64_init_mb erts_atomic64_init_mb
+#define erts_smp_atomic64_set_mb erts_atomic64_set_mb
+#define erts_smp_atomic64_read_mb erts_atomic64_read_mb
+#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb
+#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb
+#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb
+#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb
+#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb
+#define erts_smp_atomic64_add_mb erts_atomic64_add_mb
+#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb
+#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb
+#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb
+#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb
+#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb
+
+#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb
+#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb
+#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb
+#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb
+#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb
+#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb
+#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb
+#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb
+#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb
+#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb
+#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb
+#define erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb
+#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb
+#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb
+
+#define erts_smp_atomic64_init_relb erts_atomic64_init_relb
+#define erts_smp_atomic64_set_relb erts_atomic64_set_relb
+#define erts_smp_atomic64_read_relb erts_atomic64_read_relb
+#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb
+#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb
+#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb
+#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb
+#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb
+#define erts_smp_atomic64_add_relb erts_atomic64_add_relb
+#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb
+#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb
+#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb
+#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb
+#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb
+
+#define erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb
+#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb
+#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb
+#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb
+#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb
+#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb
+#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb
+#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb
+#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb
+#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb
+#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb
+#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb
+#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb
+#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb
+
+#define erts_smp_atomic64_init_rb erts_atomic64_init_rb
+#define erts_smp_atomic64_set_rb erts_atomic64_set_rb
+#define erts_smp_atomic64_read_rb erts_atomic64_read_rb
+#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb
+#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb
+#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb
+#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb
+#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb
+#define erts_smp_atomic64_add_rb erts_atomic64_add_rb
+#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb
+#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb
+#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb
+#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb
+#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb
+
+#define erts_smp_atomic64_init_wb erts_atomic64_init_wb
+#define erts_smp_atomic64_set_wb erts_atomic64_set_wb
+#define erts_smp_atomic64_read_wb erts_atomic64_read_wb
+#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb
+#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb
+#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb
+#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb
+#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb
+#define erts_smp_atomic64_add_wb erts_atomic64_add_wb
+#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb
+#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb
+#define erts_smp_atomic64_xchg_wb erts_atomic64_xchg_wb
+#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb
+#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb
+
+#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty
+#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty
+
#else /* !ERTS_SMP */
/* Double word size atomics */
@@ -751,6 +863,116 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_set_dirty erts_no_atomic32_set
#define erts_smp_atomic32_read_dirty erts_no_atomic32_read
+/* 64-bit atomics */
+
+#define erts_smp_atomic64_init_nob erts_no_atomic64_set
+#define erts_smp_atomic64_set_nob erts_no_atomic64_set
+#define erts_smp_atomic64_read_nob erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_nob erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_mb erts_no_atomic64_set
+#define erts_smp_atomic64_set_mb erts_no_atomic64_set
+#define erts_smp_atomic64_read_mb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_mb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_mb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_acqb erts_no_atomic64_set
+#define erts_smp_atomic64_set_acqb erts_no_atomic64_set
+#define erts_smp_atomic64_read_acqb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_acqb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_relb erts_no_atomic64_set
+#define erts_smp_atomic64_set_relb erts_no_atomic64_set
+#define erts_smp_atomic64_read_relb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_relb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set
+#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set
+#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_rb erts_no_atomic64_set
+#define erts_smp_atomic64_set_rb erts_no_atomic64_set
+#define erts_smp_atomic64_read_rb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_rb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_rb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_init_wb erts_no_atomic64_set
+#define erts_smp_atomic64_set_wb erts_no_atomic64_set
+#define erts_smp_atomic64_read_wb erts_no_atomic64_read
+#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read
+#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read
+#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc
+#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec
+#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read
+#define erts_smp_atomic64_add_wb erts_no_atomic64_add
+#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor
+#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band
+#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg
+#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
+#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset
+
+#define erts_smp_atomic64_set_dirty erts_no_atomic64_set
+#define erts_smp_atomic64_read_dirty erts_no_atomic64_read
+
#endif /* !ERTS_SMP */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
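
Aside (illustrative only): in the !ERTS_SMP branch above, every erts_smp_atomic64_* operation, regardless of barrier suffix, maps to a plain erts_no_atomic64_* helper, since a non-SMP emulator has no concurrent writers to guard against. A minimal sketch of what such helpers boil down to; the no_atomic64_* names here are hypothetical.

#include <stdio.h>

typedef long long no_atomic64_t;

static void no_atomic64_set(no_atomic64_t *var, long long i) { *var = i; }
static long long no_atomic64_read(no_atomic64_t *var) { return *var; }

/* read_bset: return the old value, then overwrite the masked bits with `set`. */
static long long no_atomic64_read_bset(no_atomic64_t *var, long long mask, long long set)
{
    long long old = *var;
    *var = (old & ~mask) | (set & mask);
    return old;
}

int main(void)
{
    no_atomic64_t v;
    no_atomic64_set(&v, 0xff);
    printf("old=%lld new=%lld\n",
           no_atomic64_read_bset(&v, 0x0f, 0x01), no_atomic64_read(&v));
    return 0;
}
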
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index f10a3a9d38..37014ccf94 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -112,11 +112,11 @@ struct erl_node_; /* Declared in erl_node_tables.h */
* 1000 REFC_BINARY | |
* 1001 HEAP_BINARY | BINARIES |
* 1010 SUB_BINARY | |
- * 1011 Not used
+ * 1011 Not used; see comment below
* 1100 EXTERNAL_PID | |
* 1101 EXTERNAL_PORT | EXTERNAL THINGS |
* 1110 EXTERNAL_REF | |
- * 1111 Not used
+ * 1111 MAP
*
* COMMENTS:
*
@@ -140,10 +140,11 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define REFC_BINARY_SUBTAG (0x8 << _TAG_PRIMARY_SIZE) /* BINARY */
#define HEAP_BINARY_SUBTAG (0x9 << _TAG_PRIMARY_SIZE) /* BINARY */
#define SUB_BINARY_SUBTAG (0xA << _TAG_PRIMARY_SIZE) /* BINARY */
-#define MAP_SUBTAG (0xB << _TAG_PRIMARY_SIZE) /* MAP */
+/* _BINARY_XXX_MASK depends on 0xB being unused */
#define EXTERNAL_PID_SUBTAG (0xC << _TAG_PRIMARY_SIZE) /* EXTERNAL_PID */
#define EXTERNAL_PORT_SUBTAG (0xD << _TAG_PRIMARY_SIZE) /* EXTERNAL_PORT */
#define EXTERNAL_REF_SUBTAG (0xE << _TAG_PRIMARY_SIZE) /* EXTERNAL_REF */
+#define MAP_SUBTAG (0xF << _TAG_PRIMARY_SIZE) /* MAP */
#define _TAG_HEADER_ARITYVAL (TAG_PRIMARY_HEADER|ARITYVAL_SUBTAG)
@@ -156,11 +157,11 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define _TAG_HEADER_REFC_BIN (TAG_PRIMARY_HEADER|REFC_BINARY_SUBTAG)
#define _TAG_HEADER_HEAP_BIN (TAG_PRIMARY_HEADER|HEAP_BINARY_SUBTAG)
#define _TAG_HEADER_SUB_BIN (TAG_PRIMARY_HEADER|SUB_BINARY_SUBTAG)
-#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
#define _TAG_HEADER_EXTERNAL_PID (TAG_PRIMARY_HEADER|EXTERNAL_PID_SUBTAG)
#define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG)
#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
#define _TAG_HEADER_BIN_MATCHSTATE (TAG_PRIMARY_HEADER|BIN_MATCHSTATE_SUBTAG)
+#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
#define _TAG_HEADER_MASK 0x3F
@@ -892,7 +893,8 @@ typedef struct external_thing_ {
(((x) & _TAG_HEADER_MASK) == _TAG_HEADER_EXTERNAL_REF)
#define is_external_header(x) \
- (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_EXTERNAL_PID)
+ (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_EXTERNAL_PID \
+ && ((x) & _TAG_HEADER_MASK) != _TAG_HEADER_MAP)
#define is_external(x) (is_boxed((x)) && is_external_header(*boxed_val((x))))
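
Aside (illustrative only): moving MAP_SUBTAG from 0xB to 0xF keeps 0xB free for the binary-subtag mask trick, but it places MAP in the same masked group as the three EXTERNAL_* subtags (0xC..0xE), which is why is_external_header() above gains the extra `!= _TAG_HEADER_MAP` test. A standalone sketch of that check; the HDR_* names are simplified stand-ins for the ERTS macros.

#include <stdio.h>

#define TAG_PRIMARY_SIZE 2
#define SUBTAG(x)        ((unsigned) (x) << TAG_PRIMARY_SIZE)
#define HDR_MASK         0x3F
#define BIN_XXX_MASK     SUBTAG(0x3)     /* distinguishes the binary subtags */
#define HDR_EXTERNAL_PID SUBTAG(0xC)
#define HDR_MAP          SUBTAG(0xF)

static int is_external_header(unsigned x)
{
    /* 0xC..0xF all satisfy the masked test; MAP (0xF) must be excluded. */
    return ((x & (HDR_MASK - BIN_XXX_MASK)) == HDR_EXTERNAL_PID)
        && ((x & HDR_MASK) != HDR_MAP);
}

int main(void)
{
    printf("pid=%d port=%d ref=%d map=%d\n",
           is_external_header(SUBTAG(0xC)), is_external_header(SUBTAG(0xD)),
           is_external_header(SUBTAG(0xE)), is_external_header(SUBTAG(0xF)));
    return 0;   /* prints pid=1 port=1 ref=1 map=0 */
}
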
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 545a0343d0..664c479eb6 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -115,70 +115,24 @@
#undef read_nob
#define read_nob erts_thr_prgr_read_nob__
-#ifdef ARCH_64
-
static ERTS_INLINE void
set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_mb(atmc, val);
+ erts_atomic64_set_mb(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_set_nob(atmc, val);
+ erts_atomic64_set_nob(atmc, (erts_aint64_t) val);
}
static ERTS_INLINE void
init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
{
- erts_atomic_init_nob(atmc, val);
-}
-
-#else
-
-#undef dw_aint_to_val
-#define dw_aint_to_val erts_thr_prgr_dw_aint_to_val__
-
-static void
-val_to_dw_aint(erts_dw_aint_t *dw_aint, ErtsThrPrgrVal val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw_aint->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw_aint->sint[ERTS_DW_AINT_LOW_WORD]
- = (erts_aint_t) (val & 0xffffffff);
- dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]
- = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
+ erts_atomic64_init_nob(atmc, (erts_aint64_t) val);
}
-static ERTS_INLINE void
-set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_mb(atmc, &dw_aint);
-}
-
-static ERTS_INLINE void
-set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_set_nob(atmc, &dw_aint);
-}
-
-static ERTS_INLINE void
-init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
-{
- erts_dw_aint_t dw_aint;
- val_to_dw_aint(&dw_aint, val);
- erts_dw_atomic_init_nob(atmc, &dw_aint);
-}
-
-#endif
-
/* #define ERTS_THR_PROGRESS_STATE_DEBUG */
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 5f392944c2..03ddbd467c 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -115,11 +115,7 @@ struct ErtsThrPrgrLaterOp_ {
extern erts_tsd_key_t erts_thr_prgr_data_key__;
-#ifdef ARCH_64
-# define ERTS_THR_PRGR_ATOMIC erts_atomic_t
-#else /* ARCH_32 */
-# define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-#endif
+#define ERTS_THR_PRGR_ATOMIC erts_atomic64_t
typedef struct {
void *arg;
@@ -158,10 +154,6 @@ void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
void erts_thr_progress_dbg_print_state(void);
-#ifdef ARCH_32
-#define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint);
-#endif
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -184,68 +176,24 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_64
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_nob(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_acqb(atmc);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
-{
- return (ErtsThrPrgrVal) erts_atomic_read_mb(atmc);
-}
-
-#else /* ARCH_32 */
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (ErtsThrPrgrVal) dw_aint->dw_sint;
-#else
- ErtsThrPrgrVal res;
- res = (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_nob(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_nob(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_acqb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_acqb(atmc);
}
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
{
- erts_dw_aint_t dw_aint;
- erts_dw_atomic_read_mb(atmc, &dw_aint);
- return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+ return (ErtsThrPrgrVal) erts_atomic64_read_mb(atmc);
}
-#endif
-
ERTS_GLB_INLINE int
erts_thr_progress_is_managed_thread(void)
{
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 80026104db..7214f3ea33 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -344,6 +344,16 @@ typedef ethr_ts_event erts_tse_t;
#define erts_aint32_t ethr_sint32_t
#define erts_atomic32_t ethr_atomic32_t
+#if defined(ARCH_32)
+# define erts_atomic64_t ethr_dw_atomic_t
+# define erts_aint64_t ethr_sint64_t
+#elif defined(ARCH_64)
+# define erts_atomic64_t ethr_atomic_t
+# define erts_aint64_t ethr_sint_t
+#else
+# error "Not supported architecture"
+#endif
+
#define ERTS_DW_AINT_HIGH_WORD ETHR_DW_SINT_HIGH_WORD
#define ERTS_DW_AINT_LOW_WORD ETHR_DW_SINT_LOW_WORD
@@ -414,10 +424,12 @@ typedef int erts_tse_t;
typedef struct { SWord sint[2]; } erts_dw_aint_t;
typedef SWord erts_aint_t;
typedef Sint32 erts_aint32_t;
+typedef Sint64 erts_aint64_t;
#define erts_dw_atomic_t erts_dw_aint_t
#define erts_atomic_t erts_aint_t
#define erts_atomic32_t erts_aint32_t
+#define erts_atomic64_t erts_aint64_t
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -446,6 +458,7 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#define erts_no_dw_atomic_t erts_dw_aint_t
#define erts_no_atomic_t erts_aint_t
#define erts_no_atomic32_t erts_aint32_t
+#define erts_no_atomic64_t erts_aint64_t
#define ERTS_AINT_NULL ((erts_aint_t) NULL)
@@ -570,6 +583,29 @@ ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
erts_aint32_t mask,
erts_aint32_t set);
+ERTS_GLB_INLINE void erts_no_atomic64_set(erts_no_atomic64_t *var,
+ erts_aint64_t i);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read(erts_no_atomic64_t *var);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_inc_read(erts_no_atomic64_t *incp);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_dec_read(erts_no_atomic64_t *decp);
+ERTS_GLB_INLINE void erts_no_atomic64_inc(erts_no_atomic64_t *incp);
+ERTS_GLB_INLINE void erts_no_atomic64_dec(erts_no_atomic64_t *decp);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_add_read(erts_no_atomic64_t *addp,
+ erts_aint64_t i);
+ERTS_GLB_INLINE void erts_no_atomic64_add(erts_no_atomic64_t *addp,
+ erts_aint64_t i);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bor(erts_no_atomic64_t *var,
+ erts_aint64_t mask);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_band(erts_no_atomic64_t *var,
+ erts_aint64_t mask);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new,
+ erts_aint64_t expected);
+ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
+ erts_aint64_t mask,
+ erts_aint64_t set);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
@@ -1200,6 +1236,441 @@ erts_atomic32_read_dirty(erts_atomic32_t *var)
#endif
+/* 64-bit atomics */
+
+#if defined(ARCH_64)
+
+#define erts_atomic64_init_nob ethr_atomic_init
+#define erts_atomic64_set_nob ethr_atomic_set
+#define erts_atomic64_read_nob ethr_atomic_read
+#define erts_atomic64_inc_read_nob ethr_atomic_inc_read
+#define erts_atomic64_dec_read_nob ethr_atomic_dec_read
+#define erts_atomic64_inc_nob ethr_atomic_inc
+#define erts_atomic64_dec_nob ethr_atomic_dec
+#define erts_atomic64_add_read_nob ethr_atomic_add_read
+#define erts_atomic64_add_nob ethr_atomic_add
+#define erts_atomic64_read_bor_nob ethr_atomic_read_bor
+#define erts_atomic64_read_band_nob ethr_atomic_read_band
+#define erts_atomic64_xchg_nob ethr_atomic_xchg
+#define erts_atomic64_cmpxchg_nob ethr_atomic_cmpxchg
+#define erts_atomic64_read_bset_nob erts_atomic_read_bset_nob
+
+#define erts_atomic64_init_mb ethr_atomic_init_mb
+#define erts_atomic64_set_mb ethr_atomic_set_mb
+#define erts_atomic64_read_mb ethr_atomic_read_mb
+#define erts_atomic64_inc_read_mb ethr_atomic_inc_read_mb
+#define erts_atomic64_dec_read_mb ethr_atomic_dec_read_mb
+#define erts_atomic64_inc_mb ethr_atomic_inc_mb
+#define erts_atomic64_dec_mb ethr_atomic_dec_mb
+#define erts_atomic64_add_read_mb ethr_atomic_add_read_mb
+#define erts_atomic64_add_mb ethr_atomic_add_mb
+#define erts_atomic64_read_bor_mb ethr_atomic_read_bor_mb
+#define erts_atomic64_read_band_mb ethr_atomic_read_band_mb
+#define erts_atomic64_xchg_mb ethr_atomic_xchg_mb
+#define erts_atomic64_cmpxchg_mb ethr_atomic_cmpxchg_mb
+#define erts_atomic64_read_bset_mb erts_atomic_read_bset_mb
+
+#define erts_atomic64_init_acqb ethr_atomic_init_acqb
+#define erts_atomic64_set_acqb ethr_atomic_set_acqb
+#define erts_atomic64_read_acqb ethr_atomic_read_acqb
+#define erts_atomic64_inc_read_acqb ethr_atomic_inc_read_acqb
+#define erts_atomic64_dec_read_acqb ethr_atomic_dec_read_acqb
+#define erts_atomic64_inc_acqb ethr_atomic_inc_acqb
+#define erts_atomic64_dec_acqb ethr_atomic_dec_acqb
+#define erts_atomic64_add_read_acqb ethr_atomic_add_read_acqb
+#define erts_atomic64_add_acqb ethr_atomic_add_acqb
+#define erts_atomic64_read_bor_acqb ethr_atomic_read_bor_acqb
+#define erts_atomic64_read_band_acqb ethr_atomic_read_band_acqb
+#define erts_atomic64_xchg_acqb ethr_atomic_xchg_acqb
+#define erts_atomic64_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+#define erts_atomic64_read_bset_acqb erts_atomic_read_bset_acqb
+
+#define erts_atomic64_init_relb ethr_atomic_init_relb
+#define erts_atomic64_set_relb ethr_atomic_set_relb
+#define erts_atomic64_read_relb ethr_atomic_read_relb
+#define erts_atomic64_inc_read_relb ethr_atomic_inc_read_relb
+#define erts_atomic64_dec_read_relb ethr_atomic_dec_read_relb
+#define erts_atomic64_inc_relb ethr_atomic_inc_relb
+#define erts_atomic64_dec_relb ethr_atomic_dec_relb
+#define erts_atomic64_add_read_relb ethr_atomic_add_read_relb
+#define erts_atomic64_add_relb ethr_atomic_add_relb
+#define erts_atomic64_read_bor_relb ethr_atomic_read_bor_relb
+#define erts_atomic64_read_band_relb ethr_atomic_read_band_relb
+#define erts_atomic64_xchg_relb ethr_atomic_xchg_relb
+#define erts_atomic64_cmpxchg_relb ethr_atomic_cmpxchg_relb
+#define erts_atomic64_read_bset_relb erts_atomic_read_bset_relb
+
+#define erts_atomic64_init_ddrb ethr_atomic_init_ddrb
+#define erts_atomic64_set_ddrb ethr_atomic_set_ddrb
+#define erts_atomic64_read_ddrb ethr_atomic_read_ddrb
+#define erts_atomic64_inc_read_ddrb ethr_atomic_inc_read_ddrb
+#define erts_atomic64_dec_read_ddrb ethr_atomic_dec_read_ddrb
+#define erts_atomic64_inc_ddrb ethr_atomic_inc_ddrb
+#define erts_atomic64_dec_ddrb ethr_atomic_dec_ddrb
+#define erts_atomic64_add_read_ddrb ethr_atomic_add_read_ddrb
+#define erts_atomic64_add_ddrb ethr_atomic_add_ddrb
+#define erts_atomic64_read_bor_ddrb ethr_atomic_read_bor_ddrb
+#define erts_atomic64_read_band_ddrb ethr_atomic_read_band_ddrb
+#define erts_atomic64_xchg_ddrb ethr_atomic_xchg_ddrb
+#define erts_atomic64_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+#define erts_atomic64_read_bset_ddrb erts_atomic_read_bset_ddrb
+
+#define erts_atomic64_init_rb ethr_atomic_init_rb
+#define erts_atomic64_set_rb ethr_atomic_set_rb
+#define erts_atomic64_read_rb ethr_atomic_read_rb
+#define erts_atomic64_inc_read_rb ethr_atomic_inc_read_rb
+#define erts_atomic64_dec_read_rb ethr_atomic_dec_read_rb
+#define erts_atomic64_inc_rb ethr_atomic_inc_rb
+#define erts_atomic64_dec_rb ethr_atomic_dec_rb
+#define erts_atomic64_add_read_rb ethr_atomic_add_read_rb
+#define erts_atomic64_add_rb ethr_atomic_add_rb
+#define erts_atomic64_read_bor_rb ethr_atomic_read_bor_rb
+#define erts_atomic64_read_band_rb ethr_atomic_read_band_rb
+#define erts_atomic64_xchg_rb ethr_atomic_xchg_rb
+#define erts_atomic64_cmpxchg_rb ethr_atomic_cmpxchg_rb
+#define erts_atomic64_read_bset_rb erts_atomic_read_bset_rb
+
+#define erts_atomic64_init_wb ethr_atomic_init_wb
+#define erts_atomic64_set_wb ethr_atomic_set_wb
+#define erts_atomic64_read_wb ethr_atomic_read_wb
+#define erts_atomic64_inc_read_wb ethr_atomic_inc_read_wb
+#define erts_atomic64_dec_read_wb ethr_atomic_dec_read_wb
+#define erts_atomic64_inc_wb ethr_atomic_inc_wb
+#define erts_atomic64_dec_wb ethr_atomic_dec_wb
+#define erts_atomic64_add_read_wb ethr_atomic_add_read_wb
+#define erts_atomic64_add_wb ethr_atomic_add_wb
+#define erts_atomic64_read_bor_wb ethr_atomic_read_bor_wb
+#define erts_atomic64_read_band_wb ethr_atomic_read_band_wb
+#define erts_atomic64_xchg_wb ethr_atomic_xchg_wb
+#define erts_atomic64_cmpxchg_wb ethr_atomic_cmpxchg_wb
+#define erts_atomic64_read_bset_wb erts_atomic_read_bset_wb
+
+#define erts_atomic64_set_dirty erts_atomic_set_dirty
+#define erts_atomic64_read_dirty erts_atomic_read_dirty
+
+#elif defined(ARCH_32)
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+#define ERTS_ATOMIC64_OPS_DECL__(BARRIER) \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp); \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set)
+
+ERTS_ATOMIC64_OPS_DECL__(nob);
+ERTS_ATOMIC64_OPS_DECL__(mb);
+ERTS_ATOMIC64_OPS_DECL__(acqb);
+ERTS_ATOMIC64_OPS_DECL__(relb);
+ERTS_ATOMIC64_OPS_DECL__(ddrb);
+ERTS_ATOMIC64_OPS_DECL__(rb);
+ERTS_ATOMIC64_OPS_DECL__(wb);
+
+#undef ERTS_ATOMIC64_OPS_DECL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val);
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * The ethr_dw_atomic_*_nob() functions below
+ * are here to make it possible for the
+ * ERTS_ATOMIC64_OPS_IMPL__() to map erts
+ * barriers to ethread barriers...
+ */
+static ERTS_INLINE void
+ethr_dw_atomic_init_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_init(var, val);
+}
+
+static ERTS_INLINE void
+ethr_dw_atomic_set_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_set(var, val);
+}
+
+static ERTS_INLINE void
+ethr_dw_atomic_read_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *val)
+{
+ ethr_dw_atomic_read(var, val);
+}
+
+static ERTS_INLINE int
+ethr_dw_atomic_cmpxchg_nob(ethr_dw_atomic_t *var,
+ ethr_dw_sint_t *new,
+ ethr_dw_sint_t *xchg)
+{
+ return ethr_dw_atomic_cmpxchg(var, new, xchg);
+}
+
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
+
+#ifdef ETHR_SU_DW_NAINT_T__
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((erts_aint64_t) DW.dw_sint)
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ (DW.dw_sint = (ETHR_SU_DW_NAINT_T__) AINT64)
+#else /* !ETHR_SU_DW_NAINT_T__ */
+#define ERTS_DW_SINT_TO_AINT64__(DW) \
+ ((((erts_aint64_t) DW.sint[ETHR_DW_SINT_HIGH_WORD]) << 32) \
+ | (((erts_aint64_t) DW.sint[ETHR_DW_SINT_LOW_WORD]) \
+ & ((erts_aint64_t) 0xffffffff)))
+#define ERTS_AINT64_TO_DW_SINT__(DW, AINT64) \
+ do { \
+ DW.sint[ETHR_DW_SINT_LOW_WORD] = \
+ (ethr_sint_t) (AINT64 & 0xffffffff); \
+ DW.sint[ETHR_DW_SINT_HIGH_WORD] = \
+ (ethr_sint_t) ((AINT64 >> 32) & 0xffffffff); \
+ } while (0)
+#endif /* !ETHR_SU_DW_NAINT_T__ */
+
+#define ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(CmpXchgOp, \
+ AVarP, XchgVar, NewVar, \
+ ModificationCode) \
+do { \
+ ethr_dw_sint_t dw_xchg__, dw_new__; \
+ ethr_dw_atomic_read(AVarP, &dw_xchg__); \
+ do { \
+ XchgVar = ERTS_DW_SINT_TO_AINT64__(dw_xchg__); \
+ { \
+ ModificationCode; \
+ } \
+ ERTS_AINT64_TO_DW_SINT__(dw_new__, NewVar); \
+ } while (!CmpXchgOp((AVarP), &dw_new__, &dw_xchg__)); \
+} while (0)
+
+#define ERTS_ATOMIC64_OPS_IMPL__(BARRIER) \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_init_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_init_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_set_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ ethr_dw_sint_t dw; \
+ ERTS_AINT64_TO_DW_SINT__(dw, val); \
+ ethr_dw_atomic_set_ ## BARRIER(var, &dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ ethr_dw_sint_t dw; \
+ ethr_dw_atomic_read_ ## BARRIER(var, &dw); \
+ return ERTS_DW_SINT_TO_AINT64__(dw); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_inc_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_dec_read_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_inc_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + 1); \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_dec_ ## BARRIER(erts_atomic64_t *var) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg - 1); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_add_read_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+ return new; \
+} \
+ \
+ERTS_GLB_INLINE void \
+erts_atomic64_add_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg + val); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bor_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg | val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_band_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = xchg & val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_xchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t val) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ new = val); \
+ return xchg; \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_cmpxchg_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t new, \
+ erts_aint64_t exp) \
+{ \
+ ethr_dw_sint_t dw_xchg, dw_new; \
+ ERTS_AINT64_TO_DW_SINT__(dw_xchg, exp); \
+ ERTS_AINT64_TO_DW_SINT__(dw_new, new); \
+ if (ethr_dw_atomic_cmpxchg_ ## BARRIER(var, &dw_new, &dw_xchg)) \
+ return exp; \
+ return ERTS_DW_SINT_TO_AINT64__(dw_xchg); \
+} \
+ \
+ERTS_GLB_INLINE erts_aint64_t \
+erts_atomic64_read_bset_ ## BARRIER(erts_atomic64_t *var, \
+ erts_aint64_t mask, \
+ erts_aint64_t set) \
+{ \
+ erts_aint64_t xchg, new; \
+ ERTS_ATOMIC64_DW_CMPXCHG_IMPL__(ethr_dw_atomic_cmpxchg_ ## BARRIER, \
+ var, xchg, new, \
+ { \
+ new = xchg & ~mask; \
+ new |= mask & set; \
+ }); \
+ return xchg; \
+}
+
+ERTS_ATOMIC64_OPS_IMPL__(nob)
+ERTS_ATOMIC64_OPS_IMPL__(mb)
+ERTS_ATOMIC64_OPS_IMPL__(acqb)
+ERTS_ATOMIC64_OPS_IMPL__(relb)
+ERTS_ATOMIC64_OPS_IMPL__(ddrb)
+ERTS_ATOMIC64_OPS_IMPL__(rb)
+ERTS_ATOMIC64_OPS_IMPL__(wb)
+
+#undef ERTS_ATOMIC64_OPS_IMPL__
+#undef ERTS_ATOMIC64_DW_CMPXCHG_IMPL__
+
+ERTS_GLB_INLINE void
+erts_atomic64_set_dirty(erts_atomic64_t *var, erts_aint64_t val)
+{
+ ethr_sint_t *sint = ethr_dw_atomic_addr(var);
+ ethr_dw_sint_t dw;
+ ERTS_AINT64_TO_DW_SINT__(dw, val);
+ sint[0] = dw.sint[0];
+ sint[1] = dw.sint[1];
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_atomic64_read_dirty(erts_atomic64_t *var)
+{
+ ethr_sint_t *sint;
+ ethr_dw_sint_t dw;
+ sint = ethr_dw_atomic_addr(var);
+ dw.sint[0] = sint[0];
+ dw.sint[1] = sint[1];
+ return ERTS_DW_SINT_TO_AINT64__(dw);
+}
+
+#undef ERTS_DW_SINT_TO_AINT64__
+#undef ERTS_AINT64_TO_DW_SINT__
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ARCH_32 */
+
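As an aside on the hunk above: on 32-bit builds every 64-bit read-modify-write is emulated by spinning on a double-word compare-and-swap, which is exactly what ERTS_ATOMIC64_DW_CMPXCHG_IMPL__ expands to. A minimal standalone sketch of the same pattern, written against C11 <stdatomic.h> rather than the ethread API (the name emu_add_read is invented for illustration and is not part of ERTS):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative sketch only: emulate a 64-bit atomic add-and-read with a
 * CAS loop, mirroring ERTS_ATOMIC64_DW_CMPXCHG_IMPL__ above. Read the
 * current value, compute the new one, and retry until no other thread
 * raced us in between. */
static int64_t emu_add_read(_Atomic int64_t *var, int64_t val)
{
    int64_t xchg = atomic_load(var);  /* corresponds to ethr_dw_atomic_read() */
    int64_t new_val;
    do {
        new_val = xchg + val;         /* the "ModificationCode" argument */
    } while (!atomic_compare_exchange_weak(var, &xchg, new_val));
    return new_val;                   /* value after the update */
}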
#else /* !USE_THREADS */
/* Double word size atomics */
@@ -1462,6 +1933,116 @@ erts_atomic32_read_dirty(erts_atomic32_t *var)
#define erts_atomic32_set_dirty erts_no_atomic32_set
#define erts_atomic32_read_dirty erts_no_atomic32_read
+/* 64-bit atomics */
+
+#define erts_atomic64_init_nob erts_no_atomic64_set
+#define erts_atomic64_set_nob erts_no_atomic64_set
+#define erts_atomic64_read_nob erts_no_atomic64_read
+#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read
+#define erts_atomic64_inc_nob erts_no_atomic64_inc
+#define erts_atomic64_dec_nob erts_no_atomic64_dec
+#define erts_atomic64_add_read_nob erts_no_atomic64_add_read
+#define erts_atomic64_add_nob erts_no_atomic64_add
+#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_nob erts_no_atomic64_read_band
+#define erts_atomic64_xchg_nob erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_mb erts_no_atomic64_set
+#define erts_atomic64_set_mb erts_no_atomic64_set
+#define erts_atomic64_read_mb erts_no_atomic64_read
+#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_mb erts_no_atomic64_inc
+#define erts_atomic64_dec_mb erts_no_atomic64_dec
+#define erts_atomic64_add_read_mb erts_no_atomic64_add_read
+#define erts_atomic64_add_mb erts_no_atomic64_add
+#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_mb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_mb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_acqb erts_no_atomic64_set
+#define erts_atomic64_set_acqb erts_no_atomic64_set
+#define erts_atomic64_read_acqb erts_no_atomic64_read
+#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_acqb erts_no_atomic64_inc
+#define erts_atomic64_dec_acqb erts_no_atomic64_dec
+#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read
+#define erts_atomic64_add_acqb erts_no_atomic64_add
+#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_relb erts_no_atomic64_set
+#define erts_atomic64_set_relb erts_no_atomic64_set
+#define erts_atomic64_read_relb erts_no_atomic64_read
+#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_relb erts_no_atomic64_inc
+#define erts_atomic64_dec_relb erts_no_atomic64_dec
+#define erts_atomic64_add_read_relb erts_no_atomic64_add_read
+#define erts_atomic64_add_relb erts_no_atomic64_add
+#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_relb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_relb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_ddrb erts_no_atomic64_set
+#define erts_atomic64_set_ddrb erts_no_atomic64_set
+#define erts_atomic64_read_ddrb erts_no_atomic64_read
+#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_ddrb erts_no_atomic64_inc
+#define erts_atomic64_dec_ddrb erts_no_atomic64_dec
+#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read
+#define erts_atomic64_add_ddrb erts_no_atomic64_add
+#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_rb erts_no_atomic64_set
+#define erts_atomic64_set_rb erts_no_atomic64_set
+#define erts_atomic64_read_rb erts_no_atomic64_read
+#define erts_atomic64_inc_read_rb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_rb erts_no_atomic64_inc
+#define erts_atomic64_dec_rb erts_no_atomic64_dec
+#define erts_atomic64_add_read_rb erts_no_atomic64_add_read
+#define erts_atomic64_add_rb erts_no_atomic64_add
+#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_rb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_rb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset
+
+#define erts_atomic64_init_wb erts_no_atomic64_set
+#define erts_atomic64_set_wb erts_no_atomic64_set
+#define erts_atomic64_read_wb erts_no_atomic64_read
+#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read
+#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read
+#define erts_atomic64_inc_wb erts_no_atomic64_inc
+#define erts_atomic64_dec_wb erts_no_atomic64_dec
+#define erts_atomic64_add_read_wb erts_no_atomic64_add_read
+#define erts_atomic64_add_wb erts_no_atomic64_add
+#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor
+#define erts_atomic64_read_band_wb erts_no_atomic64_read_band
+#define erts_atomic64_xchg_wb erts_no_atomic64_xchg
+#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
+#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset
+
+#define erts_atomic64_set_dirty erts_no_atomic64_set
+#define erts_atomic64_read_dirty erts_no_atomic64_read
+
#endif /* !USE_THREADS */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -2383,6 +2964,104 @@ erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
return old;
}
+/* atomic64 */
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_set(erts_no_atomic64_t *var, erts_aint64_t i)
+{
+ *var = i;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read(erts_no_atomic64_t *var)
+{
+ return *var;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_inc_read(erts_no_atomic64_t *incp)
+{
+ return ++(*incp);
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_dec_read(erts_no_atomic64_t *decp)
+{
+ return --(*decp);
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_inc(erts_no_atomic64_t *incp)
+{
+ ++(*incp);
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_dec(erts_no_atomic64_t *decp)
+{
+ --(*decp);
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_add_read(erts_no_atomic64_t *addp, erts_aint64_t i)
+{
+ return *addp += i;
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic64_add(erts_no_atomic64_t *addp, erts_aint64_t i)
+{
+ *addp += i;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_bor(erts_no_atomic64_t *var, erts_aint64_t mask)
+{
+ erts_aint64_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_band(erts_no_atomic64_t *var, erts_aint64_t mask)
+{
+ erts_aint64_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, erts_aint64_t new)
+{
+ erts_aint64_t old = *xchgp;
+ *xchgp = new;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
+ erts_aint64_t new,
+ erts_aint64_t expected)
+{
+ erts_aint64_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+}
+
+ERTS_GLB_INLINE erts_aint64_t
+erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
+ erts_aint64_t mask,
+ erts_aint64_t set)
+{
+ erts_aint64_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* spinlock */
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 6978a5f11a..ea5c850a30 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -151,6 +151,11 @@ do { \
message dispatcher thread takes care of that). */
#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
do { (RES) = (TPID); } while(0)
+int
+erts_is_tracer_proc_valid(Process* p)
+{
+ return 1;
+}
#else
#define ERTS_NULL_TRACER_REF NULL
#define ERTS_TRACER_REF_TYPE Process *
@@ -163,6 +168,20 @@ do { \
return; \
} \
} while (0)
+int
+erts_is_tracer_proc_valid(Process* p)
+{
+ Process* tracer;
+
+ tracer = erts_proc_lookup(ERTS_TRACER_PROC(p));
+ if (tracer && ERTS_TRACE_FLAGS(tracer) & F_TRACER) {
+ return 1;
+ } else {
+ ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACE_FLAGS(p) = ~TRACEE_FLAGS;
+ return 0;
+ }
+}
#endif
static Uint active_sched;
@@ -2505,7 +2524,7 @@ monitor_large_heap(Process *p) {
#ifndef ERTS_SMP
ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
- if (monitor_p || p == monitor_p) {
+ if (!monitor_p || p == monitor_p) {
return;
}
#endif
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 853c6cb0d8..4f2c70d6e7 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -39,6 +39,7 @@ void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
+int erts_is_tracer_proc_valid(Process* p);
#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 3a968594f3..f8e1431a53 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -2126,6 +2126,8 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
mac = 1;
case ERL_FILENAME_UTF8:
size = strlen((char *) bytes);
+ if (size == 0)
+ return NIL;
if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
goto noconvert;
}
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 0807649ea1..c32f8fd61c 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -32,11 +32,7 @@ typedef struct {
#endif
union {
Uint64 not_atomic;
-#ifdef ARCH_64
- erts_atomic_t atomic;
-#else
- erts_dw_atomic_t atomic;
-#endif
+ erts_atomic64_t atomic;
} counter;
} erts_interval_t;
@@ -50,9 +46,6 @@ Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-#ifdef ARCH_32
-ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
-#endif
ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
@@ -62,46 +55,16 @@ ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ARCH_32
-
-ERTS_GLB_INLINE Uint64
-erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-#endif
-
ERTS_GLB_INLINE Uint64
erts_current_interval_nob__(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
+ return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
erts_current_interval_acqb__(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
+ return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 9671cde228..9b9b4b2a62 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -44,9 +44,6 @@
#include "erl_zlib.h"
#include "erl_map.h"
-#ifdef HIPE
-#include "hipe_mode_switch.h"
-#endif
#define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define MAX_STRING_LEN 0xffff
@@ -111,26 +108,17 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm
static Export binary_to_term_trap_export;
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b);
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif, Eterm arg0, Eterm arg1);
void erts_init_external(void) {
-#if 1 /* In R16 */
erts_init_trap_export(&term_to_binary_trap_export,
- am_erlang, am_term_to_binary_trap, 1,
+ am_erts_internal, am_term_to_binary_trap, 1,
&term_to_binary_trap_1);
erts_init_trap_export(&binary_to_term_trap_export,
- am_erlang, am_binary_to_term_trap, 1,
+ am_erts_internal, am_binary_to_term_trap, 1,
&binary_to_term_trap_1);
-#else
- sys_memset((void *) &term_to_binary_trap_export, 0, sizeof(Export));
- term_to_binary_trap_export.address = &term_to_binary_trap_export.code[3];
- term_to_binary_trap_export.code[0] = am_erlang;
- term_to_binary_trap_export.code[1] = am_term_to_binary_trap;
- term_to_binary_trap_export.code[2] = 1;
- term_to_binary_trap_export.code[3] = (BeamInstr) em_apply_bif;
- term_to_binary_trap_export.code[4] = (BeamInstr) &term_to_binary_trap_1;
-#endif
return;
}
@@ -1069,6 +1057,8 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1)
+
BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL);
@@ -1081,6 +1071,8 @@ BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 2)
+
BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
Process* p = BIF_P;
@@ -1185,6 +1177,8 @@ typedef struct B2TContext_t {
Uint32 flags;
SWord reds;
Eterm trap_bin;
+ Export *bif;
+ Eterm arg[2];
enum B2TState state;
union {
B2TSizeContext sc;
@@ -1356,7 +1350,8 @@ static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
- return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin);
+ return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
+ THE_NON_VALUE, THE_NON_VALUE);
}
@@ -1391,8 +1386,10 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
return ctx;
}
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b)
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif_init, Eterm arg0, Eterm arg1)
{
+ BIF_RETTYPE ret_val;
#ifdef EXTREME_B2T_TRAPPING
SWord initial_reds = 1 + b2t_rand() % 4;
#else
@@ -1409,6 +1406,9 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
ctx->state = B2TPrepare;
ctx->aligned_alloc = NULL;
ctx->flags = flags;
+ ctx->bif = bif_init;
+ ctx->arg[0] = arg0;
+ ctx->arg[1] = arg1;
IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;)
} else {
is_first_call = 0;
@@ -1504,12 +1504,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start);
/*fall through*/
case B2TBadArg:
- b2t_destroy_context(ctx);
- if (!is_first_call) {
- erts_set_gc_state(p, 1);
- }
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- BIF_ERROR(p, BADARG & ~EXF_SAVETRACE);
+
+ ASSERT(ctx->bif == bif_export[BIF_binary_to_term_1]
+ || ctx->bif == bif_export[BIF_binary_to_term_2]);
+
+ if (is_first_call)
+ ERTS_BIF_PREP_ERROR(ret_val, p, BADARG);
+ else {
+ erts_set_gc_state(p, 1);
+ if (is_non_value(ctx->arg[1]))
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0]);
+ else
+ ERTS_BIF_PREP_ERROR_TRAPPED2(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0], ctx->arg[1]);
+ }
+ b2t_destroy_context(ctx);
+ return ret_val;
case B2TDone:
b2t_destroy_context(ctx);
@@ -1524,7 +1536,8 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
erts_set_gc_state(p, 1);
}
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- return ctx->u.dc.res;
+ ERTS_BIF_PREP_RET(ret_val, ctx->u.dc.res);
+ return ret_val;
default:
ASSERT(!"Unknown state in binary_to_term");
@@ -1541,15 +1554,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
erts_set_gc_state(p, 0);
}
BUMP_ALL_REDS(p);
- BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin);
+
+ ERTS_BIF_PREP_TRAP1(ret_val, &binary_to_term_trap_export,
+ p, ctx->trap_bin);
+
+ return ret_val;
}
-BIF_RETTYPE erts_internal_binary_to_term_1(BIF_ALIST_1)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1)
+
+BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
{
- return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1],
+ BIF_ARG_1, THE_NON_VALUE);
}
-BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2)
+
+BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
{
Eterm opts;
Eterm opt;
@@ -1570,7 +1592,8 @@ BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
if (is_not_nil(opts))
goto error;
- return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2],
+ BIF_ARG_1, BIF_ARG_2);
error:
BIF_ERROR(BIF_P, BADARG);
@@ -1902,6 +1925,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
real_size = endp - bytes;
result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size);
+ result_bin->orig_size = real_size;
level = context->s.ec.level;
BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
if (level == 0 || real_size < 6) { /* We are done */
@@ -1981,6 +2005,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
erl_zlib_deflate_finish(&(context->s.cc.stream));
result_bin = erts_bin_realloc(context->s.cc.destination_bin,
context->s.cc.dest_len+6);
+ result_bin->orig_size = context->s.cc.dest_len+6;
context->s.cc.destination_bin = NULL;
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
@@ -2562,29 +2587,25 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
{
map_t *mp = (map_t*)map_val(obj);
Uint size = map_get_size(mp);
- Eterm *mptr;
*ep++ = MAP_EXT;
put_int32(size, ep); ep += 4;
- /* Push values first */
if (size > 0) {
- mptr = map_get_values(mp);
+ Eterm *kptr = map_get_keys(mp);
+ Eterm *vptr = map_get_values(mp);
+
for (i = size-1; i >= 1; i--) {
WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) mptr[i]);
+ WSTACK_PUSH(s, (UWord) vptr[i]);
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) kptr[i]);
}
WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) mptr[0]);
+ WSTACK_PUSH(s, (UWord) vptr[0]);
- mptr = map_get_keys(mp);
- for (i = size-1; i >= 1; i--) {
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) mptr[i]);
- }
-
- obj = mptr[0];
+ obj = kptr[0];
goto L_jump_start;
}
}
@@ -3518,16 +3539,16 @@ dec_term_atom_common:
keys = make_tuple(hp);
*hp++ = make_arityval(size);
- kptr = hp;
hp += size;
+ kptr = hp - 1;
mp = (map_t*)hp;
hp += MAP_HEADER_SIZE;
- vptr = hp;
hp += size;
+ vptr = hp - 1;
- /* kptr, first word for keys
- * vptr, first word for values
+ /* kptr, last word for keys
+ * vptr, last word for values
*/
/*
@@ -3542,27 +3563,12 @@ dec_term_atom_common:
mp->keys = keys;
*objp = make_map(mp);
- /* We assume the map is wellformed, meaning:
- * - ascending key order
- * - unique keys
- */
-
- objp = vptr + size - 1;
- n = size;
-
- while (n-- > 0) {
- *objp = (Eterm) COMPRESS_POINTER(next);
- next = objp;
- objp--;
- }
-
- objp = kptr + size - 1;
- n = size;
-
- while (n-- > 0) {
- *objp = (Eterm) COMPRESS_POINTER(next);
- next = objp;
- objp--;
+ for (n = size; n; n--) {
+ *vptr = (Eterm) COMPRESS_POINTER(next);
+ *kptr = (Eterm) COMPRESS_POINTER(vptr);
+ next = kptr;
+ vptr--;
+ kptr--;
}
}
break;
@@ -4459,66 +4465,3 @@ error:
#undef SKIP2
#undef CHKSIZE
}
-
-
-#ifdef HIPE
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2);
-
-/* Hipe wrappers used by native code for BIFs that disable GC while trapping.
- *
- * Problem:
- * When native code calls a BIF that traps, hipe_mode_switch will push a
- * "trap frame" on the Erlang stack in order to find its way back from beam_emu
- * back to native caller when finally done. If GC is disabled and stack/heap
- * is full there is no place to push the "trap frame".
- *
- * Solution:
- * We reserve space on stack for the "trap frame" here before the BIF is called.
- * If the BIF does not trap, the space is reclaimed here before returning.
- * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame"
- * already is reserved and use it.
- */
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = term_to_binary_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = term_to_binary_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = erts_internal_binary_to_term_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = erts_internal_binary_to_term_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-#endif /*HIPE*/
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 8fcb95d0e2..32a2dc43e8 100755..100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -160,6 +160,7 @@ struct erts_driver_t_ {
void (*ready_async)(ErlDrvData drv_data, ErlDrvThreadData thread_data); /* Might be NULL */
void (*process_exit)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
void (*stop_select)(ErlDrvEvent event, void*); /* Might be NULL */
+ void (*emergency_close)(ErlDrvData drv_data); /* Might be NULL */
};
extern erts_driver_t *driver_list;
@@ -435,6 +436,8 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_ESTACK(estack) ((void) ((estack)->start = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
@@ -551,6 +554,8 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_WSTACK(wstack) ((void) ((wstack)->wstart = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
@@ -848,6 +853,7 @@ Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
+void erts_emergency_close_ports(void);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_enable_io_lock_count(int enable);
@@ -951,20 +957,67 @@ struct Sint_buf {
};
char* Sint_to_buf(Sint, struct Sint_buf*);
+#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \
+ {(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0}
+
+#define ERTS_IOLIST_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOListState))
+
+#define ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED 8
+
+typedef struct {
+ Process *c_p;
+ ErlDrvSizeT size;
+ Uint offs;
+ Eterm obj;
+ ErtsEStack estack;
+ int reds_left;
+ int have_size;
+} ErtsIOListState;
+
+#define ERTS_IOLIST2BUF_STATE_INITER(C_P, OBJ) \
+ {ERTS_IOLIST_STATE_INITER((C_P), (OBJ)), {NULL, 0, 0, 0}, NULL, 0, NULL, 0}
+
+#define ERTS_IOLIST2BUF_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOList2BufState))
+
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT 32
+#define ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED 8
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_RED \
+ (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED*ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT)
+
+typedef struct {
+ ErtsIOListState iolist;
+ struct {
+ byte *bptr;
+ size_t size;
+ Uint bitoffs;
+ Uint bitsize;
+ } bcopy;
+ char *buf;
+ ErlDrvSizeT len;
+ Eterm *objp;
+ int offset;
+} ErtsIOList2BufState;
+
#define ERTS_IOLIST_OK 0
#define ERTS_IOLIST_OVERFLOW 1
#define ERTS_IOLIST_TYPE 2
+#define ERTS_IOLIST_YIELD 3
Eterm buf_to_intlist(Eterm**, const char*, size_t, Eterm); /* most callers pass plain char*'s */
#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_YIELD (~((ErlDrvSizeT) 2))
#define ERTS_IOLIST_TO_BUF_FAILED(R) \
- (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+ (((R) & (~((ErlDrvSizeT) 3))) == (~((ErlDrvSizeT) 3)))
#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
(!ERTS_IOLIST_TO_BUF_FAILED((R)))
ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *);
+int erts_iolist_size_yielding(ErtsIOListState *state);
int erts_iolist_size(Eterm, ErlDrvSizeT *);
int is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index cd5060ebb3..012a7d1a4b 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -914,8 +914,8 @@ int erts_port_handle_xports(Port *prt)
(iov)->iov_base = (ptr); \
(iov)->iov_len = (len); \
if (sizeof((iov)->iov_len) < sizeof(len) \
- /* Check if (len) overflowed (iov)->iov_len */ \
- && ((len) >> (sizeof((iov)->iov_len)*CHAR_BIT)) != 0) { \
+ /* Check if (len) overflowed (iov)->iov_len */ \
+ && (iov)->iov_len != (len)) { \
goto L_overflow; \
} \
*(bv)++ = (bin); \
@@ -1218,9 +1218,10 @@ typedef struct {
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+ act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_out);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
@@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
@@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
@@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_in);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -2473,7 +2519,7 @@ set_port_connected(int bang_op,
DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
dtrace_pid_str(connect, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
dtrace_proc_str(rp, newprocess_str);
DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
}
@@ -3591,9 +3637,9 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(rreason_str, 64);
- erts_snprintf(from_str, sizeof(from_str), "%T", from);
+ erts_snprintf(from_str, sizeof(DTRACE_CHARBUF_NAME(from_str)), "%T", from);
dtrace_port_str(p, port_str);
- erts_snprintf(rreason_str, sizeof(rreason_str), "%T", rreason);
+ erts_snprintf(rreason_str, sizeof(DTRACE_CHARBUF_NAME(rreason_str)), "%T", rreason);
DTRACE4(port_exit, from_str, port_str, p->name, rreason_str);
}
#endif
@@ -4660,7 +4706,7 @@ set_busy_port(ErlDrvPort dprt, int on)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_busy)) {
- erts_snprintf(port_str, sizeof(port_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
DTRACE1(port_busy, port_str);
}
@@ -4673,7 +4719,7 @@ set_busy_port(ErlDrvPort dprt, int on)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_not_busy)) {
- erts_snprintf(port_str, sizeof(port_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
DTRACE1(port_not_busy, port_str);
}
@@ -4725,9 +4771,9 @@ erts_port_resume_procs(Port *prt)
DTRACE_CHARBUF(pid_str, 16);
ErtsProcList* plp2 = plp;
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
@@ -6129,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init(&pdl->mtx, "port_data_lock");
+ erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7166,7 +7212,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
void
@@ -7228,6 +7274,18 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->nif_major_version = ERL_NIF_MAJOR_VERSION;
sip->nif_minor_version = ERL_NIF_MINOR_VERSION;
}
+ /*
+ * 'dirty_scheduler_support' is the last field in the 4th version
+ * (driver version 3.1, NIF version 2.7)
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
+#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
+ sip->dirty_scheduler_support = 1;
+#else
+ sip->dirty_scheduler_support = 0;
+#endif
+ }
+
}
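To put the size check above in context, a driver compiled against a 3.1+ erl_driver.h could probe the new field roughly as follows; this is a hedged sketch, not code from the patch (the function name probe_dirty_schedulers is invented):

#include "erl_driver.h"

/* Sketch: ask the running emulator for its system info and read
 * dirty_scheduler_support only if that emulator is new enough
 * (driver version >= 3.1) to have filled the field in. */
static int probe_dirty_schedulers(void)
{
    ErlDrvSysInfo si;
    driver_system_info(&si, sizeof(ErlDrvSysInfo));
    if (si.driver_major_version > 3
        || (si.driver_major_version == 3 && si.driver_minor_version >= 1))
        return si.dirty_scheduler_support;
    return 0;   /* emulator predates driver API 3.1; field not filled in */
}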
@@ -7291,6 +7349,8 @@ no_stop_select_callback(ErlDrvEvent event, void* private)
erts_send_error_to_logger_nogl(dsbufp);
}
+#define IS_DRIVER_VERSION_GE(DE,MAJOR,MINOR) \
+ ((DE)->major_version >= (MAJOR) && (DE)->minor_version >= (MINOR))
static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
@@ -7338,6 +7398,7 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
drv->ready_async = de->ready_async;
drv->process_exit = de->process_exit;
+ drv->emergency_close = IS_DRIVER_VERSION_GE(de,3,2) ? de->emergency_close : NULL;
if (de->stop_select)
drv->stop_select = de->stop_select;
else
@@ -7356,6 +7417,8 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
}
}
+#undef IS_DRIVER_VERSION_GE
+
void
erts_destroy_driver(erts_driver_t *drv)
{
@@ -7499,7 +7562,7 @@ Port *erts_get_heart_port(void)
if (!port)
continue;
/* only examine undead or alive ports */
- if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
continue;
/* immediate atom compare */
reg = port->common.u.alive.reg;
@@ -7510,3 +7573,23 @@ Port *erts_get_heart_port(void)
return NULL;
}
+
+void erts_emergency_close_ports(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ Port *port = erts_pix2port(ix);
+
+ if (!port)
+ continue;
+ /* only examine undead or alive ports */
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ continue;
+
+ /* emergency close socket */
+ if (port->drv_ptr->emergency_close) {
+ port->drv_ptr->emergency_close((ErlDrvData) port->drv_data);
+ }
+ }
+}
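A driver opts in to the callback iterated above simply by filling the emergency_close slot of its ErlDrvEntry; emulators older than driver version 3.2 ignore it, as the IS_DRIVER_VERSION_GE guard in init_driver shows. A hypothetical sketch, not taken from any real driver (C99 designated initializers used for brevity):

#include <unistd.h>
#include "erl_driver.h"

/* Sketch only: called without the normal port shutdown protocol, so just
 * release the OS resource as fast as possible. Storing the fd directly in
 * drv_data is a convention of this sketch, not a requirement. */
static void my_emergency_close(ErlDrvData drv_data)
{
    close((int)(long) drv_data);
}

static ErlDrvEntry my_drv_entry = {
    .driver_name     = "my_drv",
    .extended_marker = ERL_DRV_EXTENDED_MARKER,
    .major_version   = ERL_DRV_EXTENDED_MAJOR_VERSION,
    .minor_version   = ERL_DRV_EXTENDED_MINOR_VERSION,
    .emergency_close = my_emergency_close,
    /* remaining callbacks (start, stop, output, ...) omitted */
};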
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 73630fda8e..68fcc177ae 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1484,7 +1484,8 @@ new_map j d I I
update_map_assoc j s d I I
update_map_exact j s d I I
-is_map Fail cq => jump Fail
+is_map Fail Literal=q => move Literal x | is_map Fail x
+is_map Fail c => jump Fail
%macro: is_map IsMap -fail_action
is_map f r
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index e273056a2b..c29d4b3777 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -66,8 +66,12 @@
*/
#ifndef ERTS_SYS_FD_TYPE
+#define ERTS_SYS_FD_INVALID ((ErtsSysFdType) -1)
typedef int ErtsSysFdType;
#else
+#ifndef ERTS_SYS_FD_INVALID
+# error missing ERTS_SYS_FD_INVALID
+#endif
typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
#endif
@@ -154,10 +158,14 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
/* In VC++, noreturn is a declspec that has to be before the types,
* but in GNUC it is an attribute to be placed between return type
* and function name, hence __decl_noreturn <types> __noreturn <function name>
+ *
+ * on some platforms (e.g. Android) __noreturn is defined in sys/cdefs.h
*/
#if __GNUC__
# define __decl_noreturn
-# define __noreturn __attribute__((noreturn))
+# ifndef __noreturn
+# define __noreturn __attribute__((noreturn))
+# endif
#else
# if defined(__WIN32__) && defined(_MSC_VER)
# define __noreturn
@@ -270,6 +278,7 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
typedef unsigned int Eterm;
typedef unsigned int Uint;
typedef int Sint;
+#define ERTS_UINT_MAX UINT_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#else
@@ -343,6 +352,7 @@ typedef long long Sint;
typedef Uint UWord;
typedef Sint SWord;
+#define ERTS_UINT_MAX ERTS_UWORD_MAX
#endif /* HALFWORD_HEAP */
@@ -495,7 +505,7 @@ extern volatile int erts_writing_erl_crash_dump;
# define NO_ERF
# define NO_ERFC
/* This definition doesn't take NaN into account, but matherr() gets those */
-# define finite(x) (fabs(x) != HUGE_VAL)
+# define isfinite(x) (fabs(x) != HUGE_VAL)
# define USE_MATHERR
# define HAVE_FINITE
#endif
@@ -738,6 +748,14 @@ void init_getenv_state(GETENV_STATE *);
char * getenv_string(GETENV_STATE *);
void fini_getenv_state(GETENV_STATE *);
+#define HAVE_ERTS_CHECK_IO_DEBUG
+typedef struct {
+ int no_used_fds;
+ int no_driver_select_structs;
+ int no_driver_event_structs;
+} ErtsCheckIoDebugInfo;
+int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
+
/* xxxP */
#define SYS_DEFAULT_FLOAT_DECIMALS 20
void init_sys_float(void);
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 738f793020..c505c44905 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -48,6 +48,7 @@
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
#include "erl_ptab.h"
+#include "erl_check_io.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -3197,106 +3198,303 @@ buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
**
*/
-ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+typedef enum {
+ ERTS_IL2B_BCOPY_OK,
+ ERTS_IL2B_BCOPY_YIELD,
+ ERTS_IL2B_BCOPY_OVERFLOW,
+ ERTS_IL2B_BCOPY_TYPE_ERROR
+} ErtsIL2BBCopyRes;
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
+static ERTS_INLINE ErlDrvSizeT
+iolist_to_buf(const int yield_support,
+ ErtsIOList2BufState *state,
+ Eterm obj,
+ char* buf,
+ ErlDrvSizeT alloced_len)
{
- ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
- Eterm* objp;
+#undef IOLIST_TO_BUF_BCOPY
+#define IOLIST_TO_BUF_BCOPY(CONSP) \
+do { \
+ size_t size = binary_size(obj); \
+ if (size > 0) { \
+ Uint bitsize; \
+ byte* bptr; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ if (len < size) \
+ goto L_overflow; \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ if (bitsize != 0) \
+ goto L_type_error; \
+ num_bits = 8*size; \
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \
+ buf += size; \
+ len -= size; \
+ } \
+} while (0)
+
+ ErlDrvSizeT res, len;
+ Eterm* objp = NULL;
+ int init_yield_count;
+ int yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0) {
- goto L_overflow;
- }
- *buf++ = unsigned_val(obj);
- len--;
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- if (len < size) {
+
+ len = (ErlDrvSizeT) alloced_len;
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ break;
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
+ obj = state->iolist.obj;
+ buf = state->buf;
+ len = state->len;
+ objp = state->objp;
+ state->objp = NULL;
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0) {
+ goto L_overflow;
+ }
+ *buf++ = unsigned_val(obj);
+ len--;
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+
+ L_tail:
+
+ obj = CDR(objp);
+
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(NULL);
+ } else if (is_not_nil(obj)) {
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
- goto L_type_error;
- }
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
+ IOLIST_TO_BUF_BCOPY(NULL);
} else if (is_not_nil(obj)) {
goto L_type_error;
- }
+ } else if (yield_support && --yield_count <= 0)
+ goto L_yield;
}
+ res = len;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return len;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+
+
+ return res;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ goto L_return;
L_overflow:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_OVERFLOW;
+ res = ERTS_IOLIST_TO_BUF_OVERFLOW;
+ goto L_return;
+
+ L_bcopy_yield:
+
+ state->buf = buf;
+ state->len = len;
+
+ switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ ERTS_INTERNAL_ERROR("Missing yield");
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
+ goto L_overflow;
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
+ goto L_type_error;
+ }
+
+ L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->len = len;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+#undef IOLIST_TO_BUF_BCOPY
+}
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ ErtsIL2BBCopyRes res;
+ char *buf = state->buf;
+ ErlDrvSizeT len = state->len;
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+ Uint bitsize;
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+ if (size <= 0)
+ return ERTS_IL2B_BCOPY_OK;
+
+ if (len < size)
+ return ERTS_IL2B_BCOPY_OVERFLOW;
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0)
+ return ERTS_IL2B_BCOPY_TYPE_ERROR;
+ }
+
+ ASSERT(size > 0);
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ res = ERTS_IL2B_BCOPY_OK;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.size = size - max_size;
+ size = max_size;
+ res = ERTS_IL2B_BCOPY_YIELD;
+ }
+
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ state->buf += size;
+ state->len -= size;
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
+{
+ return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
+}
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+{
+ return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
/*
@@ -3307,11 +3505,32 @@ ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
* Any input term error detected in erts_iolist_to_buf should also
* be detected in this function!
*/
-int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+
+static ERTS_INLINE int
+iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
+ int res, init_yield_count, yield_count;
Eterm* objp;
- Uint size = 0; /* Intentionally Uint due to halfword heap */
+ Uint size = (Uint) *sizep; /* Intentionally Uint due to halfword heap */
DECLARE_ESTACK(s);
+
+ if (!yield_support)
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ else {
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->estack);
+ size = (Uint) state->size;
+ obj = state->obj;
+ }
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -3327,51 +3546,101 @@ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- size++;
- if (size == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ size++;
+ if (size == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
- SAFE_ADD(size, binary_size(obj));
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ } else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
SAFE_ADD(size, binary_size(obj));
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
- SAFE_ADD(size, binary_size(obj));
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
- DESTROY_ESTACK(s);
*sizep = (ErlDrvSizeT) size;
- return ERTS_IOLIST_OK;
- L_overflow_error:
+ res = ERTS_IOLIST_OK;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+
+ if (yield_support) {
+ int yc, reds;
+ CLEAR_SAVED_ESTACK(&state->estack);
+ yc = init_yield_count - yield_count;
+ reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) size;
+ state->have_size = 1;
+ }
+
+ return res;
+
+ L_overflow_error:
+ res = ERTS_IOLIST_OVERFLOW;
+ size = 0;
+ goto L_return;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_TYPE;
+ size = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = size;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
+}
+
+int erts_iolist_size_yielding(ErtsIOListState *state)
+{
+ ErlDrvSizeT size = state->size;
+ return iolist_size(1, state, state->obj, &size);
+}
+
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+{
+ *sizep = 0;
+ return iolist_size(0, NULL, obj, sizep);
}
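The split above gives callers a resumable interface: initialize an ErtsIOListState once, call erts_iolist_size_yielding() with a fresh reduction budget each slice, and trap whenever it reports ERTS_IOLIST_YIELD; erts_iolist_to_buf_yielding() follows the same contract with an ErtsIOList2BufState and ERTS_IOLIST_TO_BUF_YIELD. A condensed sketch of one sizing slice, with the trap/state-preservation glue (in practice a magic binary holding the state) left out; the name iolist_size_slice is invented:

#include "global.h"

/* Sketch only: one resumable slice of iolist sizing. *state is assumed to
 * have been set up once with ERTS_IOLIST_STATE_INITER(c_p, obj) and kept
 * alive across traps; reds is the reduction budget for this slice.
 * Returns 1 when the size is known, 0 to trap and call again, -1 on error. */
static int iolist_size_slice(ErtsIOListState *state, int reds, ErlDrvSizeT *szp)
{
    state->reds_left = reds;
    switch (erts_iolist_size_yielding(state)) {
    case ERTS_IOLIST_YIELD:
        return 0;                 /* out of reductions; trap and resume */
    case ERTS_IOLIST_OK:
        *szp = state->size;
        return 1;
    default:
        return -1;                /* ERTS_IOLIST_OVERFLOW or ERTS_IOLIST_TYPE */
    }
}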
/* return 0 if item is not a non-empty flat list of bytes */
@@ -3680,6 +3949,9 @@ erts_save_emu_args(int argc, char **argv)
size += sz+1;
}
ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#ifdef DEBUG
end_ptr = ptr + size;
#endif
@@ -3944,19 +4216,7 @@ void erts_silence_warn_unused_result(long unused)
void
erts_interval_init(erts_interval_t *icp)
{
-#ifdef ARCH_64
- erts_atomic_init_nob(&icp->counter.atomic, 0);
-#else
- erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
- dw.dw_sint = 0;
-#else
- dw.sint[ERTS_DW_AINT_HIGH_WORD] = 0;
- dw.sint[ERTS_DW_AINT_LOW_WORD] = 0;
-#endif
- erts_dw_atomic_init_nob(&icp->counter.atomic, &dw);
-
-#endif
+ erts_atomic64_init_nob(&icp->counter.atomic, 0);
#ifdef DEBUG
icp->smp_api = 0;
#endif
@@ -3978,55 +4238,13 @@ erts_smp_interval_init(erts_interval_t *icp)
static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t *icp)
{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_inc_read_relb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_relb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
}
@@ -4034,38 +4252,10 @@ static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
- if (curr_ic > ic)
- return curr_ic;
- return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}
@@ -4073,38 +4263,10 @@ static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
Uint64 curr_ic;
-#ifdef ARCH_64
- curr_ic = (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+ curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
if (curr_ic > ic)
return curr_ic;
- return (Uint64) erts_atomic_inc_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t exp;
-
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &exp);
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
-
- while (1) {
- erts_dw_aint_t new = exp;
-
-#ifdef ETHR_SU_DW_NAINT_T__
- new.dw_sint++;
-#else
- new.sint[ERTS_DW_AINT_LOW_WORD]++;
- if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
- new.sint[ERTS_DW_AINT_HIGH_WORD]++;
-#endif
-
- if (erts_dw_atomic_cmpxchg_acqb(&icp->counter.atomic, &new, &exp))
- return erts_interval_dw_aint_to_val__(&new);
-
- curr_ic = erts_interval_dw_aint_to_val__(&exp);
- if (curr_ic > ic)
- return curr_ic;
- }
-#endif
+ return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
}
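
The interval counter hunks drop the 32-bit double-word CAS loops and rely on the erts_atomic64 API throughout. The "ensure later" operation only bumps the counter when it has not already passed the requested value. A standalone sketch of that idea with C11 atomics follows; the names are hypothetical and the memory ordering is simplified to relaxed, unlike the nob/acqb variants in the real code:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative model of the post-patch "ensure later" operation. */
static _Atomic uint64_t counter;

uint64_t ensure_later(uint64_t ic)
{
    uint64_t curr = atomic_load_explicit(&counter, memory_order_relaxed);
    if (curr > ic)
        return curr;    /* counter already passed the requested interval */
    /* fetch_add returns the old value; +1 yields the incremented value,
     * mirroring an inc_read operation */
    return atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed) + 1;
}
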
Uint64
diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c
index ef539f8f9b..1ef1602ec9 100644
--- a/erts/emulator/drivers/common/gzio.c
+++ b/erts/emulator/drivers/common/gzio.c
@@ -230,6 +230,7 @@ local ErtsGzFile gz_open (path, mode)
errno = 0;
#if defined(FILENAMES_16BIT)
{
+ FILE* efile_wfopen(const WCHAR* name, const WCHAR* mode);
WCHAR wfmode[80];
int i = 0;
int j;
@@ -237,7 +238,7 @@ local ErtsGzFile gz_open (path, mode)
wfmode[i++] = (WCHAR) fmode[j];
}
wfmode[i++] = L'\0';
- s->file = _wfopen((WCHAR *)path, wfmode);
+ s->file = efile_wfopen((WCHAR *)path, wfmode);
if (s->file == NULL) {
return s->destroy(s), (ErtsGzFile)Z_NULL;
}
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 357a4b7bcb..3c6922eb8e 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -93,6 +93,10 @@ typedef unsigned long long llu_t;
#define INT16_MAX (32767)
#endif
+#ifdef __OSE__
+#include "inet.h"
+#endif
+
#ifdef __WIN32__
#define STRNCASECMP strncasecmp
@@ -121,10 +125,6 @@ typedef unsigned long long llu_t;
#undef WANT_NONBLOCKING
#include "sys.h"
-#ifdef __OSE__
-#include "inet.h"
-#endif
-
#undef EWOULDBLOCK
#undef ETIMEDOUT
@@ -200,6 +200,7 @@ typedef unsigned long long llu_t;
#define HAVE_MULTICAST_SUPPORT
+#define HAVE_UDP
#define ERRNO_BLOCK WSAEWOULDBLOCK
@@ -267,14 +268,13 @@ static BOOL (WINAPI *fpSetHandleInformation)(HANDLE,DWORD,DWORD);
#define sock_htonl(x) htonl((x))
#define sock_send(s,buf,len,flag) send((s),(buf),(len),(flag))
#define sock_sendv(s, vec, size, np, flag) \
- WSASend((s),(WSABUF*)(vec),\
- (size),(np),(flag),NULL,NULL)
+ WSASend((s),(WSABUF*)(vec),(size),(np),(flag),NULL,NULL)
#define sock_recv(s,buf,len,flag) recv((s),(buf),(len),(flag))
#define sock_recvfrom(s,buf,blen,flag,addr,alen) \
- recvfrom((s),(buf),(blen),(flag),(addr),(alen))
+ recvfrom((s),(buf),(blen),(flag),(addr),(alen))
#define sock_sendto(s,buf,blen,flag,addr,alen) \
- sendto((s),(buf),(blen),(flag),(addr),(alen))
+ sendto((s),(buf),(blen),(flag),(addr),(alen))
#define sock_hostname(buf, len) gethostname((buf), (len))
#define sock_getservbyname(name,proto) getservbyname((name),(proto))
@@ -294,29 +294,55 @@ static unsigned long zero_value = 0;
static unsigned long one_value = 1;
#elif defined (__OSE__)
+
+/*
+ * Some notes about how inet (currently only tcp) works on OSE.
+ * The driver uses OSE signals to communicate with the one_inet
+ * process. Because of the difference in how signals and file descriptors
+ * work, the whole select/deselect mechanism is very different.
+ * On OSE, when a sock_select is done, a function is called. That function
+ * notes the changes that the driver wants to make, but does not act on them.
+ * Later, when the function returns, the new desired state is compared
+ * to the previous state and the appropriate actions are taken. The action
+ * is usually to either request more data from the stack or stop requesting
+ * data.
+ *
+ * One thing to note is that the driver never does select/deselect. It always
+ * listens for the signals. Flow of data is regulated by sending or not sending
+ * signals to the ose inet process.
+ *
+ * The interesting functions to look at are:
+ * * inet_driver_select : called when sock_select is called
+ * * tcp_inet_ose_dispatch_signal : checks state changes and sends new signals
+ * * tcp_inet_drv_output_ose : ready output callback, reads signals and calls
+ * dispatch_signal
+ * * tcp_inet_drv_input_ose : ready input callback.
+ */
+
+#include "efs.h"
#include "sys/socket.h"
#include "sys/uio.h"
#include "sfk/sys/sfk_uio.h"
#include "netinet/in.h"
#include "netinet/tcp.h"
#include "netdb.h"
+#include "ose_spi/socket.sig"
-ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
-{
- return 0;
-}
+
+static ssize_t writev_fallback(int fd, const struct iovec *iov, int iovcnt, int max_sz);
#define INVALID_SOCKET -1
#define INVALID_EVENT -1
#define SOCKET_ERROR -1
#define SOCKET int
-#define HANDLE long int
-#define FD_READ ERL_DRV_READ
-#define FD_WRITE ERL_DRV_WRITE
-#define FD_CLOSE 0
-#define FD_CONNECT ERL_DRV_WRITE
-#define FD_ACCEPT ERL_DRV_READ
+#define HANDLE int
+#define FD_READ ERL_DRV_READ
+#define FD_WRITE ERL_DRV_WRITE
+#define FD_CLOSE 0
+#define FD_CONNECT (1<<4)
+#define FD_ACCEPT (1<<5)
+#define SOCK_FD_ERROR (1<<6)
#define sock_connect(s, addr, len) connect((s), (addr), (len))
#define sock_listen(s, b) listen((s), (b))
@@ -333,13 +359,14 @@ ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
#define sock_accept(s, addr, len) accept((s), (addr), (len))
#define sock_send(s,buf,len,flag) inet_send((s),(buf),(len),(flag))
#define sock_sendto(s,buf,blen,flag,addr,alen) \
- sendto((s),(buf),(blen),(flag),(addr),(alen))
+ sendto((s),(buf),(blen),(flag),(addr),(alen))
#define sock_sendv(s, vec, size, np, flag) \
- (*(np) = writev((s), (struct iovec*)(vec), (size)))
+ (*(np) = writev_fallback((s), (struct iovec*)(vec), (size), (*(np))))
#define sock_sendmsg(s,msghdr,flag) sendmsg((s),(msghdr),(flag))
#define sock_open(af, type, proto) socket((af), (type), (proto))
#define sock_close(s) close((s))
+#define sock_dup(s) dup((s))
#define sock_shutdown(s, how) shutdown((s), (how))
#define sock_hostname(buf, len) gethostname((buf), (len))
@@ -355,19 +382,6 @@ ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
#define sock_create_event(d) ((d)->s) /* return file descriptor */
#define sock_close_event(e) /* do nothing */
-#define inet_driver_select(port, e, mode, on) \
- driver_select(port, e, mode | (on?ERL_DRV_USE:0), on)
-
-#define sock_select(d, flags, onoff) do { \
- ASSERT(!(d)->is_ignored); \
- (d)->event_mask = (onoff) ? \
- ((d)->event_mask | (flags)) : \
- ((d)->event_mask & ~(flags)); \
- DEBUGF(("sock_select(%ld): flags=%02X, onoff=%d, event_mask=%02lX\r\n", \
- (long) (d)->port, (flags), (onoff), (unsigned long) (d)->event_mask)); \
- inet_driver_select((d)->port, (ErlDrvEvent)(long)(d)->event, (flags), (onoff)); \
- } while(0)
-
#ifndef WANT_NONBLOCKING
#define WANT_NONBLOCKING
#endif
@@ -397,6 +411,16 @@ typedef unsigned long u_long;
#define IN_EXPERIMENTAL(a) ((((in_addr_t)(a)) & 0xe0000000) == 0xe0000000)
#define IN_BADCLASS(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xf0000000)
+#define sock_select(d, flags, onoff) do { \
+ ASSERT(!(d)->is_ignored); \
+ (d)->event_mask = (onoff) ? \
+ ((d)->event_mask | (flags)) : \
+ ((d)->event_mask & ~(flags)); \
+ DEBUGF(("(%s / %d) sock_select(%ld): flags=%02X, onoff=%d, event_mask=%02lX, s=%d\r\n", \
+ __FILE__, __LINE__, (long) (d)->port, (flags), (onoff), (unsigned long) (d)->event_mask, (d)->s)); \
+ inet_driver_select((d), (flags), (onoff)); \
+ } while(0)
+
#else /* !__OSE__ && !__WIN32__ */
#include <sys/time.h>
@@ -438,6 +462,8 @@ typedef unsigned long u_long;
#include <setns.h>
#endif
+#define HAVE_UDP
+
/* SCTP support -- currently for UNIX platforms only: */
#undef HAVE_SCTP
#if defined(HAVE_SCTP_H)
@@ -660,13 +686,13 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
(d)->event_mask = (onoff) ? \
((d)->event_mask | (flags)) : \
((d)->event_mask & ~(flags)); \
- DEBUGF(("sock_select(%ld): flags=%02X, onoff=%d, event_mask=%02lX\r\n", \
- (long) (d)->port, (flags), (onoff), (unsigned long) (d)->event_mask)); \
+ DEBUGF(("(%s / %d) sock_select(%ld): flags=%02X, onoff=%d, event_mask=%02lX\r\n", \
+ __FILE__, __LINE__, (long) (d)->port, (flags), (onoff), (unsigned long) (d)->event_mask)); \
inet_driver_select((d)->port, (ErlDrvEvent)(long)(d)->event, (flags), (onoff)); \
} while(0)
-#endif /* #ifdef __WIN32__ #else */
+#endif /* !__WIN32__ && !__OSE__ */
#ifdef HAVE_SOCKLEN_T
# define SOCKLEN_T socklen_t
@@ -1119,6 +1145,13 @@ typedef struct {
char *netns; /* Socket network namespace name
as full file path */
#endif
+#ifdef __OSE__
+ int select_state; /* state to keep track of whether we
+ should trigger another read/write
+ request at end of ready_input/output */
+ ErlDrvEvent events[6];
+#endif
+
} inet_descriptor;
@@ -1134,20 +1167,88 @@ static void tcp_inet_stop(ErlDrvData);
static void tcp_inet_command(ErlDrvData, char*, ErlDrvSizeT);
static void tcp_inet_commandv(ErlDrvData, ErlIOVec*);
static void tcp_inet_flush(ErlDrvData drv_data);
+#ifndef __OSE__
static void tcp_inet_drv_input(ErlDrvData, ErlDrvEvent);
static void tcp_inet_drv_output(ErlDrvData data, ErlDrvEvent event);
+#endif
static ErlDrvData tcp_inet_start(ErlDrvPort, char* command);
static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData, unsigned int,
char*, ErlDrvSizeT, char**, ErlDrvSizeT);
static void tcp_inet_timeout(ErlDrvData);
static void tcp_inet_process_exit(ErlDrvData, ErlDrvMonitor *);
static void inet_stop_select(ErlDrvEvent, void*);
+static void inet_emergency_close(ErlDrvData);
#ifdef __WIN32__
static void tcp_inet_event(ErlDrvData, ErlDrvEvent);
static void find_dynamic_functions(void);
#endif
-static struct erl_drv_entry tcp_inet_driver_entry =
+#ifdef __OSE__
+/* The structure of the signal used for requesting asynchronous
+ * notification from the stack. Under normal circumstances the network stack
+ * shouldn't overwrite the value set in the fd field by the sender
+ * of the request */
+struct OseAsyncSig {
+ struct FmEvent event;
+ int fd;
+};
+
+union SIGNAL {
+ SIGSELECT signo;
+ struct OseAsyncSig async;
+};
+
+static ErlDrvSSizeT tcp_inet_ctl_ose(ErlDrvData e, unsigned int cmd,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize);
+static void tcp_inet_commandv_ose(ErlDrvData e, ErlIOVec* ev);
+static void tcp_inet_drv_output_ose(ErlDrvData data, ErlDrvEvent event);
+static void tcp_inet_drv_input_ose(ErlDrvData data, ErlDrvEvent event);
+static ErlDrvOseEventId inet_resolve_signal(union SIGNAL *sig);
+
+#ifdef INET_DRV_DEBUG
+
+static char *read_req = "SO_EVENT_READ_REQUEST";
+static char *read_rep = "SO_EVENT_READ_REPLY";
+static char *write_req = "SO_EVENT_WRITE_REQUEST";
+static char *write_rep = "SO_EVENT_WRITE_REPLY";
+static char *eof_req = "SO_EVENT_EOF_REQUEST";
+static char *eof_rep = "SO_EVENT_EOF_REPLY";
+static char *accept_req = "SO_EVENT_ACCEPT_REQUEST";
+static char *accept_rep = "SO_EVENT_ACCEPT_REPLY";
+static char *connect_req = "SO_EVENT_CONNECT_REQUEST";
+static char *connect_rep = "SO_EVENT_CONNECT_REPLY";
+static char *error_req = "SO_EVENT_ERROR_REQUEST";
+static char *error_rep = "SO_EVENT_ERROR_REPLY";
+static char signo_tmp[32];
+
+static char *signo_to_string(SIGSELECT signo) {
+ switch (signo) {
+ case SO_EVENT_READ_REQUEST: { return read_req; }
+ case SO_EVENT_READ_REPLY: { return read_rep; }
+ case SO_EVENT_WRITE_REQUEST: { return write_req; }
+ case SO_EVENT_WRITE_REPLY: { return write_rep; }
+ case SO_EVENT_EOF_REQUEST: { return eof_req; }
+ case SO_EVENT_EOF_REPLY: { return eof_rep; }
+ case SO_EVENT_ACCEPT_REQUEST: { return accept_req; }
+ case SO_EVENT_ACCEPT_REPLY: { return accept_rep; }
+ case SO_EVENT_CONNECT_REQUEST: { return connect_req; }
+ case SO_EVENT_CONNECT_REPLY: { return connect_rep; }
+ case SO_EVENT_ERROR_REQUEST: { return error_req; }
+ case SO_EVENT_ERROR_REPLY: { return error_rep; }
+ }
+
+ snprintf(signo_tmp,32,"0x%x",signo);
+
+ return signo_tmp;
+}
+
+#endif
+
+#endif /* __OSE__ */
+
+
+static struct erl_drv_entry tcp_inet_driver_entry =
{
tcp_inet_init, /* inet_init will add this driver !! */
tcp_inet_start,
@@ -1156,6 +1257,9 @@ static struct erl_drv_entry tcp_inet_driver_entry =
#ifdef __WIN32__
tcp_inet_event,
NULL,
+#elif defined(__OSE__)
+ tcp_inet_drv_input_ose, /*ready_input*/
+ tcp_inet_drv_output_ose, /*ready_output*/
#else
tcp_inet_drv_input,
tcp_inet_drv_output,
@@ -1163,9 +1267,17 @@ static struct erl_drv_entry tcp_inet_driver_entry =
"tcp_inet",
NULL,
NULL,
+#ifdef __OSE__
+ tcp_inet_ctl_ose,
+#else
tcp_inet_ctl,
+#endif
tcp_inet_timeout,
+#ifdef __OSE__
+ tcp_inet_commandv_ose,
+#else
tcp_inet_commandv,
+#endif
NULL,
tcp_inet_flush,
NULL,
@@ -1176,11 +1288,13 @@ static struct erl_drv_entry tcp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING|ERL_DRV_FLAG_SOFT_BUSY,
NULL,
tcp_inet_process_exit,
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
+#ifdef HAVE_UDP
static int packet_inet_init(void);
static void packet_inet_stop(ErlDrvData);
static void packet_inet_command(ErlDrvData, char*, ErlDrvSizeT);
@@ -1228,8 +1342,10 @@ static struct erl_drv_entry udp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING,
NULL,
NULL,
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
+#endif
#ifdef HAVE_SCTP
static struct erl_drv_entry sctp_inet_driver_entry =
@@ -1261,7 +1377,8 @@ static struct erl_drv_entry sctp_inet_driver_entry =
ERL_DRV_FLAG_USE_PORT_LOCKING,
NULL,
NULL, /* process_exit */
- inet_stop_select
+ inet_stop_select,
+ inet_emergency_close
};
#endif
@@ -1293,6 +1410,7 @@ static int tcp_deliver(tcp_descriptor* desc, int len);
static int tcp_inet_output(tcp_descriptor* desc, HANDLE event);
static int tcp_inet_input(tcp_descriptor* desc, HANDLE event);
+#ifdef HAVE_UDP
typedef struct {
inet_descriptor inet; /* common data structure (DON'T MOVE) */
int read_packets; /* Number of packets to read per invocation */
@@ -1304,10 +1422,19 @@ typedef struct {
static int packet_inet_input(udp_descriptor* udesc, HANDLE event);
static int packet_inet_output(udp_descriptor* udesc, HANDLE event);
+#endif
-/* convert descriptor poiner to inet_descriptor pointer */
+/* convert descriptor pointer to inet_descriptor pointer */
#define INETP(d) (&(d)->inet)
+#ifdef __OSE__
+static void inet_driver_select(inet_descriptor* desc,
+ int flags, int onoff);
+static void tcp_inet_ose_dispatch_signals(tcp_descriptor *desc,
+ int prev_select_state,
+ union SIGNAL *sig);
+#endif
+
static int async_ref = 0; /* async reference id generator */
#define NEW_ASYNC_ID() ((async_ref++) & 0xffff)
@@ -1324,7 +1451,6 @@ static int async_ref = 0; /* async reference id generator */
static ErlDrvTermData am_ok;
static ErlDrvTermData am_tcp;
-static ErlDrvTermData am_udp;
static ErlDrvTermData am_error;
static ErlDrvTermData am_einval;
static ErlDrvTermData am_inet_async;
@@ -1334,10 +1460,13 @@ static ErlDrvTermData am_closed;
static ErlDrvTermData am_tcp_passive;
static ErlDrvTermData am_tcp_closed;
static ErlDrvTermData am_tcp_error;
-static ErlDrvTermData am_udp_passive;
-static ErlDrvTermData am_udp_error;
static ErlDrvTermData am_empty_out_q;
static ErlDrvTermData am_ssl_tls;
+#ifdef HAVE_UDP
+static ErlDrvTermData am_udp;
+static ErlDrvTermData am_udp_passive;
+static ErlDrvTermData am_udp_error;
+#endif
#ifdef HAVE_SCTP
static ErlDrvTermData am_sctp;
static ErlDrvTermData am_sctp_passive;
@@ -1545,6 +1674,7 @@ static void *realloc_wrapper(void *current, ErlDrvSizeT size){
# define SCTP_ANC_BUFF_SIZE INET_DEF_BUFFER/2 /* XXX: not very good... */
#endif
+#ifdef HAVE_UDP
static int load_ip_port(ErlDrvTermData* spec, int i, char* buf)
{
spec[i++] = ERL_DRV_INT;
@@ -1579,6 +1709,7 @@ static int load_ip_address(ErlDrvTermData* spec, int i, int family, char* buf)
}
return i;
}
+#endif
#ifdef HAVE_SCTP
@@ -1745,10 +1876,12 @@ static void release_buffer(ErlDrvBinary* buf)
}
}
+#ifdef HAVE_UDP
static ErlDrvBinary* realloc_buffer(ErlDrvBinary* buf, ErlDrvSizeT newsz)
{
return driver_realloc_binary(buf, newsz);
}
+#endif
/* use a TRICK, access the refc field to see if any one else has
* a ref to this buffer then call driver_free_binary else
@@ -2760,6 +2893,9 @@ static ErlDrvTermData am_sctp_rtoinfo, /* Option names */
/* For #sctp_paddrinfo{}: */
am_active, am_inactive,
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ am_unconfirmed,
+# endif
/* For #sctp_status{}: */
# if HAVE_DECL_SCTP_EMPTY
@@ -3409,6 +3545,7 @@ static int tcp_error_message(tcp_descriptor* desc, int err)
return erl_drv_output_term(desc->inet.dport, spec, i);
}
+#ifdef HAVE_UDP
/*
** active mode message:
** {udp, S, IP, Port, [H1,...Hsz | Data]} or
@@ -3499,6 +3636,7 @@ static int packet_binary_message
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
return erl_drv_output_term(desc->dport, spec, i);
}
+#endif
/*
** active mode message: send active-to-passive transition message
@@ -3513,6 +3651,9 @@ static int packet_binary_message
DEBUGF(("packet_passive_message(%ld):\r\n", (long)desc->port));
+#if !defined(HAVE_UDP) && !defined(HAVE_SCTP)
+ i = LOAD_ATOM(spec, i, am_tcp_passive);
+#else
if (desc->sprotocol == IPPROTO_TCP)
i = LOAD_ATOM(spec, i, am_tcp_passive);
else {
@@ -3522,12 +3663,14 @@ static int packet_binary_message
i = LOAD_ATOM(spec, i, am_udp_passive);
#endif
}
+#endif
i = LOAD_PORT(spec, i, desc->dport);
i = LOAD_TUPLE(spec, i, 2);
ASSERT(i <= 6);
return erl_drv_output_term(desc->dport, spec, i);
}
+#ifdef HAVE_UDP
/*
** send active message {udp_error|sctp_error, S, Error}
*/
@@ -3554,7 +3697,7 @@ static int packet_error_message(udp_descriptor* udesc, int err)
ASSERT(i == sizeof(spec)/sizeof(*spec));
return erl_drv_output_term(desc->dport, spec, i);
}
-
+#endif
/*
** active=TRUE:
@@ -3619,7 +3762,7 @@ tcp_reply_binary_data(tcp_descriptor* desc, ErlDrvBinary* bin, int offs, int len
return code;
}
-
+#ifdef HAVE_UDP
static int
packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz,
ErlDrvBinary * bin, int offs, int len,
@@ -3642,6 +3785,7 @@ packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz,
return code;
}
}
+#endif
/* ----------------------------------------------------------------------------
@@ -3781,7 +3925,10 @@ static void inet_init_sctp(void) {
/* For #sctp_paddrinfo{}: */
INIT_ATOM(active);
INIT_ATOM(inactive);
-
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ INIT_ATOM(unconfirmed);
+# endif
+
/* For #sctp_status{}: */
# if HAVE_DECL_SCTP_EMPTY
INIT_ATOM(empty);
@@ -3817,7 +3964,9 @@ static int inet_init()
INIT_ATOM(ok);
INIT_ATOM(tcp);
+#ifdef HAVE_UDP
INIT_ATOM(udp);
+#endif
INIT_ATOM(error);
INIT_ATOM(einval);
INIT_ATOM(inet_async);
@@ -3827,8 +3976,10 @@ static int inet_init()
INIT_ATOM(tcp_passive);
INIT_ATOM(tcp_closed);
INIT_ATOM(tcp_error);
+#ifdef HAVE_UDP
INIT_ATOM(udp_passive);
INIT_ATOM(udp_error);
+#endif
INIT_ATOM(empty_out_q);
INIT_ATOM(ssl_tls);
@@ -3847,7 +3998,10 @@ static int inet_init()
/* add TCP, UDP and SCTP drivers */
add_driver_entry(&tcp_inet_driver_entry);
+#ifdef HAVE_UDP
add_driver_entry(&udp_inet_driver_entry);
+#endif
+
#ifdef HAVE_SCTP
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
@@ -4176,6 +4330,16 @@ static void desc_close(inet_descriptor* desc)
desc->forced_events = 0;
desc->send_would_block = 0;
#endif
+#ifdef __OSE__
+ if (desc->events[0]) {
+ driver_select(desc->port,desc->events[0],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ driver_select(desc->port,desc->events[1],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ driver_select(desc->port,desc->events[2],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ driver_select(desc->port,desc->events[3],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ driver_select(desc->port,desc->events[4],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ driver_select(desc->port,desc->events[5],FD_READ|FD_WRITE|ERL_DRV_USE,0);
+ }
+#else
/*
* We should close the fd here, but the other driver might still
* be selecting on it.
@@ -4185,6 +4349,7 @@ static void desc_close(inet_descriptor* desc)
ERL_DRV_USE, 0);
else
inet_stop_select((ErlDrvEvent)(long)desc->event,NULL);
+#endif
desc->event = INVALID_EVENT; /* closed by stop_select callback */
desc->s = INVALID_SOCKET;
desc->event_mask = 0;
@@ -4216,7 +4381,7 @@ static int erl_inet_close(inet_descriptor* desc)
desc_close(desc);
desc->state = INET_STATE_CLOSED;
} else if (desc->prebound && (desc->s != INVALID_SOCKET)) {
- sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE, 0);
+ sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE | ERL_DRV_USE_NO_CALLBACK, 0);
desc->event_mask = 0;
#ifdef __WIN32__
desc->forced_events = 0;
@@ -4226,6 +4391,64 @@ static int erl_inet_close(inet_descriptor* desc)
return 0;
}
+#ifdef __OSE__
+static void inet_select_init(inet_descriptor* desc)
+{
+ desc->events[0] =
+ erl_drv_ose_event_alloc(SO_EVENT_READ_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[0],
+ ERL_DRV_READ|ERL_DRV_USE, 1);
+
+ desc->events[1] =
+ erl_drv_ose_event_alloc(SO_EVENT_EOF_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[1],
+ ERL_DRV_READ|ERL_DRV_USE, 1);
+
+ desc->events[2] =
+ erl_drv_ose_event_alloc(SO_EVENT_ACCEPT_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[2],
+ ERL_DRV_READ|ERL_DRV_USE, 1);
+
+ /* trigger tcp_inet_input */
+ desc->events[3] =
+ erl_drv_ose_event_alloc(SO_EVENT_WRITE_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[3],
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+
+ desc->events[4] =
+ erl_drv_ose_event_alloc(SO_EVENT_CONNECT_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[4],
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+
+ desc->events[5] =
+ erl_drv_ose_event_alloc(SO_EVENT_ERROR_REPLY,
+ desc->s,
+ inet_resolve_signal,
+ NULL);
+ driver_select(desc->port, desc->events[5],
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+
+ /* Issue a select on error event before any other select to be sure we are
+ prepared to receive error notifications from the stack, even in the
+ situations when select isn't issued */
+ sock_select(desc, SOCK_FD_ERROR, 1);
+}
+#endif
static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
char** rbuf, ErlDrvSizeT rsize)
@@ -4309,6 +4532,10 @@ static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
#ifdef __WIN32__
driver_select(desc->port, desc->event, ERL_DRV_READ, 1);
#endif
+#ifdef __OSE__
+ inet_select_init(desc);
+#endif
+
desc->state = INET_STATE_OPEN;
desc->stype = type;
desc->sfamily = domain;
@@ -4318,28 +4545,50 @@ static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
/* as inet_open but pass in an open socket (MUST BE OF RIGHT TYPE) */
static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
- SOCKET s, char** rbuf, ErlDrvSizeT rsize)
+ SOCKET s, Uint32 bound,
+ char** rbuf, ErlDrvSizeT rsize)
{
inet_address name;
unsigned int sz = sizeof(name);
- /* check that it is a socket and that the socket is bound */
- if (IS_SOCKET_ERROR(sock_name(s, (struct sockaddr*) &name, &sz)))
- return ctl_error(sock_errno(), rbuf, rsize);
- if (name.sa.sa_family != domain)
- return ctl_error(EINVAL, rbuf, rsize);
+ if (bound) {
+ /* check that it is a socket and that the socket is bound */
+ if (IS_SOCKET_ERROR(sock_name(s, (struct sockaddr*) &name, &sz)))
+ return ctl_error(sock_errno(), rbuf, rsize);
+ if (name.sa.sa_family != domain)
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+#ifdef __OSE__
+ /* for fdopen, duplicating the sd makes it possible to uniquely associate
+ the signal from OSE with the erlang port */
+ desc->s = sock_dup(s);
+#else
desc->s = s;
+#endif
+
if ((desc->event = sock_create_event(desc)) == INVALID_EVENT)
return ctl_error(sock_errno(), rbuf, rsize);
SET_NONBLOCKING(desc->s);
#ifdef __WIN32__
driver_select(desc->port, desc->event, ERL_DRV_READ, 1);
#endif
- desc->state = INET_STATE_BOUND; /* assume bound */
+
+ if (bound)
+ desc->state = INET_STATE_BOUND;
+ else
+ desc->state = INET_STATE_OPEN;
+
if (type == SOCK_STREAM) { /* check if connected */
sz = sizeof(name);
- if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz)))
+ if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz))) {
desc->state = INET_STATE_CONNECTED;
+#ifdef __OSE__
+ /* since we are dealing with different descriptors (i.e. inet and
+ socket) the select part should be initialized with the right
+ values */
+ inet_select_init(desc);
+#endif
+ }
}
desc->prebound = 1; /* used to prevent a real close since
@@ -4365,8 +4614,7 @@ struct addr_if {
#ifndef SIOCGIFNETMASK
-static struct in_addr net_mask(in)
-struct in_addr in;
+static struct in_addr net_mask(struct in_addr in)
{
register u_long i = sock_ntohl(in.s_addr);
@@ -4482,6 +4730,36 @@ static char* sockaddr_to_buf(struct sockaddr* addr, char* ptr, char* end)
return NULL;
}
+/* sockaddr_bufsz_need
+ * Returns the number of bytes needed to store the information
+ * through sockaddr_to_buf
+ */
+
+static size_t sockaddr_bufsz_need(struct sockaddr* addr)
+{
+ if (addr->sa_family == AF_INET || addr->sa_family == 0) {
+ return 1 + sizeof(struct in_addr);
+ }
+#if defined(HAVE_IN6) && defined(AF_INET6)
+ else if (addr->sa_family == AF_INET6) {
+ return 1 + sizeof(struct in6_addr);
+ }
+#endif
+#if defined(AF_LINK)
+ if (addr->sa_family == AF_LINK) {
+ struct sockaddr_dl *sdl_p = (struct sockaddr_dl*) addr;
+ return 2 + sdl_p->sdl_alen;
+ }
+#endif
+#if defined(AF_PACKET) && defined(HAVE_NETPACKET_PACKET_H)
+ else if(addr->sa_family == AF_PACKET) {
+ struct sockaddr_ll *sll_p = (struct sockaddr_ll*) addr;
+ return 2 + sll_p->sll_halen;
+ }
+#endif
+ return 0;
+}
+
static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr)
{
buf_check(ptr,end,1);
@@ -5541,7 +5819,7 @@ done:
ia_p->Ipv6IfIndex &&
ia_p->Ipv6IfIndex != index)
{
- /* Oops, there was an other interface for IPv6. Possible? XXX */
+ /* Oops, there was another interface for IPv6. Possible? XXX */
index = ia_p->Ipv6IfIndex;
goto index;
}
@@ -5560,6 +5838,11 @@ done:
}
#elif defined(HAVE_GETIFADDRS)
+#ifdef DEBUG
+#define GETIFADDRS_BUFSZ (1)
+#else
+#define GETIFADDRS_BUFSZ (512)
+#endif
static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
char **rbuf_pp, ErlDrvSizeT rsize)
@@ -5570,15 +5853,15 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
char *buf_p;
char *buf_alloc_p;
- buf_size = 512;
- buf_alloc_p = ALLOC(buf_size);
+ buf_size = GETIFADDRS_BUFSZ;
+ buf_alloc_p = ALLOC(GETIFADDRS_BUFSZ);
buf_p = buf_alloc_p;
# define BUF_ENSURE(Size) \
do { \
int NEED_, GOT_ = buf_p - buf_alloc_p; \
NEED_ = GOT_ + (Size); \
if (NEED_ > buf_size) { \
- buf_size = NEED_ + 512; \
+ buf_size = NEED_ + GETIFADDRS_BUFSZ; \
buf_alloc_p = REALLOC(buf_alloc_p, buf_size); \
buf_p = buf_alloc_p + GOT_; \
} \
@@ -5591,7 +5874,7 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
while (! (P_ = sockaddr_to_buf((sa), buf_p, \
buf_alloc_p+buf_size))) { \
int GOT_ = buf_p - buf_alloc_p; \
- buf_size += 512; \
+ buf_size += GETIFADDRS_BUFSZ; \
buf_alloc_p = REALLOC(buf_alloc_p, buf_size); \
buf_p = buf_alloc_p + GOT_; \
} \
@@ -5648,10 +5931,11 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
|| ifa_p->ifa_addr->sa_family == AF_PACKET
#endif
) {
- char *bp = buf_p;
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
- if (buf_p - bp < 4) buf_p = bp; /* Empty hwaddr */
+ size_t need = sockaddr_bufsz_need(ifa_p->ifa_addr);
+ if (need > 3) {
+ BUF_ENSURE(1 + need);
+ SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
+ }
}
#endif
}
@@ -5666,6 +5950,7 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
return buf_size;
# undef BUF_ENSURE
}
+#undef GETIFADDRS_BUFSZ
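
Shrinking GETIFADDRS_BUFSZ to 1 in DEBUG builds forces BUF_ENSURE and SOCKADDR_TO_BUF through their REALLOC path on essentially every append, so buffer-growth bugs surface immediately instead of hiding behind a roomy 512-byte start. A standalone model of that grow-on-demand pattern (hypothetical helper names, plain C):

#include <stdlib.h>
#include <string.h>

/* Illustrative grow-on-demand buffer; CHUNK 1 mimics the DEBUG setting
 * that forces a realloc on practically every append. */
#define CHUNK 1

struct growbuf { char *base; size_t used; size_t cap; };

static int buf_ensure(struct growbuf *b, size_t need)
{
    if (b->used + need > b->cap) {
        char *p = realloc(b->base, b->used + need + CHUNK);
        if (!p)
            return -1;
        b->base = p;
        b->cap = b->used + need + CHUNK;
    }
    return 0;
}

static int buf_put(struct growbuf *b, const void *data, size_t n)
{
    if (buf_ensure(b, n) < 0)
        return -1;
    memcpy(b->base + b->used, data, n);
    b->used += n;
    return 0;
}
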
#else
@@ -5929,6 +6214,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
continue;
+#ifdef HAVE_UDP
case INET_LOPT_UDP_READ_PACKETS:
if (desc->stype == SOCK_DGRAM) {
udp_descriptor* udesc = (udp_descriptor*) desc;
@@ -5936,6 +6222,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
udesc->read_packets = ival;
}
continue;
+#endif
#ifdef HAVE_SETNS
case INET_LOPT_NETNS:
@@ -6902,6 +7189,7 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
}
continue;
+#ifdef HAVE_UDP
case INET_LOPT_UDP_READ_PACKETS:
if (desc->stype == SOCK_DGRAM) {
*ptr++ = opt;
@@ -6911,6 +7199,7 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
TRUNCATE_TO(0,ptr);
}
continue;
+#endif
#ifdef HAVE_SETNS
case INET_LOPT_NETNS:
@@ -7095,8 +7384,13 @@ static int load_paddrinfo (ErlDrvTermData * spec, int i,
case SCTP_INACTIVE:
i = LOAD_ATOM (spec, i, am_inactive);
break;
+# if HAVE_DECL_SCTP_UNCONFIRMED
+ case SCTP_UNCONFIRMED:
+ i = LOAD_ATOM (spec, i, am_unconfirmed);
+ break;
+# endif
default:
- ASSERT(0); /* NB: SCTP_UNCONFIRMED modifier not yet supported */
+ i = LOAD_ATOM (spec, i, am_undefined);
}
i = LOAD_INT (spec, i, pai->spinfo_cwnd);
i = LOAD_INT (spec, i, pai->spinfo_srtt);
@@ -7961,6 +8255,19 @@ static void inet_stop(inet_descriptor* desc)
FREE(desc);
}
+static void inet_emergency_close(ErlDrvData data)
+{
+ /* valid for any (UDP, TCP or SCTP) descriptor */
+ tcp_descriptor* tcp_desc = (tcp_descriptor*)data;
+ inet_descriptor* desc = INETP(tcp_desc);
+ DEBUGF(("inet_emergency_close(%ld) {s=%d\r\n",
+ (long)desc->port, desc->s));
+ if (desc->s != INVALID_SOCKET) {
+ sock_close(desc->s);
+ }
+}
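
inet_emergency_close fills the new last slot of the driver entries above and is shared by the TCP, UDP and SCTP entries: every specific descriptor embeds an inet_descriptor as its first member, so the INETP() cast is valid regardless of which descriptor the port actually holds. A small standalone illustration of that common-first-member idiom (hypothetical struct names):

#include <stdio.h>

/* Illustrative: why one callback can serve tcp, udp and sctp descriptors.
 * Each specific descriptor embeds the common part as its FIRST member,
 * so the same cast-and-take-member works for all of them. */
struct common_desc { int s; };                      /* stands in for inet_descriptor */
struct tcp_like    { struct common_desc inet; int tcp_only; };

static void emergency_close(void *data)             /* ErlDrvData in the driver */
{
    struct common_desc *c = &((struct tcp_like *)data)->inet;   /* INETP(desc) */
    printf("closing socket %d\n", c->s);
}

int main(void)
{
    struct tcp_like d = { { 42 }, 0 };
    emergency_close(&d);
    return 0;
}
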
+
+
static void set_default_msgq_limits(ErlDrvPort port)
{
ErlDrvSizeT q_high = INET_HIGH_MSGQ_WATERMARK;
@@ -8041,6 +8348,15 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
#ifdef HAVE_SETNS
desc->netns = NULL;
#endif
+#ifdef __OSE__
+ desc->select_state = 0;
+ desc->events[0] = NULL;
+ desc->events[1] = NULL;
+ desc->events[2] = NULL;
+ desc->events[3] = NULL;
+ desc->events[4] = NULL;
+ desc->events[5] = NULL;
+#endif
return (ErlDrvData)desc;
}
@@ -8761,6 +9077,11 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
copy_desc->inet.port = port;
copy_desc->inet.dport = driver_mk_port(port);
+
+#ifdef __OSE__
+ inet_select_init(&copy_desc->inet);
+#endif
+
*err = 0;
return copy_desc;
}
@@ -8822,8 +9143,22 @@ static void tcp_inet_stop(ErlDrvData e)
inet_stop(INETP(desc));
}
+#ifdef __OSE__
-
+static ErlDrvSSizeT tcp_inet_ctl_ose(ErlDrvData e, unsigned int cmd,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize) {
+
+ tcp_descriptor* desc = (tcp_descriptor*)e;
+ int prev_select_state = INETP(desc)->select_state;
+
+ ErlDrvSSizeT res = tcp_inet_ctl(e,cmd,buf,len,rbuf,rsize);
+
+ tcp_inet_ose_dispatch_signals((tcp_descriptor*)e,prev_select_state,NULL);
+
+ return res;
+}
+#endif
/* TCP requests from Erlang */
static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
@@ -8858,10 +9193,11 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
break;
}
- case INET_REQ_FDOPEN: { /* pass in an open socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */
int domain;
+ int bound;
DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
- if (len != 6) return ctl_error(EINVAL, rbuf, rsize);
+ if (len != 6 && len != 10) return ctl_error(EINVAL, rbuf, rsize);
switch(buf[0]) {
case INET_AF_INET:
domain = AF_INET;
@@ -8879,8 +9215,13 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
return ctl_error(EINVAL, rbuf, rsize);
}
if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+
+ if (len == 6) bound = 1;
+ else bound = get_int32(buf+2+4);
+
return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM,
- (SOCKET) get_int32(buf+2), rbuf, rsize);
+ (SOCKET) get_int32(buf+2),
+ bound, rbuf, rsize);
break;
}
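
INET_REQ_FDOPEN now accepts either the old 6-byte payload (family, type, fd) or a 10-byte payload carrying an extra 32-bit bound flag; inet_ctl_fdopen only assumes INET_STATE_BOUND when the flag is set, and packet_inet_ctl gets the same treatment further down. A sketch of the length-dependent decode as the hunks describe it (hypothetical helper; fields assumed big-endian, matching the driver's usual wire format):

#include <stddef.h>
#include <stdint.h>

/* Illustrative decode of the FDOPEN payload as the hunks describe it:
 * buf[0]=family, buf[1]=type, buf[2..5]=fd, optional buf[6..9]=bound. */
static uint32_t get_u32(const unsigned char *p)      /* big-endian, assumed */
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int decode_fdopen(const unsigned char *buf, size_t len, int *fd, int *bound)
{
    if (len != 6 && len != 10)
        return -1;                       /* the driver answers EINVAL */
    *fd = (int)get_u32(buf + 2);
    *bound = (len == 6) ? 1 : (int)get_u32(buf + 2 + 4);
    return 0;
}
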
@@ -9039,7 +9380,7 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
ErlDrvTermData caller = driver_caller(desc->inet.port);
tcp_descriptor* accept_desc;
int err;
-
+
if ((accept_desc = tcp_inet_copy(desc,s,caller,&err)) == NULL) {
sock_close(s);
return ctl_error(err, rbuf, rsize);
@@ -9073,7 +9414,8 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
char tbuf[2];
int n;
- DEBUGF(("tcp_inet_ctl(%ld): RECV\r\n", (long)desc->inet.port));
+ DEBUGF(("tcp_inet_ctl(%ld): RECV (s=%d)\r\n",
+ (long)desc->inet.port, desc->inet.s));
/* INPUT: Timeout(4), Length(4) */
if (!IS_CONNECTED(INETP(desc))) {
if (desc->tcp_add_flags & TCP_ADDF_DELAYED_CLOSE_RECV) {
@@ -9245,6 +9587,16 @@ static void tcp_inet_command(ErlDrvData e, char *buf, ErlDrvSizeT len)
DEBUGF(("tcp_inet_command(%ld) }\r\n", (long)desc->inet.port));
}
+#ifdef __OSE__
+
+static void tcp_inet_commandv_ose(ErlDrvData e, ErlIOVec* ev) {
+ int prev_select_state = INETP((tcp_descriptor*)e)->select_state;
+ tcp_inet_commandv(e, ev);
+ tcp_inet_ose_dispatch_signals((tcp_descriptor*)e,prev_select_state,NULL);
+}
+
+#endif
+
static void tcp_inet_commandv(ErlDrvData e, ErlIOVec* ev)
{
@@ -9308,6 +9660,22 @@ static void inet_stop_select(ErlDrvEvent event, void* _)
{
#ifdef __WIN32__
WSACloseEvent((HANDLE)event);
+#elif defined(__OSE__)
+ ErlDrvOseEventId id;
+ union SIGNAL *sig;
+ erl_drv_ose_event_fetch(event, NULL, &id,NULL);
+ DEBUGF(("inet_stop_select(?#?) {s=%d\n",id));
+ sock_close((int)id);
+ /* On socket close all the signals waiting to be processed as part of the
+ select should be deallocated */
+ while((sig = erl_drv_ose_get_signal(event))) {
+ DEBUGF(("inet_stop_select(?#?): Freeing signal %s\n",
+ signo_to_string(sig->signo)));
+ free_buf(&sig);
+ }
+ erl_drv_ose_event_free(event);
+ DEBUGF(("inet_stop_select(?#?) }\n"));
+
#else
sock_close((SOCKET)(long)event);
#endif
@@ -9427,12 +9795,13 @@ static int tcp_remain(tcp_descriptor* desc, int* len)
int n = desc->i_ptr - ptr; /* number of bytes read */
int tlen;
- DEBUGF(("tcp_remain(%ld): s=%d, n=%d, nfill=%d nsz=%d\r\n",
- (long)desc->inet.port, desc->inet.s, n, nfill, nsz));
-
tlen = packet_get_length(desc->inet.htype, ptr, n,
desc->inet.psize, desc->i_bufsz,
&desc->http_state);
+
+ DEBUGF(("tcp_remain(%ld): s=%d, n=%d, nfill=%d nsz=%d, tlen %d\r\n",
+ (long)desc->inet.port, desc->inet.s, n, nfill, nsz, tlen));
+
if (tlen > 0) {
if (tlen <= n) { /* got a packet */
*len = tlen;
@@ -9840,7 +10209,146 @@ static void tcp_inet_event(ErlDrvData e, ErlDrvEvent event)
return;
}
-#endif /* WIN32 */
+#elif defined(__OSE__) /* !__WIN32__ */
+/* The specific resolve signal function. It will return the socket descriptor
+ for which the select was issued */
+static ErlDrvOseEventId inet_resolve_signal(union SIGNAL *sig) {
+ DEBUGF(("%s(?#?): s=%d got signal %s, status = %d, extra = %d, sender = 0x%x\n",
+ __FUNCTION__,sig->async.fd,signo_to_string(sig->signo),
+ sig->async.event.status,
+ sig->async.event.extra,sender(&sig)));
+ if (sig->signo == SO_EVENT_READ_REPLY ||
+ sig->signo == SO_EVENT_ACCEPT_REPLY ||
+ sig->signo == SO_EVENT_EOF_REPLY ||
+ sig->signo == SO_EVENT_WRITE_REPLY ||
+ sig->signo == SO_EVENT_ERROR_REPLY ||
+ sig->signo == SO_EVENT_CONNECT_REPLY ) {
+ return sig->async.fd;
+ }
+
+ return -1;
+}
+
+static void inet_driver_select(inet_descriptor* desc,
+ int flags, int onoff) {
+ ASSERT(!desc->is_ignored);
+
+ if(onoff) {
+ desc->select_state |= flags;
+ } else {
+ desc->select_state &= ~flags;
+ }
+}
+
+static ssize_t writev_fallback(int fd, const struct iovec *iov, int iovcnt, int max_sz)
+{
+ size_t data_len = 0;
+ size_t sent = 0;
+ ssize_t n;
+ int i;
+
+ for(i = 0; i < iovcnt; i++)
+ {
+ data_len = iov[i].iov_len;
+tryagain:
+ n = sock_send(fd, iov[i].iov_base, data_len, 0);
+ if (IS_SOCKET_ERROR(n)) {
+ /* If the buffer length is greater than the amount the stack is able to
+ * send out, then try to send at least max_sz (this limit comes with
+ * the SO_EVENT_WRITE_REPLY signal) */
+ if ((errno == EMSGSIZE) && (max_sz > 0) && (data_len > max_sz)) {
+ data_len = max_sz;
+ goto tryagain;
+ }
+ break;
+ }
+ sent += n;
+ }
+ return sent;
+}
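
writev_fallback replaces the previous stub writev: it sends the iovec entries one at a time and, when the stack rejects a large buffer with EMSGSIZE, retries with the max_sz hint delivered alongside the SO_EVENT_WRITE_REPLY signal. A trimmed standalone model of that loop, with the send call abstracted behind a function pointer so it compiles without the driver's sock_send macro (hypothetical names):

#include <errno.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Illustrative model: send iovec entries one by one, shrinking a too-large
 * buffer to max_sz when the send function reports EMSGSIZE. */
typedef ssize_t (*send_fn)(int fd, const void *buf, size_t len);

ssize_t writev_model(int fd, const struct iovec *iov, int iovcnt,
                     size_t max_sz, send_fn do_send)
{
    ssize_t sent = 0;
    int i;

    for (i = 0; i < iovcnt; i++) {
        size_t len = iov[i].iov_len;
        ssize_t n;
    again:
        n = do_send(fd, iov[i].iov_base, len);
        if (n < 0) {
            if (errno == EMSGSIZE && max_sz > 0 && len > max_sz) {
                len = max_sz;            /* retry with what the stack accepts */
                goto again;
            }
            break;                       /* stop; return the partial count */
        }
        sent += n;
    }
    return sent;
}
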
+
+#define OSE_EVENT_REQ(TCP_DESC,EVENT) do { \
+ union SIGNAL *sig = alloc(sizeof(struct OseAsyncSig), EVENT); \
+ sig->async.fd = INETP(TCP_DESC)->s; \
+ ose_request_event(INETP(TCP_DESC)->s, &sig, 1); \
+ DEBUGF(("%s(%ld): s=%d sent %s\r\n",__FUNCTION__, \
+ INETP(TCP_DESC)->port,INETP(TCP_DESC)->s,signo_to_string(EVENT))); \
+ } while(0)
+
+static void tcp_inet_ose_dispatch_signals(tcp_descriptor *desc,
+ int prev_select_state,
+ union SIGNAL *sig) {
+ if (sig) {
+ DEBUGF(("tcp_inet_ose_dispatch_signals(%ld) {s=%d resend\r\n",
+ (long)INETP(desc)->port,INETP(desc)->s));
+ /* We are reacting to a signal, which means that if
+ the select_state for that signal is still activated
+ we should send a new signal */
+ switch (sig->signo) {
+ case SO_EVENT_READ_REPLY: {
+ if (INETP(desc)->select_state & FD_READ)
+ OSE_EVENT_REQ(desc,SO_EVENT_READ_REQUEST);
+ break;
+ }
+ case SO_EVENT_WRITE_REPLY: {
+ if (INETP(desc)->select_state & FD_WRITE)
+ OSE_EVENT_REQ(desc,SO_EVENT_WRITE_REQUEST);
+ break;
+ }
+ case SO_EVENT_CONNECT_REPLY: {
+ if (INETP(desc)->select_state & FD_CONNECT)
+ OSE_EVENT_REQ(desc,SO_EVENT_CONNECT_REQUEST);
+ break;
+ }
+ case SO_EVENT_ACCEPT_REPLY: {
+ if (INETP(desc)->select_state & FD_ACCEPT)
+ OSE_EVENT_REQ(desc,SO_EVENT_ACCEPT_REQUEST);
+ break;
+ }
+ case SO_EVENT_ERROR_REPLY: {
+ if (INETP(desc)->select_state & SOCK_FD_ERROR)
+ OSE_EVENT_REQ(desc,SO_EVENT_ERROR_REQUEST);
+ break;
+ }
+
+ }
+ DEBUGF(("tcp_inet_ose_dispatch_signals(%ld) }\r\n",
+ (long)INETP(desc)->port));
+ }
+
+ if (INETP(desc)->select_state != prev_select_state) {
+ /* If the select state has changed we have to issue signals for
+ the state parts that have changed. */
+ int xor_select_state = INETP(desc)->select_state ^ prev_select_state;
+ DEBUGF(("tcp_inet_ose_dispatch_signals(%ld) {s=%d select change\r\n",
+ (long)INETP(desc)->port,INETP(desc)->s));
+ if ((xor_select_state & FD_READ) &&
+ (INETP(desc)->select_state & FD_READ)) {
+ OSE_EVENT_REQ(desc,SO_EVENT_READ_REQUEST);
+ }
+ if ((xor_select_state & FD_WRITE) &&
+ (INETP(desc)->select_state & FD_WRITE)) {
+ OSE_EVENT_REQ(desc,SO_EVENT_WRITE_REQUEST);
+ }
+ if ((xor_select_state & FD_CONNECT) &&
+ (INETP(desc)->select_state & FD_CONNECT)) {
+ OSE_EVENT_REQ(desc,SO_EVENT_CONNECT_REQUEST);
+ }
+ if ((xor_select_state & FD_ACCEPT) &&
+ (INETP(desc)->select_state & FD_ACCEPT)) {
+ OSE_EVENT_REQ(desc,SO_EVENT_ACCEPT_REQUEST);
+ }
+ if ((xor_select_state & SOCK_FD_ERROR) &&
+ (INETP(desc)->select_state & SOCK_FD_ERROR)) {
+ OSE_EVENT_REQ(desc,SO_EVENT_ERROR_REQUEST);
+ }
+
+ DEBUGF(("tcp_inet_ose_dispatch_signals(%ld) }\r\n",
+ (long)INETP(desc)->port));
+ }
+}
+
+#endif /* __OSE__ */
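
On OSE, sock_select only records the desired bits in select_state; tcp_inet_ose_dispatch_signals later XORs the new state against the state captured before the operation and issues a request for every bit that was switched on. A standalone sketch of that "what changed" logic with made-up bit names:

#include <stdio.h>

#define EV_READ  (1 << 0)
#define EV_WRITE (1 << 1)

/* Illustrative: request an event for every bit that was switched on. */
static void dispatch_changes(int prev_state, int new_state)
{
    int changed = prev_state ^ new_state;          /* bits that flipped */
    if ((changed & EV_READ) && (new_state & EV_READ))
        printf("request READ event\n");
    if ((changed & EV_WRITE) && (new_state & EV_WRITE))
        printf("request WRITE event\n");
}

int main(void)
{
    int prev = EV_READ;             /* captured before the callback runs */
    int now  = prev | EV_WRITE;     /* sock_select(FD_WRITE, 1) only sets a bit */
    dispatch_changes(prev, now);    /* only the WRITE request is sent */
    return 0;
}
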
/* socket has input:
@@ -9861,7 +10369,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
unsigned int len;
inet_address remote;
inet_async_op *this_op = desc->inet.opt;
-
+
len = sizeof(desc->inet.remote);
s = sock_accept(desc->inet.s, (struct sockaddr*) &remote, &len);
if (s == INVALID_SOCKET && sock_errno() == ERRNO_BLOCK) {
@@ -9930,7 +10438,6 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
while (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
len = sizeof(desc->inet.remote);
s = sock_accept(desc->inet.s, (struct sockaddr*) &remote, &len);
-
if (s == INVALID_SOCKET && sock_errno() == ERRNO_BLOCK) {
/* Just try again, no real error, keep the last return code */
goto done;
@@ -10256,6 +10763,49 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, ErlDrvSizeT len)
return 0;
}
+#ifdef __OSE__
+
+static void tcp_inet_drv_output_ose(ErlDrvData data, ErlDrvEvent event)
+{
+ union SIGNAL *event_sig = erl_drv_ose_get_signal(event);
+
+ while (event_sig) {
+ int prev_select_state = INETP((tcp_descriptor*)data)->select_state;
+ int res = tcp_inet_output((tcp_descriptor*)data, (HANDLE)event_sig);
+ if (res != -1) {
+ tcp_inet_ose_dispatch_signals((tcp_descriptor*)data,
+ prev_select_state,event_sig);
+ free_buf(&event_sig);
+ event_sig = erl_drv_ose_get_signal(event);
+ } else {
+ /* NOTE: here the event object could have been deallocated!!!!
+ inet_stop_select is called when doing driver_select(ERL_DRV_USE,0)
+ */
+ free_buf(&event_sig);
+ return;
+ }
+ }
+}
+
+static void tcp_inet_drv_input_ose(ErlDrvData data, ErlDrvEvent event)
+{
+ union SIGNAL *event_sig = erl_drv_ose_get_signal(event);
+
+ while (event_sig) {
+ int prev_select_state = INETP((tcp_descriptor*)data)->select_state;
+ int res = tcp_inet_input((tcp_descriptor*)data, (HANDLE)event);
+ if (res != -1) {
+ tcp_inet_ose_dispatch_signals((tcp_descriptor*)data, prev_select_state,
+ event_sig);
+ free_buf(&event_sig);
+ event_sig = erl_drv_ose_get_signal(event);
+ } else {
+ free_buf(&event_sig);
+ return;
+ }
+ }
+}
+#else
static void tcp_inet_drv_output(ErlDrvData data, ErlDrvEvent event)
{
(void)tcp_inet_output((tcp_descriptor*)data, (HANDLE)event);
@@ -10265,6 +10815,7 @@ static void tcp_inet_drv_input(ErlDrvData data, ErlDrvEvent event)
{
(void)tcp_inet_input((tcp_descriptor*)data, (HANDLE)event);
}
+#endif
/* socket ready for output:
** 1. INET_STATE_CONNECTING => non block connect ?
@@ -10330,6 +10881,13 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
ssize_t n;
SysIOVec* iov;
+#ifdef __OSE__
+ /* For the large-buffer case, the amount of data that the stack is
+ able to send out (received in the .extra field) should be passed
+ down to writev_fallback */
+ n = event ? ((union SIGNAL*)event)->async.event.extra : 0;
+#endif
+
if ((iov = driver_peekq(ix, &vsize)) == NULL) {
sock_select(INETP(desc), FD_WRITE, 0);
send_empty_out_q_msgs(INETP(desc));
@@ -10341,8 +10899,8 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s, iov, vsize, &n, 0))) {
write_error:
if ((sock_errno() != ERRNO_BLOCK) && (sock_errno() != EINTR)) {
- DEBUGF(("tcp_inet_output(%ld): sock_sendv(%d) errno = %d\r\n",
- (long)desc->inet.port, vsize, sock_errno()));
+ DEBUGF(("tcp_inet_output(%ld): sock_sendv(%d) errno = %d (errno %d)\r\n",
+ (long)desc->inet.port, vsize, sock_errno(), errno));
ret = tcp_send_error(desc, sock_errno());
goto done;
}
@@ -10355,6 +10913,12 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
sizes > (max 32 bit signed int) */
size_t howmuch = 0x7FFFFFFF; /* max signed 32 bit */
int x;
+#ifdef __OSE__
+ /* For EWOULDBLOCK, sock_sendv returns 0, so we have to make sure that
+ was not the case */
+ if(sock_errno() == ERRNO_BLOCK)
+ goto done;
+#endif
for(x = 0; x < vsize && iov[x].iov_len == 0; ++x)
;
if (x < vsize) {
@@ -10499,6 +11063,7 @@ static udp_descriptor* sctp_inet_copy(udp_descriptor* desc, SOCKET s, int* err)
+#ifdef HAVE_UDP
static int packet_inet_init()
{
return 0;
@@ -10529,6 +11094,7 @@ static ErlDrvData udp_inet_start(ErlDrvPort port, char *args)
set_default_msgq_limits(port);
return data;
}
+#endif
#ifdef HAVE_SCTP
static ErlDrvData sctp_inet_start(ErlDrvPort port, char *args)
@@ -10539,6 +11105,7 @@ static ErlDrvData sctp_inet_start(ErlDrvPort port, char *args)
}
#endif
+#ifdef HAVE_UDP
static void packet_inet_stop(ErlDrvData e)
{
/* There should *never* be any "empty out q" subscribers on
@@ -10627,10 +11194,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
return replen;
- case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */
SOCKET s;
+ int bound;
DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port));
- if (len != 6) {
+ if (len != 6 && len != 10) {
return ctl_error(EINVAL, rbuf, rsize);
}
switch (buf[0]) {
@@ -10655,7 +11223,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
return ctl_error(EINVAL, rbuf, rsize);
}
s = (SOCKET)get_int32(buf+2);
- replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize);
+
+ if (len == 6) bound = 1;
+ else bound = get_int32(buf+2+4);
+
+ replen = inet_ctl_fdopen(desc, af, type, s, bound, rbuf, rsize);
if ((*rbuf)[0] != INET_REP_ERROR) {
if (desc->active)
@@ -11049,7 +11621,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
else
inet_reply_ok(desc);
}
-
+#endif
#ifdef __WIN32__
static void packet_inet_event(ErlDrvData e, ErlDrvEvent event)
@@ -11071,6 +11643,7 @@ static void packet_inet_event(ErlDrvData e, ErlDrvEvent event)
#endif
+#ifdef HAVE_UDP
static void packet_inet_drv_input(ErlDrvData e, ErlDrvEvent event)
{
(void) packet_inet_input((udp_descriptor*)e, (HANDLE)event);
@@ -11327,6 +11900,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
DEBUGF(("packet_inet_output(%ld) }\r\n", (long)desc->port));
return ret;
}
+#endif
/*---------------------------------------------------------------------------*/
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index 3fe5d282dc..3143e4511d 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -21,6 +21,9 @@
* ZLib interface for erlang
*
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include <stdio.h>
#include <zlib.h>
#include <errno.h>
diff --git a/erts/emulator/drivers/unix/multi_drv.c b/erts/emulator/drivers/unix/multi_drv.c
index 822c96730c..724d325ed5 100644
--- a/erts/emulator/drivers/unix/multi_drv.c
+++ b/erts/emulator/drivers/unix/multi_drv.c
@@ -20,7 +20,7 @@
/* Purpose: Multidriver interface
This is an example of a driver which allows multiple instances of itself.
I.e have one erlang process execute open_port(multi......) and
- at the same time have an other erlang process open an other port
+ at the same time have another erlang process open another port
running multi there as well.
*/
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 42f41c5f3d..878beb055b 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -360,7 +360,12 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
int fd;
int mode; /* Open mode. */
- if (stat(name, &statbuf) >= 0 && !ISREG(statbuf)) {
+ if (stat(name, &statbuf) < 0) {
+ /* statbuf is undefined: if the caller depends on it,
+ i.e. invoke_read_file(), fail the call immediately */
+ if (pSize && flags == EFILE_MODE_READ)
+ return check_error(-1, errInfo);
+ } else if (!ISREG(statbuf)) {
/*
* For UNIX only, here is some ugly code to allow
* /dev/null to be opened as a file.
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index 480ba23239..7e4043fc1b 100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -29,12 +29,27 @@
#include <wchar.h>
#include "erl_efile.h"
+#define DBG_TRACE_MASK 0
+/* 1 = file name ops
+ * 2 = file descr ops
+ * 4 = errors
+ * 8 = path name conversion
+ */
+#if !DBG_TRACE_MASK
+# define DBG_TRACE(M,S)
+# define DBG_TRACE1(M,FMT,A)
+# define DBG_TRACE2(M,FMT,A,B)
+#else
+# define DBG_TRACE(M,S) do { if ((M)&DBG_TRACE_MASK) fwprintf(stderr, L"DBG_TRACE %d: %s\r\n", __LINE__, (WCHAR*)(S)); }while(0)
+# define DBG_TRACE1(M,FMT,A) do { if ((M)&DBG_TRACE_MASK) fwprintf(stderr, L"DBG_TRACE %d: " L##FMT L"\r\n", __LINE__, (A)); }while(0)
+# define DBG_TRACE2(M,FMT,A,B) do { if ((M)&DBG_TRACE_MASK) fwprintf(stderr, L"DBG_TRACE %d: " L##FMT L"\r\n", __LINE__, (A), (B)); }while(0)
+#endif
+
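
The DBG_TRACE macros compile to nothing when DBG_TRACE_MASK is 0; with a non-zero mask they print to stderr together with the source line for the selected categories. A minimal standalone sketch of the same mask-gated trace technique (hypothetical macro and mask):

#include <stdio.h>

/* Illustrative mask-gated trace macro, same shape as DBG_TRACE1. */
#define TRACE_MASK 1                    /* 0 compiles all tracing away */

#if !TRACE_MASK
# define TRACE1(M, FMT, A) ((void)0)
#else
# define TRACE1(M, FMT, A) \
    do { if ((M) & TRACE_MASK) \
             fprintf(stderr, "TRACE %d: " FMT "\n", __LINE__, (A)); } while (0)
#endif

int main(void)
{
    TRACE1(1, "file name op on %s", "some_file");   /* printed: bit 1 enabled */
    TRACE1(2, "descriptor op %d", 7);               /* masked out, no output */
    return 0;
}
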
/*
* Microsoft-specific function to map a WIN32 error code to a Posix errno.
*/
#define ISSLASH(a) ((a) == L'\\' || (a) == L'/')
-
#define ISDIR(st) (((st).st_mode&S_IFMT) == S_IFDIR)
#define ISREG(st) (((st).st_mode&S_IFMT) == S_IFREG)
@@ -69,10 +84,92 @@
static int check_error(int result, Efile_error* errInfo);
static int set_error(Efile_error* errInfo);
+static int set_os_errno(Efile_error* errInfo, DWORD os_errno);
static int is_root_unc_name(const WCHAR *path);
static int extract_root(WCHAR *name);
static unsigned short dos_to_posix_mode(int attr, const WCHAR *name);
+
+struct wpath_tmp_buffer {
+ struct wpath_tmp_buffer* next;
+ WCHAR buffer[1];
+};
+
+typedef struct {
+ Efile_error* errInfo;
+ struct wpath_tmp_buffer* buf_list;
+}Efile_call_state;
+
+static void call_state_init(Efile_call_state* state, Efile_error* errInfo)
+{
+ state->errInfo = errInfo;
+ state->buf_list = NULL;
+}
+static WCHAR* wpath_tmp_alloc(Efile_call_state* state, size_t len)
+{
+ size_t sz = offsetof(struct wpath_tmp_buffer, buffer)
+ + (len+1)*sizeof(WCHAR);
+ struct wpath_tmp_buffer* p = driver_alloc(sz);
+ p->next = state->buf_list;
+ state->buf_list = p;
+ return p->buffer;
+}
+static void call_state_free(Efile_call_state* state)
+{
+ while(state->buf_list) {
+ struct wpath_tmp_buffer* next = state->buf_list->next;
+ driver_free(state->buf_list);
+ state->buf_list = next;
+ }
+}
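
All the wide-path helpers below allocate their temporary buffers through wpath_tmp_alloc, which links each buffer onto the per-call state so that a single call_state_free at the end of every efile_* entry point releases them all. A standalone model of that per-call arena (hypothetical names, plain malloc/free instead of driver_alloc/driver_free):

#include <stdlib.h>

/* Illustrative per-call arena: each allocation is linked to the call state
 * and everything is freed in one sweep, as wpath_tmp_alloc/call_state_free do.
 * Payloads are assumed to need no stricter alignment than a pointer. */
struct arena_node { struct arena_node *next; };
struct arena      { struct arena_node *head; };

void *arena_alloc(struct arena *a, size_t bytes)
{
    struct arena_node *n = malloc(sizeof(struct arena_node) + bytes);
    if (!n)
        return NULL;
    n->next = a->head;
    a->head = n;
    return n + 1;                   /* payload starts right after the link */
}

void arena_free(struct arena *a)
{
    while (a->head) {
        struct arena_node *next = a->head->next;
        free(a->head);
        a->head = next;
    }
}
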
+static WCHAR* get_cwd_wpath_tmp(Efile_call_state* state)
+{
+ WCHAR dummy;
+ DWORD size = GetCurrentDirectoryW(0, &dummy);
+ WCHAR* ret = NULL;
+
+ if (size) {
+ ret = wpath_tmp_alloc(state, size);
+ if (!GetCurrentDirectoryW(size, ret)) {
+ ret = NULL;
+ }
+ }
+ return ret;
+}
+static WCHAR* get_full_wpath_tmp(Efile_call_state* state,
+ const WCHAR* file,
+ WCHAR** file_part,
+ DWORD extra)
+{
+ WCHAR dummy;
+ DWORD size = GetFullPathNameW(file, 0, &dummy, NULL);
+ WCHAR* ret = NULL;
+
+ if (size) {
+ int ok;
+ ret = wpath_tmp_alloc(state, size + extra);
+ if (file_part) {
+ ok = (GetFullPathNameW(file, size, ret, file_part) != 0);
+ }
+ else {
+ ok = (_wfullpath(ret, file, size) != NULL);
+ }
+ if (!ok) {
+ ret = NULL;
+ }
+ }
+ return ret;
+}
+
+static void ensure_wpath_max(Efile_call_state* state, WCHAR** pathp, size_t max);
+static int do_rmdir(Efile_call_state*, char* name);
+static int do_rename(Efile_call_state*, char* src, char* dst);
+static int do_readdir(Efile_call_state*, char* name, EFILE_DIR_HANDLE*, char* buffer, size_t *size);
+static int do_fileinfo(Efile_call_state*, Efile_info*, char* orig_name, int info_for_link);
+static char* do_readlink(Efile_call_state*, char* name, char* buffer, size_t size);
+static int do_altname(Efile_call_state*, char* orig_name, char* buffer, size_t size);
+
+
static int errno_map(DWORD last_error) {
switch (last_error) {
@@ -154,6 +251,8 @@ static int errno_map(DWORD last_error) {
return EAGAIN;
case ERROR_CANT_RESOLVE_FILENAME:
return EMLINK;
+ case ERROR_PRIVILEGE_NOT_HELD:
+ return EPERM;
case ERROR_ARENA_TRASHED:
case ERROR_INVALID_BLOCK:
case ERROR_BAD_ENVIRONMENT:
@@ -176,11 +275,23 @@ check_error(int result, Efile_error* errInfo)
if (result < 0) {
errInfo->posix_errno = errno;
errInfo->os_errno = GetLastError();
+ DBG_TRACE2(4, "ERROR os_error=%d errno=%d @@@@@@@@@@@@@@@@@@@@@@@@@@@@",
+ errInfo->os_errno, errInfo->posix_errno);
return 0;
}
return 1;
}
+static void
+save_last_error(Efile_error* errInfo)
+{
+ errInfo->posix_errno = errno;
+ errInfo->os_errno = GetLastError();
+ DBG_TRACE2(4, "ERROR os_error=%d errno=%d $$$$$$$$$$$$$$$$$$$$$$$$$$$$$",
+ errInfo->os_errno, errInfo->posix_errno);
+}
+
+
/*
* Fills the provided error information structure with information
* with the error code given by GetLastError() and its corresponding
@@ -192,7 +303,18 @@ check_error(int result, Efile_error* errInfo)
static int
set_error(Efile_error* errInfo)
{
- errInfo->posix_errno = errno_map(errInfo->os_errno = GetLastError());
+ set_os_errno(errInfo, GetLastError());
+ return 0;
+}
+
+
+static int
+set_os_errno(Efile_error* errInfo, DWORD os_errno)
+{
+ errInfo->os_errno = os_errno;
+ errInfo->posix_errno = errno_map(os_errno);
+ DBG_TRACE2(4, "ERROR os_error=%d errno=%d ############################",
+ errInfo->os_errno, errInfo->posix_errno);
return 0;
}
@@ -226,21 +348,151 @@ win_writev(Efile_error* errInfo,
}
+/* Check '*pathp' and convert it, if needed, to something that Windows will accept.
+ * Typically use a UNC path with the \\?\ prefix if the absolute path is longer than 260.
+ */
+static void ensure_wpath(Efile_call_state* state, WCHAR** pathp)
+{
+ ensure_wpath_max(state, pathp, MAX_PATH);
+}
+
+static void ensure_wpath_max(Efile_call_state* state, WCHAR** pathp, size_t max)
+{
+ WCHAR* path = *pathp;
+ WCHAR* p;
+ size_t len = wcslen(path);
+ int unc_fixup = 0;
+
+ if (path[0] == 0) {
+ DBG_TRACE(8, L"Let empty path pass through");
+ return;
+ }
+
+ DBG_TRACE1(8,"IN: %s", path);
+
+ if (path[1] == L':' && ISSLASH(path[2])) { /* absolute path */
+ if (len >= max) {
+ WCHAR *src, *dst;
+
+ *pathp = wpath_tmp_alloc(state, 4+len+1);
+ dst = *pathp;
+ wcscpy(dst, L"\\\\?\\");
+ for (src=path,dst+=4; *src; src++) {
+ if (*src == L'/') {
+ if (dst[-1] != L'\\') {
+ *dst++ = L'\\';
+ }
+ /*else ignore redundant slashes */
+ }
+ else
+ *dst++ = *src;
+ }
+ *dst = 0;
+ unc_fixup = 1;
+ }
+ }
+ else if (!(ISSLASH(path[0]) && ISSLASH(path[1]))) { /* relative path */
+ DWORD cwdLen = GetCurrentDirectoryW(0, NULL);
+ DWORD absLen = cwdLen + 1 + len;
+ if (absLen >= max) {
+ WCHAR *fullPath = wpath_tmp_alloc(state, 4+4+absLen);
+ DWORD fullLen;
+
+ fullLen = GetFullPathNameW(path, 4 + absLen, fullPath+4, NULL);
+ if (fullLen >= 4+absLen) {
+ *pathp = path;
+ DBG_TRACE2(8,"ensure_wpath FAILED absLen=%u %s", (int)absLen, path);
+ return;
+ }
+ /* GetFullPathNameW can return paths longer than MAX_PATH without the \\?\ prefix.
+ * At least seen on Windows 7. Go figure...
+ */
+ if (fullLen >= max && wcsncmp(fullPath+4, L"\\\\?\\", 4) != 0) {
+ wcsncpy(fullPath, L"\\\\?\\", 4);
+ *pathp = fullPath;
+ }
+ else {
+ *pathp = fullPath + 4;
+ }
+ }
+ }
+
+ if (unc_fixup) {
+ WCHAR* endp;
+
+ p = *pathp;
+ len = wcslen(p);
+ endp = p + len;
+ if (len > 4) {
+ p += 4;
+ while (*p) {
+ if (p[0] == L'\\' && p[1] == L'.') {
+ if (p[2] == L'\\' || !p[2]) { /* single dot */
+ wmemmove(p, p+2, (&endp[1] - &p[2]));
+ endp -= 2;
+ }
+ else if (p[2] == L'.' && (p[3] == L'\\' || !p[3])) { /* double dot */
+ WCHAR* r;
+ for (r=p-1; *r == L'\\'; --r)
+ /*skip redundant slashes*/;
+ for (; *r != L'\\'; --r)
+ /*find start of prev directory*/;
+ if (r < *pathp + 6)
+ break;
+ wmemmove(r, p+3, (&endp[1] - &p[3]));
+ p = r;
+ }
+ else p += 3;
+ }
+ else ++p;
+ }
+ }
+ }
+ DBG_TRACE1(8,"OUT: %s", *pathp);
+}
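
ensure_wpath_max leaves short paths untouched and rewrites long absolute paths into the \\?\ form, converting forward slashes and collapsing '.' and '..' components itself because the \\?\ prefix turns off Windows' own path normalization. A standalone sketch of just the prefix-and-slash-conversion step (hypothetical function; the dot-folding and the relative-path branch are omitted):

#include <stdlib.h>
#include <wchar.h>

/* Illustrative: prefix a long absolute path ("C:\...") with \\?\ and turn
 * '/' into '\', skipping redundant slashes. The caller frees the result. */
wchar_t *to_unc_path(const wchar_t *path)
{
    size_t len = wcslen(path);
    wchar_t *out = malloc((4 + len + 1) * sizeof(wchar_t));
    wchar_t *dst = out;
    const wchar_t *src;

    if (!out)
        return NULL;
    wcscpy(dst, L"\\\\?\\");
    dst += 4;
    for (src = path; *src; src++) {
        if (*src == L'/') {
            if (dst[-1] != L'\\')
                *dst++ = L'\\';
        } else {
            *dst++ = *src;
        }
    }
    *dst = L'\0';
    return out;
}
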
int
efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to create. */
{
- return check_error(_wmkdir((WCHAR *) name), errInfo);
+ Efile_call_state state;
+ WCHAR* wname = (WCHAR*)name;
+ int ret;
+
+ DBG_TRACE(1, name);
+ call_state_init(&state, errInfo);
+ ensure_wpath_max(&state, &wname, 248); /* Yes, 248 limit for normal paths */
+
+ ret = (int) CreateDirectoryW(wname, NULL);
+ if (!ret)
+ set_error(errInfo);
+
+ call_state_free(&state);
+ return ret;
}
int
efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to delete. */
{
+ Efile_call_state state;
+ int ret;
+
+ DBG_TRACE(1, name);
+ call_state_init(&state, errInfo);
+ ret = do_rmdir(&state, name);
+ call_state_free(&state);
+ return ret;
+}
+
+static int do_rmdir(Efile_call_state* state, char* name)
+{
OSVERSIONINFO os;
DWORD attr;
WCHAR *wname = (WCHAR *) name;
+ WCHAR *buffer = NULL;
+
+ ensure_wpath(state, &wname);
if (RemoveDirectoryW(wname) != FALSE) {
return 1;
@@ -270,10 +522,9 @@ efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
if (os.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) {
HANDLE handle;
WIN32_FIND_DATAW data;
- WCHAR buffer[2*MAX_PATH];
- int len;
+ int len = wcslen(wname);
- len = wcslen(wname);
+ buffer = wpath_tmp_alloc(state, len + 4);
wcscpy(buffer, wname);
if (buffer[0] && buffer[len-1] != L'\\' && buffer[len-1] != L'/') {
wcscat(buffer, L"\\");
@@ -311,16 +562,30 @@ efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
}
end:
- return check_error(-1, errInfo);
+ save_last_error(state->errInfo);
+ return 0;
}
int
efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of file to delete. */
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(1, name);
+ call_state_init(&state, errInfo);
+ ret = do_delete_file(&state, name);
+ call_state_free(&state);
+ return ret;
+}
+
+static int do_delete_file(Efile_call_state* state, char* name)
+{
DWORD attr;
WCHAR *wname = (WCHAR *) name;
+ ensure_wpath(state, &wname);
+
if (DeleteFileW(wname) != FALSE) {
return 1;
}
@@ -359,7 +624,7 @@ efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
errno = EACCES;
}
- return check_error(-1, errInfo);
+ return check_error(-1, state->errInfo);
}
/*
@@ -393,14 +658,29 @@ efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
*/
int
-efile_rename(Efile_error* errInfo, /* Where to return error codes. */
- char* src, /* Original name. */
- char* dst) /* New name. */
+efile_rename(Efile_error* errInfo, char* src, char* dst)
+{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(1, src);
+ call_state_init(&state, errInfo);
+ ret = do_rename(&state, src, dst);
+ call_state_free(&state);
+ return ret;
+}
+
+static int
+do_rename(Efile_call_state* state,
+ char* src, /* Original name. */
+ char* dst) /* New name. */
{
DWORD srcAttr, dstAttr;
WCHAR *wsrc = (WCHAR *) src;
WCHAR *wdst = (WCHAR *) dst;
-
+
+ ensure_wpath(state, &wsrc);
+ ensure_wpath(state, &wdst);
+
if (MoveFileW(wsrc, wdst) != FALSE) {
return 1;
}
@@ -417,23 +697,27 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
if (errno == EBADF) {
errno = EACCES;
- return check_error(-1, errInfo);
+ return check_error(-1, state->errInfo);
}
if (errno == EACCES) {
decode:
if (srcAttr & FILE_ATTRIBUTE_DIRECTORY) {
- WCHAR srcPath[MAX_PATH], dstPath[MAX_PATH];
+ WCHAR *srcPath, *dstPath;
WCHAR *srcRest, *dstRest;
int size;
- size = GetFullPathNameW(wsrc, MAX_PATH, srcPath, &srcRest);
- if ((size == 0) || (size > MAX_PATH)) {
- return check_error(-1, errInfo);
+ srcPath = get_full_wpath_tmp(state, wsrc, &srcRest, 0);
+ if (!srcPath) {
+ save_last_error(state->errInfo);
+ return 0;
}
- size = GetFullPathNameW(wdst, MAX_PATH, dstPath, &dstRest);
- if ((size == 0) || (size > MAX_PATH)) {
- return check_error(-1, errInfo);
+
+ dstPath = get_full_wpath_tmp(state, wdst, &dstRest, 0);
+ if (!dstPath) {
+ save_last_error(state->errInfo);
+ return 0;
}
+
if (srcRest == NULL) {
srcRest = srcPath + wcslen(srcPath);
}
@@ -538,14 +822,16 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
* put temp file back to old name.
*/
- WCHAR tempName[MAX_PATH];
- int result, size;
+ WCHAR *tempName;
+ int result;
WCHAR *rest;
- size = GetFullPathNameW(wdst, MAX_PATH, tempName, &rest);
- if ((size == 0) || (size > MAX_PATH) || (rest == NULL)) {
- return check_error(-1, errInfo);
+ tempName = get_full_wpath_tmp(state, wdst, &rest, 14);
+ if (!tempName || !rest) {
+ save_last_error(state->errInfo);
+ return 0;
}
+
*rest = L'\0';
result = -1;
if (GetTempFileNameW(tempName, L"erlr", 0, tempName) != 0) {
@@ -578,7 +864,6 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
/*
* Decode the EACCES to a more meaningful error.
*/
-
goto decode;
}
}
@@ -586,16 +871,20 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
}
}
}
- return check_error(-1, errInfo);
+ return check_error(-1, state->errInfo);
}
int
efile_chdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to make current. */
-{
- int success = check_error(_wchdir((WCHAR *) name), errInfo);
- if (!success && errInfo->posix_errno == EINVAL)
- /* POSIXification of errno */
+{
+ /* We don't even try to handle long paths here
+ * as current working directory is always limited to MAX_PATH
+ * even if we use UNC paths and SetCurrentDirectoryW()
+ */
+ int success = check_error(_wchdir((WCHAR *) name), errInfo);
+ if (!success && errInfo->posix_errno == EINVAL)
+ /* POSIXification of errno */
errInfo->posix_errno = ENOENT;
return success;
}
@@ -608,28 +897,45 @@ efile_getdcwd(Efile_error* errInfo, /* Where to return error codes. */
{
WCHAR *wbuffer = (WCHAR *) buffer;
size_t wbuffer_size = size / 2;
- if (_wgetdcwd(drive, wbuffer, wbuffer_size) == NULL)
+ DBG_TRACE(1, L"#getdcwd#");
+ if (_wgetdcwd(drive, wbuffer, wbuffer_size) == NULL) {
return check_error(-1, errInfo);
+ }
+ DBG_TRACE1(8, "getdcwd OS=%s", wbuffer);
+ if (wcsncmp(wbuffer, L"\\\\?\\", 4) == 0) {
+ wmemmove(wbuffer, wbuffer+4, wcslen(wbuffer+4)+1);
+ }
for ( ; *wbuffer; wbuffer++)
if (*wbuffer == L'\\')
*wbuffer = L'/';
+ DBG_TRACE1(8, "getdcwd ERLANG=%s", (WCHAR*)buffer);
return 1;
}
int
-efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
- char* name, /* Name of directory to list */
- EFILE_DIR_HANDLE* dir_handle, /* Handle of opened directory or NULL */
- char* buffer, /* Buffer to put one filename in */
- size_t *size) /* in-out size of buffer/size of filename excluding zero
- termination in bytes*/
+efile_readdir(Efile_error* errInfo, char* name, EFILE_DIR_HANDLE* dir_handle,
+ char* buffer, size_t *size)
+{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(dir_handle?2:1, name);
+ call_state_init(&state, errInfo);
+ ret = do_readdir(&state, name, dir_handle, buffer, size);
+ call_state_free(&state);
+ return ret;
+}
+
+static int do_readdir(Efile_call_state* state,
+ char* name, /* Name of directory to list */
+ EFILE_DIR_HANDLE* dir_handle, /* Handle of opened directory or NULL */
+ char* buffer, /* Buffer to put one filename in */
+ size_t *size) /* in-out size of buffer/size of filename excluding zero
+ termination in bytes*/
{
HANDLE dir; /* Handle to directory. */
- WCHAR wildcard[MAX_PATH]; /* Wildcard to search for. */
WIN32_FIND_DATAW findData; /* Data found by FindFirstFile() or FindNext(). */
/* Alignment is not honored, this works on x86 because of alignment fixup by processor.
Not perfect, but faster than aligning by hand (really) */
- WCHAR *wname = (WCHAR *) name;
WCHAR *wbuffer = (WCHAR *) buffer;
/*
@@ -637,13 +943,15 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
*/
if (*dir_handle == NULL) {
- int length = wcslen(wname);
+ WCHAR *wname = (WCHAR *) name;
+ WCHAR* wildcard;
+ int length;
WCHAR* s;
- if (length+3 >= MAX_PATH) {
- errno = ENAMETOOLONG;
- return check_error(-1, errInfo);
- }
+ ensure_wpath_max(state, &wname, MAX_PATH-2);
+ length = wcslen(wname);
+
+ wildcard = wpath_tmp_alloc(state, length+3);
wcscpy(wildcard, wname);
s = wildcard+length-1;
@@ -653,8 +961,10 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
*++s = L'\0';
DEBUGF(("Reading %ws\n", wildcard));
dir = FindFirstFileW(wildcard, &findData);
- if (dir == INVALID_HANDLE_VALUE)
- return set_error(errInfo);
+ if (dir == INVALID_HANDLE_VALUE) {
+ set_error(state->errInfo);
+ return 0;
+ }
*dir_handle = (EFILE_DIR_HANDLE) dir;
if (!IS_DOT_OR_DOTDOT(findData.cFileName)) {
@@ -664,7 +974,6 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
}
}
-
/*
* Retrieve the name of the next file using the directory handle.
*/
@@ -681,24 +990,36 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
}
if (GetLastError() == ERROR_NO_MORE_FILES) {
- FindClose(dir);
- errInfo->posix_errno = errInfo->os_errno = 0;
- return 0;
+ state->errInfo->posix_errno = state->errInfo->os_errno = 0;
+ }
+ else {
+ set_error(state->errInfo);
}
-
- set_error(errInfo);
FindClose(dir);
return 0;
}
}
int
-efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
- char* name, /* Name of directory to open. */
- int flags, /* Flags to use for opening. */
- int* pfd, /* Where to store the file descriptor. */
- Sint64* pSize) /* Where to store the size of the file. */
+efile_openfile(Efile_error* errInfo, char* name, int flags, int* pfd, Sint64* pSize)
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE1(1, "openfile(%s)", name);
+ call_state_init(&state, errInfo);
+ ret = do_openfile(&state, name, flags, pfd, pSize);
+ call_state_free(&state);
+ return ret;
+}
+
+static
+int do_openfile(Efile_call_state* state, /* Where to return error codes. */
+ char* name, /* Name of directory to open. */
+ int flags, /* Flags to use for opening. */
+ int* pfd, /* Where to store the file descriptor. */
+ Sint64* pSize) /* Where to store the size of the file. */
+{
+ Efile_error* errInfo = state->errInfo;
BY_HANDLE_FILE_INFORMATION fileInfo; /* File information from a handle. */
HANDLE fd; /* Handle to open file. */
DWORD access; /* Access mode: GENERIC_READ, GENERIC_WRITE. */
@@ -735,6 +1056,7 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
if (flags & EFILE_MODE_EXCL) {
crFlags = CREATE_NEW;
}
+ ensure_wpath(state, &wname);
fd = CreateFileW(wname, access,
FILE_SHARE_FLAGS,
NULL, crFlags, flagsAndAttrs, NULL);
@@ -777,34 +1099,56 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
}
int
-efile_may_openfile(Efile_error* errInfo, char *name) {
+efile_may_openfile(Efile_error* errInfo, char *name)
+{
+ Efile_call_state state;
WCHAR *wname = (WCHAR *) name;
DWORD attr;
+ int ret;
+ DBG_TRACE(1, name);
+ call_state_init(&state, errInfo);
+ ensure_wpath(&state, &wname);
if ((attr = GetFileAttributesW(wname)) == INVALID_FILE_ATTRIBUTES) {
errno = ENOENT;
- return check_error(-1, errInfo);
+ ret = check_error(-1, errInfo);
}
-
- if (attr & FILE_ATTRIBUTE_DIRECTORY) {
+ else if (attr & FILE_ATTRIBUTE_DIRECTORY) {
errno = EISDIR;
- return check_error(-1, errInfo);
+ ret = check_error(-1, errInfo);
}
- return 1;
+ else ret = 1;
+
+ call_state_free(&state);
+ return ret;
}
void
efile_closefile(fd)
int fd; /* File descriptor for file to close. */
{
+ DBG_TRACE(2, L"");
CloseHandle((HANDLE) fd);
}
+FILE* efile_wfopen(const WCHAR* name, const WCHAR* mode)
+{
+ Efile_call_state state;
+ Efile_error dummy;
+ FILE* f;
+ call_state_init(&state, &dummy);
+ ensure_wpath(&state, (WCHAR**)&name);
+ f = _wfopen(name, mode);
+ call_state_free(&state);
+ return f;
+}
+
int
efile_fdatasync(errInfo, fd)
Efile_error* errInfo; /* Where to return error codes. */
int fd; /* File descriptor for file to sync. */
{
+ DBG_TRACE(2, L"");
/* Not available in Windows, just call regular fsync */
return efile_fsync(errInfo, fd);
}
@@ -814,6 +1158,7 @@ efile_fsync(errInfo, fd)
Efile_error* errInfo; /* Where to return error codes. */
int fd; /* File descriptor for file to sync. */
{
+ DBG_TRACE(2, L"");
if (!FlushFileBuffers((HANDLE) fd)) {
return check_error(-1, errInfo);
}
@@ -824,64 +1169,87 @@ int
efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
char* orig_name, int info_for_link)
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(1, L"");
+ call_state_init(&state, errInfo);
+ ret = do_fileinfo(&state, pInfo, orig_name, info_for_link);
+ call_state_free(&state);
+ return ret;
+}
+
+static int
+do_fileinfo(Efile_call_state* state, Efile_info* pInfo,
+ char* orig_name, int info_for_link)
+{
+ Efile_error* errInfo = state->errInfo;
HANDLE findhandle; /* Handle returned by FindFirstFile(). */
WIN32_FIND_DATAW findbuf; /* Data return by FindFirstFile(). */
- WCHAR name[_MAX_PATH];
+ WCHAR* name = NULL;
+ WCHAR* win_path;
int name_len;
- WCHAR *path;
- WCHAR pathbuf[_MAX_PATH];
int drive; /* Drive for filename (1 = A:, 2 = B: etc). */
- WCHAR *worig_name = (WCHAR *) orig_name;
+ WCHAR *worig_name = (WCHAR *) orig_name;
+ ensure_wpath(state, &worig_name);
/* Don't allow wildcards to be interpreted by system */
- if (wcspbrk(worig_name, L"?*")) {
- enoent:
- errInfo->posix_errno = ENOENT;
- errInfo->os_errno = ERROR_FILE_NOT_FOUND;
- return 0;
- }
/*
* Move the name to a buffer and make sure to remove a trailing
* slash, because it causes FindFirstFile() to fail on Win95.
*/
- if ((name_len = wcslen(worig_name)) >= _MAX_PATH) {
- goto enoent;
- } else {
- wcscpy(name, worig_name);
- if (name_len > 2 && ISSLASH(name[name_len-1]) &&
- name[name_len-2] != L':') {
- name[name_len-1] = L'\0';
- }
+ name_len = wcslen(worig_name);
+
+ name = wpath_tmp_alloc(state, name_len+1);
+ wcscpy(name, worig_name);
+ if (name_len > 2 && ISSLASH(name[name_len-1]) &&
+ name[name_len-2] != L':') {
+ name[name_len-1] = L'\0';
}
-
+
+ win_path = name;
+ if (wcsncmp(name, L"\\\\?\\", 4) == 0) {
+ win_path += 4;
+ }
+
+ if (wcspbrk(win_path, L"?*")) {
+ enoent:
+ errInfo->posix_errno = ENOENT;
+ errInfo->os_errno = ERROR_FILE_NOT_FOUND;
+ return 0;
+ }
+
/* Try to get disk from name. If none, get current disk. */
- if (name[1] != L':') {
+ if (win_path[1] != L':') {
+ WCHAR* cwd_path = get_cwd_wpath_tmp(state);
drive = 0;
- if (GetCurrentDirectoryW(_MAX_PATH, pathbuf) &&
- pathbuf[1] == L':') {
- drive = towlower(pathbuf[0]) - L'a' + 1;
+ if (cwd_path[1] == L':') {
+ drive = towlower(cwd_path[0]) - L'a' + 1;
}
- } else if (*name && name[2] == L'\0') {
+ } else if (*win_path && win_path[2] == L'\0') {
/*
* X: and nothing more is an error.
*/
errInfo->posix_errno = ENOENT;
errInfo->os_errno = ERROR_FILE_NOT_FOUND;
return 0;
- } else
- drive = towlower(*name) - L'a' + 1;
+ } else {
+ drive = towlower(*win_path) - L'a' + 1;
+ }
findhandle = FindFirstFileW(name, &findbuf);
if (findhandle == INVALID_HANDLE_VALUE) {
+ WCHAR* path = NULL;
+
if (!(wcspbrk(name, L"./\\") &&
- (path = _wfullpath(pathbuf, name, _MAX_PATH)) &&
+ (path = get_full_wpath_tmp(state, name, NULL, 0)) &&
/* root dir. ('C:\') or UNC root dir. ('\\server\share\') */
((wcslen(path) == 3) || is_root_unc_name(path)) &&
(GetDriveTypeW(path) > 1) ) ) {
+
errInfo->posix_errno = ENOENT;
errInfo->os_errno = ERROR_FILE_NOT_FOUND;
return 0;
@@ -908,13 +1276,11 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
/*
* given that we know this is a symlink,
we should be able to find its target */
- WCHAR target_name[_MAX_PATH];
- if (efile_readlink(errInfo, (char *) name,
- (char *) target_name,
- _MAX_PATH * sizeof(WCHAR)) == 1) {
+ WCHAR* target_name = (WCHAR*) do_readlink(state, (char *) name, NULL, 0);
+ if (target_name) {
FindClose(findhandle);
- return efile_fileinfo(errInfo, pInfo,
- (char *) target_name, info_for_link);
+ return do_fileinfo(state, pInfo,
+ (char *) target_name, info_for_link);
}
}
@@ -922,6 +1288,10 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
{
HANDLE handle; /* Handle returned by CreateFile() */
BY_HANDLE_FILE_INFORMATION fileInfo; /* from CreateFile() */
+
+ /* We initialise nNumberOfLinks as GetFileInformationByHandle
+ does not always initialise this field */
+ fileInfo.nNumberOfLinks = 1;
if (handle = CreateFileW(name, GENERIC_READ, FILE_SHARE_FLAGS, NULL,
OPEN_EXISTING, 0, NULL)) {
GetFileInformationByHandle(handle, &fileInfo);
@@ -981,6 +1351,20 @@ efile_write_info(Efile_error* errInfo,
Efile_info* pInfo,
char* name)
{
+ Efile_call_state state;
+ int ret;
+ call_state_init(&state, errInfo);
+ ret = do_write_info(&state, pInfo, name);
+ call_state_free(&state);
+ return ret;
+}
+
+static int
+do_write_info(Efile_call_state* state,
+ Efile_info* pInfo,
+ char* name)
+{
+ Efile_error* errInfo = state->errInfo;
SYSTEMTIME timebuf;
FILETIME ModifyFileTime;
FILETIME AccessFileTime;
@@ -990,6 +1374,10 @@ efile_write_info(Efile_error* errInfo,
DWORD tempAttr;
WCHAR *wname = (WCHAR *) name;
+ DBG_TRACE(1, name);
+
+ ensure_wpath(state, &wname);
+
/*
* Get the attributes for the file.
*/
@@ -1066,7 +1454,9 @@ char* buf; /* Buffer to write. */
size_t count; /* Number of bytes to write. */
Sint64 offset; /* where to write it */
{
- int res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
+ int res;
+ DBG_TRACE(2, L"");
+ res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
if (res) {
return efile_write(errInfo, EFILE_MODE_WRITE, fd, buf, count);
} else {
@@ -1084,7 +1474,9 @@ char* buf; /* Buffer to read into. */
size_t count; /* Number of bytes to read. */
size_t* pBytesRead; /* Where to return number of bytes read. */
{
- int res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
+ int res;
+ DBG_TRACE(2, L"");
+ res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
if (res) {
return efile_read(errInfo, EFILE_MODE_READ, fd, buf, count, pBytesRead);
} else {
@@ -1106,6 +1498,7 @@ size_t count; /* Number of bytes to write. */
OVERLAPPED overlapped;
OVERLAPPED* pOverlapped = NULL;
+ DBG_TRACE(2, L"");
if (flags & EFILE_MODE_APPEND) {
memset(&overlapped, 0, sizeof(overlapped));
overlapped.Offset = 0xffffffff;
@@ -1135,6 +1528,7 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
OVERLAPPED overlapped;
OVERLAPPED* pOverlapped = NULL;
+ DBG_TRACE(2, L"");
ASSERT(iovcnt >= 0);
if (flags & EFILE_MODE_APPEND) {
@@ -1171,6 +1565,8 @@ size_t count; /* Number of bytes to read. */
size_t* pBytesRead; /* Where to return number of bytes read. */
{
DWORD nbytes = 0;
+
+ DBG_TRACE(2, L"");
if (!ReadFile((HANDLE) fd, buf, count, &nbytes, NULL))
return set_error(errInfo);
@@ -1190,6 +1586,7 @@ Sint64* new_location; /* Resulting new location in file. */
{
LARGE_INTEGER off, new_loc;
+ DBG_TRACE(2, L"");
switch (origin) {
case EFILE_SEEK_SET: origin = FILE_BEGIN; break;
case EFILE_SEEK_CUR: origin = FILE_CURRENT; break;
@@ -1221,6 +1618,7 @@ Efile_error* errInfo; /* Where to return error codes. */
int *fd; /* File descriptor for file to truncate. */
int flags;
{
+ DBG_TRACE(2, L"");
if (!SetEndOfFile((HANDLE) (*fd)))
return set_error(errInfo);
return 1;
@@ -1373,9 +1771,24 @@ dos_to_posix_mode(int attr, const WCHAR *name)
return uxmode;
}
+
int
efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(1, name);
+ call_state_init(&state, errInfo);
+ ret = !!do_readlink(&state, name, buffer, size);
+ call_state_free(&state);
+ return ret;
+}
+
+/* If buffer==0, return a buffer allocated by wpath_tmp_alloc()
+*/
+static char*
+do_readlink(Efile_call_state* state, char* name, char* buffer, size_t size)
+{
/*
* load dll and see if we have CreateSymbolicLink at runtime:
* (Vista only)
@@ -1383,6 +1796,9 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
HINSTANCE hModule = NULL;
WCHAR *wname = (WCHAR *) name;
WCHAR *wbuffer = (WCHAR *) buffer;
+ DWORD wsize = size / sizeof(WCHAR);
+ char* ret = NULL;
+
if ((hModule = LoadLibrary("kernel32.dll")) != NULL) {
typedef DWORD (WINAPI * GETFINALPATHNAMEBYHANDLEPTR)(
HANDLE hFile,
@@ -1393,58 +1809,84 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
GETFINALPATHNAMEBYHANDLEPTR pGetFinalPathNameByHandle =
(GETFINALPATHNAMEBYHANDLEPTR)GetProcAddress(hModule, "GetFinalPathNameByHandleW");
- if (pGetFinalPathNameByHandle == NULL) {
- FreeLibrary(hModule);
- } else {
+ if (pGetFinalPathNameByHandle != NULL) {
+ DWORD fileAttributes;
+ ensure_wpath(state, &wname);
/* first check if file is a symlink; {error, einval} otherwise */
- DWORD fileAttributes = GetFileAttributesW(wname);
+ fileAttributes = GetFileAttributesW(wname);
if ((fileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
- BOOLEAN success = 0;
+ DWORD success = 0;
HANDLE h = CreateFileW(wname, GENERIC_READ, FILE_SHARE_FLAGS, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
int len;
if(h != INVALID_HANDLE_VALUE) {
- success = pGetFinalPathNameByHandle(h, wbuffer, size / sizeof(WCHAR),0);
- /* GetFinalPathNameByHandle prepends path with "\\?\": */
- len = wcslen(wbuffer);
- wmemmove(wbuffer,wbuffer+4,len-3);
- if (len - 4 >= 2 && wbuffer[1] == L':' && wbuffer[0] >= L'A' &&
- wbuffer[0] <= L'Z') {
- wbuffer[0] = wbuffer[0] + L'a' - L'A';
+ if (!wbuffer) { /* dynamic allocation */
+ WCHAR dummy;
+ wsize = pGetFinalPathNameByHandle(h, &dummy, 0, 0);
+ if (wsize) {
+ wbuffer = wpath_tmp_alloc(state, wsize);
+ }
}
+ if (wbuffer
+ && (success = pGetFinalPathNameByHandle(h, wbuffer, wsize, 0))
+ && success < wsize) {
+ WCHAR* wp;
+
+ /* GetFinalPathNameByHandle prepends path with "\\?\": */
+ len = wcslen(wbuffer);
+ wmemmove(wbuffer,wbuffer+4,len-3);
+ if (len - 4 >= 2 && wbuffer[1] == L':' && wbuffer[0] >= L'A' &&
+ wbuffer[0] <= L'Z') {
+ wbuffer[0] = wbuffer[0] + L'a' - L'A';
+ }
- for ( ; *wbuffer; wbuffer++)
- if (*wbuffer == L'\\')
- *wbuffer = L'/';
+ for (wp=wbuffer ; *wp; wp++)
+ if (*wp == L'\\')
+ *wp = L'/';
+ }
CloseHandle(h);
- }
- FreeLibrary(hModule);
+ }
if (success) {
- return 1;
+ ret = (char*) wbuffer;
} else {
- return set_error(errInfo);
+ set_error(state->errInfo);
}
} else {
- FreeLibrary(hModule);
errno = EINVAL;
- return check_error(-1, errInfo);
+ save_last_error(state->errInfo);
}
+ goto done;
}
}
errno = ENOTSUP;
- return check_error(-1, errInfo);
+ save_last_error(state->errInfo);
+
+done:
+ if (hModule)
+ FreeLibrary(hModule);
+ return ret;
}
int
efile_altname(Efile_error* errInfo, char* orig_name, char* buffer, size_t size)
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE(1, orig_name);
+ call_state_init(&state, errInfo);
+ ret = do_altname(&state, orig_name, buffer, size);
+ call_state_free(&state);
+ return ret;
+}
+
+static int
+do_altname(Efile_call_state* state, char* orig_name, char* buffer, size_t size)
+{
WIN32_FIND_DATAW wfd;
HANDLE fh;
- WCHAR name[_MAX_PATH+1];
+ WCHAR* name;
int name_len;
- WCHAR* path;
- WCHAR pathbuf[_MAX_PATH+1]; /* Unclear weather GetCurrentDirectory will access one char after
- _MAX_PATH */
+ WCHAR* full_path = NULL;
WCHAR *worig_name = (WCHAR *) orig_name;
WCHAR *wbuffer = (WCHAR *) buffer;
int drive; /* Drive for filename (1 = A:, 2 = B: etc). */
@@ -1453,8 +1895,8 @@ efile_altname(Efile_error* errInfo, char* orig_name, char* buffer, size_t size)
if (wcspbrk(worig_name, L"?*")) {
enoent:
- errInfo->posix_errno = ENOENT;
- errInfo->os_errno = ERROR_FILE_NOT_FOUND;
+ state->errInfo->posix_errno = ENOENT;
+ state->errInfo->os_errno = ERROR_FILE_NOT_FOUND;
return 0;
}
@@ -1462,24 +1904,23 @@ efile_altname(Efile_error* errInfo, char* orig_name, char* buffer, size_t size)
* Move the name to a buffer and make sure to remove a trailing
* slash, because it causes FindFirstFile() to fail on Win95.
*/
-
- if ((name_len = wcslen(worig_name)) >= _MAX_PATH) {
- goto enoent;
- } else {
- wcscpy(name, worig_name);
- if (name_len > 2 && ISSLASH(name[name_len-1]) &&
- name[name_len-2] != L':') {
- name[name_len-1] = L'\0';
- }
+ ensure_wpath(state, &worig_name);
+ name_len = wcslen(worig_name);
+
+ name = wpath_tmp_alloc(state, name_len + 1);
+ wcscpy(name, worig_name);
+ if (name_len > 2 && ISSLASH(name[name_len-1]) &&
+ name[name_len-2] != L':') {
+ name[name_len-1] = L'\0';
}
/* Try to get disk from name. If none, get current disk. */
if (name[1] != L':') {
+ WCHAR* cwd_path = get_cwd_wpath_tmp(state);
drive = 0;
- if (GetCurrentDirectoryW(_MAX_PATH, pathbuf) &&
- pathbuf[1] == L':') {
- drive = towlower(pathbuf[0]) - L'a' + 1;
+ if (cwd_path[1] == L':') {
+ drive = towlower(cwd_path[0]) - L'a' + 1;
}
} else if (*name && name[2] == L'\0') {
/*
@@ -1491,13 +1932,15 @@ efile_altname(Efile_error* errInfo, char* orig_name, char* buffer, size_t size)
}
fh = FindFirstFileW(name,&wfd);
if (fh == INVALID_HANDLE_VALUE) {
+ DWORD fff_error = GetLastError();
if (!(wcspbrk(name, L"./\\") &&
- (path = _wfullpath(pathbuf, name, _MAX_PATH)) &&
+ (full_path = get_full_wpath_tmp(state, name, NULL, 0)) &&
/* root dir. ('C:\') or UNC root dir. ('\\server\share\') */
- ((wcslen(path) == 3) || is_root_unc_name(path)) &&
- (GetDriveTypeW(path) > 1) ) ) {
- errno = errno_map(GetLastError());
- return check_error(-1, errInfo);
+ ((wcslen(full_path) == 3) || is_root_unc_name(full_path)) &&
+ (GetDriveTypeW(full_path) > 1) ) ) {
+
+ set_os_errno(state->errInfo, fff_error);
+ return 0;
}
/*
* Root directories (such as C:\ or \\server\share\ are fabricated.
@@ -1518,17 +1961,37 @@ efile_altname(Efile_error* errInfo, char* orig_name, char* buffer, size_t size)
int
efile_link(Efile_error* errInfo, char* old, char* new)
{
+ Efile_call_state state;
WCHAR *wold = (WCHAR *) old;
WCHAR *wnew = (WCHAR *) new;
+ int ret;
+ DBG_TRACE(1, old);
+ call_state_init(&state, errInfo);
+ ensure_wpath(&state, &wold);
+ ensure_wpath(&state, &wnew);
if(!CreateHardLinkW(wnew, wold, NULL)) {
- return set_error(errInfo);
+ ret = set_error(errInfo);
}
- return 1;
+ else ret = 1;
+ call_state_free(&state);
+ return ret;
}
int
efile_symlink(Efile_error* errInfo, char* old, char* new)
{
+ Efile_call_state state;
+ int ret;
+ DBG_TRACE2(1, "symlink(%s <- %s)", old, new);
+ call_state_init(&state, errInfo);
+ ret = do_symlink(&state, old, new);
+ call_state_free(&state);
+ return ret;
+}
+
+static int
+do_symlink(Efile_call_state* state, char* old, char* new)
+{
/*
* Load dll and see if we have CreateSymbolicLink at runtime:
* (Vista only)
@@ -1536,6 +1999,8 @@ efile_symlink(Efile_error* errInfo, char* old, char* new)
HINSTANCE hModule = NULL;
WCHAR *wold = (WCHAR *) old;
WCHAR *wnew = (WCHAR *) new;
+
+ DBG_TRACE(1, old);
if ((hModule = LoadLibrary("kernel32.dll")) != NULL) {
typedef BOOLEAN (WINAPI * CREATESYMBOLICLINKFUNCPTR) (
LPCWSTR lpSymlinkFileName,
@@ -1547,6 +2012,9 @@ efile_symlink(Efile_error* errInfo, char* old, char* new)
"CreateSymbolicLinkW");
/* A for MBCS, W for UNICODE... char* above implies 'W'! */
if (pCreateSymbolicLink != NULL) {
+ ensure_wpath(state, &wold);
+ ensure_wpath(state, &wnew);
+ {
DWORD attr = GetFileAttributesW(wold);
int flag = (attr != INVALID_FILE_ATTRIBUTES &&
attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;
@@ -1557,19 +2025,21 @@ efile_symlink(Efile_error* errInfo, char* old, char* new)
if (success) {
return 1;
} else {
- return set_error(errInfo);
+ return set_error(state->errInfo);
}
+ }
} else
FreeLibrary(hModule);
}
errno = ENOTSUP;
- return check_error(-1, errInfo);
+ return check_error(-1, state->errInfo);
}
int
efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
Sint64 length, int advise)
{
+ DBG_TRACE(2, L"");
/* posix_fadvise is not available on Windows, do nothing */
errno = ERROR_SUCCESS;
return check_error(0, errInfo);
@@ -1578,6 +2048,7 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
int
efile_fallocate(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length)
{
+ DBG_TRACE(2, L"");
/* No file preallocation method available in Windows. */
errno = errno_map(ERROR_NOT_SUPPORTED);
SetLastError(ERROR_NOT_SUPPORTED);
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index 0de69a617f..a3219c7586 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -39,7 +39,10 @@ define(HANDLE_GOT_MBUF,`
jmp 2b')
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) movq $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+# define CALL_BIF(F) \
+ movq CSYM(F)@GOTPCREL(%rip), %r11; \
+ movq %r11, P_BIF_CALLEE(P); \
+ call CSYM(hipe_debug_bif_wrapper)
#else
# define CALL_BIF(F) call CSYM(F)
#endif'
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index bd8bc5ab6b..3ca9a1bcdb 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -25,6 +25,7 @@ include(`hipe/hipe_arm_asm.m4')
.text
.p2align 2
+ .arm
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define CALL_BIF(F) ldr r14, =F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
@@ -391,7 +392,14 @@ $1:
mov r0, P
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(0)
+#else
QUICK_CALL_RET($2,0)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -407,7 +415,14 @@ $1:
NBIF_ARG(r1,1,0)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(1)
+#else
QUICK_CALL_RET($2,1)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -424,7 +439,14 @@ $1:
NBIF_ARG(r2,2,1)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(2)
+#else
QUICK_CALL_RET($2,2)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -442,7 +464,14 @@ $1:
NBIF_ARG(r3,3,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(3)
+#else
QUICK_CALL_RET($2,3)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -466,7 +495,14 @@ $1:
NBIF_ARG(r3,5,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(5)
+#else
QUICK_CALL_RET($2,5)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -488,9 +524,16 @@ define(noproc_primop_interface_0,
#`define' HAVE_$1
.global $1
$1:
- /* XXX: this case is always trivial; how to suppress the branch? */
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(0)
+#else
+ /* XXX: this case is always trivial; how to suppress the branch? */
QUICK_CALL_RET($2,0)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -505,7 +548,14 @@ $1:
NBIF_ARG(r0,1,0)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(1)
+#else
QUICK_CALL_RET($2,1)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -521,7 +571,14 @@ $1:
NBIF_ARG(r1,2,1)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(2)
+#else
QUICK_CALL_RET($2,2)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -538,7 +595,14 @@ $1:
NBIF_ARG(r2,3,2)
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(3)
+#else
QUICK_CALL_RET($2,3)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
@@ -558,7 +622,14 @@ $1:
str r4, [sp, #0]
/* Perform a quick save;call;restore;ret sequence. */
+#ifdef __thumb__
+ SAVE_CONTEXT_QUICK
+ bl $2
+ RESTORE_CONTEXT_QUICK
+ NBIF_RET(5)
+#else
QUICK_CALL_RET($2,5)
+#endif
.size $1, .-$1
.type $1, %function
#endif')
diff --git a/erts/emulator/hipe/hipe_arm_glue.S b/erts/emulator/hipe/hipe_arm_glue.S
index 2e2b8604a6..5723afac12 100644
--- a/erts/emulator/hipe/hipe_arm_glue.S
+++ b/erts/emulator/hipe/hipe_arm_glue.S
@@ -25,6 +25,7 @@
.text
.p2align 2
+ .arm
/*
* Enter Erlang from C.
@@ -70,6 +71,7 @@
* Emulated code recursively calls native code.
*/
.global hipe_arm_call_to_native
+ .type hipe_arm_call_to_native, %function
hipe_arm_call_to_native:
ENTER_FROM_C
/* get argument registers */
@@ -85,6 +87,7 @@ hipe_arm_call_to_native:
* This is where native code returns to emulated code.
*/
.global nbif_return
+ .type nbif_return, %function
nbif_return:
str r0, [P, #P_ARG0] /* save retval */
mov r0, #HIPE_MODE_SWITCH_RES_RETURN
@@ -95,6 +98,7 @@ nbif_return:
* Emulated code returns to its native code caller.
*/
.global hipe_arm_return_to_native
+ .type hipe_arm_return_to_native, %function
hipe_arm_return_to_native:
ENTER_FROM_C
/* get return value */
@@ -111,6 +115,7 @@ hipe_arm_return_to_native:
* Emulated code tailcalls native code.
*/
.global hipe_arm_tailcall_to_native
+ .type hipe_arm_tailcall_to_native, %function
hipe_arm_tailcall_to_native:
ENTER_FROM_C
/* get argument registers */
@@ -125,6 +130,7 @@ hipe_arm_tailcall_to_native:
* Emulated code throws an exception to its native code caller.
*/
.global hipe_arm_throw_to_native
+ .type hipe_arm_throw_to_native, %function
hipe_arm_throw_to_native:
ENTER_FROM_C
/* invoke the handler */
@@ -142,6 +148,7 @@ hipe_arm_throw_to_native:
* XXX: Different stubs for different number of register parameters?
*/
.global nbif_callemu
+ .type nbif_callemu, %function
nbif_callemu:
str r8, [P, #P_BEAM_IP]
str r0, [P, #P_ARITY]
@@ -153,6 +160,7 @@ nbif_callemu:
* nbif_apply
*/
.global nbif_apply
+ .type nbif_apply, %function
nbif_apply:
STORE_ARG_REGS
mov r0, #HIPE_MODE_SWITCH_RES_APPLY
@@ -169,6 +177,7 @@ nbif_apply:
*/
#if NR_ARG_REGS >= 6
.global nbif_ccallemu6
+ .type nbif_ccallemu6, %function
nbif_ccallemu6:
str ARG5, [P, #P_ARG5]
#if NR_ARG_REGS > 6
@@ -181,6 +190,7 @@ nbif_ccallemu6:
#if NR_ARG_REGS >= 5
.global nbif_ccallemu5
+ .type nbif_ccallemu5, %function
nbif_ccallemu5:
str ARG4, [P, #P_ARG4]
#if NR_ARG_REGS > 5
@@ -193,6 +203,7 @@ nbif_ccallemu5:
#if NR_ARG_REGS >= 4
.global nbif_ccallemu4
+ .type nbif_ccallemu4, %function
nbif_ccallemu4:
str ARG3, [P, #P_ARG3]
#if NR_ARG_REGS > 4
@@ -205,6 +216,7 @@ nbif_ccallemu4:
#if NR_ARG_REGS >= 3
.global nbif_ccallemu3
+ .type nbif_ccallemu3, %function
nbif_ccallemu3:
str ARG2, [P, #P_ARG2]
#if NR_ARG_REGS > 3
@@ -217,6 +229,7 @@ nbif_ccallemu3:
#if NR_ARG_REGS >= 2
.global nbif_ccallemu2
+ .type nbif_ccallemu2, %function
nbif_ccallemu2:
str ARG1, [P, #P_ARG1]
#if NR_ARG_REGS > 2
@@ -229,6 +242,7 @@ nbif_ccallemu2:
#if NR_ARG_REGS >= 1
.global nbif_ccallemu1
+ .type nbif_ccallemu1, %function
nbif_ccallemu1:
str ARG0, [P, #P_ARG0]
#if NR_ARG_REGS > 1
@@ -240,6 +254,7 @@ nbif_ccallemu1:
#endif
.global nbif_ccallemu0
+ .type nbif_ccallemu0, %function
nbif_ccallemu0:
/* We use r1 not ARG0 here because ARG0 is not
defined when NR_ARG_REGS == 0. */
@@ -254,6 +269,7 @@ nbif_ccallemu0:
* This is where native code suspends.
*/
.global nbif_suspend_0
+ .type nbif_suspend_0, %function
nbif_suspend_0:
mov r0, #HIPE_MODE_SWITCH_RES_SUSPEND
b .suspend_exit
@@ -262,6 +278,7 @@ nbif_suspend_0:
* Suspend from a receive (waiting for a message)
*/
.global nbif_suspend_msg
+ .type nbif_suspend_msg, %function
nbif_suspend_msg:
mov r0, #HIPE_MODE_SWITCH_RES_WAIT
b .suspend_exit
@@ -272,6 +289,7 @@ nbif_suspend_msg:
* else { return 0; }
*/
.global nbif_suspend_msg_timeout
+ .type nbif_suspend_msg_timeout, %function
nbif_suspend_msg_timeout:
ldr r1, [P, #P_FLAGS]
mov r0, #HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT
@@ -286,23 +304,31 @@ nbif_suspend_msg_timeout:
* This is the default exception handler for native code.
*/
.global nbif_fail
+ .type nbif_fail, %function
nbif_fail:
mov r0, #HIPE_MODE_SWITCH_RES_THROW
b .flush_exit /* no need to save RA */
.global nbif_0_gc_after_bif
- .global nbif_1_gc_after_bif
- .global nbif_2_gc_after_bif
- .global nbif_3_gc_after_bif
+ .type nbif_0_gc_after_bif, %function
nbif_0_gc_after_bif:
mov r1, #0
b .gc_after_bif
+
+ .global nbif_1_gc_after_bif
+ .type nbif_1_gc_after_bif, %function
nbif_1_gc_after_bif:
mov r1, #1
b .gc_after_bif
+
+ .global nbif_2_gc_after_bif
+ .type nbif_2_gc_after_bif, %function
nbif_2_gc_after_bif:
mov r1, #2
b .gc_after_bif
+
+ .global nbif_3_gc_after_bif
+ .type nbif_3_gc_after_bif, %function
nbif_3_gc_after_bif:
mov r1, #3
/*FALLTHROUGH*/
@@ -330,18 +356,25 @@ nbif_3_gc_after_bif:
* TEMP_LR contains a copy of LR
*/
.global nbif_0_simple_exception
+ .type nbif_0_simple_exception, %function
nbif_0_simple_exception:
mov r1, #0
b .nbif_simple_exception
+
.global nbif_1_simple_exception
+ .type nbif_1_simple_exception, %function
nbif_1_simple_exception:
mov r1, #1
b .nbif_simple_exception
+
.global nbif_2_simple_exception
+ .type nbif_2_simple_exception, %function
nbif_2_simple_exception:
mov r1, #2
b .nbif_simple_exception
+
.global nbif_3_simple_exception
+ .type nbif_3_simple_exception, %function
nbif_3_simple_exception:
mov r1, #3
/*FALLTHROUGH*/
@@ -385,6 +418,7 @@ nbif_3_simple_exception:
* the gray/white stack boundary
*/
.global nbif_stack_trap_ra
+ .type nbif_stack_trap_ra, %function
nbif_stack_trap_ra: /* a return address, not a function */
# This only handles a single return value.
# If we have more, we need to save them in the PCB.
@@ -401,6 +435,7 @@ nbif_stack_trap_ra: /* a return address, not a function */
* Caller saved its LR in TEMP_LR (== TEMP1) before calling us.
*/
.global hipe_arm_inc_stack
+ .type hipe_arm_inc_stack, %function
hipe_arm_inc_stack:
STORE_ARG_REGS
mov TEMP_ARG0, lr
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 2497d51df1..c9eee2acf2 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -1022,7 +1022,7 @@ BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1)
*/
void hipe_emulate_fpe(Process* p)
{
- if (!finite(p->hipe.float_result)) {
+ if (!isfinite(p->hipe.float_result)) {
p->fp_exception = 1;
}
}
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index 7637049bc3..054911e822 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -182,3 +182,10 @@ BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2)
BIF_RET(am_ok);
}
+/* Stub-BIF for LLVM:
+ * Reloads BP, SP (in llvm unwind label) */
+
+BIF_RETTYPE hipe_bifs_llvm_fix_pinned_regs_0(BIF_ALIST_0)
+{
+ BIF_RET(am_ok);
+}
diff --git a/erts/emulator/hipe/hipe_bif2.tab b/erts/emulator/hipe/hipe_bif2.tab
index 45a395bf57..1b659cfa90 100644
--- a/erts/emulator/hipe/hipe_bif2.tab
+++ b/erts/emulator/hipe/hipe_bif2.tab
@@ -30,3 +30,4 @@ bif hipe_bifs:in_native/0
bif hipe_bifs:modeswitch_debug_on/0
bif hipe_bifs:modeswitch_debug_off/0
bif hipe_bifs:debug_native_called/2
+bif hipe_bifs:llvm_fix_pinned_regs/0
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 0997d81b2f..5f92b6bac4 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -268,9 +268,16 @@ noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc)
*/
define(CFUN,`ifelse($1,term_to_binary_1,hipe_wrapper_term_to_binary_1,
ifelse($1,term_to_binary_2,hipe_wrapper_term_to_binary_2,
-ifelse($1,erts_internal_binary_to_term_1,hipe_wrapper_erts_internal_binary_to_term_1,
-ifelse($1,erts_internal_binary_to_term_2,hipe_wrapper_erts_internal_binary_to_term_2,
-$1))))')
+ifelse($1,binary_to_term_1,hipe_wrapper_binary_to_term_1,
+ifelse($1,binary_to_term_2,hipe_wrapper_binary_to_term_2,
+ifelse($1,binary_to_list_1,hipe_wrapper_binary_to_list_1,
+ifelse($1,binary_to_list_3,hipe_wrapper_binary_to_list_3,
+ifelse($1,bitstring_to_list_1,hipe_wrapper_bitstring_to_list_1,
+ifelse($1,list_to_binary_1,hipe_wrapper_list_to_binary_1,
+ifelse($1,iolist_to_binary_1,hipe_wrapper_iolist_to_binary_1,
+ifelse($1,binary_list_to_bin_1,hipe_wrapper_binary_list_to_bin_1,
+ifelse($1,list_to_bitstring_1,hipe_wrapper_list_to_bitstring_1,
+$1)))))))))))')
define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, CFUN($4))')
include(TARGET/`erl_bif_list.h')
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 4ddc2790b1..1ae1d17b7f 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -2,7 +2,7 @@
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -187,6 +187,9 @@ void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure)
void hipe_reserve_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
{
+ if (!hipe_bifcall_from_native_is_recursive(p))
+ return;
+
/* ensure that at least 2 words are available on the BEAM stack */
if ((p->stop - 2) < p->htop) {
DPRINTF("calling gc to reserve BEAM stack size");
@@ -195,25 +198,26 @@ void hipe_reserve_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
}
p->stop -= 2;
p->stop[0] = NIL;
- p->stop[1] = NIL;
+ p->stop[1] = hipe_beam_catch_throw;
}
static __inline__ void
hipe_push_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
{
- if (p->flags & F_DISABLE_GC) {
+ if (&p->stop[1] < p->hend && p->stop[1] == hipe_beam_catch_throw) {
/* Trap frame already reserved */
- ASSERT(p->stop[0] == NIL && p->stop[1] == NIL);
+ ASSERT(p->stop[0] == NIL);
}
else {
+ ASSERT(!(p->flags & F_DISABLE_GC));
if ((p->stop - 2) < p->htop) {
DPRINTF("calling gc to increase BEAM stack size");
p->fcalls -= erts_garbage_collect(p, 2, reg, arity);
ASSERT(!((p->stop - 2) < p->htop));
}
p->stop -= 2;
+ p->stop[1] = hipe_beam_catch_throw;
}
- p->stop[1] = hipe_beam_catch_throw;
p->stop[0] = make_cp(p->cp);
++p->catches;
p->cp = hipe_beam_pc_return;
@@ -221,12 +225,16 @@ hipe_push_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
void hipe_unreserve_beam_trap_frame(Process *p)
{
- ASSERT(p->stop[0] == NIL && p->stop[1] == NIL);
+ if (!hipe_bifcall_from_native_is_recursive(p))
+ return;
+
+ ASSERT(p->stop[0] == NIL && p->stop[1] == hipe_beam_catch_throw);
p->stop += 2;
}
static __inline__ void hipe_pop_beam_trap_frame(Process *p)
{
+ ASSERT(p->stop[1] == hipe_beam_catch_throw);
p->cp = cp_val(p->stop[0]);
--p->catches;
p->stop += 2;
diff --git a/erts/emulator/hipe/hipe_risc_glue.h b/erts/emulator/hipe/hipe_risc_glue.h
index cc2671c016..dbb7086dae 100644
--- a/erts/emulator/hipe/hipe_risc_glue.h
+++ b/erts/emulator/hipe/hipe_risc_glue.h
@@ -214,6 +214,14 @@ hipe_trap_from_native_is_recursive(Process *p)
return 0;
}
+/* Native code called a BIF. Is it a recursive call?
+   i.e. should we return to native code when the BIF is done? */
+static __inline__ int
+hipe_bifcall_from_native_is_recursive(Process *p)
+{
+ return (p->hipe.nra != (void(*)(void))&nbif_return);
+}
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h
index 63ad250d60..4b6e495b9a 100644
--- a/erts/emulator/hipe/hipe_x86_glue.h
+++ b/erts/emulator/hipe/hipe_x86_glue.h
@@ -207,6 +207,14 @@ hipe_trap_from_native_is_recursive(Process *p)
return 0;
}
+/* Native code called a BIF. Is it a recursive call?
+   i.e. should we return to native code when the BIF is done? */
+static __inline__ int
+hipe_bifcall_from_native_is_recursive(Process *p)
+{
+ return (*p->hipe.nsp != (Eterm)nbif_return);
+}
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
diff --git a/erts/emulator/hipe/hipe_x86_signal.c b/erts/emulator/hipe/hipe_x86_signal.c
index 8f997aafab..f5668013e2 100644
--- a/erts/emulator/hipe/hipe_x86_signal.c
+++ b/erts/emulator/hipe/hipe_x86_signal.c
@@ -2,7 +2,7 @@
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -304,7 +304,9 @@ static void hipe_sigaltstack(void *ss_sp)
*/
void hipe_thread_signal_init(void)
{
- hipe_sigaltstack(erts_alloc(ERTS_ALC_T_HIPE, SIGSTKSZ));
+ /* The stack doesn't really need to be cache aligned.
+ We use it only to suppress false leak reports from valgrind */
+ hipe_sigaltstack(erts_alloc_permanent_cache_aligned(ERTS_ALC_T_HIPE, SIGSTKSZ));
}
#endif
diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md
index b93c11c6ec..2a9594db25 100644
--- a/erts/emulator/internal_doc/CarrierMigration.md
+++ b/erts/emulator/internal_doc/CarrierMigration.md
@@ -16,12 +16,12 @@ When a carrier is empty, i.e. contains only one large free block, it
is deallocated. Since multiblock carriers can contain both allocated
blocks and free blocks at the same time, an allocator instance might
be stuck with a large amount of poorly utilized carriers if the memory
-load decrease. After a peak in memory usage it is expected that not
-all memory can be returned since the blocks still allocated is likely
+load decreases. After a peak in memory usage it is expected that not
+all memory can be returned since the blocks still allocated are likely
to be dispersed over multiple carriers. Such poorly utilized carriers
-can usually be reused if the memory load increase again. However,
+can usually be reused if the memory load increases again. However,
since each scheduler thread manages its own set of allocator
-instances, and memory load is not necessarily connected to CPU load we
+instances, and memory load is not necessarily correlated to CPU load, we
might get into a situation where there are lots of poorly utilized
multiblock carriers on some allocator instances while we need to
allocate new multiblock carriers on other allocator instances. In
@@ -50,13 +50,13 @@ the allocator instance manages. Free blocks in one specific carrier
can be referred to from potentially every other carrier that is
managed, and the amount of such references can be huge. That is, the
work of removing the free blocks of such a carrier from the search
-tree will be huge. One way of solving this could be to not migrate
+tree will be huge. One way of solving this could be not to migrate
carriers that contain lots of free blocks, but this would prevent us
-from migrating carriers that potentially needs to be migrated in order
+from migrating carriers that potentially need to be migrated in order
to solve the problem we set out to solve.
By using one data structure of free blocks in each carrier and an
-allocator instance wide data structure of carriers managed by the
+allocator instance-wide data structure of carriers managed by the
allocator instance, the work needed in order to remove and add
carriers can be kept to a minimum. When migration of carriers is
enabled on a specific allocator type, we require that an allocation
@@ -76,9 +76,9 @@ through a pool of carriers. In order for a carrier migration to
complete, one scheduler needs to move the carrier into the pool, and
another scheduler needs to take the carrier out of the pool.
-The pool is implemented as a lock free, circular, double linked,
+The pool is implemented as a lock-free, circular, double linked,
list. The list contains a sentinel which is used as the starting point
-when inserting to, or fetching from the pool. Carriers in the pool are
+when inserting to, or fetching from, the pool. Carriers in the pool are
elements in this list.
The list can be modified by all scheduler threads
@@ -108,19 +108,19 @@ all search operations need to read the content of the sentinel. If we
were to modify the sentinel, the cache line containing the sentinel
would unnecessarily be bounced between processors.
-The `prev`, and `next` fields in the elements of the list contains the
+The `prev` and `next` fields in the elements of the list contain the
value of the pointer, a modification marker, and a deleted
marker. Memory operations on these fields are done using atomic memory
operations. When a thread has set the modification marker in a field,
no-one except the thread that set the marker is allowed to modify the
-field. If multiple modification markers needs to be set, we always
+field. If multiple modification markers need to be set, we always
begin with `next` fields followed by `prev` fields in the order
following the actual pointers. This guarantees that no deadlocks will
occur.
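+
+As a rough sketch (all names below are invented, not the actual field layout
+in `erl_alloc_util.c`), such a link field can be thought of as a word that
+packs an aligned pointer together with the two marker bits in its low bits:
+
+    #include <stdint.h>
+
+    #define MOD_MARKER ((uintptr_t)1)   /* "being modified" bit */
+    #define DEL_MARKER ((uintptr_t)2)   /* "deleted" bit        */
+    #define PTR_MASK   (~(uintptr_t)3)
+
+    static void *link_ptr(uintptr_t field)    { return (void *)(field & PTR_MASK); }
+    static int   is_modified(uintptr_t field) { return (field & MOD_MARKER) != 0; }
+    static int   is_deleted(uintptr_t field)  { return (field & DEL_MARKER) != 0; }
+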
When a carrier is being removed from a pool, we mark it with a thread
progress value that needs to be reached before we are allowed to
-modify the `next`, and `prev` fields. That is, until we reach this
+modify the `next` and `prev` fields. That is, until we reach this
thread progress we are not allowed to insert the carrier into the pool
again, and we are not allowed to deallocate the carrier. This ensures
that threads inspecting the pool always will be able to traverse the
@@ -130,12 +130,12 @@ threads may have references to it via the pool.
### Migration ###
-There exist one pool for each allocator type enabling migration of
+There exists one pool for each allocator type enabling migration of
carriers between scheduler specific allocator instances of the same
allocator type.
Each allocator instance keeps track of the current utilization of its
-multiblock carriers. When the utilization falls below the "abandon
+multiblock carriers. When the total utilization falls below the "abandon
carrier utilization limit" it starts to inspect the utilization of the
current carrier when deallocations are made. If also the utilization
of the carrier falls below the "abandon carrier utilization limit" it
@@ -146,28 +146,53 @@ Since the carrier has been unlinked from the data structure of
available free blocks, no more allocations will be made in the
carrier. The allocator instance putting the carrier into the pool,
however, still has the responsibility of performing deallocations in
-it while it remains in the pool.
+it while it remains in the pool. The allocator instance with this
+deallocation responsibility is here called the **employer**.
-Each carrier has a flag field containing information about allocator
-instance owning the carrier, a flag indicating if the carrier is in
+Each carrier has a flag field containing information about the
+employing allocator instance, a flag indicating if the carrier is in
the pool or not, and a flag indicating if it is busy or not. When the
-carrier is in the pool, the owning allocator instance needs to mark it
+carrier is in the pool, the employing allocator instance needs to mark it
as busy while operating on it. If another thread inspects it in order
-to try to fetch it from the pool, it will abort the fetch if it is
-busy. When fetching the carrier from the pool, ownership will changed
-and further deallocations in the carrier will be redirected to the new
-owner using the delayed dealloc functionality.
+to try to fetch it from the pool, it will skip it if it is busy. When
+fetching the carrier from the pool, employment will change and further
+deallocations in the carrier will be redirected to the new
+employer using the delayed dealloc functionality.
If a carrier in the pool becomes empty, it will be withdrawn from the
pool. All carriers that become empty are also always passed to its
-originating allocator instance for deallocation using the delayed
+**owning** allocator instance for deallocation using the delayed
dealloc functionality. Since carriers this way always will be
-deallocated by the allocator instance that allocated the carrier the
+deallocated by the owner that allocated the carrier, the
underlying functionality of allocating and deallocating carriers can
remain simple and doesn't have to bother about multiple threads. In a
NUMA system we will also not mix carriers originating from multiple
NUMA nodes.
+In short:
+
+* The allocator instance that created a carrier **owns** it.
+* An empty carrier is always deallocated by its **owner**.
+* **Ownership** never changes.
+* The allocator instance that uses a carrier **employs** it.
+* An **employer** can abandon a carrier into the pool.
+* Pooled carriers are not allocated from.
+* Deallocation in a pooled carrier is still performed by its **employer**.
+* **Employment** can only change when a carrier is fetched from the pool.
+
+### Searching the pool ###
+
+To preserve real-time characteristics, searching the pool is
+bounded: we only inspect a limited number of carriers. If none of
+those carriers has a free block large enough to satisfy the allocation
+request, the search fails. A carrier in the pool can also be busy
+if another thread is currently doing block deallocation work on the
+carrier. A busy carrier is also skipped by the search, as it cannot
+satisfy the request. The pool is lock-free and we do not want to
+block, waiting for the other thread to finish.
+
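+As a stand-alone sketch of this bounded search (all types and names below are
+invented for illustration; the real code lives in `erl_alloc_util.c` and uses
+atomic operations on the list links):
+
+    #include <stddef.h>
+
+    #define POOL_SEARCH_LIMIT 20   /* assumed value of the inspection limit */
+
+    typedef struct carrier {
+        struct carrier *next;        /* circular list linked through a sentinel */
+        int is_busy;                 /* set while another thread deallocates in it */
+        size_t largest_free_block;
+    } carrier;
+
+    /* Return a carrier whose largest free block can hold 'want', or NULL if
+     * the bounded search fails and the caller must create a new carrier. */
+    static carrier *pool_fetch(carrier *sentinel, size_t want)
+    {
+        carrier *c = sentinel->next;
+        int inspected = 0;
+
+        while (c != sentinel && inspected < POOL_SEARCH_LIMIT) {
+            inspected++;
+            if (!c->is_busy && c->largest_free_block >= want)
+                return c;
+            c = c->next;             /* busy or too small: skip it */
+        }
+        return NULL;
+    }
+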
+#### Before OTP 17.4 ####
+
When an allocator instance needs more carrier space, it always begins
by inspecting its own carriers that are waiting for thread progress
before they can be deallocated. If no such carrier could be found, it
@@ -176,10 +201,69 @@ it will allocate a new carrier. Regardless of where the allocator
instance gets the carrier from, it then just links the carrier into
its data structure of free blocks.
+#### After OTP 17.4 ####
+
+The old search algorithm had a problem, as the search always started at
+the same position in the pool: the sentinel. This could lead to
+contention between concurrently searching processes. But even worse, it
+could lead to a "bad" state in which searches fail at a high rate,
+so that new carriers are allocated instead. These new carriers
+may later be inserted into the pool due to bad utilization. If the
+frequency of insertions into the pool is higher than that of successful
+fetches from the pool, memory will eventually be exhausted.
+
+This "bad" state consists of a cluster of small and/or highly
+fragmented carriers located at the sentinel in the pool. The largest free
+block in such a "bad" carrier is rather small, making it unable to satisfy
+most allocation requests. As the search always started at the
+sentinel, any such "bad" carriers that had been left in the pool would
+eventually cluster together at the sentinel. All searches first
+have to skip past this cluster of "bad" carriers to reach a "good"
+carrier. When the cluster grows to the size of the search limit,
+all searches will essentially fail.
+
+To counter the "bad cluster" problem and also ease the contention, the
+search now always starts by first looking at the allocator's **own**
+carriers, that is, carriers that were initially created by the
+allocator itself and later abandoned to the pool. If none of
+our own abandoned carriers will do, then the search continues into the
+pool, as before, to look for carriers created by other
+allocators. However, if we have at least one abandoned carrier of our
+own that could not satisfy the request, we can use that as an entry point
+into the pool.
+
+The result is that we prefer carriers created by the thread itself,
+which is good for NUMA performance. We also get more entry points when
+searching the pool, which eases contention and clustering.
+
+To do the first search among own carriers, every allocator instance
+has two new lists: `pooled_list` and `traitor_list`. These lists are only
+accessed by the allocator itself and they only contain the allocator's
+own carriers. When an owned carrier is abandoned and put in the
+pool, it is also linked into `pooled_list`. When we search our
+`pooled_list` and find a carrier that is no longer in the pool, we
+move that carrier from `pooled_list` to `traitor_list` as it is now
+employed by another allocator. If searching `pooled_list` fails, we
+also do a limited search of `traitor_list`. When an abandoned carrier is
+found in `traitor_list`, it is either employed or, if it could not satisfy
+the allocation request, moved back to `pooled_list`.
+
+When searching `pooled_list` and `traitor_list` we always start at the
+point where the last search ended. This is to avoid clustering
+problems and to increase the probability of finding a "good" carrier. As
+`pooled_list` and `traitor_list` are only accessed by the owning
+allocator instance, they need no thread synchronization at all.
+
+Furthermore, the search for own carriers that are scheduled
+for deallocation is now done as the last search option. The idea is
+that it is better to reuse a poorly utilized carrier than to
+resurrect an empty carrier that was just about to be released back to
+the OS.
+
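+To make the order of the different searches explicit, here is a minimal sketch
+(every identifier below is made up and does not match the real
+`erl_alloc_util.c` code; the helpers are stubbed out):
+
+    #include <stddef.h>
+
+    typedef struct Carrier Carrier;
+    typedef struct Allctr  Allctr;
+
+    /* Stubs standing in for the real searches of pooled_list, traitor_list,
+     * the shared pool and the carriers scheduled for deallocation. */
+    static Carrier *search_pooled_list(Allctr *a, size_t want)      { return NULL; }
+    static Carrier *search_traitor_list(Allctr *a, size_t want)     { return NULL; }
+    static Carrier *search_pool(Allctr *a, size_t want)             { return NULL; }
+    static Carrier *reuse_dealloc_scheduled(Allctr *a, size_t want) { return NULL; }
+
+    static Carrier *fetch_carrier(Allctr *a, size_t want)
+    {
+        Carrier *c;
+        if ((c = search_pooled_list(a, want)))   return c;  /* 1. own carriers still in the pool  */
+        if ((c = search_traitor_list(a, want)))  return c;  /* 2. own carriers employed elsewhere */
+        if ((c = search_pool(a, want)))          return c;  /* 3. the shared pool itself          */
+        return reuse_dealloc_scheduled(a, want);            /* 4. last resort, as described above */
+    }
+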
### Result ###
The use of this strategy of abandoning carriers with poor utilization
-and reusing these in allocator instances with an increased carrier
+and reusing them in allocator instances with an increased carrier
demand is extremely effective and completely eliminates the problems
that otherwise sometimes occurred when CPU load dropped while memory
load did not.
diff --git a/erts/emulator/internal_doc/SuperCarrier.md b/erts/emulator/internal_doc/SuperCarrier.md
new file mode 100644
index 0000000000..0ad6af41de
--- /dev/null
+++ b/erts/emulator/internal_doc/SuperCarrier.md
@@ -0,0 +1,191 @@
+Super Carrier
+=============
+
+A super carrier is a large memory area, allocated at VM start, from which
+normal carriers can be allocated during runtime.
+
+The super carrier feature was introduced in OTP R16B03. It is
+enabled with the command line option +MMscs <size in MB>
+and can be configured with other options.
+
+Problem
+-------
+
+The initial motivation for this feature was customers asking for a way
+to pre-allocate physcial memory at VM start for it to use.
+
+Other problems were various limitations experienced with the OS
+implementation of mmap:
+
+* Increasingly bad performance of mmap/munmap as the number of mmap'ed areas grows.
+* Fragmentation between mmap'ed areas.
+
+A third problem was management of low memory in the halfword
+emulator. The implementation used a naive linear search structure to
+hold free segments, which led to poor performance when
+fragmentation increased.
+
+
+Solution
+--------
+
+Allocate one large contiguous area of address space at VM start and
+then use that area to satisfy our dynamic memory needs during
+runtime. In other words: implement our own mmap.
+
+### Use cases ###
+
+If the command line option +MMscrpm (Reserve Physical Memory) is set to
+false, only virtual address space is allocated for the super carrier from
+the start. The super carrier then acts as an "alternative mmap" implementation
+without changing the consumption of physical memory pages. Physical
+pages will be reserved on demand when an allocation is done from the super
+carrier and unreserved when the memory is released back to the
+super carrier.
+
+If +MMscrpm is set to true, which is the default, the initial allocation
+will reserve physical memory for the entire super carrier. This can be
+used by users who want to ensure a certain *minimum* amount of
+physical memory for the VM.
+
+However, what reservation of physical memory actually means depends
+heavily on the operating system and how it is configured. For
+example, different memory overcommit settings on Linux drastically
+change the behaviour.
+
+A third feature is to have the super carrier limit the *maximum*
+amount of memory used by the VM. If +MMsco (Super Carrier Only) is set
+to true, which is the default, allocations will only be done from the
+super carrier. When the super carrier gets full, the VM will fail with
+an out-of-memory error.
+If +MMsco is false, allocations will instead use mmap directly when the
+super carrier is full.
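+
+As an example, starting the VM with `erl +MMscs 1024 +MMscrpm false +MMsco true`
+would, with these purely illustrative values, create a 1 GB super carrier
+backed by virtual address space only, and make it the only source of
+carrier allocations.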
+
+
+
+### Implementation ###
+
+The entire super carrier implementation is kept in erl_mmap.c. The
+name suggests that it can be viewed as our own mmap implementation.
+
+A super carrier needs to satisfy two slightly different kinds of
+allocation requests: multi block carriers (MBC) and single block
+carriers (SBC). They are both rather large blocks of contiguous
+memory, but MBCs and SBCs have different demands on alignment and
+size.
+
+SBCs can have arbitrary sizes and only need a minimum 8-byte
+alignment.
+
+MBCs are more restricted. They can only have a number of fixed
+sizes that are powers of 2. The start address needs to have a very
+large alignment (currently 256 kB, called "super alignment"). This is a
+design choice that allows very low overhead per allocated block in the
+MBC.
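+
+As a small illustration of what super alignment means in practice,
+rounding an address up to the next super-aligned boundary amounts to
+the usual power-of-two trick (the constant below is just the value
+quoted above, not the actual macro name used in the source):
+
+    #include <stdint.h>
+
+    #define SUPER_ALIGNMENT ((uintptr_t) 256 * 1024)   /* 256 kB, as described above */
+
+    /* Round an address up to the next super-aligned boundary. */
+    static uintptr_t super_align_up(uintptr_t addr)
+    {
+        return (addr + SUPER_ALIGNMENT - 1) & ~(SUPER_ALIGNMENT - 1);
+    }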
+
+To reduce fragmentation within the super carrier, it is good to keep SBCs
+and MBCs apart. MBCs, with their uniform alignment and sizes, can be
+packed together very efficiently. SBCs, without any demand for alignment,
+can also be allocated together quite efficiently. But mixing them can lead
+to a lot of wasted memory when we need to create large holes of
+padding to the next alignment limit.
+
+The super carrier thus contains two areas: one for MBCs growing from
+the bottom up, and one for SBCs growing from the top down, like a
+process with a heap and a stack growing towards each other.
+
+
+### Data structures ###
+
+The MBC area is called **sa** as in super aligned and the SBC area is
+called **sua** as in super un-aligned.
+
+Note that the "super" in super alignment and the "super" in super
+carrier has nothing to do with each other. We could have choosen
+another naming to avoid confusion, such as "meta" carrier or "giant"
+aligment.
+
+ +-------+ <---- sua.top
+ | sua |
+ | |
+ |-------| <---- sua.bot
+ | |
+ | |
+ | |
+ |-------| <---- sa.top
+ | |
+ | sa |
+ | |
+ +-------+ <---- sa.bot
+
+
+When a carrier is deallocated, a free memory segment will be created
+inside the corresponding area, unless the carrier was at the very top
+(in `sa`) or bottom (in `sua`), in which case the area will just shrink
+down or up.
+
+We need to keep track of all the free segments in order to reuse them
+for new carrier allocations. One initial idea was to use the same
+mechanism that is used to keep track of free blocks within MBCs
+(alloc_util and the different strategies). However, that would not be
+as straightforward as one might think, and it can also waste quite a lot of
+memory as it uses prepended block headers. The granularity of the
+super carrier is one memory page (usually 4 kB). We want to allocate
+and free entire pages and we don't want to waste an entire page just
+to hold the block header of the following pages.
+
+Instead we store the meta information about all the free segments in a
+dedicated area apart from the `sa` and `sua` areas. Every free segment is
+represented by a descriptor struct (`ErtsFreeSegDesc`).
+
+ typedef struct {
+ RBTNode snode; /* node in 'stree' */
+ RBTNode anode; /* node in 'atree' */
+ char* start;
+ char* end;
+    } ErtsFreeSegDesc;
+
+To find the smallest free segment that will satisfy a carrier allocation
+(best fit), the free segments are organized in a tree sorted by
+size (`stree`). We search this tree at allocation time. If no free segment of
+sufficient size is found, the area (`sa` or `sua`) is instead expanded.
+If two or more free segments of equal size exist, the one at the lowest
+address is chosen for `sa` and the one at the highest address for `sua`.
+
+At carrier deallocation, we want to coalesce with any adjacent free
+segments, to form one large free segment. To do that, all free
+segments are also organized in a tree sorted in address order (`atree`).
+
+So, in total we keep four trees of free descriptors for the super
+carrier: two for `sa` and two for `sua`. They all use the same
+red-black-tree implementation, which supports the different sorting
+orders used.
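+
+The following sketch shows how the address-ordered tree is typically
+used at deallocation to coalesce with the neighbouring free segments.
+The helper names (`atree_pred`, `atree_succ`, `remove_seg`, `add_seg`)
+and the `FreeSegArea` type are assumed for the sake of the example and
+are not the actual names in `erl_mmap.c`:
+
+    /* Insert a freed range [start, end) and merge it with any adjacent
+     * free segments found via the address-ordered tree ('atree').
+     */
+    static void insert_free_seg(FreeSegArea *area, char *start, char *end)
+    {
+        ErtsFreeSegDesc *prev = atree_pred(area, start); /* closest free segment below */
+        ErtsFreeSegDesc *next = atree_succ(area, end);   /* closest free segment above */
+
+        if (prev && prev->end == start) {        /* merge with lower neighbour */
+            start = prev->start;
+            remove_seg(area, prev);
+        }
+        if (next && next->start == end) {        /* merge with upper neighbour */
+            end = next->end;
+            remove_seg(area, next);
+        }
+        add_seg(area, start, end);               /* enters both 'atree' and 'stree' */
+    }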
+
+When allocating a new MBC we first search for a free segment in `sa`,
+then try to raise `sa.top`, and then as a fallback search for a
+free segment in `sua`. When an MBC is allocated in `sua`, a larger segment
+is allocated and then trimmed to obtain the right
+alignment. The allocation search for an SBC is done in the reverse order. When
+an SBC is allocated in `sa`, its size is aligned up to the super-aligned
+size.
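+
+The MBC path can be summarized with the following sketch, again with
+assumed helper names and a `SuperCarrier` type rather than the real
+erl_mmap.c code:
+
+    /* Allocation order for a new super-aligned MBC segment. */
+    static char *alloc_mbc_segment(SuperCarrier *sc, Uint size)
+    {
+        char *seg;
+
+        if ((seg = best_fit(&sc->sa.stree, size)) != NULL)   /* 1. free segment in sa */
+            return seg;
+        if ((seg = raise_sa_top(sc, size)) != NULL)          /* 2. grow the sa area upwards */
+            return seg;
+        /* 3. fall back to sua: allocate a larger segment and trim it
+         *    down to obtain super alignment
+         */
+        seg = best_fit(&sc->sua.stree, size + SUPER_ALIGNMENT);
+        return seg ? trim_to_super_alignment(seg, size) : NULL;
+    }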
+
+### The free descriptor area ###
+
+As mentioned above, the descriptors for the free segments are
+allocated in a separate area. This area has a constant configurable
+size (+MMscrfsd) that defaults to 65536 descriptors. This should be
+more than enough in most cases. If the descriptor area fills up,
+new descriptor areas will be allocated: first directly from the OS,
+then from `sua` and `sa` in the super carrier, and lastly from the memory
+segment that is itself being deallocated. Allocating free descriptor
+areas from the super carrier is only a last resort, and should be
+avoided, as it creates fragmentation.
+
+### Halfword emulator ###
+
+The halfword emulator uses the super carrier implementation to manage
+the low memory mappings that are needed for all term storage. The
+super carrier cannot be configured by command line options here. One
+could imagine a second configurable instance of the super carrier used
+for high memory allocations, but that has not been implemented.
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 245841a768..0051b45b31 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -52,8 +52,17 @@ typedef char EventStateType;
#define ERTS_EV_TYPE_STOP_USE ((EventStateType) 3) /* pending stop_select */
typedef char EventStateFlags;
-#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */
+#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */
+#define ERTS_EV_FLAG_DEFER_IN_EV ((EventStateFlags) 2)
+#define ERTS_EV_FLAG_DEFER_OUT_EV ((EventStateFlags) 4)
+#ifdef DEBUG
+# define ERTS_ACTIVE_FD_INC 2
+#else
+# define ERTS_ACTIVE_FD_INC 128
+#endif
+
+#define ERTS_CHECK_IO_POLL_RES_LEN 512
#if defined(ERTS_KERNEL_POLL_VERSION)
# define ERTS_CIO_EXPORT(FUNC) FUNC ## _kp
@@ -67,6 +76,7 @@ typedef char EventStateFlags;
(ERTS_POLL_USE_POLL && !ERTS_POLL_USE_KERNEL_POLL)
#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
+#define ERTS_CIO_POLL_CTLV ERTS_POLL_EXPORT(erts_poll_controlv)
#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)
@@ -85,6 +95,13 @@ static struct pollset_info
{
ErtsPollSet ps;
erts_smp_atomic_t in_poll_wait; /* set while doing poll */
+ struct {
+ int six; /* start index */
+ int eix; /* end index */
+ erts_smp_atomic32_t no;
+ int size;
+ ErtsSysFdType *array;
+ } active_fd;
#ifdef ERTS_SMP
struct removed_fd* removed_list; /* list of deselected fd's*/
erts_smp_spinlock_t removed_list_lock;
@@ -97,9 +114,11 @@ typedef struct {
SafeHashBucket hb;
#endif
ErtsSysFdType fd;
- union {
- ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */
+ struct {
ErtsDrvSelectDataState *select; /* ERTS_EV_TYPE_DRV_SEL */
+#if ERTS_CIO_HAVE_DRV_EVENT
+ ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */
+#endif
erts_driver_t* drv_ptr; /* ERTS_EV_TYPE_STOP_USE */
} driver;
ErtsPollEvents events;
@@ -169,6 +188,10 @@ static ERTS_INLINE ErtsDrvEventState* hash_new_drv_ev_state(ErtsSysFdType fd)
ErtsDrvEventState tmpl;
tmpl.fd = fd;
tmpl.driver.select = NULL;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ tmpl.driver.event = NULL;
+#endif
+ tmpl.driver.drv_ptr = NULL;
tmpl.events = 0;
tmpl.remove_cnt = 0;
tmpl.type = ERTS_EV_TYPE_NONE;
@@ -209,6 +232,69 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_F
#endif
static ERTS_INLINE void
+init_iotask(ErtsIoTask *io_task)
+{
+ erts_port_task_handle_init(&io_task->task);
+ erts_smp_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0));
+}
+
+static ERTS_INLINE int
+is_iotask_active(ErtsIoTask *io_task, erts_aint_t current_cio_time)
+{
+ if (erts_port_task_is_scheduled(&io_task->task))
+ return 1;
+ if (erts_smp_atomic_read_nob(&io_task->executed_time) == current_cio_time)
+ return 1;
+ return 0;
+}
+
+static ERTS_INLINE ErtsDrvSelectDataState *
+alloc_drv_select_data(void)
+{
+ ErtsDrvSelectDataState *dsp = erts_alloc(ERTS_ALC_T_DRV_SEL_D_STATE,
+ sizeof(ErtsDrvSelectDataState));
+ dsp->inport = NIL;
+ dsp->outport = NIL;
+ init_iotask(&dsp->iniotask);
+ init_iotask(&dsp->outiotask);
+ return dsp;
+}
+
+static ERTS_INLINE void
+free_drv_select_data(ErtsDrvSelectDataState *dsp)
+{
+ ASSERT(!erts_port_task_is_scheduled(&dsp->iniotask.task));
+ ASSERT(!erts_port_task_is_scheduled(&dsp->outiotask.task));
+ erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, dsp);
+}
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+
+static ERTS_INLINE ErtsDrvEventDataState *
+alloc_drv_event_data(void)
+{
+ ErtsDrvEventDataState *dep = erts_alloc(ERTS_ALC_T_DRV_EV_D_STATE,
+ sizeof(ErtsDrvEventDataState));
+ dep->port = NIL;
+ dep->data = NULL;
+ dep->removed_events = 0;
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ dep->deferred_events = 0;
+#endif
+ init_iotask(&dep->iotask);
+ return dep;
+}
+
+static ERTS_INLINE void
+free_drv_event_data(ErtsDrvEventDataState *dep)
+{
+ ASSERT(!erts_port_task_is_scheduled(&dep->iotask.task));
+ erts_free(ERTS_ALC_T_DRV_EV_D_STATE, dep);
+}
+
+#endif /* ERTS_CIO_HAVE_DRV_EVENT */
+
+static ERTS_INLINE void
remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
{
#ifdef ERTS_SMP
@@ -288,7 +374,7 @@ forget_removed(struct pollset_info* psi)
drv_ptr = state->driver.drv_ptr;
ASSERT(drv_ptr);
state->type = ERTS_EV_TYPE_NONE;
- state->flags = 0;
+ state->flags &= ~ERTS_EV_FLAG_USED;
state->driver.drv_ptr = NULL;
/* Fall through */
case ERTS_EV_TYPE_NONE:
@@ -345,6 +431,10 @@ grow_drv_ev_state(int min_ix)
for (i = erts_smp_atomic_read_nob(&drv_ev_state_len); i < new_len; i++) {
drv_ev_state[i].fd = (ErtsSysFdType) i;
drv_ev_state[i].driver.select = NULL;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ drv_ev_state[i].driver.event = NULL;
+#endif
+ drv_ev_state[i].driver.drv_ptr = NULL;
drv_ev_state[i].events = 0;
drv_ev_state[i].remove_cnt = 0;
drv_ev_state[i].type = ERTS_EV_TYPE_NONE;
@@ -365,11 +455,7 @@ grow_drv_ev_state(int min_ix)
static ERTS_INLINE void
abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type)
{
- if (is_nil(id)) {
- ASSERT(type == ERTS_EV_TYPE_NONE
- || !erts_port_task_is_scheduled(pthp));
- }
- else if (erts_port_task_is_scheduled(pthp)) {
+ if (is_not_nil(id) && erts_port_task_is_scheduled(pthp)) {
erts_port_task_abort(pthp);
ASSERT(erts_is_port_alive(id));
}
@@ -384,7 +470,7 @@ abort_tasks(ErtsDrvEventState *state, int mode)
#if ERTS_CIO_HAVE_DRV_EVENT
case ERTS_EV_TYPE_DRV_EV:
abort_task(state->driver.event->port,
- &state->driver.event->task,
+ &state->driver.event->iotask.task,
ERTS_EV_TYPE_DRV_EV);
return;
#endif
@@ -398,14 +484,14 @@ abort_tasks(ErtsDrvEventState *state, int mode)
case ERL_DRV_WRITE:
ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
abort_task(state->driver.select->outport,
- &state->driver.select->outtask,
+ &state->driver.select->outiotask.task,
state->type);
if (mode == ERL_DRV_WRITE)
break;
case ERL_DRV_READ:
ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
abort_task(state->driver.select->inport,
- &state->driver.select->intask,
+ &state->driver.select->iniotask.task,
state->type);
break;
default:
@@ -443,16 +529,14 @@ deselect(ErtsDrvEventState *state, int mode)
if (!(state->events)) {
switch (state->type) {
case ERTS_EV_TYPE_DRV_SEL:
- ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask));
- ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask));
- erts_free(ERTS_ALC_T_DRV_SEL_D_STATE,
- state->driver.select);
+ state->driver.select->inport = NIL;
+ state->driver.select->outport = NIL;
break;
#if ERTS_CIO_HAVE_DRV_EVENT
case ERTS_EV_TYPE_DRV_EV:
- ASSERT(!erts_port_task_is_scheduled(&state->driver.event->task));
- erts_free(ERTS_ALC_T_DRV_EV_D_STATE,
- state->driver.event);
+ state->driver.event->port = NIL;
+ state->driver.event->data = NULL;
+ state->driver.event->removed_events = (ErtsPollEvents) 0;
break;
#endif
case ERTS_EV_TYPE_NONE:
@@ -462,20 +546,297 @@ deselect(ErtsDrvEventState *state, int mode)
break;
}
- state->driver.select = NULL;
state->type = ERTS_EV_TYPE_NONE;
- state->flags = 0;
+ state->flags &= ~ERTS_EV_FLAG_USED;
remember_removed(state, &pollset);
}
}
-
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
# define IS_FD_UNKNOWN(state) ((state)->type == ERTS_EV_TYPE_NONE && (state)->remove_cnt == 0)
#else
# define IS_FD_UNKNOWN(state) ((state) == NULL)
#endif
+static ERTS_INLINE void
+check_fd_cleanup(ErtsDrvEventState *state,
+#if ERTS_CIO_HAVE_DRV_EVENT
+ ErtsDrvEventDataState **free_event,
+#endif
+ ErtsDrvSelectDataState **free_select)
+{
+ erts_aint_t current_cio_time;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
+
+ current_cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time);
+ *free_select = NULL;
+ if (state->driver.select
+ && (state->type != ERTS_EV_TYPE_DRV_SEL)
+ && !is_iotask_active(&state->driver.select->iniotask, current_cio_time)
+ && !is_iotask_active(&state->driver.select->outiotask, current_cio_time)) {
+
+ *free_select = state->driver.select;
+ state->driver.select = NULL;
+ }
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+ *free_event = NULL;
+ if (state->driver.event
+ && (state->type != ERTS_EV_TYPE_DRV_EV)
+ && !is_iotask_active(&state->driver.event->iotask, current_cio_time)) {
+
+ *free_event = state->driver.event;
+ state->driver.event = NULL;
+ }
+#endif
+
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (((state->type != ERTS_EV_TYPE_NONE)
+ | state->remove_cnt
+#if ERTS_CIO_HAVE_DRV_EVENT
+ | (state->driver.event != NULL)
+#endif
+ | (state->driver.select != NULL)) == 0) {
+
+ hash_erase_drv_ev_state(state);
+
+ }
+#endif
+}
+
+static ERTS_INLINE int
+check_cleanup_active_fd(ErtsSysFdType fd,
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ ErtsPollControlEntry *pce,
+ int *pce_ix,
+#endif
+ erts_aint_t current_cio_time)
+{
+ ErtsDrvEventState *state;
+ int active = 0;
+ erts_smp_mtx_t *mtx = fd_mtx(fd);
+ void *free_select = NULL;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ void *free_event = NULL;
+#endif
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ ErtsPollEvents evon = 0, evoff = 0;
+#endif
+
+ erts_smp_mtx_lock(mtx);
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ state = &drv_ev_state[(int) fd];
+#else
+ state = hash_get_drv_ev_state(fd); /* may be NULL! */
+ if (state)
+#endif
+ {
+ if (state->driver.select) {
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ if (is_iotask_active(&state->driver.select->iniotask, current_cio_time)) {
+ active = 1;
+ if ((state->events & ERTS_POLL_EV_IN)
+ && !(state->flags & ERTS_EV_FLAG_DEFER_IN_EV)) {
+ evoff |= ERTS_POLL_EV_IN;
+ state->flags |= ERTS_EV_FLAG_DEFER_IN_EV;
+ }
+ }
+ else if (state->flags & ERTS_EV_FLAG_DEFER_IN_EV) {
+ if (state->events & ERTS_POLL_EV_IN)
+ evon |= ERTS_POLL_EV_IN;
+ state->flags &= ~ERTS_EV_FLAG_DEFER_IN_EV;
+ }
+ if (is_iotask_active(&state->driver.select->outiotask, current_cio_time)) {
+ active = 1;
+ if ((state->events & ERTS_POLL_EV_OUT)
+ && !(state->flags & ERTS_EV_FLAG_DEFER_OUT_EV)) {
+ evoff |= ERTS_POLL_EV_OUT;
+ state->flags |= ERTS_EV_FLAG_DEFER_OUT_EV;
+ }
+ }
+ else if (state->flags & ERTS_EV_FLAG_DEFER_OUT_EV) {
+ if (state->events & ERTS_POLL_EV_OUT)
+ evon |= ERTS_POLL_EV_OUT;
+ state->flags &= ~ERTS_EV_FLAG_DEFER_OUT_EV;
+ }
+ if (active)
+ (void) 0;
+ else
+#else
+ if (is_iotask_active(&state->driver.select->iniotask, current_cio_time)
+ || is_iotask_active(&state->driver.select->outiotask, current_cio_time))
+ active = 1;
+ else
+#endif
+ if (state->type != ERTS_EV_TYPE_DRV_SEL) {
+ free_select = state->driver.select;
+ state->driver.select = NULL;
+ }
+ }
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (state->driver.event) {
+ if (is_iotask_active(&state->driver.event->iotask, current_cio_time)) {
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ ErtsPollEvents evs = state->events & ~state->driver.event->deferred_events;
+ if (evs) {
+ evoff |= evs;
+ state->driver.event->deferred_events |= evs;
+ }
+#endif
+ active = 1;
+ }
+ else if (state->type != ERTS_EV_TYPE_DRV_EV) {
+ free_event = state->driver.event;
+ state->driver.event = NULL;
+ }
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ else {
+ ErtsPollEvents evs = state->events & state->driver.event->deferred_events;
+ if (evs) {
+ evon |= evs;
+ state->driver.event->deferred_events = 0;
+ }
+ }
+#endif
+
+ }
+#endif
+
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (((state->type != ERTS_EV_TYPE_NONE) | state->remove_cnt | active) == 0)
+ hash_erase_drv_ev_state(state);
+#endif
+
+ }
+
+ erts_smp_mtx_unlock(mtx);
+
+ if (free_select)
+ free_drv_select_data(free_select);
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (free_event)
+ free_drv_event_data(free_event);
+#endif
+
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ if (evoff) {
+ ErtsPollControlEntry *pcep = &pce[(*pce_ix)++];
+ pcep->fd = fd;
+ pcep->events = evoff;
+ pcep->on = 0;
+ }
+ if (evon) {
+ ErtsPollControlEntry *pcep = &pce[(*pce_ix)++];
+ pcep->fd = fd;
+ pcep->events = evon;
+ pcep->on = 1;
+ }
+#endif
+
+ return active;
+}
+
+static void
+check_cleanup_active_fds(erts_aint_t current_cio_time)
+{
+ int six = pollset.active_fd.six;
+ int eix = pollset.active_fd.eix;
+ erts_aint32_t no = erts_smp_atomic32_read_dirty(&pollset.active_fd.no);
+ int size = pollset.active_fd.size;
+ int ix = six;
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ /* every fd might add two entries */
+ Uint pce_sz = 2*sizeof(ErtsPollControlEntry)*no;
+ ErtsPollControlEntry *pctrl_entries = (pce_sz
+ ? erts_alloc(ERTS_ALC_T_TMP, pce_sz)
+ : NULL);
+ int pctrl_ix = 0;
+#endif
+
+ while (ix != eix) {
+ ErtsSysFdType fd = pollset.active_fd.array[ix];
+ int nix = ix + 1;
+ if (nix >= size)
+ nix = 0;
+ ASSERT(fd != ERTS_SYS_FD_INVALID);
+ if (!check_cleanup_active_fd(fd,
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ pctrl_entries,
+ &pctrl_ix,
+#endif
+ current_cio_time)) {
+ no--;
+ if (ix == six) {
+#ifdef DEBUG
+ pollset.active_fd.array[ix] = ERTS_SYS_FD_INVALID;
+#endif
+ six = nix;
+ }
+ else {
+ pollset.active_fd.array[ix] = pollset.active_fd.array[six];
+#ifdef DEBUG
+ pollset.active_fd.array[six] = ERTS_SYS_FD_INVALID;
+#endif
+ six++;
+ if (six >= size)
+ six = 0;
+ }
+ }
+ ix = nix;
+ }
+
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ ASSERT(pctrl_ix <= pce_sz/sizeof(ErtsPollControlEntry));
+ if (pctrl_ix)
+ ERTS_CIO_POLL_CTLV(pollset.ps, pctrl_entries, pctrl_ix);
+ if (pctrl_entries)
+ erts_free(ERTS_ALC_T_TMP, pctrl_entries);
+#endif
+
+ pollset.active_fd.six = six;
+ pollset.active_fd.eix = eix;
+ erts_smp_atomic32_set_relb(&pollset.active_fd.no, no);
+}
+
+static ERTS_INLINE void
+add_active_fd(ErtsSysFdType fd)
+{
+ int eix = pollset.active_fd.eix;
+ int size = pollset.active_fd.size;
+
+
+ pollset.active_fd.array[eix] = fd;
+
+ erts_smp_atomic32_set_relb(&pollset.active_fd.no,
+ (erts_smp_atomic32_read_dirty(&pollset.active_fd.no)
+ + 1));
+
+ eix++;
+ if (eix >= size)
+ eix = 0;
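+    /* eix has caught up with six, i.e. the circular buffer is full;
+     * treat the old contents as filling slots 0..size-1 and grow the
+     * array.
+     */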
+ if (pollset.active_fd.six == eix) {
+ pollset.active_fd.six = 0;
+ eix = size;
+ size += ERTS_ACTIVE_FD_INC;
+ pollset.active_fd.array = erts_realloc(ERTS_ALC_T_ACTIVE_FD_ARR,
+ pollset.active_fd.array,
+ sizeof(ErtsSysFdType)*size);
+ pollset.active_fd.size = size;
+#ifdef DEBUG
+ {
+ int i;
+ for (i = eix + 1; i < size; i++)
+ pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID;
+ }
+#endif
+
+ }
+
+ pollset.active_fd.eix = eix;
+}
int
ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
@@ -492,6 +853,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ErtsDrvEventState *state;
int wake_poller;
int ret;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ ErtsDrvEventDataState *free_event = NULL;
+#endif
+ ErtsDrvSelectDataState *free_select = NULL;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(name, 64);
#endif
@@ -527,7 +892,8 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
/* fast track to stop_select callback */
stop_select_fn = prt->drv_ptr->stop_select;
#ifdef USE_VM_PROBES
- strncpy(name, prt->drv_ptr->name, sizeof(name)-1);
+ strncpy(name, prt->drv_ptr->name,
+ sizeof(DTRACE_CHARBUF_NAME(name))-1);
name[sizeof(name)-1] = '\0';
#endif
ret = 0;
@@ -592,9 +958,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (new_events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
if (state->type == ERTS_EV_TYPE_DRV_SEL && !state->events) {
state->type = ERTS_EV_TYPE_NONE;
- state->flags = 0;
- erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, state->driver.select);
- state->driver.select = NULL;
+ state->flags &= ~ERTS_EV_FLAG_USED;
+ state->driver.select->inport = NIL;
+ state->driver.select->outport = NIL;
}
ret = -1;
goto done;
@@ -612,18 +978,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
state->events = new_events;
if (ctl_events) {
if (on) {
- if (state->type == ERTS_EV_TYPE_NONE) {
- ErtsDrvSelectDataState *dsdsp
- = erts_alloc(ERTS_ALC_T_DRV_SEL_D_STATE,
- sizeof(ErtsDrvSelectDataState));
- dsdsp->inport = NIL;
- dsdsp->outport = NIL;
- erts_port_task_handle_init(&dsdsp->intask);
- erts_port_task_handle_init(&dsdsp->outtask);
- ASSERT(state->driver.select == NULL);
- state->driver.select = dsdsp;
+ if (!state->driver.select)
+ state->driver.select = alloc_drv_select_data();
+ if (state->type == ERTS_EV_TYPE_NONE)
state->type = ERTS_EV_TYPE_DRV_SEL;
- }
ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
if (ctl_events & ERTS_POLL_EV_IN)
state->driver.select->inport = id;
@@ -644,17 +1002,12 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
state->driver.select->outport = NIL;
}
if (new_events == 0) {
- ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask));
- ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask));
if (old_events != 0) {
remember_removed(state, &pollset);
}
if ((mode & ERL_DRV_USE) || !(state->flags & ERTS_EV_FLAG_USED)) {
state->type = ERTS_EV_TYPE_NONE;
- state->flags = 0;
- erts_free(ERTS_ALC_T_DRV_SEL_D_STATE,
- state->driver.select);
- state->driver.select = NULL;
+ state->flags &= ~ERTS_EV_FLAG_USED;
}
/*else keep it, as fd will probably be selected upon again */
}
@@ -685,13 +1038,15 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ret = 0;
-done:;
-#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) {
- hash_erase_drv_ev_state(state);
- }
+done:
+
+ check_fd_cleanup(state,
+#if ERTS_CIO_HAVE_DRV_EVENT
+ &free_event,
#endif
-done_unknown:
+ &free_select);
+
+done_unknown:
erts_smp_mtx_unlock(fd_mtx(fd));
if (stop_select_fn) {
int was_unmasked = erts_block_fpe();
@@ -699,6 +1054,12 @@ done_unknown:
(*stop_select_fn)(e, NULL);
erts_unblock_fpe(was_unmasked);
}
+ if (free_select)
+ free_drv_select_data(free_select);
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (free_event)
+ free_drv_event_data(free_event);
+#endif
return ret;
}
@@ -718,6 +1079,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErtsDrvEventState *state;
int do_wake = 0;
int ret;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ ErtsDrvEventDataState *free_event;
+#endif
+ ErtsDrvSelectDataState *free_select;
Port *prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
@@ -798,10 +1163,8 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
state->driver.event->removed_events |= remove_events;
}
else {
- state->driver.event
- = erts_alloc(ERTS_ALC_T_DRV_EV_D_STATE,
- sizeof(ErtsDrvEventDataState));
- erts_port_task_handle_init(&state->driver.event->task);
+ if (!state->driver.event)
+ state->driver.event = alloc_drv_event_data();
state->driver.event->port = id;
state->driver.event->removed_events = (ErtsPollEvents) 0;
state->type = ERTS_EV_TYPE_DRV_EV;
@@ -811,10 +1174,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
else {
if (state->type == ERTS_EV_TYPE_DRV_EV) {
abort_tasks(state, 0);
- erts_free(ERTS_ALC_T_DRV_EV_D_STATE,
- state->driver.event);
+ state->driver.event->port = NIL;
+ state->driver.event->data = NULL;
+ state->driver.event->removed_events = (ErtsPollEvents) 0;
}
- state->driver.select = NULL;
state->type = ERTS_EV_TYPE_NONE;
remember_removed(state, &pollset);
}
@@ -824,12 +1187,22 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ret = 0;
done:
-#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) {
- hash_erase_drv_ev_state(state);
- }
+
+ check_fd_cleanup(state,
+#if ERTS_CIO_HAVE_DRV_EVENT
+ &free_event,
#endif
+ &free_select);
+
erts_smp_mtx_unlock(fd_mtx(fd));
+
+ if (free_select)
+ free_drv_select_data(free_select);
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (free_event)
+ free_drv_event_data(free_event);
+#endif
+
return ret;
#endif
}
@@ -1026,7 +1399,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
* In either case stop_select should not be called.
*/
state->type = ERTS_EV_TYPE_NONE;
- state->flags = 0;
+ state->flags &= ~ERTS_EV_FLAG_USED;
if (state->driver.drv_ptr->handle) {
erts_ddll_dereference_driver(state->driver.drv_ptr->handle);
}
@@ -1098,38 +1471,103 @@ event_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data
#endif
#endif
+static ERTS_INLINE int
+io_task_schedule_allowed(ErtsDrvEventState *state,
+ ErtsPortTaskType type,
+ erts_aint_t current_cio_time)
+{
+ ErtsIoTask *io_task;
+
+ switch (type) {
+ case ERTS_PORT_TASK_INPUT:
+ if (!state->driver.select)
+ return 0;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (state->driver.event)
+ return 0;
+#endif
+ io_task = &state->driver.select->iniotask;
+ break;
+ case ERTS_PORT_TASK_OUTPUT:
+ if (!state->driver.select)
+ return 0;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (state->driver.event)
+ return 0;
+#endif
+ io_task = &state->driver.select->outiotask;
+ break;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_PORT_TASK_EVENT:
+ if (!state->driver.event)
+ return 0;
+ if (state->driver.select)
+ return 0;
+ io_task = &state->driver.event->iotask;
+ break;
+#endif
+ default:
+ ERTS_INTERNAL_ERROR("Invalid I/O-task type");
+ return 0;
+ }
+
+ return !is_iotask_active(io_task, current_cio_time);
+}
+
static ERTS_INLINE void
-iready(Eterm id, ErtsDrvEventState *state)
+iready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time)
{
- if (erts_port_task_schedule(id,
- &state->driver.select->intask,
- ERTS_PORT_TASK_INPUT,
- (ErlDrvEvent) state->fd) != 0) {
- stale_drv_select(id, state, ERL_DRV_READ);
+ if (io_task_schedule_allowed(state,
+ ERTS_PORT_TASK_INPUT,
+ current_cio_time)) {
+ ErtsIoTask *iotask = &state->driver.select->iniotask;
+ erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time);
+ if (erts_port_task_schedule(id,
+ &iotask->task,
+ ERTS_PORT_TASK_INPUT,
+ (ErlDrvEvent) state->fd) != 0) {
+ stale_drv_select(id, state, ERL_DRV_READ);
+ }
+ add_active_fd(state->fd);
}
}
static ERTS_INLINE void
-oready(Eterm id, ErtsDrvEventState *state)
+oready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time)
{
- if (erts_port_task_schedule(id,
- &state->driver.select->outtask,
- ERTS_PORT_TASK_OUTPUT,
- (ErlDrvEvent) state->fd) != 0) {
- stale_drv_select(id, state, ERL_DRV_WRITE);
+ if (io_task_schedule_allowed(state,
+ ERTS_PORT_TASK_OUTPUT,
+ current_cio_time)) {
+ ErtsIoTask *iotask = &state->driver.select->outiotask;
+ erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time);
+ if (erts_port_task_schedule(id,
+ &iotask->task,
+ ERTS_PORT_TASK_OUTPUT,
+ (ErlDrvEvent) state->fd) != 0) {
+ stale_drv_select(id, state, ERL_DRV_WRITE);
+ }
+ add_active_fd(state->fd);
}
}
#if ERTS_CIO_HAVE_DRV_EVENT
static ERTS_INLINE void
-eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
+eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data,
+ erts_aint_t current_cio_time)
{
- if (erts_port_task_schedule(id,
- &state->driver.event->task,
- ERTS_PORT_TASK_EVENT,
- (ErlDrvEvent) state->fd,
- event_data) != 0) {
- stale_drv_select(id, state, 0);
+ if (io_task_schedule_allowed(state,
+ ERTS_PORT_TASK_EVENT,
+ current_cio_time)) {
+ ErtsIoTask *iotask = &state->driver.event->iotask;
+ erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time);
+ if (erts_port_task_schedule(id,
+ &iotask->task,
+ ERTS_PORT_TASK_EVENT,
+ (ErlDrvEvent) state->fd,
+ event_data) != 0) {
+ stale_drv_select(id, state, 0);
+ }
+ add_active_fd(state->fd);
}
}
#endif
@@ -1160,10 +1598,11 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
void
ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
{
- ErtsPollResFd pollres[256];
+ ErtsPollResFd *pollres;
int pollres_len;
SysTimeval wait_time;
int poll_ret, i;
+ erts_aint_t current_cio_time;
restart:
@@ -1180,10 +1619,24 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
wait_time.tv_usec = 0;
}
+ /*
+ * No need for an atomic inc op when incrementing
+ * erts_check_io_time, since only one thread can
+ * check io at a time.
+ */
+ current_cio_time = erts_smp_atomic_read_dirty(&erts_check_io_time);
+ current_cio_time++;
+ erts_smp_atomic_set_relb(&erts_check_io_time, current_cio_time);
+
+ check_cleanup_active_fds(current_cio_time);
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
+
+ pollres_len = erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN;
+
+ pollres = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollResFd)*pollres_len);
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
@@ -1203,6 +1656,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
if (poll_ret != 0) {
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
forget_removed(&pollset);
+ erts_free(ERTS_ALC_T_TMP, pollres);
if (poll_ret == EAGAIN) {
goto restart;
}
@@ -1262,15 +1716,15 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
if ((revents & ERTS_POLL_EV_IN)
|| (!(revents & ERTS_POLL_EV_OUT)
&& state->events & ERTS_POLL_EV_IN)) {
- iready(state->driver.select->inport, state);
+ iready(state->driver.select->inport, state, current_cio_time);
}
else if (state->events & ERTS_POLL_EV_OUT) {
- oready(state->driver.select->outport, state);
+ oready(state->driver.select->outport, state, current_cio_time);
}
}
else if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
if (revents & ERTS_POLL_EV_OUT) {
- oready(state->driver.select->outport, state);
+ oready(state->driver.select->outport, state, current_cio_time);
}
/* Someone might have deselected input since revents
was read (true also on the non-smp emulator since
@@ -1278,7 +1732,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
revents... */
revents &= ~(~state->events & ERTS_POLL_EV_IN);
if (revents & ERTS_POLL_EV_IN) {
- iready(state->driver.select->inport, state);
+ iready(state->driver.select->inport, state, current_cio_time);
}
}
else if (revents & ERTS_POLL_EV_NVAL) {
@@ -1286,6 +1740,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
state->driver.select->inport,
state->driver.select->outport,
state->events);
+ add_active_fd(state->fd);
}
break;
}
@@ -1303,8 +1758,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
if (revents) {
event_data->events = state->events;
event_data->revents = revents;
-
- eready(state->driver.event->port, state, event_data);
+ eready(state->driver.event->port, state, event_data, current_cio_time);
}
break;
}
@@ -1322,6 +1776,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
(int) state->type);
ASSERT(0);
deselect(state, 0);
+ add_active_fd(state->fd);
break;
}
}
@@ -1333,6 +1788,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
}
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
+ erts_free(ERTS_ALC_T_TMP, pollres);
forget_removed(&pollset);
}
@@ -1468,10 +1924,27 @@ static void drv_ev_state_free(void *des)
void
ERTS_CIO_EXPORT(erts_init_check_io)(void)
{
+ erts_smp_atomic_init_nob(&erts_check_io_time, 0);
erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0);
+
ERTS_CIO_POLL_INIT();
pollset.ps = ERTS_CIO_NEW_POLLSET();
+ pollset.active_fd.six = 0;
+ pollset.active_fd.eix = 0;
+ erts_smp_atomic32_init_nob(&pollset.active_fd.no, 0);
+ pollset.active_fd.size = ERTS_ACTIVE_FD_INC;
+ pollset.active_fd.array = erts_alloc(ERTS_ALC_T_ACTIVE_FD_ARR,
+ sizeof(ErtsSysFdType)*ERTS_ACTIVE_FD_INC);
+#ifdef DEBUG
+ {
+ int i;
+ for (i = 0; i < ERTS_ACTIVE_FD_INC; i++)
+ pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID;
+ }
+#endif
+
+
#ifdef ERTS_SMP
init_removed_fd_alloc();
pollset.removed_list = NULL;
@@ -1547,12 +2020,27 @@ Eterm
ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
{
Process *p = (Process *) proc;
- Eterm tags[15], values[15], res;
+ Eterm tags[16], values[16], res;
Uint sz, *szp, *hp, **hpp, memory_size;
Sint i;
ErtsPollInfo pi;
-
- ERTS_CIO_POLL_INFO(pollset.ps, &pi);
+ erts_aint_t cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time);
+ int active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no);
+
+ while (1) {
+ erts_aint_t post_cio_time;
+ int post_active_fds;
+
+ ERTS_CIO_POLL_INFO(pollset.ps, &pi);
+
+ post_cio_time = erts_smp_atomic_read_mb(&erts_check_io_time);
+ post_active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no);
+ if (cio_time == post_cio_time && active_fds == post_active_fds)
+ break;
+ cio_time = post_cio_time;
+ active_fds = post_active_fds;
+ }
+
memory_size = pi.memory_size;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
@@ -1616,6 +2104,9 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
tags[i] = erts_bld_atom(hpp, szp, "max_fds");
values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.max_fds);
+ tags[i] = erts_bld_atom(hpp, szp, "active_fds");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) active_fds);
+
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
tags[i] = erts_bld_atom(hpp, szp, "no_avoided_wakeups");
values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_wakeups);
@@ -1670,6 +2161,8 @@ print_events(ErtsPollEvents ev)
typedef struct {
int used_fds;
int num_errors;
+ int no_driver_select_structs;
+ int no_driver_event_structs;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
int internal_fds;
ErtsPollEvents *epep;
@@ -1692,6 +2185,13 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
struct stat stat_buf;
#endif
+ if (state->driver.select)
+ counters->no_driver_select_structs++;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (state->driver.event)
+ counters->no_driver_event_structs++;
+#endif
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if (state->events || ep_events) {
if (ep_events & ERTS_POLL_EV_NVAL) {
@@ -1830,6 +2330,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
}
}
}
+#if ERTS_CIO_HAVE_DRV_EVENT
else if (state->type == ERTS_EV_TYPE_DRV_EV) {
Eterm id;
erts_printf("driver_event ");
@@ -1865,6 +2366,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
erts_free_port_names(pnp);
}
}
+#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
else if (internal) {
erts_printf("internal ");
@@ -1904,7 +2406,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters)
}
int
-ERTS_CIO_EXPORT(erts_check_io_debug)(void)
+ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip)
{
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
int fd, len;
@@ -1914,6 +2416,10 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
ErtsDrvEventState null_des;
null_des.driver.select = NULL;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ null_des.driver.event = NULL;
+#endif
+ null_des.driver.drv_ptr = NULL;
null_des.events = 0;
null_des.remove_cnt = 0;
null_des.type = ERTS_EV_TYPE_NONE;
@@ -1934,6 +2440,8 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
#endif
counters.used_fds = 0;
counters.num_errors = 0;
+ counters.no_driver_select_structs = 0;
+ counters.no_driver_event_structs = 0;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
len = erts_smp_atomic_read_nob(&drv_ev_state_len);
@@ -1950,8 +2458,16 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
erts_smp_thr_progress_unblock();
+ ciodip->no_used_fds = counters.used_fds;
+ ciodip->no_driver_select_structs = counters.no_driver_select_structs;
+ ciodip->no_driver_event_structs = counters.no_driver_event_structs;
+
erts_printf("\n");
erts_printf("used fds=%d\n", counters.used_fds);
+ erts_printf("Number of driver_select() structures=%d\n", counters.no_driver_select_structs);
+#if ERTS_CIO_HAVE_DRV_EVENT
+ erts_printf("Number of driver_event() structures=%d\n", counters.no_driver_event_structs);
+#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
erts_printf("internal fds=%d\n", counters.internal_fds);
#endif
@@ -1960,6 +2476,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
erts_free(ERTS_ALC_T_TMP, (void *) counters.epep);
#endif
+
return counters.num_errors;
}
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index edab7947ba..d01297d55c 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -26,6 +26,7 @@
#ifndef ERL_CHECK_IO_H__
#define ERL_CHECK_IO_H__
+#include "sys.h"
#include "erl_sys_driver.h"
#ifdef ERTS_ENABLE_KERNEL_POLL
@@ -52,8 +53,8 @@ void erts_check_io_kp(int);
void erts_check_io_nkp(int);
void erts_init_check_io_kp(void);
void erts_init_check_io_nkp(void);
-int erts_check_io_debug_kp(void);
-int erts_check_io_debug_nkp(void);
+int erts_check_io_debug_kp(ErtsCheckIoDebugInfo *);
+int erts_check_io_debug_nkp(ErtsCheckIoDebugInfo *);
#else /* !ERTS_ENABLE_KERNEL_POLL */
@@ -70,6 +71,27 @@ void erts_init_check_io(void);
#endif
+extern erts_smp_atomic_t erts_check_io_time;
+
+typedef struct {
+ ErtsPortTaskHandle task;
+ erts_smp_atomic_t executed_time;
+} ErtsIoTask;
+
+ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp)
+{
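+    /* pthp is the address of the 'task' field embedded in an ErtsIoTask;
+     * step back to the enclosing struct to reach its 'executed_time' field.
+     */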
+ ErtsIoTask *itp = (ErtsIoTask *) (((char *) pthp) - offsetof(ErtsIoTask, task));
+ erts_aint_t ci_time = erts_smp_atomic_read_acqb(&erts_check_io_time);
+ erts_smp_atomic_set_relb(&itp->executed_time, ci_time);
+}
+
+#endif
+
#endif /* ERL_CHECK_IO_H__ */
#if !defined(ERL_CHECK_IO_C__) && !defined(ERTS_ALLOC_C__)
@@ -81,6 +103,16 @@ void erts_init_check_io(void);
#include "erl_poll.h"
#include "erl_port_task.h"
+#ifdef __WIN32__
+/*
+ * Current erts_poll implementation for Windows cannot handle
+ * active events in the set of events polled.
+ */
+# define ERTS_CIO_DEFER_ACTIVE_EVENTS 1
+#else
+# define ERTS_CIO_DEFER_ACTIVE_EVENTS 0
+#endif
+
/*
* ErtsDrvEventDataState is used by driver_event() which is almost never
* used. We allocate ErtsDrvEventDataState separate since we dont wan't
@@ -91,13 +123,16 @@ typedef struct {
Eterm port;
ErlDrvEventData data;
ErtsPollEvents removed_events;
- ErtsPortTaskHandle task;
+#if ERTS_CIO_DEFER_ACTIVE_EVENTS
+ ErtsPollEvents deferred_events;
+#endif
+ ErtsIoTask iotask;
} ErtsDrvEventDataState;
typedef struct {
Eterm inport;
Eterm outport;
- ErtsPortTaskHandle intask;
- ErtsPortTaskHandle outtask;
+ ErtsIoTask iniotask;
+ ErtsIoTask outiotask;
} ErtsDrvSelectDataState;
#endif /* #ifndef ERL_CHECK_IO_INTERNAL__ */
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 0a58a625b2..aa412a20c8 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -2157,7 +2157,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Entering erts_poll_wait(), timeout=%d\n",
- (int) tv->tv_sec*1000 + tv->tv_usec/1000);
+ (int) tvp->tv_sec*1000 + tvp->tv_usec/1000);
#endif
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c
index e3ba741058..e63f0bda54 100644
--- a/erts/emulator/sys/common/erl_sys_common_misc.c
+++ b/erts/emulator/sys/common/erl_sys_common_misc.c
@@ -44,6 +44,14 @@
#endif
#endif
+/*
+ * erts_check_io_time is used by the erl_check_io implementation. The
+ * global erts_check_io_time variable is declared here since there
+ * (often) exist two versions of erl_check_io (kernel-poll and
+ * non-kernel-poll), and we don't want two versions of this variable.
+ */
+erts_smp_atomic_t erts_check_io_time;
+
/* Written once and only once */
static int filename_encoding = ERL_FILENAME_UNKNOWN;
diff --git a/erts/emulator/sys/ose/default.lmconf b/erts/emulator/sys/ose/beam.lmconf
index a66b0ece56..4ad46b01d9 100644
--- a/erts/emulator/sys/ose/default.lmconf
+++ b/erts/emulator/sys/ose/beam.lmconf
@@ -4,12 +4,13 @@ OSE_LM_POOL_SIZE=0x200000
OSE_LM_MAIN_NAME=main
OSE_LM_MAIN_STACK_SIZE=0xF000
OSE_LM_MAIN_PRIORITY=20
+## Has to be of a type that allows MAM
OSE_LM_PROGRAM_TYPE=APP_RAM
OSE_LM_DATA_INIT=YES
OSE_LM_BSS_INIT=YES
OSE_LM_EXEC_MODEL=SHARED
HEAP_MAX_SIZE=1000000000
-HEAP_SMALL_BUF_INIT_SIZE=64000000
+HEAP_SMALL_BUF_INIT_SIZE=20971520
HEAP_LARGE_BUF_THRESHOLD=16000000
HEAP_LOCK_TYPE=2
diff --git a/erts/emulator/sys/ose/erl_main.c b/erts/emulator/sys/ose/erl_main.c
index 03119c3fec..23a9bc93a4 100644
--- a/erts/emulator/sys/ose/erl_main.c
+++ b/erts/emulator/sys/ose/erl_main.c
@@ -30,6 +30,8 @@
int
main(int argc, char **argv) {
+ (void)stdin;(void)stdout;(void)stderr;
+
/* When starting using pm_create -c ARGV="-- -root ..", argv[0] is the first
part of ARGV and not the name of the executable. So we shuffle some
pointers here to make erl_start happy. */
diff --git a/erts/emulator/sys/ose/erl_poll.c b/erts/emulator/sys/ose/erl_poll.c
index ca1ed6e53a..7d2a3d1e0b 100644
--- a/erts/emulator/sys/ose/erl_poll.c
+++ b/erts/emulator/sys/ose/erl_poll.c
@@ -551,7 +551,12 @@ int erts_poll_wait(ErtsPollSet ps,
fd.id, fd.signo, current_process());
erts_send_error_to_logger_nogl(dsbufp);
timeout = 0;
- ASSERT(0);
+ /* Under normal circumstances the signal is deallocated by the
+ * driver that issued the select operation. But in this case
+             * there's no driver waiting for such a signal, so we have to
+ * deallocate it here */
+ if (sig)
+ free_buf(&sig);
} else {
int i;
struct erts_sys_fd_type *fd = NULL;
@@ -737,6 +742,7 @@ union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent drv_ev) {
ev->msgs = msg->next;
ethr_mutex_unlock(&ev->mtx);
erts_free(ERTS_ALC_T_FD_SIG_LIST,msg);
+ restore(sig);
return sig;
}
}
diff --git a/erts/emulator/sys/ose/sys.c b/erts/emulator/sys/ose/sys.c
index c892cc69c7..5b950a7dae 100644
--- a/erts/emulator/sys/ose/sys.c
+++ b/erts/emulator/sys/ose/sys.c
@@ -195,7 +195,9 @@ static volatile int children_died;
write_buff += sizeof(struct aiocb *); \
memcpy(write_buff,BUFF,SIZE+1); \
SET_AIO(*write_req,FD,SIZE,write_buff); \
- aio_write(write_req); \
+ if (aio_write(write_req)) \
+ ramlog_printf("%s:%d: write failed with %d\n", \
+ __FILE__,__LINE__,errno); \
} \
} while(0)
@@ -210,13 +212,13 @@ static volatile int children_died;
driver_free(buffer_ptr); \
} while(0)
-/* When we have several schedulers, we need to make sure
- * that scheduler issuing aio_dispatch() is the owner on the signal */
#define DISPATCH_AIO(sig) do { \
- restore(sig); \
- aio_dispatch(sig); \
+ if (aio_dispatch(sig)) \
+ ramlog_printf("%s:%d: dispatch failed with %d\n", \
+ __FILE__,__LINE__,errno); \
} while(0)
+#define AIO_PIPE_SIZE 1024
/* debug print macros */
#define DEBUG_RES 0
@@ -371,6 +373,63 @@ thr_create_prepare_child(void *vtcdp)
#endif /* #ifdef USE_THREADS */
+/* The two functions below are stolen from win_con.c.
+   They have to use malloc/free/realloc directly because
+   we want to be able to do erts_printf very early on.
+ */
+#define VPRINTF_BUF_INC_SIZE 128
+static erts_dsprintf_buf_t *
+grow_vprintf_buf(erts_dsprintf_buf_t *dsbufp, size_t need)
+{
+ char *buf;
+ size_t size;
+
+ ASSERT(dsbufp);
+
+ if (!dsbufp->str) {
+ size = (((need + VPRINTF_BUF_INC_SIZE - 1)
+ / VPRINTF_BUF_INC_SIZE)
+ * VPRINTF_BUF_INC_SIZE);
+ buf = (char *) malloc(size * sizeof(char));
+ }
+ else {
+ size_t free_size = dsbufp->size - dsbufp->str_len;
+
+ if (need <= free_size)
+ return dsbufp;
+
+ size = need - free_size + VPRINTF_BUF_INC_SIZE;
+ size = (((size + VPRINTF_BUF_INC_SIZE - 1)
+ / VPRINTF_BUF_INC_SIZE)
+ * VPRINTF_BUF_INC_SIZE);
+ size += dsbufp->size;
+ buf = (char *) realloc((void *) dsbufp->str,
+ size * sizeof(char));
+ }
+ if (!buf)
+ return NULL;
+ if (buf != dsbufp->str)
+ dsbufp->str = buf;
+ dsbufp->size = size;
+ return dsbufp;
+}
+
+static int erts_sys_ramlog_printf(char *format, va_list arg_list)
+{
+ int res,i;
+ erts_dsprintf_buf_t dsbuf = ERTS_DSPRINTF_BUF_INITER(grow_vprintf_buf);
+ res = erts_vdsprintf(&dsbuf, format, arg_list);
+ if (res >= 0) {
+ for (i = 0; i < dsbuf.str_len; i+= 50)
+ /* We print 50 characters at a time because otherwise
+ the ramlog looks broken */
+ ramlog_printf("%.*s",dsbuf.str_len-50 < 0?dsbuf.str_len:50,dsbuf.str+i);
+ }
+ if (dsbuf.str)
+ free((void *) dsbuf.str);
+ return res;
+}
+
void
erts_sys_pre_init(void)
{
@@ -409,6 +468,9 @@ erts_sys_pre_init(void)
children_died = 0;
#endif
#endif /* USE_THREADS */
+
+ erts_printf_stdout_func = erts_sys_ramlog_printf;
+
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
@@ -650,7 +712,7 @@ static void stop_select(ErlDrvEvent, void*);
static PROCESS
get_signal_proxy_pid(void) {
union SIGNAL *sig;
- SIGSELECT any_sig[] = {0};
+ SIGSELECT any_sig[] = {1,ERTS_SIGNAL_OSE_DRV_ATTACH};
if (!sig_proxy_pid) {
sig = alloc(sizeof(union SIGNAL), ERTS_SIGNAL_OSE_DRV_ATTACH);
@@ -685,7 +747,7 @@ resolve_signal(union SIGNAL* sig) {
struct erl_drv_entry spawn_driver_entry = {
spawn_init,
spawn_start,
- erl_stop,
+ NULL, /* erl_stop, */
output,
ready_input,
ready_output,
@@ -784,7 +846,11 @@ set_driver_data(ErlDrvPort port_num,
/* READ */
if (read_write & DO_READ) {
- efs_examine_fd(ifd, FLIB_FD_HANDLE, &driver_data[ifd].handle, 0);
+ EfsStatus res = efs_examine_fd(ifd, FLIB_FD_HANDLE,
+ &driver_data[ifd].handle, 0);
+ if (res != EFS_SUCCESS)
+ ramlog_printf("%s:%d: efs_examine_fd(%d) failed with %d\n",
+ __FILE__,__LINE__,ifd,errno);
driver_data[ifd].ifd = ifd;
driver_data[ifd].packet_bytes = packet_bytes;
driver_data[ifd].port_num = port_num;
@@ -792,10 +858,9 @@ set_driver_data(ErlDrvPort port_num,
/* async read struct */
memset(&driver_data[ifd].aiocb, 0, sizeof(struct aiocb));
- driver_data[ifd].aiocb.aio_buf = driver_alloc(255);
+ driver_data[ifd].aiocb.aio_buf = driver_alloc(AIO_PIPE_SIZE);
driver_data[ifd].aiocb.aio_fildes = ifd;
- driver_data[ifd].aiocb.aio_nbytes = 255;
-
+ driver_data[ifd].aiocb.aio_nbytes = (packet_bytes?packet_bytes:AIO_PIPE_SIZE);
driver_data[ifd].alive = 1;
driver_data[ifd].status = 0;
driver_data[ifd].input_event =
@@ -826,7 +891,9 @@ set_driver_data(ErlDrvPort port_num,
(void) driver_select(port_num, driver_data[ifd].input_event,
(ERL_DRV_READ | ERL_DRV_USE), 1);
- aio_read(&driver_data[ifd].aiocb);
+ if (aio_read(&driver_data[ifd].aiocb))
+ ramlog_printf("%s:%d: aio_read(%d) failed with %d\n",
+ __FILE__,__LINE__,ifd,errno);
}
else { /* WRITE ONLY */
efs_examine_fd(ofd, FLIB_FD_HANDLE, &driver_data[ofd].handle, 0);
@@ -926,7 +993,7 @@ spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
{
int ifd[2];
int ofd[2];
- static uint32_t ticker = 0;
+ static uint32_t ticker = 1;
PmStatus pm_status;
OSDOMAIN domain = PM_NEW_DOMAIN;
PROCESS progpid, mainbid, mainpid;
@@ -938,39 +1005,53 @@ spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
int handle_size;
char *ptr;
- /* handle arguments */
- ptr = strchr(name, ' ');
- if (ptr != NULL) {
- *ptr ='\0';
- ptr++;
- args = ptr;
+
+ /* We need to handle name in three parts
+ * - install handle (must be unique)
+ * - install binary (needed for ose_pm_install_load_module())
+ * - full path (as argument to the spawned applications env.var
+ */
+
+ /* full path including arguments */
+ args = driver_alloc(strlen(name)+1);
+ strcpy(args, name);
+
+ /* handle path */
+ tmp_handle = strrchr(name, '/');
+ if (tmp_handle == NULL) {
+ tmp_handle = name;
}
else {
- args = NULL;
+ tmp_handle++;
}
- /* create an install handle */
- ptr = strrchr(name, '/');
+ /* handle args */
+ ptr = strchr(tmp_handle, ' ');
if (ptr != NULL) {
- ptr++;
- tmp_handle = ptr;
+ *ptr = '\0';
+ handle_size = ptr - tmp_handle;
}
else {
- tmp_handle = name;
+ handle_size = strlen(name)+1;
}
- handle_size = strlen(tmp_handle)+1;
- handle_size += (ticker<10)?3:((ticker<100)?4:5);
+ /* make room for ticker */
+ handle_size += (ticker<10)?3:((ticker<100)?4:5);
handle = driver_alloc(handle_size);
- snprintf(handle, handle_size, "%s_%d", tmp_handle, ticker);
-
+
do {
- snprintf(handle, handle_size, "%s_%d", tmp_handle, ticker++);
+ snprintf(handle, handle_size, "%s_%d", tmp_handle, ticker);
pm_status = ose_pm_install_load_module(0, "ELF", name, handle,
0, 0, NULL);
-
+ ticker++;
} while (pm_status == PM_EINSTALL_HANDLE_ALREADY_INSTALLED);
- DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ if (pm_status != PM_SUCCESS) {
+ errno = ENOSYS; /* FIXME add comment */
+ return ERL_DRV_ERROR_ERRNO;
+ }
/* Create Program */
pm_status = ose_pm_create_program(&domain, handle, 0, 0,
@@ -1143,17 +1224,13 @@ static void erl_stop(ErlDrvData drv_data)
if (data->ifd != data->ofd) { /* read and write */
nbio_stop_fd(data->port_num, data->input_event);
nbio_stop_fd(data->port_num, data->output_event);
- driver_select(data->port_num, data->input_event, ERL_DRV_USE, 0);
- driver_select(data->port_num, data->output_event, ERL_DRV_USE, 0);
}
else { /* write only */
nbio_stop_fd(data->port_num, data->output_event);
- driver_select(data->port_num, data->output_event, ERL_DRV_USE, 0);
}
}
else { /* read only */
nbio_stop_fd(data->port_num, data->input_event);
- driver_select(data->port_num, data->input_event, ERL_DRV_USE, 0);
}
close(data->ifd);
close(data->ofd);
@@ -1177,18 +1254,31 @@ static void output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
lbp = lb + (4-(data->packet_bytes));
if ((sz = driver_sizeq(data->port_num)) > 0) {
- driver_enq(data->port_num, lbp, data->packet_bytes);
- driver_enq(data->port_num, buf, len);
- if (sz + len + data->packet_bytes >= (1 << 13))
+ if (data->packet_bytes != 0) {
+ driver_enq(data->port_num, lbp, data->packet_bytes);
+ }
+ driver_enq(data->port_num, buf, len);
+
+ if (sz + len + data->packet_bytes >= (1 << 13))
set_busy_port(data->port_num, 1);
}
else {
- driver_enq(data->port_num, buf, len); /* n is the skip value */
-
+ char *pbbuf;
+ if (data->packet_bytes != 0) {
+ pbbuf = malloc(len + data->packet_bytes);
+ int i;
+ for (i = 0; i < data->packet_bytes; i++) {
+ *pbbuf++ = *lbp++;
+ }
+ strncpy(pbbuf, buf, len);
+ pbbuf -= data->packet_bytes;
+ }
driver_select(data->port_num, data->output_event,
ERL_DRV_WRITE|ERL_DRV_USE, 1);
-
- WRITE_AIO(data->ofd, len, buf);
+ WRITE_AIO(data->ofd,
+ (data->packet_bytes ? len+data->packet_bytes : len),
+ (data->packet_bytes ? pbbuf : buf));
+ if (data->packet_bytes != 0) free(pbbuf);
}
return; /* 0; */
}
@@ -1204,12 +1294,12 @@ static int port_inp_failure(ErlDrvPort port_num, ErlDrvEvent ready_fd, int res)
ASSERT(res <= 0);
erl_drv_ose_event_fetch(ready_fd,&sig_no, NULL, (void **)&fd);
-
/* As we need to handle two signals, we do this in two steps */
if (driver_data[*fd].alive) {
report_exit_status(driver_data[*fd].report_exit, 0); /* status? */
}
else {
+ driver_select(port_num,ready_fd,DO_READ|DO_WRITE,0);
clear_fd_data(*fd);
driver_report_exit(driver_data[*fd].port_num, driver_data[*fd].status);
/* As we do not really know if the spawn has crashed or exited nicely
@@ -1248,6 +1338,10 @@ static void ready_input(ErlDrvData drv_data, ErlDrvEvent ready_fd)
}
else {
res = sig->fm_read_reply.actual;
+ if (res == 0) {
+ port_inp_failure(data->port_num, ready_fd, res);
+ break;
+ }
if (data->packet_bytes == 0) {
if (res < 0) {
@@ -1258,6 +1352,7 @@ static void ready_input(ErlDrvData drv_data, ErlDrvEvent ready_fd)
else if (res == 0) {
/* read of 0 bytes, eof, otherside of pipe is assumed dead */
port_inp_failure(data->port_num, ready_fd, res);
+ break;
}
else {
buf = driver_alloc(res);
@@ -1267,100 +1362,91 @@ static void ready_input(ErlDrvData drv_data, ErlDrvEvent ready_fd)
driver_output(data->port_num, (char*) buf, res);
driver_free(buf);
}
+ /* clear the previous read */
+ memset(data->aiocb.aio_buf, 0, res);
+
+ /* issue a new read */
+ DISPATCH_AIO(sig);
+ aio_read(&data->aiocb);
}
- /* We try to read the remainder */
- else if (fd_data[data->ifd].remain > 0) {
- if (res < 0) {
- if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
- port_inp_failure(data->port_num, ready_fd, res);
+ else if (data->packet_bytes && fd_data[data->ifd].remain > 0) {
+ /* we've read a partial package, or a header */
+
+ if (res == fd_data[data->ifd].remain) { /* we are done! */
+ char *buf = data->aiocb.aio_buf;
+ int i;
+
+ /* do we have anything buffered? */
+ if (fd_data[data->ifd].buf != NULL) {
+ memcpy(fd_data[data->ifd].buf + fd_data[data->ifd].sz,
+ buf, res);
+ buf = fd_data[data->ifd].buf;
}
- }
- else if (res == 0) {
- port_inp_failure(data->port_num, ready_fd, res);
- }
- else if (res == fd_data[data->ifd].remain) { /* we're done */
- driver_output(data->port_num,
- fd_data[data->ifd].buf,
- fd_data[data->ifd].sz);
+
+ fd_data[data->ifd].sz += res;
+ driver_output(data->port_num, buf, (fd_data[data->ifd].sz>0?fd_data[data->ifd].sz:res));
clear_fd_data(data->ifd);
- }
- else { /* if (res < fd_data[fd].remain) */
- fd_data[data->ifd].cpos += res;
- fd_data[data->ifd].remain -= res;
- }
- }
- else if (fd_data[data->ifd].remain == 0) { /* clean fd */
- if (res < 0) {
- if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
- port_inp_failure(data->port_num, ready_fd, res);
+
+ /* clear the previous read */
+ memset(data->aiocb.aio_buf, 0, res);
+
+ /* issue a new read */
+ DISPATCH_AIO(sig);
+ data->aiocb.aio_nbytes = data->packet_bytes;
+
+ if (data->aiocb.aio_buf == NULL) {
+ port_inp_failure(data->port_num, ready_fd, -1);
}
+ aio_read(&data->aiocb);
}
- else if (res == 0) { /* eof */
- port_inp_failure(data->port_num, ready_fd, res);
- }
- else if (res < data->packet_bytes - fd_data[data->ifd].psz) {
- memcpy(fd_data[data->ifd].pbuf+fd_data[data->ifd].psz,
- (void *)data->aiocb.aio_buf, res);
- fd_data[data->ifd].psz += res;
- }
- else { /* if (res >= packet_bytes) */
- unsigned char* cpos = (unsigned char*)data->aiocb.aio_buf;
- int bytes_left = res;
-
- while (1) {
- int psz = fd_data[data->ifd].psz;
- char* pbp = fd_data[data->ifd].pbuf + psz;
-
- while (bytes_left && (psz < data->packet_bytes)) {
- *pbp++ = *cpos++;
- bytes_left--;
- psz++;
- }
-
- if (psz < data->packet_bytes) {
- fd_data[data->ifd].psz = psz;
- break;
- }
- fd_data[data->ifd].psz = 0;
-
- switch (data->packet_bytes) {
- case 1: h = get_int8(fd_data[data->ifd].pbuf); break;
- case 2: h = get_int16(fd_data[data->ifd].pbuf); break;
- case 4: h = get_int32(fd_data[data->ifd].pbuf); break;
- default: ASSERT(0); return; /* -1; */
- }
-
- if (h <= (bytes_left)) {
- driver_output(data->port_num, (char*) cpos, h);
- cpos += h;
- bytes_left -= h;
- continue;
- }
- else { /* The last message we got was split */
- char *buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
- if (!buf) {
- errno = ENOMEM;
- port_inp_failure(data->port_num, ready_fd, -1);
- }
- else {
- erts_smp_atomic_add_nob(&sys_misc_mem_sz, h);
- sys_memcpy(buf, cpos, bytes_left);
- fd_data[data->ifd].buf = buf;
- fd_data[data->ifd].sz = h;
- fd_data[data->ifd].remain = h - bytes_left;
- fd_data[data->ifd].cpos = buf + bytes_left;
- }
- break;
- }
+ else if (res < fd_data[data->ifd].remain) { /* received part of a packet */
+ if (fd_data[data->ifd].sz == 0) {
+
+ fd_data[data->ifd].sz += res;
+ memcpy(fd_data[data->ifd].buf, data->aiocb.aio_buf, res);
+ fd_data[data->ifd].remain -= res;
+ }
+ else {
+ memcpy(fd_data[data->ifd].buf + fd_data[data->ifd].sz,
+ data->aiocb.aio_buf, res);
+ fd_data[data->ifd].sz += res;
+ fd_data[data->ifd].remain -= res;
+ }
+ /* clear the previous read */
+ memset(data->aiocb.aio_buf, 0, res);
+
+ /* issue a new read */
+ DISPATCH_AIO(sig);
+ data->aiocb.aio_nbytes = fd_data[data->ifd].remain;
+
+ if (data->aiocb.aio_buf == NULL) {
+ port_inp_failure(data->port_num, ready_fd, -1);
}
+ aio_read(&data->aiocb);
}
}
+ else if (data->packet_bytes && fd_data[data->ifd].remain == 0) { /* we've received a header */
+
+ /* analyze the header FIXME */
+ switch (data->packet_bytes) {
+ case 1: h = get_int8(data->aiocb.aio_buf); break;
+ case 2: h = get_int16(data->aiocb.aio_buf); break;
+ case 4: h = get_int32(data->aiocb.aio_buf); break;
+ }
- /* reset the read buffer and init next asynch read */
- DISPATCH_AIO(sig);
- memset((void *)data->aiocb.aio_buf, 0, 255);
+ fd_data[data->ifd].buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h + data->packet_bytes);
+ fd_data[data->ifd].remain = ((h + data->packet_bytes) - res);
- if (res > 0) {
+ /* clear the previous read */
+ memset(data->aiocb.aio_buf, 0, data->packet_bytes);
+
+ /* issue a new read */
+ DISPATCH_AIO(sig);
+ data->aiocb.aio_nbytes = h;
+
+ if (data->aiocb.aio_buf == NULL) {
+ port_inp_failure(data->port_num, ready_fd, -1);
+ }
aio_read(&data->aiocb);
}
}
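The ready_input() rework above leans on an asynchronous-read interface in the style of POSIX <aio.h>: an aiocb control block is filled in for the descriptor and a fresh aio_read() is queued after every completed read. As a rough, self-contained illustration using plain POSIX AIO (issue_read and aio_buffer are invented names; the OSE driver's real setup differs):

    #include <aio.h>
    #include <string.h>

    static struct aiocb cb;
    static char aio_buffer[256];

    static int issue_read(int fd)
    {
        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;                 /* descriptor to read from */
        cb.aio_buf    = aio_buffer;         /* filled in on completion */
        cb.aio_nbytes = sizeof(aio_buffer); /* at most this many bytes */
        cb.aio_offset = 0;                  /* not meaningful for pipes */
        return aio_read(&cb);               /* 0 means the request was queued */
    }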
@@ -1400,11 +1486,16 @@ static void ready_output(ErlDrvData drv_data, ErlDrvEvent ready_fd)
DISPATCH_AIO(sig);
FREE_AIO(sig->fm_write_reply.buffer);
res = driver_deq(data->port_num, iov[0].iov_len);
- if (res > 0) {
+ if (res > 0) {
iov = driver_peekq(data->port_num, &vlen);
WRITE_AIO(data->ofd, iov[0].iov_len, iov[0].iov_base);
}
}
+ else if (vlen == 0) {
+ DISPATCH_AIO(sig);
+ FREE_AIO(sig->fm_write_reply.buffer);
+ }
+
}
sig = erl_drv_ose_get_signal(ready_fd);
}
@@ -1650,10 +1741,10 @@ erl_assert_error(const char* expr, const char* func,
{
fflush(stdout);
fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n",
- file, func, line, expr);
+ file, line, func, expr);
fflush(stderr);
ramlog_printf("%s:%d:%s() Assertion failed: %s\n",
- file, func, line, expr);
+ file, line, func, expr);
abort();
}
diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c
index 7c6e4a2f37..5ad92dad02 100644
--- a/erts/emulator/sys/unix/erl_child_setup.c
+++ b/erts/emulator/sys/unix/erl_child_setup.c
@@ -54,6 +54,17 @@ void sys_sigrelease(int sig)
#endif /* !SIG_SIGNAL */
#endif /* !SIG_SIGSET */
+#if defined(__ANDROID__)
+int __system_properties_fd(void);
+#endif /* __ANDROID__ */
+
+#if defined(__ANDROID__)
+#define SHELL "/system/bin/sh"
+#else
+#define SHELL "/bin/sh"
+#endif /* __ANDROID__ */
+
+
int
main(int argc, char *argv[])
{
@@ -89,8 +100,18 @@ main(int argc, char *argv[])
if (sscanf(argv[CS_ARGV_FD_CR_IX], "%d:%d", &from, &to) != 2)
return 1;
+
+#if defined(HAVE_CLOSEFROM)
+ closefrom(from);
+#elif defined(__ANDROID__)
+ for (i = from; i <= to; i++) {
+ if (i != __system_properties_fd())
+ (void) close(i);
+ }
+#else
for (i = from; i <= to; i++)
(void) close(i);
+#endif
if (!(argv[CS_ARGV_WD_IX][0] == '.' && argv[CS_ARGV_WD_IX][1] == '\0')
&& chdir(argv[CS_ARGV_WD_IX]) < 0)
@@ -116,7 +137,23 @@ main(int argc, char *argv[])
execv(argv[CS_ARGV_NO_OF_ARGS],&(argv[CS_ARGV_NO_OF_ARGS + 1]));
}
} else {
- execl("/bin/sh", "sh", "-c", argv[CS_ARGV_CMD_IX], (char *) NULL);
+ execl(SHELL, "sh", "-c", argv[CS_ARGV_CMD_IX], (char *) NULL);
}
return 1;
}
+
+#if defined(__ANDROID__)
+int __system_properties_fd(void)
+{
+ int fd;
+ char *env;
+
+ env = getenv("ANDROID_PROPERTY_WORKSPACE");
+ if (!env) {
+ return -1;
+ }
+ fd = atoi(env);
+ return fd;
+}
+#endif /* __ANDROID__ */
+
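The __system_properties_fd() helper above relies on atoi() stopping at the first non-digit: on the Android releases this patch targets, ANDROID_PROPERTY_WORKSPACE is assumed to hold a value of the form "<fd>,<size>". A hypothetical stand-alone parser for both fields (property_workspace_fd is an invented name, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns the property workspace fd, or -1 if the variable is unset;
     * optionally reports the mapped size as well. */
    static int property_workspace_fd(int *size)
    {
        const char *env = getenv("ANDROID_PROPERTY_WORKSPACE");
        int fd = -1, sz = 0;

        if (env && sscanf(env, "%d,%d", &fd, &sz) >= 1) {
            if (size)
                *size = sz;
            return fd;
        }
        return -1;
    }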
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 176fc049a7..26ed2fb558 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -135,9 +135,6 @@
/* File descriptors are numbers and are allocated consecutively on Unix */
#define ERTS_SYS_CONTINOUS_FD_NUMBERS
-#define HAVE_ERTS_CHECK_IO_DEBUG
-int erts_check_io_debug(void);
-
#ifndef ERTS_SMP
# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
@@ -230,8 +227,24 @@ extern void sys_stop_cat(void);
*/
#ifdef USE_ISINF_ISNAN /* simulate finite() */
-# define finite(f) (!isinf(f) && !isnan(f))
-# define HAVE_FINITE
+# define isfinite(f) (!isinf(f) && !isnan(f))
+# define HAVE_ISFINITE
+#elif (defined(__GNUC__) && !defined(__llvm__)) && defined(HAVE_FINITE)
+/* We use finite in gcc as it emits assembler instead of
+ the function call that isfinite emits. The assembler is
+ significantly faster. */
+# ifdef isfinite
+# undef isfinite
+# endif
+# define isfinite finite
+# ifndef HAVE_ISFINITE
+# define HAVE_ISFINITE
+# endif
+#elif defined(isfinite) && !defined(HAVE_ISFINITE)
+# define HAVE_ISFINITE
+#elif !defined(HAVE_ISFINITE) && defined(HAVE_FINITE)
+# define isfinite finite
+# define HAVE_ISFINITE
#endif
#ifdef NO_FPE_SIGNALS
@@ -241,7 +254,7 @@ extern void sys_stop_cat(void);
#define erts_thread_init_fp_exception() do{}while(0)
#endif
# define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
-# define __ERTS_FP_ERROR(fpexnp, f, Action) if (!finite(f)) { Action; } else {}
+# define __ERTS_FP_ERROR(fpexnp, f, Action) if (!isfinite(f)) { Action; } else {}
# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
# define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
# define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
@@ -305,7 +318,7 @@ static __inline__ void __ERTS_FP_CHECK_INIT(volatile unsigned long *fp_exception
code to always throw floating-point exceptions on errors. */
static __inline__ int erts_check_fpe_thorough(volatile unsigned long *fp_exception, double f)
{
- return erts_check_fpe(fp_exception, f) || !finite(f);
+ return erts_check_fpe(fp_exception, f) || !isfinite(f);
}
# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) \
do { if (erts_check_fpe_thorough((fpexnp),(f))) { Action; } } while (0)
diff --git a/erts/emulator/sys/unix/erl_unix_sys_ddll.c b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
index 8760b58839..2659d623c7 100644
--- a/erts/emulator/sys/unix/erl_unix_sys_ddll.c
+++ b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
@@ -123,6 +123,7 @@ int erts_sys_ddll_open(const char *full_name, void **handle, ErtsSysDdllError* e
int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
{
+#if defined(HAVE_DLOPEN)
int ret = ERL_DE_NO_ERROR;
char *str;
dlerror();
@@ -148,6 +149,9 @@ int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
ret = ERL_DE_DYNAMIC_ERROR_OFFSET - find_errcode(str, err);
}
return ret;
+#else
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+#endif
}
/*
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 865cb50a56..cd87b320e2 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -149,6 +149,13 @@ extern void erl_crash_dump(char* file, int line, char* fmt, ...);
#define DIR_SEPARATOR_CHAR '/'
+#if defined(__ANDROID__)
+#define SHELL "/system/bin/sh"
+#else
+#define SHELL "/bin/sh"
+#endif /* __ANDROID__ */
+
+
#if defined(DEBUG)
#define ERL_BUILD_TYPE_MARKER ".debug"
#elif defined(PURIFY)
@@ -195,8 +202,6 @@ static erts_smp_atomic_t sys_misc_mem_sz;
#if defined(ERTS_SMP)
static void smp_sig_notify(char c);
static int sig_notify_fds[2] = {-1, -1};
-#elif defined(USE_THREADS)
-static int async_fd[2];
#endif
#if CHLDWTHR || defined(ERTS_SMP)
@@ -239,6 +244,8 @@ static void note_child_death(int, int);
static void* child_waiter(void *);
#endif
+static int crashdump_companion_cube_fd = -1;
+
/********************* General functions ****************************/
/* This is used by both the drivers and general I/O, must be set early */
@@ -277,7 +284,7 @@ struct {
void (*check_io)(int);
Uint (*size)(void);
Eterm (*info)(void *);
- int (*check_io_debug)(void);
+ int (*check_io_debug)(ErtsCheckIoDebugInfo *);
} io_func = {0};
@@ -299,9 +306,9 @@ Eterm erts_check_io_info(void *p)
}
int
-erts_check_io_debug(void)
+erts_check_io_debug(ErtsCheckIoDebugInfo *ip)
{
- return (*io_func.check_io_debug)();
+ return (*io_func.check_io_debug)(ip);
}
@@ -568,6 +575,14 @@ erts_sys_pre_init(void)
close(fd);
}
+ /* We need a file descriptor to close in the crashdump creation.
+ * We close this one to be sure we can get a fd for our real file ...
+ * so, we create one here ... a stone to carry all the way home.
+ */
+
+ crashdump_companion_cube_fd = open("/dev/null", O_RDONLY);
+
+ /* don't lose it, there will be cake */
}
void
@@ -712,14 +727,13 @@ static ERTS_INLINE int
prepare_crash_dump(int secs)
{
#define NUFBUF (3)
- int i, max;
+ int i;
char env[21]; /* enough to hold any 64-bit integer */
size_t envsz;
DeclareTmpHeapNoproc(heap,NUFBUF);
Port *heart_port;
Eterm *hp = heap;
Eterm list = NIL;
- int heart_fd[2] = {-1,-1};
int has_heart = 0;
UseTmpHeapNoproc(NUFBUF);
@@ -742,43 +756,22 @@ prepare_crash_dump(int secs)
alarm((unsigned int)secs);
}
- if (heart_port) {
- /* hearts input fd
- * We "know" drv_data is the in_fd since the port is started with read|write
- */
- heart_fd[0] = (int)heart_port->drv_data;
- heart_fd[1] = (int)driver_data[heart_fd[0]].ofd;
- has_heart = 1;
+ /* close all viable sockets via emergency close callbacks.
+ * Specifically we want to close epmd sockets.
+ */
- list = CONS(hp, make_small(8), list); hp += 2;
+ erts_emergency_close_ports();
+ if (heart_port) {
+ has_heart = 1;
+ list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
heart_port->common.id, list, NULL);
}
- /* Make sure we unregister at epmd (unknown fd) and get at least
- one free filedescriptor (for erl_crash.dump) */
-
- max = max_files;
- if (max < 1024)
- max = 1024;
- for (i = 3; i < max; i++) {
-#if defined(ERTS_SMP)
- /* We don't want to close the signal notification pipe... */
- if (i == sig_notify_fds[0] || i == sig_notify_fds[1])
- continue;
-#elif defined(USE_THREADS)
- /* We don't want to close the async notification pipe... */
- if (i == async_fd[0] || i == async_fd[1])
- continue;
-#endif
- /* We don't want to close our heart yet ... */
- if (i == heart_fd[0] || i == heart_fd[1])
- continue;
-
- close(i);
- }
+ /* Make sure we have a fd for our crashdump file. */
+ close(crashdump_companion_cube_fd);
envsz = sizeof(env);
i = erts_sys_getenv__("ERL_CRASH_DUMP_NICE", env, &envsz);
@@ -1567,9 +1560,13 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
goto child_error;
}
+#if defined(HAVE_CLOSEFROM)
+ closefrom(opts->use_stdio ? 3 : 5);
+#else
for (i = opts->use_stdio ? 3 : 5; i < max_files; i++)
(void) close(i);
-
+#endif
+
if (opts->wd && chdir(opts->wd) < 0)
goto child_error;
@@ -1596,7 +1593,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
}
}
} else {
- execle("/bin/sh", "sh", "-c", cmd_line, (char *) NULL, new_environ);
+ execle(SHELL, "sh", "-c", cmd_line, (char *) NULL, new_environ);
}
child_error:
_exit(1);
@@ -1717,7 +1714,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
fcntl(i, F_SETFD, 1);
qnx_spawn_options.flags = _SPAWN_SETSID;
- if ((pid = spawnl(P_NOWAIT, "/bin/sh", "/bin/sh", "-c", cmd_line,
+ if ((pid = spawnl(P_NOWAIT, SHELL, SHELL, "-c", cmd_line,
(char *) 0)) < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
reset_qnx_spawn();
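The crashdump_companion_cube_fd added earlier in this file follows a reserve-an-fd pattern: park a descriptor on /dev/null during start-up and release it just before the file you actually need is opened, so descriptor exhaustion cannot stop the crash dump from being written. A minimal sketch of the pattern under that reading (reserve_fd_early and open_crash_dump are illustrative names, not the emulator's code):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int reserved_fd = -1;

    static void reserve_fd_early(void)
    {
        reserved_fd = open("/dev/null", O_RDONLY);
    }

    static FILE *open_crash_dump(const char *path)
    {
        if (reserved_fd >= 0)
            close(reserved_fd);   /* frees one descriptor for fopen() */
        return fopen(path, "w");
    }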
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 7a1d129cd5..972170d465 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1085,7 +1085,7 @@ void erts_poll_controlv(ErtsPollSet ps,
pcev[i].events,
pcev[i].on);
}
- ERTS_POLLSET_LOCK(ps);
+ ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_controlv"));
}
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 8015e8f378..838f0c61eb 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -60,16 +60,18 @@
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
-/*
- * Define MAXPATHLEN in terms of MAXPATH if available.
- */
-
-#ifndef MAXPATH
-#define MAXPATH MAX_PATH
-#endif /* MAXPATH */
#ifndef MAXPATHLEN
-#define MAXPATHLEN MAXPATH
+#define MAXPATHLEN 4096
+/*
+ erts-6.0 (OTP 17.0):
+ We now accept Windows paths longer than 260 (MAX_PATH) characters by
+ converting them to UNC path format. In order to also return long paths
+ from the driver we increased MAXPATHLEN from 260 to the larger (but
+ arbitrary) value 4096. It would of course be nicer to instead dynamically
+ allocate large enough tmp buffers whenever efile_drv needs to return a
+ really long path, and to do that for unix as well.
+ */
#endif /* MAXPATHLEN */
/*
@@ -111,12 +113,10 @@
/*
* Our own type of "FD's"
*/
+#define ERTS_SYS_FD_INVALID INVALID_HANDLE_VALUE
#define ERTS_SYS_FD_TYPE HANDLE
#define NO_FSTAT_ON_SYS_FD_TYPE 1 /* They are events, not files */
-#define HAVE_ERTS_CHECK_IO_DEBUG
-int erts_check_io_debug(void);
-
/*
* For erl_time_sup
*/
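The MAXPATHLEN comment above refers to the \\?\ extended-length prefix understood by the wide-character Win32 file API. A rough sketch of that conversion for plain drive-letter paths (illustrative only; the efile driver's real routine also has to handle \\server\share paths, which need the \\?\UNC\ form):

    #include <wchar.h>

    #define LONG_PATH_LIMIT 260   /* MAX_PATH */

    static void to_extended_path(const wchar_t *in, wchar_t *out, size_t outlen)
    {
        out[0] = L'\0';
        if (wcslen(in) >= LONG_PATH_LIMIT && wcsncmp(in, L"\\\\?\\", 4) != 0)
            wcsncat(out, L"\\\\?\\", outlen - 1);
        wcsncat(out, in, outlen - wcslen(out) - 1);
    }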
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 0ded6b274e..0ded6b274e 100755..100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 0b0568c31a..dfbe47786a 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -31,6 +31,7 @@ MODULES= \
a_SUITE \
after_SUITE \
alloc_SUITE \
+ async_ports_SUITE \
beam_SUITE \
beam_literals_SUITE \
bif_SUITE \
diff --git a/erts/emulator/test/a_SUITE.erl b/erts/emulator/test/a_SUITE.erl
index 195c9c0a5f..17579be416 100644
--- a/erts/emulator/test/a_SUITE.erl
+++ b/erts/emulator/test/a_SUITE.erl
@@ -97,23 +97,13 @@ display_check_io(ChkIo) ->
catch erlang:display('--- CHECK IO INFO ---'),
catch erlang:display(ChkIo),
catch erts_debug:set_internal_state(available_internal_state, true),
- NoOfErrorFds = (catch erts_debug:get_internal_state(check_io_debug)),
+ NoOfErrorFds = (catch element(1, erts_debug:get_internal_state(check_io_debug))),
catch erlang:display({'NoOfErrorFds', NoOfErrorFds}),
catch erts_debug:set_internal_state(available_internal_state, false),
catch erlang:display('--- CHECK IO INFO ---'),
ok.
get_check_io_info() ->
- ChkIo = erlang:system_info(check_io),
- case lists:keysearch(pending_updates, 1, ChkIo) of
- {value, {pending_updates, 0}} ->
- display_check_io(ChkIo),
- ChkIo;
- false ->
- ChkIo;
- _ ->
- receive after 10 -> ok end,
- get_check_io_info()
- end.
+ z_SUITE:get_check_io_info().
diff --git a/erts/emulator/test/async_ports_SUITE.erl b/erts/emulator/test/async_ports_SUITE.erl
new file mode 100644
index 0000000000..c89b3655ff
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE.erl
@@ -0,0 +1,118 @@
+-module(async_ports_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(PACKET_SIZE, (10 * 1024 * 8)).
+-define(CPORT_DELAY, 100).
+-define(TEST_LOOPS_COUNT, 100000).
+-define(SLEEP_BEFORE_CHECK, 1000).
+-define(TEST_PROCS_COUNT, 2).
+-define(TC_TIMETRAP_SECONDS, 10).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ permanent_busy_test
+ ].
+
+permanent_busy_test(Config) ->
+ ct:timetrap({seconds, ?TC_TIMETRAP_SECONDS}),
+ ExePath = filename:join(?config(data_dir, Config), "cport"),
+
+ Self = self(),
+ spawn_link(
+ fun() ->
+ Block = <<0:?PACKET_SIZE>>,
+
+ Port = open_port(ExePath),
+
+ Testers =
+ lists:map(
+ fun(_) ->
+ erlang:spawn_link(?MODULE, run_loop,
+ [Self,
+ Port,
+ Block,
+ ?TEST_LOOPS_COUNT,
+ 0])
+ end,
+ lists:seq(1, ?TEST_PROCS_COUNT)),
+ Self ! {test_info, Port, Testers},
+ endless_flush(Port)
+ end),
+
+ receive
+ {test_info, Port, Testers} ->
+ MaxWaitTime = round(0.7 * ?TC_TIMETRAP_SECONDS * 1000),
+ ct:log("wait testers, maximum ~w mcsec~n", [MaxWaitTime]),
+ ok = wait_testers(MaxWaitTime, Testers),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, <<"test">>, [nosuspend]) of
+ false ->
+ exit(port_dead);
+ true ->
+ ok
+ end
+ end.
+
+wait_testers(Timeout, Testers) ->
+ lists:foldl(
+ fun(Pid, AccIn) ->
+ StartWait = os:timestamp(),
+ receive
+ {Pid, port_dead} ->
+ recalc_timeout(AccIn, StartWait)
+ after AccIn ->
+ Pid ! stop,
+ recalc_timeout(AccIn, StartWait)
+ end
+ end, Timeout, Testers),
+ ok.
+
+recalc_timeout(TimeoutIn, WaitStart) ->
+ erlang:max(0, TimeoutIn - round(timer:now_diff(os:timestamp(), WaitStart)) div 1000).
+
+open_port(ExePath) ->
+ erlang:open_port({spawn, ExePath ++ " 100"}, [{packet, 4}, eof, exit_status, use_stdio, binary]).
+
+run_loop(RootProc, Port, Block, CheckLimit, BusyCnt) ->
+ receive
+ stop ->
+ ok
+ after 0 ->
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ if
+ BusyCnt + 1 > CheckLimit ->
+ check_dead(RootProc, Port, Block, CheckLimit);
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, BusyCnt + 1)
+ end
+ end
+ end.
+
+check_dead(RootProc, Port, Block, CheckLimit) ->
+ ct:log("~p: check port dead~n", [self()]),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ ct:log("not dead~n"),
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ ct:log("port dead: ~p~n", [Port]),
+ RootProc ! {self(), port_dead},
+ ok
+ end.
+
+endless_flush(Port) ->
+ receive
+ {Port, {data, _}} ->
+ endless_flush(Port);
+ {Port, SomethingWrong} ->
+ erlang:error({something_wrong, SomethingWrong})
+ end.
diff --git a/erts/emulator/test/async_ports_SUITE_data/Makefile.src b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..56da3fbe12
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
@@ -0,0 +1,15 @@
+CC = @CC@
+LD = @LD@
+CFLAGS = @CFLAGS@ @DEFS@
+CROSSLDFLAGS = @CROSSLDFLAGS@
+
+PROGS = cport@exe@
+
+
+all: $(PROGS)
+
+cport@exe@: cport@obj@
+ $(LD) $(CROSSLDFLAGS) -o cport cport@obj@ @LIBS@
+
+cport@obj@: cport.c
+ $(CC) -c -o cport@obj@ $(CFLAGS) cport.c
diff --git a/erts/emulator/test/async_ports_SUITE_data/cport.c b/erts/emulator/test/async_ports_SUITE_data/cport.c
new file mode 100644
index 0000000000..033aff382a
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/cport.c
@@ -0,0 +1,81 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#ifdef __WIN32__
+# include "windows.h"
+# include "winbase.h"
+#else
+# include <unistd.h>
+#endif
+
+typedef unsigned char byte;
+
+/* forward declarations; read_cmd/write_cmd call these before they are defined */
+int read_exact(byte *buf, int len);
+int write_exact(byte *buf, int len);
+
+int read_cmd(byte *buf)
+{
+ int len;
+ if (read_exact(buf, 4) != 4)
+ return(-1);
+
+ len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+ return read_exact(buf, len);
+}
+
+int write_cmd(byte *buf, int len)
+{
+ byte li[4];
+ li[0] = (len >> 24) & 0xff;
+ li[1] = (len >> 16) & 0xff;
+ li[2] = (len >> 8) & 0xff;
+ li[3] = len & 0xff;
+ write_exact(li, 4);
+
+ return write_exact(buf, len);
+}
+
+int read_exact(byte *buf, int len)
+{
+ int i, got=0;
+ do {
+ if ((i = read(0, buf+got, len-got)) <= 0)
+ {
+ return(i);
+ }
+ got += i;
+ } while (got<len);
+ return len;
+}
+
+int write_exact(byte *buf, int len)
+{
+ int i, wrote = 0;
+ do {
+ if ((i = write(1, buf+wrote, len-wrote)) < 0)
+ return (i);
+ wrote += i;
+ } while (wrote<len);
+ return len;
+}
+
+byte static_buf[31457280]; // 30 mb
+
+int main(int argc, char **argv) {
+ int sleep_time = atoi(argv[1]);
+ byte *buf = &static_buf[0];
+ int len = 0;
+ if (sleep_time <= 0)
+ sleep_time = 0;
+#ifdef __WIN32__
+ else
+ sleep_time = ((sleep_time - 1) / 1000) + 1; /* Milli seconds */
+#endif
+ while ((len = read_cmd(buf)) > 0) {
+#ifdef __WIN32__
+ Sleep((DWORD) sleep_time);
+#else
+ usleep(sleep_time);
+#endif
+ write_cmd(buf, len);
+ }
+}
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 938aac6a0e..44e9e4f243 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -58,13 +58,14 @@
ordering/1,unaligned_order/1,gc_test/1,
bit_sized_binary_sizes/1,
otp_6817/1,deep/1,obsolete_funs/1,robustness/1,otp_8117/1,
- otp_8180/1, trapping/1]).
+ otp_8180/1, trapping/1, large/1,
+ error_after_yield/1, cmp_old_impl/1]).
%% Internal exports.
-export([sleeper/0,trapping_loop/4]).
suite() -> [{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,2}}].
+ {timetrap,{minutes,4}}].
all() ->
[copy_terms, conversions, deep_lists, deep_bitstr_lists,
@@ -76,7 +77,8 @@ all() ->
bad_term_to_binary, more_bad_terms, otp_5484, otp_5933,
ordering, unaligned_order, gc_test,
bit_sized_binary_sizes, otp_6817, otp_8117, deep,
- obsolete_funs, robustness, otp_8180, trapping].
+ obsolete_funs, robustness, otp_8180, trapping, large,
+ error_after_yield, cmp_old_impl].
groups() ->
[].
@@ -1266,7 +1268,7 @@ deep(Config) when is_list(Config) ->
deep_roundtrip(T) ->
B = term_to_binary(T),
- T = binary_to_term_stress(B).
+ T = binary_to_term(B).
obsolete_funs(Config) when is_list(Config) ->
erts_debug:set_internal_state(available_internal_state, true),
@@ -1351,7 +1353,16 @@ trapping(Config) when is_list(Config)->
do_trapping(5, term_to_binary,
fun() -> [lists:duplicate(2000000,2000000)] end),
do_trapping(5, binary_to_term,
- fun() -> [term_to_binary(lists:duplicate(2000000,2000000))] end).
+ fun() -> [term_to_binary(lists:duplicate(2000000,2000000))] end),
+ do_trapping(5, binary_to_list,
+ fun() -> [list_to_binary(lists:duplicate(2000000,$x))] end),
+ do_trapping(5, list_to_binary,
+ fun() -> [lists:duplicate(2000000,$x)] end),
+ do_trapping(5, bitstring_to_list,
+ fun() -> [list_to_bitstring([lists:duplicate(2000000,$x),<<7:4>>])] end),
+ do_trapping(5, list_to_bitstring,
+ fun() -> [[lists:duplicate(2000000,$x),<<7:4>>]] end)
+ .
do_trapping(0, _, _) ->
ok;
@@ -1384,9 +1395,189 @@ trapping_loop2(Bif,Args,N) ->
apply(erlang,Bif,Args),
trapping_loop2(Bif, Args, N-1).
+large(Config) when is_list(Config) ->
+ List = lists:flatten(lists:map(fun (_) ->
+ [0,1,2,3,4,5,6,7,8]
+ end,
+ lists:seq(1, 131072))),
+ Bin = list_to_binary(List),
+ List = binary_to_list(Bin),
+ PartList = lists:reverse(tl(tl(lists:reverse(tl(tl(List)))))),
+ PartList = binary_to_list(Bin, 3, length(List)-2),
+ ListBS = List ++ [<<7:4>>],
+ ListBS = bitstring_to_list(list_to_bitstring(ListBS)),
+ BitStr1 = list_to_bitstring(lists:duplicate(1024*1024, [<<1,5:3>>])),
+ BitStr1 = list_to_bitstring(bitstring_to_list(BitStr1)),
+ BitStr2 = list_to_bitstring([lists:duplicate(512*1024, [<<1,5:3>>]),
+ Bin]),
+ BitStr2 = list_to_bitstring(bitstring_to_list(BitStr2)),
+ ok.
+
+error_after_yield(Config) when is_list(Config) ->
+ L2BTrap = {erts_internal, list_to_binary_continue, 1},
+ error_after_yield(badarg, erlang, list_to_binary, 1, fun () -> [[mk_list(1000000), oops]] end, L2BTrap),
+ error_after_yield(badarg, erlang, iolist_to_binary, 1, fun () -> [[list2iolist(mk_list(1000000)), oops]] end, L2BTrap),
+ error_after_yield(badarg, erlang, list_to_bitstring, 1, fun () -> [[list2bitstrlist(mk_list(1000000)), oops]] end, L2BTrap),
+ error_after_yield(badarg, binary, list_to_bin, 1, fun () -> [[mk_list(1000000), oops]] end, L2BTrap),
+
+ B2TTrap = {erts_internal, binary_to_term_trap, 1},
+
+ error_after_yield(badarg, erlang, binary_to_term, 1, fun () -> [error_after_yield_bad_ext_term()] end, B2TTrap),
+ error_after_yield(badarg, erlang, binary_to_term, 2, fun () -> [error_after_yield_bad_ext_term(), [safe]] end, B2TTrap),
+
+ case erlang:system_info(wordsize) of
+ 4 ->
+ SysLimitSz = 1 bsl 32,
+ error_after_yield(system_limit, erlang, list_to_binary, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap),
+ error_after_yield(system_limit, erlang, iolist_to_binary, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap),
+ error_after_yield(system_limit, erlang, list_to_bitstring, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap),
+ error_after_yield(system_limit, binary, list_to_bin, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap);
+ 8 ->
+ % Takes way too long to test system_limit on 64-bit archs...
+ ok
+ end,
+ ok.
+
+error_after_yield(Type, M, F, AN, AFun, TrapFunc) ->
+ io:format("Testing ~p for ~p:~p/~p~n", [Type, M, F, AN]),
+ Tracer = self(),
+ {Pid, Mon} = spawn_monitor(fun () ->
+ A = AFun(),
+ try
+ erlang:yield(),
+ erlang:trace(self(),true,[running,{tracer,Tracer}]),
+ apply(M, F, A),
+ exit({unexpected_success, {M, F, A}})
+ catch
+ error:Type ->
+ erlang:trace(self(),false,[running,{tracer,Tracer}]),
+ %% We threw the exception from the native
+ %% function we trapped to, but we want
+ %% the BIF that originally was called
+ %% to appear in the stack trace.
+ [{M, F, A, _} | _] = erlang:get_stacktrace()
+ end
+ end),
+ receive
+ {'DOWN', Mon, process, Pid, Reason} ->
+ normal = Reason
+ end,
+ TD = erlang:trace_delivered(Pid),
+ receive
+ {trace_delivered, Pid, TD} ->
+ NoYields = error_after_yield_sched(Pid, TrapFunc, 0),
+ io:format("No of yields: ~p~n", [NoYields]),
+ true = NoYields > 2
+ end,
+ ok.
+
+error_after_yield_sched(P, TrapFunc, N) ->
+ receive
+ {trace, P, out, TrapFunc} ->
+ receive
+ {trace, P, in, TrapFunc} ->
+ error_after_yield_sched(P, TrapFunc, N+1)
+ after 0 ->
+ exit(trap_sched_mismatch)
+ end;
+ {trace, P, out, Func} ->
+ receive
+ {trace, P, in, Func} ->
+ error_after_yield_sched(P, TrapFunc, N)
+ after 0 ->
+ exit(other_sched_mismatch)
+ end
+ after 0 ->
+ N
+ end.
+
+error_after_yield_bad_ext_term() ->
+ TupleSz = 2000000,
+ <<131, % Version magic
+ AtomExt/binary>> = term_to_binary(an_atom_we_use_for_this),
+ BadAtomExt = [100, %% ATOM_EXT
+ 255, 255, % Invalid size of 65535 bytes
+ "oops"],
+
+ %% Produce a large tuple where the last element is invalid
+ list_to_binary([131, %% Version magic
+ 105, %% LARGE_TUPLE_EXT
+ <<TupleSz:32/big>>, %% Tuple size
+ lists:duplicate(TupleSz-1, AtomExt), %% Valid atoms
+ BadAtomExt]). %% Invalid atom at the end
+
+cmp_old_impl(Config) when is_list(Config) ->
+ %% Compare results from new yielding implementations with
+ %% old non yielding implementations
+ Cookie = atom_to_list(erlang:get_cookie()),
+ Rel = "r16b_latest",
+ case test_server:is_release_available(Rel) of
+ false ->
+ {skipped, "No "++Rel++" available"};
+ true ->
+ {ok, Node} = ?t:start_node(list_to_atom(atom_to_list(?MODULE)++"_"++Rel),
+ peer,
+ [{args, " -setcookie "++Cookie},
+ {erl, [{release, Rel}]}]),
+
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(100))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1000))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10000))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(100000))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1000000))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10000000))]}),
+ cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list_lb(10000000))]}),
+
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(100))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1000))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10000))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(100000))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1000000))]}),
+ cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10000000))]}),
+
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(1))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(100))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(1000))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10000))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(100000))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(1000000))]}),
+ cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10000000))]}),
+
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(100)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1000)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10000)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(100000)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1000000)))]}),
+ cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10000000)))]}),
+
+ ?t:stop_node(Node),
+
+ ok
+ end.
%% Utilities.
+huge_iolist(Lim) ->
+ Sz = 1024,
+ huge_iolist(list_to_binary(mk_list(Sz)), Sz, Lim).
+
+huge_iolist(X, Sz, Lim) when Sz >= Lim ->
+ X;
+huge_iolist(X, Sz, Lim) ->
+ huge_iolist([X, X], Sz*2, Lim).
+
+cmp_node(Node, {M, F, A}) ->
+ Res = rpc:call(Node, M, F, A),
+ Res = apply(M, F, A),
+ ok.
+
make_sub_binary(Bin) when is_binary(Bin) ->
{_,B} = split_binary(list_to_binary([0,1,3,Bin]), 3),
B;
@@ -1467,3 +1658,78 @@ get_reds() ->
erts_debug:set_internal_state(available_internal_state, true),
get_reds()
end.
+
+-define(LARGE_BIN, (512*1024+10)).
+-define(LARGE_BIN_LIM, (1024*1024)).
+
+mk_list(0, Acc) ->
+ Acc;
+mk_list(Sz, Acc) ->
+ mk_list(Sz-1, [$A+(Sz band 63) | Acc]).
+
+mk_list(Sz) when Sz >= ?LARGE_BIN_LIM ->
+ SzLeft = Sz - ?LARGE_BIN,
+ SzHd = SzLeft div 2,
+ SzTl = SzLeft - SzHd,
+ [mk_list(SzHd, []), erlang:list_to_binary(mk_list(?LARGE_BIN, [])), mk_list(SzTl, [])];
+mk_list(Sz) ->
+ mk_list(Sz, []).
+
+mk_list_lb(Sz) when Sz >= ?LARGE_BIN_LIM ->
+ SzLeft = Sz - ?LARGE_BIN,
+ SzHd = SzLeft div 2,
+ SzTl = SzLeft - SzHd,
+ [mk_list(SzHd, []), erlang:list_to_binary(mk_list(?LARGE_BIN, [])), mk_list(SzTl, [])];
+mk_list_lb(Sz) ->
+ mk_list(Sz, []).
+
+
+list2iolist(List) ->
+ list2iolist(List, []).
+
+list2iolist([], Acc) ->
+ Acc;
+list2iolist([X0, X1, X2, X3, X4, X5 | Xs], Acc) when is_integer(X0), 0 =< X0, X0 < 256,
+ is_integer(X1), 0 =< X1, X1 < 256,
+ is_integer(X2), 0 =< X2, X2 < 256,
+ is_integer(X3), 0 =< X3, X3 < 256,
+ is_integer(X4), 0 =< X4, X4 < 256,
+ is_integer(X5), 0 =< X5, X5 < 256 ->
+ NewAcc = case (X0+X1+X2+X3+X4+X5) band 3 of
+ 0 ->
+ [Acc, [[[[[[[[[[[[X0,[],<<"">>,X1]]]]]]]]],[X2,X3]],[],[],[],[],X4],X5]];
+ 1 ->
+ [Acc, [], erlang:list_to_binary([X0, X1, X2, X3, X4, X5])];
+ 2 ->
+ [Acc, [[[[X0|erlang:list_to_binary([X1])],[X2|erlang:list_to_binary([X3])],[X4|erlang:list_to_binary([X5])]]]|<<"">>]];
+ 3 ->
+ [Acc, X0, X1, X2, <<"">>, [], X3, X4 | erlang:list_to_binary([X5])]
+ end,
+ list2iolist(Xs, NewAcc);
+list2iolist([X | Xs], Acc) ->
+ list2iolist(Xs, [Acc,X]).
+
+list2bitstrlist(List) ->
+ [list2bitstrlist(List, []), <<4:7>>].
+
+list2bitstrlist([], Acc) ->
+ Acc;
+list2bitstrlist([X0, X1, X2, X3, X4, X5 | Xs], Acc) when is_integer(X0), 0 =< X0, X0 < 256,
+ is_integer(X1), 0 =< X1, X1 < 256,
+ is_integer(X2), 0 =< X2, X2 < 256,
+ is_integer(X3), 0 =< X3, X3 < 256,
+ is_integer(X4), 0 =< X4, X4 < 256,
+ is_integer(X5), 0 =< X5, X5 < 256 ->
+ NewAcc = case (X0+X1+X2+X3+X4+X5) band 3 of
+ 0 ->
+ [Acc, [[[[[[[[[[[[X0,[],<<"">>,X1]]]]]]]]],[X2,X3]],[],[],[],[],X4],X5]];
+ 1 ->
+ [Acc, [], <<X0:X1>>, <<X2:X3>>, <<X4:X5>>];
+ 2 ->
+ [Acc, [[[[X0|<<X1:X2>>],X3]],[X4|erlang:list_to_binary([X5])]|<<"">>]];
+ 3 ->
+ [Acc, X0, X1, X2, <<"">>, [], X3, X4 | erlang:list_to_binary([X5])]
+ end,
+ list2bitstrlist(Xs, NewAcc);
+list2bitstrlist([X | Xs], Acc) ->
+ list2bitstrlist(Xs, [Acc,X]).
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 4b4af0babe..2ed5aaa0d0 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -98,8 +98,10 @@ generator(0, Writer, _Data) ->
%% Calling process_info(Pid, current_function) on a suspended process
%% used to crash Beam.
- {current_function, {erlang, send, 2}} =
- process_info(Writer, current_function),
+ case process_info(Writer, [status,current_function]) of
+ [{status,suspended},{current_function,{erlang,send,2}}] -> ok;
+ [{status,suspended},{current_function,{erlang,bif_return_trap,_}}] -> ok
+ end,
unlock_slave();
generator(N, Writer, Data) ->
Writer ! {exec, Data},
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index c62bc0c454..623d62f876 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -31,8 +31,9 @@
end_per_suite/1, init_per_group/2,end_per_group/2,
init_per_testcase/2,
end_per_testcase/2,
+
+ a_test/1,
outputv_echo/1,
-
timer_measure/1,
timer_cancel/1,
timer_change/1,
@@ -79,7 +80,8 @@
thr_free_drv/1,
async_blast/1,
thr_msg_blast/1,
- consume_timeslice/1]).
+ consume_timeslice/1,
+ z_test/1]).
-export([bin_prefix/2]).
@@ -122,19 +124,19 @@ init_per_testcase(Case, Config) when is_atom(Case), is_list(Config) ->
_ -> erts_debug:set_internal_state(available_internal_state, true)
end,
erlang:display({init_per_testcase, Case}),
- ?line 0 = erts_debug:get_internal_state(check_io_debug),
+ ?line 0 = element(1, erts_debug:get_internal_state(check_io_debug)),
[{watchdog, Dog},{testcase, Case}|Config].
end_per_testcase(Case, Config) ->
Dog = ?config(watchdog, Config),
erlang:display({end_per_testcase, Case}),
- ?line 0 = erts_debug:get_internal_state(check_io_debug),
+ ?line 0 = element(1, erts_debug:get_internal_state(check_io_debug)),
?t:timetrap_cancel(Dog).
suite() -> [{ct_hooks,[ts_install_cth]}].
-all() ->
- [outputv_errors, outputv_echo, queue_echo, {group, timer},
+all() -> %% Keep a_test first and z_test last...
+ [a_test, outputv_errors, outputv_echo, queue_echo, {group, timer},
driver_unloaded, io_ready_exit, use_fallback_pollset,
bad_fd_in_pollset, driver_event, fd_change,
steal_control, otp_6602, driver_system_info_base_ver,
@@ -151,7 +153,8 @@ all() ->
thr_free_drv,
async_blast,
thr_msg_blast,
- consume_timeslice].
+ consume_timeslice,
+ z_test].
groups() ->
[{timer, [],
@@ -917,8 +920,7 @@ steal_control_test(Hndl = {erts_poll_info, Before}) ->
end.
chkio_test_init(Config) when is_list(Config) ->
- ?line wait_until_no_pending_updates(),
- ?line ChkIo = erlang:system_info(check_io),
+ ?line ChkIo = get_stable_check_io_info(),
?line case catch lists:keysearch(name, 1, ChkIo) of
{value, {name, erts_poll}} ->
?line ?t:format("Before test: ~p~n", [ChkIo]),
@@ -937,8 +939,7 @@ chkio_test_fini({skipped, _} = Res) ->
chkio_test_fini({chkio_test_result, Res, Before}) ->
?line ok = erl_ddll:unload_driver('chkio_drv'),
?line ok = erl_ddll:stop(),
- ?line wait_until_no_pending_updates(),
- ?line After = erlang:system_info(check_io),
+ ?line After = get_stable_check_io_info(),
?line ?t:format("After test: ~p~n", [After]),
?line verify_chkio_state(Before, After),
?line Res.
@@ -985,7 +986,7 @@ chkio_test({erts_poll_info, Before},
?line Fun(),
?line During = erlang:system_info(check_io),
?line erlang:display(During),
- ?line 0 = erts_debug:get_internal_state(check_io_debug),
+ ?line 0 = element(1, erts_debug:get_internal_state(check_io_debug)),
?line ?t:format("During test: ~p~n", [During]),
?line chk_chkio_port(Port),
?line case erlang:port_control(Port, ?CHKIO_STOP, "") of
@@ -1034,18 +1035,22 @@ verify_chkio_state(Before, After) ->
After)
end,
?line ok.
-
-
-wait_until_no_pending_updates() ->
- case lists:keysearch(pending_updates, 1, erlang:system_info(check_io)) of
- {value, {pending_updates, 0}} ->
- ok;
- false ->
- ok;
+get_stable_check_io_info() ->
+ ChkIo = erlang:system_info(check_io),
+ PendUpdNo = case lists:keysearch(pending_updates, 1, ChkIo) of
+ {value, {pending_updates, PendNo}} ->
+ PendNo;
+ false ->
+ 0
+ end,
+ {value, {active_fds, ActFds}} = lists:keysearch(active_fds, 1, ChkIo),
+ case {PendUpdNo, ActFds} of
+ {0, 0} ->
+ ChkIo;
_ ->
receive after 10 -> ok end,
- wait_until_no_pending_updates()
+ get_stable_check_io_info()
end.
otp_6602(doc) -> ["Missed port lock when stealing control of fd from a "
@@ -1062,10 +1067,9 @@ otp_6602(Config) when is_list(Config) ->
%% Inet driver use port locking...
{ok, S} = gen_udp:open(0),
{ok, Fd} = inet:getfd(S),
- {ok, Port} = inet:port(S),
%% Steal fd (lock checker used to
%% trigger here).
- {ok, _S2} = gen_udp:open(Port,[{fd,Fd}]),
+ {ok, _S2} = gen_udp:open(0,[{fd,Fd}]),
Parent ! Done
end),
?line receive Done -> ok end,
@@ -1085,7 +1089,15 @@ otp_6602(Config) when is_list(Config) ->
["async_thrs",
"sched_thrs"])).
--define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES2).
+-define(EXPECTED_SYSTEM_INFO_NAMES3,
+ (?EXPECTED_SYSTEM_INFO_NAMES2 ++
+ ["emu_nif_vsn"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES4,
+ (?EXPECTED_SYSTEM_INFO_NAMES3 ++
+ ["dirty_sched"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES4).
'driver_system_info_base_ver'(doc) ->
[];
@@ -1133,16 +1145,18 @@ check_driver_system_info_result(Result) ->
drv_vsn_str2tup(erlang:system_info(driver_version))} of
{DDVSN, DDVSN} ->
?line [] = Ns;
- {{1, 0}, _} ->
- ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES1),
- ?line ExpNs = lists:sort(Ns);
- {{1, 1}, _} ->
+ %% {{1, 0}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES1),
+ %% ?line ExpNs = lists:sort(Ns);
+ %% {{1, 1}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES2),
+ %% ?line ExpNs = lists:sort(Ns);
+ {{3, 0}, _} ->
?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES2),
- ?line ExpNs = lists:sort(Ns);
- {{2, 0}, _} ->
- ?line [] = Ns
+ -- ?EXPECTED_SYSTEM_INFO_NAMES3),
+ ?line ExpNs = lists:sort(Ns)
end.
chk_sis(SIs, Ns) ->
@@ -1189,6 +1203,14 @@ check_si_res(["async_thrs", Value]) ->
check_si_res(["sched_thrs", Value]) ->
?line Value = integer_to_list(erlang:system_info(schedulers));
+%% Data added in 3rd version of driver_system_info() (driver version 1.5)
+check_si_res(["emu_nif_vsn", Value]) ->
+ ?line Value = erlang:system_info(nif_version);
+
+%% Data added in 4th version of driver_system_info() (driver version 3.1)
+check_si_res(["dirty_sched", _Value]) ->
+ true;
+
check_si_res(Unexpected) ->
?line ?t:fail({unexpected_result, Unexpected}).
@@ -2370,10 +2392,25 @@ count_proc_sched(Ps, PNs) ->
PNs
end.
+a_test(Config) when is_list(Config) ->
+ check_io_debug().
+
+z_test(Config) when is_list(Config) ->
+ check_io_debug().
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Utilities
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+check_io_debug() ->
+ get_stable_check_io_info(),
+ {NoErrorFds, NoUsedFds, NoDrvSelStructs, NoDrvEvStructs}
+ = erts_debug:get_internal_state(check_io_debug),
+ 0 = NoErrorFds,
+ NoUsedFds = NoDrvSelStructs,
+ 0 = NoDrvEvStructs,
+ ok.
+
%flush_msgs() ->
% receive
% M ->
diff --git a/erts/emulator/test/driver_SUITE_data/smaller_major_vsn_drv.c b/erts/emulator/test/driver_SUITE_data/smaller_major_vsn_drv.c
index a1299fe807..6b9d4745ba 100644
--- a/erts/emulator/test/driver_SUITE_data/smaller_major_vsn_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/smaller_major_vsn_drv.c
@@ -20,12 +20,12 @@
* Author: Rickard Green
*
* Description: Implementation of a driver with a smaller major
- * driver version than the current system.
+ * driver version than allowed on load.
*/
#define VSN_MISMATCH_DRV_NAME_STR "smaller_major_vsn_drv"
#define VSN_MISMATCH_DRV_NAME smaller_major_vsn_drv
-#define VSN_MISMATCH_DRV_MAJOR_VSN_DIFF (-1)
+#define VSN_MISMATCH_DRV_MAJOR_VSN_DIFF (ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD - ERL_DRV_EXTENDED_MAJOR_VERSION - 1)
#define VSN_MISMATCH_DRV_MINOR_VSN_DIFF 0
#include "vsn_mismatch_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
index e44c7dbd5e..964034f5a6 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
index 5bbc966932..6d2c47fdaf 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
@@ -40,7 +40,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d " \
+ "dirty_sched=%s"
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -54,6 +56,8 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
+ slen += 5; /* dirty_sched */
return slen;
}
@@ -71,7 +75,10 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version,
+ sip->dirty_scheduler_support ? "true" : "false");
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
index 63c69f751c..2271d7027b 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl
index 87778dd0c2..e5c904cfb9 100644
--- a/erts/emulator/test/erts_debug_SUITE.erl
+++ b/erts/emulator/test/erts_debug_SUITE.erl
@@ -67,6 +67,9 @@ test_size(Config) when is_list(Config) ->
2 = do_test_size({[]}),
3 = do_test_size({a,b}),
7 = do_test_size({a,[b,c]}),
+ 8 = do_test_size(#{b => 2,c => 3}),
+ 4 = do_test_size(#{}),
+ 32 = do_test_size(#{b => 2,c => 3,txt => "hello world"}),
%% Test internal consistency of sizes, but without testing
%% exact sizes.
@@ -97,6 +100,9 @@ test_size(Config) when is_list(Config) ->
do_test_size({SimplestFun,SimplestFun},
2*FunSz0+do_test_size({a,b}),
FunSz0+do_test_size({a,b})),
+
+ M = id(#{ "atom" => first, i => 0}),
+ do_test_size([M,M#{ "atom" := other },M#{i := 42}],54,32),
ok.
do_test_size(Term) ->
diff --git a/erts/emulator/test/float_SUITE_data/fp_drv.c b/erts/emulator/test/float_SUITE_data/fp_drv.c
index b80385c3f9..82d18d6440 100644
--- a/erts/emulator/test/float_SUITE_data/fp_drv.c
+++ b/erts/emulator/test/float_SUITE_data/fp_drv.c
@@ -29,9 +29,14 @@
#if defined (__GNUC__)
int _finite(double x);
#endif
-#ifndef finite
-#define finite _finite
+#ifndef isfinite
+#define isfinite _finite
#endif
+#elif !defined(HAVE_ISFINITE) && defined(HAVE_FINITE)
+/* If not windows and we do not have isfinite */
+#define isfinite finite
+#elif !defined(HAVE_ISFINITE)
+# error "No finite function found!"
#endif
#include "erl_driver.h"
@@ -79,21 +84,21 @@ do_test(void *unused)
x = 3.23e133;
y = 3.57e257;
z = x*y;
- if (finite(z))
+ if (isfinite(z))
return "is finite (1)";
x = 5.0;
y = 0.0;
z = x/y;
- if (finite(z))
+ if (isfinite(z))
return "is finite (2)";
z = log(-1.0);
- if (finite(z))
+ if (isfinite(z))
return "is finite (3)";
z = log(0.0);
- if (finite(z))
+ if (isfinite(z))
return "is finite (4)";
return "ok";
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index 8ad5f290ed..2968f5bebb 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -30,7 +30,7 @@
fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1,
refc/1,refc_ets/1,refc_dist/1,
const_propagation/1,t_arity/1,t_is_function2/1,
- t_fun_info/1]).
+ t_fun_info/1,t_fun_info_mfa/1]).
-export([nothing/0]).
@@ -42,7 +42,8 @@ all() ->
[bad_apply, bad_fun_call, badarity, ext_badarity,
equality, ordering, fun_to_port, t_hash, t_phash,
t_phash2, md5, refc, refc_ets, refc_dist,
- const_propagation, t_arity, t_is_function2, t_fun_info].
+ const_propagation, t_arity, t_is_function2, t_fun_info,
+ t_fun_info_mfa].
groups() ->
[].
@@ -824,6 +825,24 @@ t_fun_info(Config) when is_list(Config) ->
?line bad_info(<<1,2>>),
ok.
+t_fun_info_mfa(Config) when is_list(Config) ->
+ Fun1 = fun spawn_call/2,
+ {module,M1} = erlang:fun_info(Fun1, module),
+ {name,F1} = erlang:fun_info(Fun1, name),
+ {arity,A1} = erlang:fun_info(Fun1, arity),
+ {M1,F1,A1=2} = erlang:fun_info_mfa(Fun1),
+ %% Module fun.
+ Fun2 = fun ?MODULE:t_fun_info/1,
+ {module,M2} = erlang:fun_info(Fun2, module),
+ {name,F2} = erlang:fun_info(Fun2, name),
+ {arity,A2} = erlang:fun_info(Fun2, arity),
+ {M2,F2,A2=1} = erlang:fun_info_mfa(Fun2),
+
+ %% Not fun.
+ {'EXIT',_} = (catch erlang:fun_info_mfa(id(d))),
+ ok.
+
+
bad_info(Term) ->
try erlang:fun_info(Term, module) of
Any ->
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index 753d6f7727..888ed8e272 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -813,16 +813,16 @@ t_map_encode_decode(Config) when is_list(Config) ->
%% literally #{ b=>2, a=>1 } in the internal order
#{ a:=1, b:=2 } =
- erlang:binary_to_term(<<131,116,0,0,0,2,100,0,1,98,100,0,1,97,97,2,97,1>>),
+ erlang:binary_to_term(<<131,116,0,0,0,2,100,0,1,98,97,2,100,0,1,97,97,1>>),
%% literally #{ "hi" => "value", a=>33, b=>55 } in the internal order
#{ a:=33, b:=55, "hi" := "value"} = erlang:binary_to_term(<<131,116,0,0,0,3,
107,0,2,104,105, % "hi" :: list()
- 100,0,1,97, % a :: atom()
- 100,0,1,98, % b :: atom()
107,0,5,118,97,108,117,101, % "value" :: list()
+ 100,0,1,97, % a :: atom()
97,33, % 33 :: integer()
+ 100,0,1,98, % b :: atom()
97,55 % 55 :: integer()
>>),
@@ -834,11 +834,17 @@ t_map_encode_decode(Config) when is_list(Config) ->
%% uniqueness violation
%% literally #{ a=>1, "hi"=>"value", a=>2 }
{'EXIT',{badarg,[{_,_,_,_}|_]}} = (catch
- erlang:binary_to_term(<<131,116,0,0,0,3,100,0,1,97,107,0,2,104,105,100,0,1,97,97,1,107,0,5,118,97,108,117,101,97,2>>)),
+ erlang:binary_to_term(<<131,116,0,0,0,3,
+ 100,0,1,97,
+ 97,1,
+ 107,0,2,104,105,
+ 107,0,5,118,97,108,117,101,
+ 100,0,1,97,
+ 97,2>>)),
%% bad size (too large)
{'EXIT',{badarg,[{_,_,_,_}|_]}} = (catch
- erlang:binary_to_term(<<131,116,0,0,0,12,100,0,1,97,100,0,1,98,97,1,97,1>>)),
+ erlang:binary_to_term(<<131,116,0,0,0,12,100,0,1,97,97,1,100,0,1,98,97,1>>)),
%% bad size (too small) .. should fail just truncate it .. weird.
%% possibly change external format so truncated will be #{a:=1}
@@ -852,7 +858,8 @@ map_encode_decode_and_match([{K,V}|Pairs], EncodedPairs, M0) ->
B0 = erlang:term_to_binary(M1),
Ls = lists:sort(fun(A,B) -> erts_internal:cmp_term(A,B) < 0 end, [{K, erlang:term_to_binary(K), erlang:term_to_binary(V)}|EncodedPairs]),
%% sort Ks and Vs according to term spec, then match it
- ok = match_encoded_map(B0, length(Ls), [Kbin||{_,Kbin,_}<-Ls] ++ [Vbin||{_,_,Vbin}<-Ls]),
+ KVbins = lists:foldr(fun({_,Kbin,Vbin}, Acc) -> [Kbin,Vbin | Acc] end, [], Ls),
+ ok = match_encoded_map(B0, length(Ls), KVbins),
%% decode and match it
M1 = erlang:binary_to_term(B0),
map_encode_decode_and_match(Pairs,Ls,M1);
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 330bef7104..fdce157abc 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -42,7 +42,7 @@
-export([init_per_testcase/2, end_per_testcase/2]).
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:seconds(10)),
+ Dog=?t:timetrap(?t:seconds(30)),
[{watchdog, Dog}|Config].
end_per_testcase(_Func, Config) ->
@@ -1009,12 +1009,14 @@ loop_runner(Collector, Fun, Laps) ->
end,
loop_runner_cont(Collector, Fun, 0, Laps).
-loop_runner_cont(_Collector, _Fun, Laps, Laps) ->
+loop_runner_cont(Collector, _Fun, Laps, Laps) ->
receive
- {done, Collector} ->
- io:format("loop_runner ~p exit after ~p laps\n", [self(), Laps]),
- Collector ! {gone, self()}
- end;
+ {done, Collector} -> ok;
+ {abort, Collector} -> ok
+ end,
+ io:format("loop_runner ~p exit after ~p laps\n", [self(), Laps]),
+ Collector ! {gone, self()};
+
loop_runner_cont(Collector, Fun, N, Laps) ->
Fun(),
receive
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index a854d3f05b..4560077a51 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -37,7 +37,9 @@
threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1,
is_checks/1,
get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
- otp_9668/1, consume_timeslice/1, dirty_nif/1
+ otp_9828/1,
+ otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1,
+ dirty_nif_exception/1, nif_schedule/1
]).
-export([many_args_100/100]).
@@ -64,7 +66,9 @@ all() ->
resource_takeover, threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
make_string,reverse_list_test,
- otp_9668, consume_timeslice, dirty_nif
+ otp_9828,
+ otp_9668, consume_timeslice,
+ nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception
].
groups() ->
@@ -1440,6 +1444,20 @@ otp_9668(Config) ->
?line verify_tmpmem(TmpMem),
ok.
+otp_9828(doc) -> ["Copy of writable binary"];
+otp_9828(Config) ->
+ ensure_lib_loaded(Config, 1),
+
+ otp_9828_loop(<<"I'm alive!">>, 1000).
+
+otp_9828_loop(Bin, 0) ->
+ ok;
+otp_9828_loop(Bin, Val) ->
+ WrtBin = <<Bin/binary, Val:32>>,
+ ok = otp_9828_nif(WrtBin),
+ otp_9828_loop(WrtBin, Val-1).
+
+
consume_timeslice(Config) when is_list(Config) ->
CONTEXT_REDS = 2000,
Me = self(),
@@ -1524,6 +1542,20 @@ consume_timeslice(Config) when is_list(Config) ->
ok.
+nif_schedule(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+ A = "this is a string",
+ B = {this,is,a,tuple},
+ {B,A} = call_nif_schedule(A, B),
+ ok = try call_nif_schedule(1, 2)
+ catch
+ error:badarg ->
+ [{?MODULE,call_nif_schedule,[1,2],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end,
+ ok.
+
dirty_nif(Config) when is_list(Config) ->
try erlang:system_info(dirty_cpu_schedulers) of
N when is_integer(N) ->
@@ -1532,12 +1564,50 @@ dirty_nif(Config) when is_list(Config) ->
Val2 = "Erlang",
Val3 = list_to_binary([Val2, 0]),
{Val1, Val2, Val3} = call_dirty_nif(Val1, Val2, Val3),
+ LargeArray = lists:duplicate(1000, ok),
+ LargeArray = call_dirty_nif_zero_args(),
ok
catch
error:badarg ->
{skipped,"No dirty scheduler support"}
end.
+dirty_nif_send(Config) when is_list(Config) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ N when is_integer(N) ->
+ ensure_lib_loaded(Config),
+ Parent = self(),
+ Pid = spawn_link(fun() ->
+ Self = self(),
+ {ok, Self} = receive_any(),
+ Parent ! {ok, Self}
+ end),
+ {ok, Pid} = send_from_dirty_nif(Pid),
+ {ok, Pid} = receive_any(),
+ ok
+ catch
+ error:badarg ->
+ {skipped,"No dirty scheduler support"}
+ end.
+
+dirty_nif_exception(Config) when is_list(Config) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ N when is_integer(N) ->
+ ensure_lib_loaded(Config),
+ try
+ call_dirty_nif_exception(),
+ ?t:fail(expected_badarg)
+ catch
+ error:badarg ->
+ [{?MODULE,call_dirty_nif_exception,[],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end
+ catch
+ error:badarg ->
+ {skipped,"No dirty scheduler support"}
+ end.
+
next_msg(_Pid) ->
receive
M -> M
@@ -1666,8 +1736,13 @@ reverse_list(_) -> ?nif_stub.
echo_int(_) -> ?nif_stub.
type_sizes() -> ?nif_stub.
otp_9668_nif(_) -> ?nif_stub.
+otp_9828_nif(_) -> ?nif_stub.
consume_timeslice_nif(_,_) -> ?nif_stub.
+call_nif_schedule(_,_) -> ?nif_stub.
call_dirty_nif(_,_,_) -> ?nif_stub.
+send_from_dirty_nif(_) -> ?nif_stub.
+call_dirty_nif_exception() -> ?nif_stub.
+call_dirty_nif_zero_args() -> ?nif_stub.
%% maps
is_map_nif(_) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 160f4843ad..85544db2ab 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1473,6 +1473,26 @@ static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return atom_ok;
}
+static ERL_NIF_TERM otp_9828_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ /* Copying a writable binary could reallocate it due to "emasculation"
+ and thereby render a previous inspection invalid.
+ */
+ ErlNifBinary bin1;
+ ErlNifEnv* myenv;
+
+ if (!enif_inspect_binary(env, argv[0], &bin1)) {
+ return enif_make_badarg(env);
+ }
+
+ myenv = enif_alloc_env();
+ enif_make_copy(myenv, argv[0]);
+ enif_free_env(myenv);
+
+ return memcmp(bin1.data, "I'm alive!", 10)==0 ? atom_ok : atom_false;
+}
+
+
static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int percent;
@@ -1493,25 +1513,57 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI
}
}
+static ERL_NIF_TERM nif_sched2(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ char s[64];
+ if (!enif_get_string(env, argv[2], s, sizeof s, ERL_NIF_LATIN1))
+ return enif_make_badarg(env);
+ return enif_make_tuple2(env, argv[3], argv[2]);
+}
+
+static ERL_NIF_TERM nif_sched1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM new_argv[4];
+ new_argv[0] = enif_make_atom(env, "garbage0");
+ new_argv[1] = enif_make_atom(env, "garbage1");
+ new_argv[2] = argv[0];
+ new_argv[3] = argv[1];
+ return enif_schedule_nif(env, "nif_sched2", 0, nif_sched2, 4, new_argv);
+}
+
+static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (argc != 2)
+ return enif_make_atom(env, "false");
+ return enif_schedule_nif(env, "nif_sched1", 0, nif_sched1, argc, argv);
+}
+
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
+static int have_dirty_schedulers(void)
+{
+ ErlNifSysInfo si;
+ enif_system_info(&si, sizeof(si));
+ return si.dirty_scheduler_support;
+}
+
static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int n;
char s[10];
ErlNifBinary b;
ERL_NIF_TERM result;
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
assert(enif_is_on_dirty_scheduler(env));
}
assert(argc == 3);
enif_get_int(env, argv[0], &n);
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1);
enif_inspect_binary(env, argv[2], &b);
- result = enif_make_tuple3(env,
- enif_make_int(env, n),
- enif_make_string(env, s, ERL_NIF_LATIN1),
- enif_make_binary(env, &b));
- return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer);
+ return enif_make_tuple3(env,
+ enif_make_int(env, n),
+ enif_make_string(env, s, ERL_NIF_LATIN1),
+ enif_make_binary(env, &b));
}
static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -1522,17 +1574,67 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
assert(!enif_is_on_dirty_scheduler(env));
if (argc != 3)
return enif_make_badarg(env);
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
if (enif_get_int(env, argv[0], &n) &&
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) &&
enif_inspect_binary(env, argv[2], &b))
- return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
+ return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
else
return enif_make_badarg(env);
} else {
return dirty_nif(env, argc, argv);
}
}
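For context (not part of the patch): the change above replaces the old enif_schedule_dirty_nif/enif_schedule_dirty_nif_finalizer pair with plain enif_schedule_nif plus a dirty-job flag. A minimal sketch of that dispatch shape, assuming hypothetical blocking_read_nif/read_entry_nif NIFs and an emulator built with dirty scheduler support:

#include "erl_nif.h"

/* Sketch only: both NIF names are hypothetical. */
static ERL_NIF_TERM blocking_read_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Long-running or blocking work would run here, on a dirty I/O
     * scheduler, without stalling the normal schedulers. */
    return enif_make_atom(env, "ok");
}

static ERL_NIF_TERM read_entry_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Cheap argument checking happens on a normal scheduler... */
    if (argc != 1)
        return enif_make_badarg(env);
    /* ...and the heavy part is rescheduled as a dirty job. */
    return enif_schedule_nif(env, "blocking_read_nif", ERL_NIF_DIRTY_JOB_IO_BOUND,
                             blocking_read_nif, argc, argv);
}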
+
+static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM result;
+ ErlNifPid pid;
+ ErlNifEnv* menv;
+ int res;
+
+ if (!enif_get_local_pid(env, argv[0], &pid))
+ return enif_make_badarg(env);
+ result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid));
+ menv = enif_alloc_env();
+ res = enif_send(env, &pid, menv, result);
+ enif_free_env(menv);
+ if (!res)
+ return enif_make_badarg(env);
+ else
+ return result;
+}
+
+static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ switch (argc) {
+ case 0: {
+ ERL_NIF_TERM args[255];
+ int i;
+ for (i = 0; i < 255; i++)
+ args[i] = enif_make_int(env, i);
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, 255, args);
+ }
+ case 1:
+ return enif_make_badarg(env);
+ default:
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, argc-1, argv);
+ }
+}
+
+static ERL_NIF_TERM call_dirty_nif_zero_args(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int i;
+ ERL_NIF_TERM result[1000];
+ ERL_NIF_TERM ok = enif_make_atom(env, "ok");
+ assert(argc == 0);
+ for (i = 0; i < sizeof(result)/sizeof(*result); i++) {
+ result[i] = ok;
+ }
+ return enif_make_list_from_array(env, result, i);
+}
#endif
static ERL_NIF_TERM is_map_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -1710,9 +1812,14 @@ static ErlNifFunc nif_funcs[] =
{"echo_int", 1, echo_int},
{"type_sizes", 0, type_sizes},
{"otp_9668_nif", 1, otp_9668_nif},
+ {"otp_9828_nif", 1, otp_9828_nif},
{"consume_timeslice_nif", 2, consume_timeslice_nif},
+ {"call_nif_schedule", 2, call_nif_schedule},
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
{"call_dirty_nif", 3, call_dirty_nif},
+ {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"call_dirty_nif_exception", 0, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND},
+ {"call_dirty_nif_zero_args", 0, call_dirty_nif_zero_args, ERL_NIF_DIRTY_JOB_CPU_BOUND},
#endif
{"is_map_nif", 1, is_map_nif},
{"get_map_size_nif", 1, get_map_size_nif},
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.c b/erts/emulator/test/nif_SUITE_data/nif_mod.c
index 55a0d2ac4f..11b5d0cc35 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.c
@@ -217,7 +217,8 @@ static int upgrade(ErlNifEnv* env, void** priv, void** old_priv_data, ERL_NIF_TE
*priv = *old_priv_data;
do_load_info(env, load_info, &retval);
-
+ if (retval)
+ NifModPrivData_release(data);
return retval;
}
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index ff8d18eef8..8cf8377c30 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -394,18 +394,15 @@ t_string_to_integer(Config) when is_list(Config) ->
test_sti(268435455),
test_sti(-268435455),
- %% 1 bsl 28 - 1, just before 32 bit bignum
- test_sti(1 bsl 28 - 1),
- %% 1 bsl 28, just beyond 32 bit small
- test_sti(1 bsl 28),
- %% 1 bsl 33, just beyond 32 bit
- test_sti(1 bsl 33),
- %% 1 bsl 60 - 1, just before 64 bit bignum
- test_sti(1 bsl 60 - 1),
- %% 1 bsl 60, just beyond 64 bit small
- test_sti(1 bsl 60),
- %% 1 bsl 65, just beyond 64 bit
- test_sti(1 bsl 65),
+ %% Interesting values around powers of two, such as MIN_SMALL and MAX_SMALL.
+ lists:foreach(fun(Bits) ->
+ N = 1 bsl Bits,
+ test_sti(N - 1),
+ test_sti(N),
+ test_sti(N + 1)
+ end,
+ lists:seq(16, 130)),
+
%% Bignums.
test_sti(123456932798748738738,16),
test_sti(list_to_integer(lists:duplicate(2000, $1))),
@@ -454,10 +451,11 @@ test_sti(Num) ->
end|| Base <- lists:seq(2,36)].
test_sti(Num,Base) ->
- Num = list_to_integer(int2list(Num,Base),Base),
- Num = -1*list_to_integer(int2list(Num*-1,Base),Base),
- Num = binary_to_integer(int2bin(Num,Base),Base),
- Num = -1*binary_to_integer(int2bin(Num*-1,Base),Base).
+ Neg = -Num,
+ Num = list_to_integer(int2list(Num,Base),Base),
+ Neg = list_to_integer(int2list(Num*-1,Base),Base),
+ Num = binary_to_integer(int2bin(Num,Base),Base),
+ Neg = binary_to_integer(int2bin(Num*-1,Base),Base).
% Calling this function (which is not supposed to be inlined) prevents
% the compiler from calculating the answer, so we don't test the compiler
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 202a8b7537..9083545060 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -90,6 +90,7 @@
mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1,
exit_status_multi_scheduling_block/1, ports/1,
spawn_driver/1, spawn_executable/1, close_deaf_port/1,
+ port_setget_data/1,
unregister_name/1, parallelism_option/1]).
-export([do_iter_max_ports/2]).
@@ -115,6 +116,7 @@ all() ->
mix_up_ports, otp_5112, otp_5119,
exit_status_multi_scheduling_block, ports, spawn_driver,
spawn_executable, close_deaf_port, unregister_name,
+ port_setget_data,
parallelism_option].
groups() ->
@@ -1698,12 +1700,13 @@ otp_5119(Config) when is_list(Config) ->
Path = ?config(data_dir, Config),
ok = load_driver(Path, "exit_drv"),
PI1 = port_ix(otp_5119_fill_empty_port_tab([])),
- PI2 = port_ix(erlang:open_port({spawn, "exit_drv"}, [])),
+ Port2 = erlang:open_port({spawn, "exit_drv"}, []),
+ PI2 = port_ix(Port2),
{PortIx1, PortIx2} = case PI2 > PI1 of
true ->
{PI1, PI2};
false ->
- {port_ix(otp_5119_fill_empty_port_tab([PI2])),
+ {port_ix(otp_5119_fill_empty_port_tab([Port2])),
port_ix(erlang:open_port({spawn, "exit_drv"}, []))}
end,
MaxPorts = max_ports(),
@@ -2318,7 +2321,7 @@ close_deaf_port(Config) when is_list(Config) ->
test_server:timetrap_cancel(Dog),
Res.
-close_deaf_port_1(1000, _) ->
+close_deaf_port_1(200, _) ->
ok;
close_deaf_port_1(N, Cmd) ->
Timeout = integer_to_list(random:uniform(5*1000)),
@@ -2332,6 +2335,61 @@ close_deaf_port_1(N, Cmd) ->
{comment, "Could not spawn more than " ++ integer_to_list(N) ++ " OS processes."}
end.
+%% Test undocumented port_set_data/2 and port_get_data/1
+%% Hammer it from multiple processes for a while
+%% and then abruptly close the port (OTP-12208).
+port_setget_data(Config) when is_list(Config) ->
+ ok = load_driver(?config(data_dir, Config), "echo_drv"),
+ Port = erlang:open_port({spawn_driver, "echo_drv"}, []),
+
+ NSched = erlang:system_info(schedulers_online),
+ HeapData = {1,2,3,<<"A heap binary">>,fun()->"This is fun"end,
+ list_to_binary(lists:seq(1,100))},
+ PRs = lists:map(fun(I) ->
+ spawn_opt(fun() -> port_setget_data_hammer(Port,HeapData,false,1) end,
+ [monitor, {scheduler, I rem NSched}])
+ end,
+ lists:seq(1,10)),
+ receive after 100 -> ok end,
+ Papa = self(),
+ lists:foreach(fun({Pid,_}) -> Pid ! {Papa,prepare_for_close} end, PRs),
+ lists:foreach(fun({Pid,_}) ->
+ receive {Pid,prepare_for_close} -> ok end
+ end,
+ PRs),
+ port_close(Port),
+ lists:foreach(fun({Pid,Ref}) ->
+ receive {'DOWN', Ref, process, Pid, normal} -> ok end
+ end,
+ PRs),
+ ok.
+
+port_setget_data_hammer(Port, HeapData, IsSet0, N) ->
+ Rand = random:uniform(3),
+ IsSet1 = try case Rand of
+ 1 -> true = erlang:port_set_data(Port, atom), true;
+ 2 -> true = erlang:port_set_data(Port, HeapData), true;
+ 3 -> case erlang:port_get_data(Port) of
+ atom -> true;
+ HeapData -> true;
+ undefined -> false=IsSet0
+ end
+ end
+ catch
+ error:badarg ->
+ true = get(prepare_for_close),
+ io:format("~p did ~p rounds before port closed\n", [self(), N]),
+ exit(normal)
+ end,
+ receive {Papa, prepare_for_close} ->
+ put(prepare_for_close, true),
+ Papa ! {self(),prepare_for_close}
+ after 0 ->
+ ok
+ end,
+ port_setget_data_hammer(Port, HeapData, IsSet1, N+1).
+
+
wait_until(Fun) ->
case catch Fun() of
true ->
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index ceb4afb5cf..f959714be7 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -155,6 +155,7 @@ misc_smoke_tests(Config) when is_list(Config) ->
?line true = is_binary(erlang:system_info(loaded)),
?line true = is_binary(erlang:system_info(dist)),
?line ok = try erlang:system_info({cpu_topology,erts_get_cpu_topology_error_case}), fail catch error:badarg -> ok end,
+ true = lists:member(erlang:system_info(tolerant_timeofday), [enabled, disabled]),
?line ok.
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index 2251575e5a..4d7598cf1f 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -181,6 +181,13 @@ send_trace(Config) when is_list(Config) ->
?line {trace, Sender, send, to_receiver, Receiver} = receive_first(),
?line receive_nothing(),
+ %% Check that a message sent to another registered process is traced.
+ register(?MODULE,Receiver),
+ Sender ! {send_please, ?MODULE, to_receiver},
+ {trace, Sender, send, to_receiver, ?MODULE} = receive_first(),
+ receive_nothing(),
+ unregister(?MODULE),
+
%% Check that a message sent to this process is traced.
?line Sender ! {send_please, self(), to_myself},
?line receive to_myself -> ok end,
@@ -188,6 +195,21 @@ send_trace(Config) when is_list(Config) ->
?line {trace, Sender, send, to_myself, Self} = receive_first(),
?line receive_nothing(),
+ %% Check that a message sent to a dead process is traced.
+ {Pid,Ref} = spawn_monitor(fun() -> ok end),
+ receive {'DOWN',Ref,_,_,_} -> ok end,
+ Sender ! {send_please, Pid, to_dead},
+ {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first(),
+ receive_nothing(),
+
+ %% Check that a message sent to an unknown registered process is traced.
+ BadargSender = fun_spawn(fun sender/0),
+ 1 = erlang:trace(BadargSender, true, [send]),
+ unlink(BadargSender),
+ BadargSender ! {send_please, not_registered, to_unknown},
+ {trace, BadargSender, send, to_unknown, not_registered} = receive_first(),
+ receive_nothing(),
+
%% Another process should not be able to trace Sender.
?line Intruder = fun_spawn(fun() -> erlang:trace(Sender, true, [send]) end),
?line {'EXIT', Intruder, {badarg, _}} = receive_first(),
diff --git a/erts/emulator/test/trace_call_time_SUITE.erl b/erts/emulator/test/trace_call_time_SUITE.erl
index 5dfa87bbee..3036d2957b 100644
--- a/erts/emulator/test/trace_call_time_SUITE.erl
+++ b/erts/emulator/test/trace_call_time_SUITE.erl
@@ -33,7 +33,7 @@
%% Exported end user tests
-export([seq/3, seq_r/3]).
--export([loaded/1, a_function/1, a_called_function/1, dec/1, nif_dec/1]).
+-export([loaded/1, a_function/1, a_called_function/1, dec/1, nif_dec/1, dead_tracer/1]).
-define(US_ERROR, 10000).
-define(R_ERROR, 0.8).
@@ -89,7 +89,7 @@ all() ->
true -> [not_run];
false ->
[basic, on_and_off, info, pause_and_restart, scheduling,
- combo, bif, nif, called_function]
+ combo, bif, nif, called_function, dead_tracer]
end.
groups() ->
@@ -470,6 +470,92 @@ called_function(Config) when is_list(Config) ->
?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
ok.
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+dead_tracer(Config) when is_list(Config) ->
+ Self = self(),
+ FirstTracer = tracer(),
+ StartTracing = fun() -> turn_on_tracing(Self) end,
+ tell_tracer(FirstTracer, StartTracing),
+ [1,2,3,4,5,6,7,8] = seq(1, 8, fun(I) -> I + 1 end),
+ Ref = erlang:monitor(process, FirstTracer),
+ FirstTracer ! quit,
+ receive
+ {'DOWN',Ref,process,FirstTracer,normal} ->
+ ok
+ end,
+ erlang:yield(),
+
+ %% Collect and check that we only get call_time info for the current process.
+ Info1 = collect_all_info(),
+ [] = other_than_self(Info1),
+ io:format("~p\n", [Info1]),
+
+ %% Note that we have not turned off tracing for the current process,
+ %% but that the tracer has terminated. No more call_time information should be recorded.
+ [1,2,3] = seq(1, 3, fun(I) -> I + 1 end),
+ [] = collect_all_info(),
+
+ %% When we start a second tracer process, that tracer process must
+ %% not inherit the tracing flags and the dead tracer (even though
+ %% we used set_on_spawn).
+ SecondTracer = tracer(),
+ tell_tracer(SecondTracer, StartTracing),
+ Seq20 = lists:seq(1, 20),
+ Seq20 = seq(1, 20, fun(I) -> I + 1 end),
+ Info2 = collect_all_info(),
+ io:format("~p\n", [Info2]),
+ [] = other_than_self(Info2),
+ SecondTracer ! quit,
+
+ ok.
+
+other_than_self(Info) ->
+ [{Pid,MFA} || {MFA,[{Pid,_,_,_}]} <- Info,
+ Pid =/= self()].
+
+tell_tracer(Tracer, Fun) ->
+ Tracer ! {execute,self(),Fun},
+ receive
+ {Tracer,executed} ->
+ ok
+ end.
+
+tracer() ->
+ spawn_link(fun Loop() ->
+ receive
+ quit ->
+ ok;
+ {execute,From,Fun} ->
+ Fun(),
+ From ! {self(),executed},
+ Loop()
+ end
+ end).
+
+turn_on_tracing(Pid) ->
+ _ = erlang:trace(Pid, true, [call,set_on_spawn]),
+ _ = erlang:trace_pattern({?MODULE,'_','_'}, true, [call_time]),
+ _ = now(),
+ ok.
+
+collect_all_info() ->
+ collect_all_info([{?MODULE,F,A} || {F,A} <- module_info(functions)] ++
+ erlang:system_info(snifs)).
+
+collect_all_info([MFA|T]) ->
+ CallTime = erlang:trace_info(MFA, call_time),
+ erlang:trace_pattern(MFA, restart, [call_time]),
+ case CallTime of
+ {call_time,false} ->
+ collect_all_info(T);
+ {call_time,[]} ->
+ collect_all_info(T);
+ {call_time,[_|_]=List} ->
+ [{MFA,List}|collect_all_info(T)]
+ end;
+collect_all_info([]) -> [].
+
%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% The Tests
%%%
@@ -478,7 +564,6 @@ called_function(Config) when is_list(Config) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Local helpers
-
load_nif(Config) ->
?line Path = ?config(data_dir, Config),
?line ok = erlang:load_nif(filename:join(Path,"trace_nif"), 0).
@@ -602,8 +687,11 @@ collect(A, Ref) ->
end.
setup() ->
+ setup([]).
+
+setup(Opts) ->
Pid = spawn_link(fun() -> loop() end),
- ?line 1 = erlang:trace(Pid, true, [call]),
+ 1 = erlang:trace(Pid, true, [call|Opts]),
Pid.
execute(Pids, Mfa) when is_list(Pids) ->
diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl
index 46ece41096..f627eea07f 100644
--- a/erts/emulator/test/tuple_SUITE.erl
+++ b/erts/emulator/test/tuple_SUITE.erl
@@ -21,8 +21,9 @@
init_per_group/2,end_per_group/2,
t_size/1, t_tuple_size/1, t_element/1, t_setelement/1,
t_insert_element/1, t_delete_element/1,
- t_list_to_tuple/1, t_tuple_to_list/1,
- t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1,
+ t_list_to_tuple/1, t_list_to_upper_boundry_tuple/1, t_tuple_to_list/1,
+ t_make_tuple_2/1, t_make_upper_boundry_tuple_2/1, t_make_tuple_3/1,
+ t_append_element/1, t_append_element_upper_boundry/1,
build_and_match/1, tuple_with_case/1, tuple_in_guard/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -40,8 +41,10 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[build_and_match, t_size, t_tuple_size, t_list_to_tuple,
+ t_list_to_upper_boundry_tuple,
t_tuple_to_list, t_element, t_setelement,
- t_make_tuple_2, t_make_tuple_3, t_append_element,
+ t_make_tuple_2, t_make_upper_boundry_tuple_2, t_make_tuple_3,
+ t_append_element, t_append_element_upper_boundry,
t_insert_element, t_delete_element,
tuple_with_case, tuple_in_guard].
@@ -49,11 +52,21 @@ groups() ->
[].
init_per_suite(Config) ->
+ A0 = case application:start(sasl) of
+ ok -> [sasl];
+ _ -> []
+ end,
+ A = case application:start(os_mon) of
+ ok -> [os_mon|A0];
+ _ -> A0
+ end,
+ [{started_apps, A}|Config].
+
+end_per_suite(Config) ->
+ As = ?config(started_apps, Config),
+ lists:foreach(fun (A) -> application:stop(A) end, As),
Config.
-end_per_suite(_Config) ->
- ok.
-
init_per_group(_GroupName, Config) ->
Config.
@@ -176,14 +189,19 @@ t_list_to_tuple(Config) when is_list(Config) ->
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- MaxSize = size(MaxTuple),
-
{'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))),
ok.
+t_list_to_upper_boundry_tuple(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ MaxSize = size(MaxTuple),
+ ok
+ end).
+
%% Tests tuple_to_list/1.
t_tuple_to_list(Config) when is_list(Config) ->
@@ -214,8 +232,6 @@ t_make_tuple_2(Config) when is_list(Config) ->
t_make_tuple1({a}),
t_make_tuple1(erlang:make_tuple(400, [])),
- % test upper boundry, 16777215 elements
- t_make_tuple(1 bsl 24 - 1, a),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)),
@@ -225,6 +241,13 @@ t_make_tuple_2(Config) when is_list(Config) ->
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)),
ok.
+t_make_upper_boundry_tuple_2(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ t_make_tuple(1 bsl 24 - 1, a)
+ end).
+
t_make_tuple1(Element) ->
lists:foreach(fun(Size) -> t_make_tuple(Size, Element) end,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 256, 511, 512, 999,
@@ -309,13 +332,17 @@ t_delete_element(Config) when is_list(Config) ->
%% Tests the append_element/2 BIF.
t_append_element(Config) when is_list(Config) ->
- ok = t_append_element({}, 2048, 2048),
-
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
- ok.
+ ok = t_append_element({}, 2048, 2048).
+
+t_append_element_upper_boundry(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
+ ok
+ end).
t_append_element(_Tuple, 0, _High) -> ok;
t_append_element(Tuple, N, High) ->
@@ -371,3 +398,31 @@ tuple_in_guard(Config) when is_list(Config) ->
%% Use this function to avoid compile-time evaluation of an expression.
id(I) -> I.
+
+sys_mem_cond_run(ReqSizeMB, TestFun) when is_integer(ReqSizeMB) ->
+ case total_memory() of
+ TotMem when is_integer(TotMem), TotMem >= ReqSizeMB ->
+ TestFun();
+ TotMem when is_integer(TotMem) ->
+ {skipped, "Not enough memory ("++integer_to_list(TotMem)++" MB)"};
+ undefined ->
+ {skipped, "Could not retrieve memory information"}
+ end.
+
+
+total_memory() ->
+ %% Total memory in MB.
+ try
+ MemoryData = memsup:get_system_memory_data(),
+ case lists:keysearch(total_memory, 1, MemoryData) of
+ {value, {total_memory, TM}} ->
+ TM div (1024*1024);
+ false ->
+ {value, {system_total_memory, STM}} =
+ lists:keysearch(system_total_memory, 1, MemoryData),
+ STM div (1024*1024)
+ end
+ catch
+ _ : _ ->
+ undefined
+ end.
diff --git a/erts/emulator/test/z_SUITE.erl b/erts/emulator/test/z_SUITE.erl
index 4b3075a164..b0c6224dfe 100644
--- a/erts/emulator/test/z_SUITE.erl
+++ b/erts/emulator/test/z_SUITE.erl
@@ -38,7 +38,7 @@
-export([schedulers_alive/1, node_container_refc_check/1,
long_timers/1, pollset_size/1,
- check_io_debug/1]).
+ check_io_debug/1, get_check_io_info/0]).
-define(DEFAULT_TIMEOUT, ?t:minutes(5)).
@@ -288,11 +288,14 @@ check_io_debug(Config) when is_list(Config) ->
end.
check_io_debug_test() ->
+ ?line erlang:display(get_check_io_info()),
?line erts_debug:set_internal_state(available_internal_state, true),
- ?line erlang:display(erlang:system_info(check_io)),
- ?line NoOfErrorFds = erts_debug:get_internal_state(check_io_debug),
+ ?line {NoErrorFds, NoUsedFds, NoDrvSelStructs, NoDrvEvStructs}
+ = erts_debug:get_internal_state(check_io_debug),
?line erts_debug:set_internal_state(available_internal_state, false),
- ?line 0 = NoOfErrorFds,
+ ?line 0 = NoErrorFds,
+ ?line NoUsedFds = NoDrvSelStructs,
+ ?line 0 = NoDrvEvStructs,
?line ok.
@@ -305,7 +308,7 @@ display_check_io(ChkIo) ->
catch erlang:display('--- CHECK IO INFO ---'),
catch erlang:display(ChkIo),
catch erts_debug:set_internal_state(available_internal_state, true),
- NoOfErrorFds = (catch erts_debug:get_internal_state(check_io_debug)),
+ NoOfErrorFds = (catch element(1, erts_debug:get_internal_state(check_io_debug))),
catch erlang:display({'NoOfErrorFds', NoOfErrorFds}),
catch erts_debug:set_internal_state(available_internal_state, false),
catch erlang:display('--- CHECK IO INFO ---'),
@@ -313,14 +316,19 @@ display_check_io(ChkIo) ->
get_check_io_info() ->
ChkIo = erlang:system_info(check_io),
- case lists:keysearch(pending_updates, 1, ChkIo) of
- {value, {pending_updates, 0}} ->
+ PendUpdNo = case lists:keysearch(pending_updates, 1, ChkIo) of
+ {value, {pending_updates, PendNo}} ->
+ PendNo;
+ false ->
+ 0
+ end,
+ {value, {active_fds, ActFds}} = lists:keysearch(active_fds, 1, ChkIo),
+ case {PendUpdNo, ActFds} of
+ {0, 0} ->
display_check_io(ChkIo),
ChkIo;
- false ->
- ChkIo;
_ ->
- receive after 10 -> ok end,
+ receive after 100 -> ok end,
get_check_io_info()
end.
diff --git a/erts/emulator/utils/gen_git_version b/erts/emulator/utils/gen_git_version
index ef06a4b8e2..9faf015b62 100755
--- a/erts/emulator/utils/gen_git_version
+++ b/erts/emulator/utils/gen_git_version
@@ -5,9 +5,9 @@ OUTPUT_FILE=$1
if command -v git 2>&1 >/dev/null &&
test -d $ERL_TOP/.git -o -f $ERL_TOP/.git
then
- VSN=`git describe --match "OTP_R[0-9][0-9][A-B]*" HEAD`
+ VSN=`git describe --match "OTP-[0-9]*" HEAD`
case "$VSN" in
- OTP_R*-g*)
+ OTP-*-g*)
VSN=`echo $VSN | sed -e 's/.*-g\\(.*\\)/\\1/g'` ;;
*) VSN="na" ;;
esac
@@ -36,4 +36,4 @@ then
fi
exit 0
fi
-exit 1 \ No newline at end of file
+exit 1
diff --git a/erts/emulator/utils/make_compiler_flags b/erts/emulator/utils/make_compiler_flags
index cebe8cd0c5..ca1bc47113 100755
--- a/erts/emulator/utils/make_compiler_flags
+++ b/erts/emulator/utils/make_compiler_flags
@@ -70,7 +70,7 @@ my($prog) = $prog[$#prog];
print "/* Warning: Do not edit this file.\n";
print " Auto-generated by '$prog'.*/\n";
-foreach(keys %constants) {
+foreach (sort(keys %constants)) {
print "const char* erts_build_flags_$_ = \"$constants{$_}\";\n"
}