Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 36
-rw-r--r--  erts/emulator/beam/atom.names | 17
-rw-r--r--  erts/emulator/beam/beam_debug.c | 42
-rw-r--r--  erts/emulator/beam/beam_emu.c | 452
-rw-r--r--  erts/emulator/beam/beam_load.c | 244
-rw-r--r--  erts/emulator/beam/beam_load.h | 8
-rw-r--r--  erts/emulator/beam/benchmark.c | 65
-rw-r--r--  erts/emulator/beam/benchmark.h | 45
-rw-r--r--  erts/emulator/beam/bif.c | 526
-rw-r--r--  erts/emulator/beam/bif.h | 6
-rw-r--r--  erts/emulator/beam/bif.tab | 43
-rw-r--r--  erts/emulator/beam/big.c | 40
-rw-r--r--  erts/emulator/beam/big.h | 7
-rw-r--r--  erts/emulator/beam/binary.c | 7
-rw-r--r--  erts/emulator/beam/break.c | 131
-rw-r--r--  erts/emulator/beam/copy.c | 58
-rw-r--r--  erts/emulator/beam/dist.c | 477
-rw-r--r--  erts/emulator/beam/dist.h | 97
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 5
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 6
-rw-r--r--  erts/emulator/beam/erl_async.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 1
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 30
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 161
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 160
-rw-r--r--  erts/emulator/beam/erl_bif_timer.h | 1
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 556
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 131
-rw-r--r--  erts/emulator/beam/erl_binary.h | 34
-rw-r--r--  erts/emulator/beam/erl_bits.c | 10
-rw-r--r--  erts/emulator/beam/erl_db.c | 119
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 307
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 82
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 556
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 21
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 7
-rw-r--r--  erts/emulator/beam/erl_gc.c | 93
-rw-r--r--  erts/emulator/beam/erl_gc.h | 40
-rw-r--r--  erts/emulator/beam/erl_init.c | 159
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 2
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 7
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 9
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 2
-rw-r--r--  erts/emulator/beam/erl_map.c | 2793
-rw-r--r--  erts/emulator/beam/erl_map.h | 177
-rw-r--r--  erts/emulator/beam/erl_math.c | 18
-rw-r--r--  erts/emulator/beam/erl_message.c | 14
-rw-r--r--  erts/emulator/beam/erl_message.h | 29
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 3
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 192
-rw-r--r--  erts/emulator/beam/erl_nif.h | 14
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 188
-rw-r--r--  erts/emulator/beam/erl_process.c | 573
-rw-r--r--  erts/emulator/beam/erl_process.h | 51
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 54
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 174
-rw-r--r--  erts/emulator/beam/erl_term.c | 4
-rw-r--r--  erts/emulator/beam/erl_term.h | 67
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 50
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 4
-rw-r--r--  erts/emulator/beam/erl_threads.h | 49
-rw-r--r--  erts/emulator/beam/erl_time.h | 283
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 1858
-rw-r--r--  erts/emulator/beam/erl_trace.c | 7
-rw-r--r--  erts/emulator/beam/erl_utils.h | 2
-rw-r--r--  erts/emulator/beam/erl_vm.h | 7
-rw-r--r--  erts/emulator/beam/external.c | 479
-rw-r--r--  erts/emulator/beam/external.h | 11
-rw-r--r--  erts/emulator/beam/global.h | 286
-rw-r--r--  erts/emulator/beam/io.c | 89
-rw-r--r--  erts/emulator/beam/ops.tab | 22
-rw-r--r--  erts/emulator/beam/sys.h | 141
-rw-r--r--  erts/emulator/beam/time.c | 650
-rw-r--r--  erts/emulator/beam/utils.c | 949
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 6
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 73
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 178
-rw-r--r--  erts/emulator/drivers/win32/ttsl_drv.c | 16
-rw-r--r--  erts/emulator/hipe/hipe_amd64.c | 75
-rw-r--r--  erts/emulator/hipe/hipe_amd64_abi.txt | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64_asm.m4 | 55
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 42
-rw-r--r--  erts/emulator/hipe/hipe_amd64_glue.S | 17
-rw-r--r--  erts/emulator/hipe/hipe_arch.h | 1
-rw-r--r--  erts/emulator/hipe/hipe_arm.c | 18
-rw-r--r--  erts/emulator/hipe/hipe_arm.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_arm_asm.m4 | 12
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 1
-rw-r--r--  erts/emulator/hipe/hipe_arm_glue.S | 9
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 244
-rw-r--r--  erts/emulator/hipe/hipe_bif0.tab | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif1.c | 53
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 5
-rw-r--r--  erts/emulator/hipe/hipe_debug.c | 4
-rw-r--r--  erts/emulator/hipe/hipe_gc.c | 3
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 5
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 66
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.h | 53
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 3
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.c | 229
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.h | 23
-rw-r--r--  erts/emulator/hipe/hipe_perfctr.tab | 25
-rw-r--r--  erts/emulator/hipe/hipe_ppc.c | 38
-rw-r--r--  erts/emulator/hipe/hipe_ppc.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_ppc_asm.m4 | 32
-rw-r--r--  erts/emulator/hipe/hipe_ppc_bifs.m4 | 39
-rw-r--r--  erts/emulator/hipe/hipe_ppc_glue.S | 19
-rw-r--r--  erts/emulator/hipe/hipe_process.h | 9
-rw-r--r--  erts/emulator/hipe/hipe_risc_stack.c | 4
-rw-r--r--  erts/emulator/hipe/hipe_sparc.c | 25
-rw-r--r--  erts/emulator/hipe/hipe_sparc.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_sparc_asm.m4 | 18
-rw-r--r--  erts/emulator/hipe/hipe_sparc_bifs.m4 | 37
-rw-r--r--  erts/emulator/hipe/hipe_sparc_glue.S | 23
-rw-r--r--  erts/emulator/hipe/hipe_stack.h | 11
-rw-r--r--  erts/emulator/hipe/hipe_x86.c | 49
-rw-r--r--  erts/emulator/hipe/hipe_x86.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_x86_asm.m4 | 26
-rw-r--r--  erts/emulator/hipe/hipe_x86_bifs.m4 | 39
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.S | 17
-rw-r--r--  erts/emulator/hipe/hipe_x86_stack.c | 4
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 23
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 6
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 221
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 6
-rw-r--r--  erts/emulator/sys/ose/erl_poll.c | 69
-rw-r--r--  erts/emulator/sys/ose/sys.c | 4
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 159
-rw-r--r--  erts/emulator/sys/unix/sys.c | 494
-rw-r--r--  erts/emulator/sys/unix/sys_float.c | 4
-rw-r--r--  erts/emulator/sys/unix/sys_time.c | 398
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 60
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 65
-rw-r--r--  erts/emulator/sys/win32/sys.c | 167
-rw-r--r--  erts/emulator/sys/win32/sys_time.c | 199
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 33
-rw-r--r--  erts/emulator/test/long_timers_test.erl | 96
-rw-r--r--  erts/emulator/test/map_SUITE.erl | 746
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 72
-rw-r--r--  erts/emulator/test/module_info_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/monitor_SUITE.erl | 113
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 7
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/time_SUITE.erl | 407
-rw-r--r--  erts/emulator/test/timer_bif_SUITE.erl | 18
-rw-r--r--  erts/emulator/test/trace_bif_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/unique_SUITE.erl | 390
-rw-r--r--  erts/emulator/valgrind/suppress.patched.3.6.0 | 5
-rw-r--r--  erts/emulator/valgrind/suppress.standard | 5
156 files changed, 15083 insertions, 4415 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 7145824f91..1429a6cf2c 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -112,18 +112,24 @@ NO_INLINE_FUNCTIONS=true
else
ifeq ($(TYPE),lcnt)
-PURIFY =
+PURIFY =
TYPEMARKER = .lcnt
TYPE_FLAGS = @CFLAGS@ -DERTS_ENABLE_LOCK_COUNT
else
ifeq ($(TYPE),frmptr)
-PURIFY =
+PURIFY =
OMIT_OMIT_FP=yes
TYPEMARKER = .frmptr
TYPE_FLAGS = @CFLAGS@ -DERTS_FRMPTR
else
+ifeq ($(TYPE),icount)
+PURIFY =
+TYPEMARKER = .icount
+TYPE_FLAGS = @CFLAGS@ -DERTS_OPCODE_COUNTER_SUPPORT
+else
+
# If type isn't one of the above, it *is* opt type...
override TYPE=opt
PURIFY =
@@ -138,6 +144,7 @@ endif
endif
endif
endif
+endif
comma:=,
space:=
@@ -342,16 +349,6 @@ endif
EPCRE_LIB = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
DEPLIBS += $(EPCRE_LIB)
-PERFCTR_PATH=@PERFCTR_PATH@
-USE_PERFCTR=@USE_PERFCTR@
-ifdef PERFCTR_PATH
-LIBS += $(PERFCTR_PATH)/usr.lib/libperfctr.a
-else
-ifdef USE_PERFCTR
-LIBS += -lperfctr
-endif
-endif
-
LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
@@ -559,9 +556,6 @@ HIPE_ppc64_TAB=hipe/hipe_ppc64.tab $(HIPE_ARCH64_TAB)
HIPE_arm_TAB=hipe/hipe_arm.tab
HIPE_ARCH_TAB=$(HIPE_$(ARCH)_TAB)
BIFS += hipe/hipe_bif0.tab hipe/hipe_bif1.tab hipe/hipe_bif2.tab $(HIPE_ARCH_TAB)
-ifdef USE_PERFCTR
-BIFS += hipe/hipe_perfctr.tab
-endif
endif
$(TARGET)/erl_bif_table.c \
@@ -657,10 +651,6 @@ COMMON_INCLUDES += -I../include/internal -I../include/internal/$(TARGET)
INCLUDES = -I$(TTF_DIR) $(COMMON_INCLUDES)
-ifdef PERFCTR_PATH
-INCLUDES += -I$(PERFCTR_PATH)/usr.lib -I$(PERFCTR_PATH)/linux/include
-endif
-
ifeq ($(TARGET),win32)
$(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c
$(V_CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@
@@ -764,7 +754,8 @@ RUN_OBJS = \
$(OBJDIR)/erl_bif_ddll.o $(OBJDIR)/erl_bif_guard.o \
$(OBJDIR)/erl_bif_info.o $(OBJDIR)/erl_bif_op.o \
$(OBJDIR)/erl_bif_os.o $(OBJDIR)/erl_bif_lists.o \
- $(OBJDIR)/erl_bif_trace.o $(OBJDIR)/erl_bif_wrap.o \
+ $(OBJDIR)/erl_bif_trace.o $(OBJDIR)/erl_bif_unique.o \
+ $(OBJDIR)/erl_bif_wrap.o \
$(OBJDIR)/erl_trace.o $(OBJDIR)/copy.o \
$(OBJDIR)/utils.o $(OBJDIR)/bif.o \
$(OBJDIR)/io.o $(OBJDIR)/erl_printf_term.o\
@@ -920,14 +911,11 @@ HIPE_OBJS= \
$(OBJDIR)/hipe_mode_switch.o \
$(OBJDIR)/hipe_native_bif.o \
$(OBJDIR)/hipe_stack.o $(HIPE_ARCH_OBJS)
-ifdef USE_PERFCTR
-HIPE_OBJS += $(OBJDIR)/hipe_perfctr.o
-endif
ifdef HIPE_ENABLED
EXTRA_BASE_OBJS += $(HIPE_OBJS)
endif
-BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
+BASE_OBJS = $(EMU_OBJS) $(RUN_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 5d06a32941..ae3f30d82f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -109,6 +109,7 @@ atom bag
atom band
atom big
atom bif_return_trap
+atom bif_timer_server
atom binary
atom binary_bin_to_list_trap
atom binary_copy_trap
@@ -144,9 +145,11 @@ atom catchlevel
atom cd
atom cdr
atom cflags
+atom CHANGE='CHANGE'
atom characters_to_binary_int
atom characters_to_list_int
atom clear
+atom clock_service
atom close
atom closed
atom code
@@ -156,6 +159,7 @@ atom compat_rel
atom compile
atom compressed
atom config_h
+atom convert_time_unit
atom connect
atom connected
atom connection_closed
@@ -198,6 +202,7 @@ atom dotall
atom driver
atom driver_options
atom dsend
+atom dsend_continue_trap
atom dunlink
atom duplicate_bag
atom dupnames
@@ -236,7 +241,7 @@ atom first
atom firstline
atom flags
atom flush
-atom flush_monitor_message
+atom flush_monitor_messages
atom force
atom format_cpu_topology
atom free
@@ -279,7 +284,6 @@ atom http httph https http_response http_request http_header http_eoh http_error
atom id
atom if_clause
atom ignore
-atom imports
atom in
atom in_exiting
atom inactive
@@ -335,6 +339,7 @@ atom max
atom maximum
atom max_tables max_processes
atom mbuf_size
+atom md5
atom memory
atom memory_internal
atom memory_types
@@ -344,6 +349,8 @@ atom message_queue_len
atom messages
atom meta
atom meta_match_spec
+atom micro_seconds
+atom milli_seconds
atom min_heap_size
atom min_bin_vheap_size
atom minor_version
@@ -354,12 +361,15 @@ atom monitored_by
atom monitor
atom monitor_nodes
atom monitors
+atom monotonic
atom more
atom multi_scheduling
atom multiline
+atom nano_seconds
atom name
atom named_table
atom namelist
+atom native
atom native_addresses
atom Neq='=/='
atom Neqeq='/='
@@ -450,6 +460,7 @@ atom ports
atom port_count
atom port_limit
atom port_op
+atom positive
atom print
atom priority
atom private
@@ -509,6 +520,7 @@ atom schedulers_online
atom scheme
atom scientific
atom scope
+atom seconds
atom sensitive
atom sequential_tracer
atom sequential_trace_token
@@ -554,6 +566,7 @@ atom term_to_binary_trap
atom this
atom thread_pool_size
atom threads
+atom time_offset
atom timeout
atom timeout_value
atom Times='*'
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index a3cd08834f..6bb987985d 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -579,9 +579,29 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
unpacked = ap;
ap = addr + size;
switch (op) {
- case op_i_select_val_rfI:
- case op_i_select_val_xfI:
- case op_i_select_val_yfI:
+ case op_i_select_val_lins_rfI:
+ case op_i_select_val_lins_xfI:
+ case op_i_select_val_lins_yfI:
+ {
+ int n = ap[-1];
+ int ix = n;
+
+ while (ix--) {
+ erts_print(to, to_arg, "%T ", (Eterm) ap[0]);
+ ap++;
+ size++;
+ }
+ ix = n;
+ while (ix--) {
+ erts_print(to, to_arg, "f(" HEXF ") ", (Eterm) ap[0]);
+ ap++;
+ size++;
+ }
+ }
+ break;
+ case op_i_select_val_bins_rfI:
+ case op_i_select_val_bins_xfI:
+ case op_i_select_val_bins_yfI:
{
int n = ap[-1];
@@ -598,13 +618,19 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
case op_i_select_tuple_arity_yfI:
{
int n = ap[-1];
+ int ix = n;
- while (n > 0) {
+ while (ix--) {
Uint arity = arityval(ap[0]);
- erts_print(to, to_arg, " {%d} f(" HEXF ")", arity, ap[1]);
- ap += 2;
- size += 2;
- n--;
+ erts_print(to, to_arg, "{%d} ", arity, ap[1]);
+ ap++;
+ size++;
+ }
+ ix = n;
+ while (ix--) {
+ erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
+ ap++;
+ size++;
}
}
break;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 8bfb7d2ad2..6526e87e4c 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -241,10 +241,6 @@ BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
void** beam_ops;
#endif
-#ifndef ERTS_SMP /* Not supported with smp emulator */
-extern int count_instructions;
-#endif
-
#define SWAPIN \
HTOP = HEAP_TOP(c_p); \
E = c_p->stop
@@ -703,7 +699,7 @@ extern int count_instructions;
Fail; \
}
-#define IsMap(Src, Fail) if (is_not_map(Src)) { Fail; }
+#define IsMap(Src, Fail) if (!is_map(Src)) { Fail; }
#define HasMapField(Src, Key, Fail) if (has_not_map_field(Src, Key)) { Fail; }
@@ -1163,14 +1159,15 @@ void process_main(void)
Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
-#ifndef NO_JUMP_TABLE
- static void* opcodes[] = { DEFINE_OPCODES };
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
-#endif
+#else
+#ifndef NO_JUMP_TABLE
+ static void* opcodes[] = { DEFINE_OPCODES };
#else
int Go;
#endif
+#endif
Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
@@ -2141,19 +2138,18 @@ void process_main(void)
NextPF(0, next);
}
-
{
Eterm select_val2;
- OpCase(i_select_tuple_arity2_yfAfAf):
+ OpCase(i_select_tuple_arity2_yfAAff):
select_val2 = yb(Arg(0));
goto do_select_tuple_arity2;
- OpCase(i_select_tuple_arity2_xfAfAf):
+ OpCase(i_select_tuple_arity2_xfAAff):
select_val2 = xb(Arg(0));
goto do_select_tuple_arity2;
- OpCase(i_select_tuple_arity2_rfAfAf):
+ OpCase(i_select_tuple_arity2_rfAAff):
select_val2 = r(0);
I--;
@@ -2164,22 +2160,22 @@ void process_main(void)
select_val2 = *tuple_val(select_val2);
goto do_select_val2;
- OpCase(i_select_val2_yfcfcf):
+ OpCase(i_select_val2_yfccff):
select_val2 = yb(Arg(0));
goto do_select_val2;
- OpCase(i_select_val2_xfcfcf):
+ OpCase(i_select_val2_xfccff):
select_val2 = xb(Arg(0));
goto do_select_val2;
- OpCase(i_select_val2_rfcfcf):
+ OpCase(i_select_val2_rfccff):
select_val2 = r(0);
I--;
do_select_val2:
if (select_val2 == Arg(2)) {
- I += 2;
- } else if (select_val2 == Arg(4)) {
+ I += 3;
+ } else if (select_val2 == Arg(3)) {
I += 4;
}
@@ -2206,20 +2202,50 @@ void process_main(void)
do_select_tuple_arity:
if (is_tuple(select_val)) {
select_val = *tuple_val(select_val);
- goto do_binary_search;
+ goto do_linear_search;
+ }
+ SET_I((BeamInstr *) Arg(1));
+ Goto(*I);
+
+ OpCase(i_select_val_lins_xfI):
+ select_val = xb(Arg(0));
+ goto do_linear_search;
+
+ OpCase(i_select_val_lins_yfI):
+ select_val = yb(Arg(0));
+ goto do_linear_search;
+
+ OpCase(i_select_val_lins_rfI):
+ select_val = r(0);
+ I--;
+
+ do_linear_search: {
+ BeamInstr *vs = &Arg(3);
+ int ix = 0;
+
+ for(;;) {
+ if (vs[ix+0] >= select_val) { ix += 0; break; }
+ if (vs[ix+1] >= select_val) { ix += 1; break; }
+ ix += 2;
}
+
+ if (vs[ix] == select_val) {
+ I += ix + Arg(2) + 2;
+ }
+
SET_I((BeamInstr *) Arg(1));
Goto(*I);
+ }
- OpCase(i_select_val_xfI):
+ OpCase(i_select_val_bins_xfI):
select_val = xb(Arg(0));
goto do_binary_search;
- OpCase(i_select_val_yfI):
+ OpCase(i_select_val_bins_yfI):
select_val = yb(Arg(0));
goto do_binary_search;
- OpCase(i_select_val_rfI):
+ OpCase(i_select_val_bins_rfI):
select_val = r(0);
I--;
@@ -2366,7 +2392,7 @@ void process_main(void)
}
OpCase(i_has_map_fields_fsI): {
- map_t* mp;
+ flatmap_t* mp;
Eterm map;
Eterm field;
Eterm *ks;
@@ -2374,22 +2400,34 @@ void process_main(void)
Uint sz,n;
GetArg1(1, map);
+ n = (Uint)Arg(2);
+ fs = &Arg(3); /* pattern fields */
- /* this instruction assumes Arg1 is a map,
- * i.e. that it follows a test is_map if needed.
- */
+ /* get term from field? */
+ if (is_hashmap(map)) {
+ Uint32 hx;
+ while(n--) {
+ field = *fs++;
+ hx = hashmap_make_hash(field);
+ if (!erts_hashmap_get(hx,field,map)) {
+ SET_I((BeamInstr *) Arg(0));
+ goto has_map_fields_fail;
+ }
+ }
+ goto has_map_fields_ok;
+ }
+
+ ASSERT(is_flatmap(map));
- mp = (map_t *)map_val(map);
- sz = map_get_size(mp);
+ mp = (flatmap_t *)flatmap_val(map);
+ sz = flatmap_get_size(mp);
if (sz == 0) {
SET_I((BeamInstr *) Arg(0));
goto has_map_fields_fail;
}
- ks = map_get_keys(mp);
- n = (Uint)Arg(2);
- fs = &Arg(3); /* pattern fields */
+ ks = flatmap_get_keys(mp);
ASSERT(n>0);
@@ -2407,7 +2445,7 @@ void process_main(void)
SET_I((BeamInstr *) Arg(0));
goto has_map_fields_fail;
}
-
+has_map_fields_ok:
I += 4 + Arg(2);
has_map_fields_fail:
ASSERT(VALID_INSTR(*I));
@@ -2434,12 +2472,8 @@ do { \
OpCase(i_get_map_elements_fsI): {
Eterm map;
- map_t *mp;
- Eterm field;
- Eterm *ks;
- Eterm *vs;
BeamInstr *fs;
- Uint sz,n;
+ Uint sz, n;
GetArg1(1, map);
@@ -2447,36 +2481,56 @@ do { \
* i.e. that it follows a test is_map if needed.
*/
- mp = (map_t *)map_val(map);
- sz = map_get_size(mp);
-
- if (sz == 0) {
- SET_I((BeamInstr *) Arg(0));
- goto get_map_elements_fail;
- }
-
n = (Uint)Arg(2) / 2;
fs = &Arg(3); /* pattern fields and target registers */
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
- while(sz) {
- field = (Eterm)*fs;
- if (EQ(field,*ks)) {
- PUT_TERM_REG(*vs, fs[1]);
- n--;
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Eterm *vs;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ sz = flatmap_get_size(mp);
+
+ if (sz == 0) {
+ SET_I((BeamInstr *) Arg(0));
+ goto get_map_elements_fail;
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ while(sz) {
+ if (EQ((Eterm)*fs,*ks)) {
+ PUT_TERM_REG(*vs, fs[1]);
+ n--;
+ fs += 2;
+ /* no more values to fetch, we are done */
+ if (n == 0) break;
+ }
+ ks++; sz--;
+ vs++;
+ }
+
+ if (n) {
+ SET_I((BeamInstr *) Arg(0));
+ goto get_map_elements_fail;
+ }
+ } else {
+ const Eterm *v;
+ Uint32 hx;
+ ASSERT(is_hashmap(map));
+ while(n--) {
+ hx = hashmap_make_hash((Eterm)*fs);
+ if ((v = erts_hashmap_get(hx,(Eterm)*fs, map)) == NULL) {
+ SET_I((BeamInstr *) Arg(0));
+ goto get_map_elements_fail;
+ }
+ PUT_TERM_REG(*v, fs[1]);
fs += 2;
- /* no more values to fetch, we are done */
- if (n == 0) break;
}
- ks++; sz--;
- vs++;
}
- if (n) {
- SET_I((BeamInstr *) Arg(0));
- goto get_map_elements_fail;
- }
I += 4 + Arg(2);
get_map_elements_fail:
@@ -2775,6 +2829,7 @@ get_map_elements_fail:
}
PreFetch(1, next);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
reg[0] = r(0);
result = (*bf)(c_p, reg, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
@@ -3556,7 +3611,7 @@ get_map_elements_fail:
vbf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
- ASSERT(bif_nif_arity <= 3);
+ ASSERT(bif_nif_arity <= 4);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
reg[0] = r(0);
{
@@ -3782,8 +3837,6 @@ get_map_elements_fail:
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(num_bytes);
- bptr->flags = 0;
- bptr->orig_size = num_bytes;
erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
@@ -3883,8 +3936,6 @@ get_map_elements_fail:
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(tmp_arg1);
- bptr->flags = 0;
- bptr->orig_size = tmp_arg1;
erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
@@ -4987,14 +5038,14 @@ get_map_elements_fail:
* ... remainder of original BEAM code
*/
ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.ncallee = (void(*)(void)) I[-4];
+ c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
cmd = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
++hipe_trap_count;
goto L_hipe_mode_switch;
}
OpCase(hipe_trap_call_closure): {
ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.ncallee = (void(*)(void)) I[-4];
+ c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
cmd = HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8);
++hipe_trap_count;
goto L_hipe_mode_switch;
@@ -5028,7 +5079,10 @@ get_map_elements_fail:
case HIPE_MODE_SWITCH_RES_RETURN:
ASSERT(is_value(reg[0]));
MoveReturn(reg[0], r(0));
- case HIPE_MODE_SWITCH_RES_CALL:
+ case HIPE_MODE_SWITCH_RES_CALL_EXPORTED:
+ c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()];
+ /*fall through*/
+ case HIPE_MODE_SWITCH_RES_CALL_BEAM:
SET_I(c_p->i);
r(0) = reg[0];
Dispatch();
@@ -5145,22 +5199,16 @@ get_map_elements_fail:
#ifndef NO_JUMP_TABLE
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
-
/* Are tables correctly generated by beam_makeops? */
- ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
-
- if (count_instructions) {
+ ERTS_CT_ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
#ifdef DEBUG
- counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
+ counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
#endif
- counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
- beam_ops = counting_opcodes;
- }
- else
-#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
- {
- beam_ops = opcodes;
- }
+ counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
+ beam_ops = counting_opcodes;
+#else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
+ beam_ops = opcodes;
+#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
@@ -6424,55 +6472,69 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
static int has_not_map_field(Eterm map, Eterm key)
{
- map_t* mp;
- Eterm* keys;
- Uint i;
- Uint n;
-
- mp = (map_t *)map_val(map);
- keys = map_get_keys(mp);
- n = map_get_size(mp);
- if (is_immed(key)) {
- for (i = 0; i < n; i++) {
- if (keys[i] == key) {
- return 0;
+ Uint32 hx;
+ if (is_flatmap(map)) {
+ flatmap_t* mp;
+ Eterm* keys;
+ Uint i;
+ Uint n;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ keys = flatmap_get_keys(mp);
+ n = flatmap_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (keys[i] == key) {
+ return 0;
+ }
}
- }
- } else {
- for (i = 0; i < n; i++) {
- if (EQ(keys[i], key)) {
- return 0;
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(keys[i], key)) {
+ return 0;
+ }
}
}
+ return 1;
}
- return 1;
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ return erts_hashmap_get(hx,key,map) ? 0 : 1;
}
static Eterm get_map_element(Eterm map, Eterm key)
{
- map_t *mp;
- Eterm* ks, *vs;
- Uint i;
- Uint n;
-
- mp = (map_t *)map_val(map);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
- n = map_get_size(mp);
- if (is_immed(key)) {
- for (i = 0; i < n; i++) {
- if (ks[i] == key) {
- return vs[i];
+ Uint32 hx;
+ const Eterm *vs;
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Uint i;
+ Uint n;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return vs[i];
+ }
}
- }
- } else {
- for (i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- return vs[i];
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ return vs[i];
+ }
}
}
+ return THE_NON_VALUE;
}
- return THE_NON_VALUE;
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ vs = erts_hashmap_get(hx,key,map);
+ return vs ? *vs : THE_NON_VALUE;
}
#define GET_TERM(term, dest) \
@@ -6505,7 +6567,30 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
Eterm *mhp,*thp;
Eterm *E;
BeamInstr *ptr;
- map_t *mp;
+ flatmap_t *mp;
+ ErtsHeapFactory factory;
+
+ ptr = &Arg(4);
+
+ if (n > 2*MAP_SMALL_MAP_LIMIT) {
+ if (HeapWordsLeft(p) < n) {
+ erts_garbage_collect(p, n, reg, Arg(2));
+ }
+
+ mhp = p->htop;
+ thp = p->htop;
+ E = p->stop;
+
+ for (i = 0; i < n/2; i++) {
+ GET_TERM(*ptr++, *mhp++);
+ GET_TERM(*ptr++, *mhp++);
+ }
+
+ p->htop = mhp;
+
+ factory.p = p;
+ return erts_hashmap_from_array(&factory, thp, n/2, 0);
+ }
if (HeapWordsLeft(p) < need) {
erts_garbage_collect(p, need, reg, Arg(2));
@@ -6514,11 +6599,10 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
thp = p->htop;
mhp = thp + 1 + n/2;
E = p->stop;
- ptr = &Arg(4);
keys = make_tuple(thp);
*thp++ = make_arityval(n/2);
- mp = (map_t *)mhp; mhp += MAP_HEADER_SIZE;
+ mp = (flatmap_t *)mhp; mhp += MAP_HEADER_SIZE;
mp->thing_word = MAP_HEADER;
mp->size = n/2;
mp->keys = keys;
@@ -6528,7 +6612,7 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
GET_TERM(*ptr++, *mhp++);
}
p->htop = mhp;
- return make_map(mp);
+ return make_flatmap(mp);
}
static Eterm
@@ -6538,7 +6622,7 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
Uint num_old;
Uint num_updates;
Uint need;
- map_t *old_mp, *mp;
+ flatmap_t *old_mp, *mp;
Eterm res;
Eterm* hp;
Eterm* E;
@@ -6548,12 +6632,44 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
Eterm new_key;
Eterm* kp;
- if (is_not_map(map)) {
- return THE_NON_VALUE;
+ new_p = &Arg(5);
+ num_updates = Arg(4) / 2;
+
+ if (is_not_flatmap(map)) {
+ Uint32 hx;
+ Eterm val;
+
+ /* apparently the compiler does not emit is_map instructions,
+ * bad compiler */
+
+ if (is_not_hashmap(map))
+ return THE_NON_VALUE;
+
+ res = map;
+ E = p->stop;
+ while(num_updates--) {
+ /* assoc can't fail */
+ GET_TERM(new_p[0], new_key);
+ GET_TERM(new_p[1], val);
+ hx = hashmap_make_hash(new_key);
+
+ res = erts_hashmap_insert(p, hx, new_key, val, res, 0);
+ if (p->mbuf) {
+ Uint live = Arg(3);
+ reg[live] = res;
+ erts_garbage_collect(p, 0, reg, live+1);
+ res = reg[live];
+ }
+
+ E = p->stop;
+
+ new_p += 2;
+ }
+ return res;
}
- old_mp = (map_t *) map_val(map);
- num_old = map_get_size(old_mp);
+ old_mp = (flatmap_t *) flatmap_val(map);
+ num_old = flatmap_get_size(old_mp);
/*
* If the old map is empty, create a new map.
@@ -6568,14 +6684,13 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
* update list are new).
*/
- num_updates = Arg(4) / 2;
need = 2*(num_old+num_updates) + 1 + MAP_HEADER_SIZE;
if (HeapWordsLeft(p) < need) {
Uint live = Arg(3);
reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
- old_mp = (map_t *)map_val(map);
+ old_mp = (flatmap_t *)flatmap_val(map);
}
/*
@@ -6606,16 +6721,15 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
kp = p->htop + 1; /* Point to first key */
hp = kp + num_old + num_updates;
- res = make_map(hp);
- mp = (map_t *)hp;
+ res = make_flatmap(hp);
+ mp = (flatmap_t *)hp;
hp += MAP_HEADER_SIZE;
mp->thing_word = MAP_HEADER;
mp->keys = make_tuple(kp-1);
- old_vals = map_get_values(old_mp);
- old_keys = map_get_keys(old_mp);
+ old_vals = flatmap_get_values(old_mp);
+ old_keys = flatmap_get_keys(old_mp);
- new_p = &Arg(5);
GET_TERM(*new_p, new_key);
n = num_updates;
@@ -6701,8 +6815,19 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
n = kp - p->htop - 1; /* Actual number of keys/values */
*p->htop = make_arityval(n);
+ p->htop = hp;
mp->size = n;
- p->htop = hp;
+
+ /* The expensive case, need to build a hashmap */
+ if (n > MAP_SMALL_MAP_LIMIT) {
+ res = erts_hashmap_from_ks_and_vs(p,flatmap_get_keys(mp),flatmap_get_values(mp),n);
+ if (p->mbuf) {
+ Uint live = Arg(3);
+ reg[live] = res;
+ erts_garbage_collect(p, 0, reg, live+1);
+ res = reg[live];
+ }
+ }
return res;
}
@@ -6717,7 +6842,7 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
Uint i;
Uint num_old;
Uint need;
- map_t *old_mp, *mp;
+ flatmap_t *old_mp, *mp;
Eterm res;
Eterm* hp;
Eterm* E;
@@ -6726,12 +6851,48 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
BeamInstr* new_p;
Eterm new_key;
- if (is_not_map(map)) {
- return THE_NON_VALUE;
+ new_p = &Arg(5);
+ n = Arg(4) / 2; /* Number of values to be updated */
+ ASSERT(n > 0);
+
+ if (is_not_flatmap(map)) {
+ Uint32 hx;
+ Eterm val;
+
+ /* apparently the compiler does not emit is_map instructions,
+ * bad compiler */
+
+ if (is_not_hashmap(map))
+ return THE_NON_VALUE;
+
+ res = map;
+ E = p->stop;
+ while(n--) {
+ /* assoc can't fail */
+ GET_TERM(new_p[0], new_key);
+ GET_TERM(new_p[1], val);
+ hx = hashmap_make_hash(new_key);
+
+ res = erts_hashmap_insert(p, hx, new_key, val, res, 1);
+ if (is_non_value(res))
+ return res;
+
+ if (p->mbuf) {
+ Uint live = Arg(3);
+ reg[live] = res;
+ erts_garbage_collect(p, 0, reg, live+1);
+ res = reg[live];
+ }
+
+ E = p->stop;
+
+ new_p += 2;
+ }
+ return res;
}
- old_mp = (map_t *) map_val(map);
- num_old = map_get_size(old_mp);
+ old_mp = (flatmap_t *) flatmap_val(map);
+ num_old = flatmap_get_size(old_mp);
/*
* If the old map is empty, create a new map.
@@ -6751,7 +6912,7 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
- old_mp = (map_t *)map_val(map);
+ old_mp = (flatmap_t *)flatmap_val(map);
}
/*
@@ -6761,23 +6922,20 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
hp = p->htop;
E = p->stop;
- old_vals = map_get_values(old_mp);
- old_keys = map_get_keys(old_mp);
+ old_vals = flatmap_get_values(old_mp);
+ old_keys = flatmap_get_keys(old_mp);
- res = make_map(hp);
- mp = (map_t *)hp;
+ res = make_flatmap(hp);
+ mp = (flatmap_t *)hp;
hp += MAP_HEADER_SIZE;
mp->thing_word = MAP_HEADER;
mp->size = num_old;
mp->keys = old_mp->keys;
/* Get array of key/value pairs to be updated */
- new_p = &Arg(5);
GET_TERM(*new_p, new_key);
/* Update all values */
- n = Arg(4) / 2; /* Number of values to be updated */
- ASSERT(n > 0);
for (i = 0; i < num_old; i++) {
if (!EQ(*old_keys, new_key)) {
/* Not same keys */
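
[Note on the beam_emu.c map changes] The map instructions patched above now dispatch on two representations: a small "flatmap" (a keys tuple plus a parallel value array that is scanned linearly) and, above MAP_SMALL_MAP_LIMIT key/value pairs, a hash-based map reached through hashmap_make_hash() and erts_hashmap_get(). The following is a minimal, self-contained sketch of the flatmap branch of that lookup only, with simplified stand-in types rather than the real Eterm accessors; the hashmap branch instead hashes the key once and walks the persistent hash trie.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t Term;                 /* stand-in for a tagged Eterm  */
    #define NON_VALUE ((Term)0)             /* stand-in for THE_NON_VALUE   */

    /* Small-map layout: size, key array, parallel value array. */
    typedef struct { size_t size; const Term *keys; const Term *vals; } FlatMap;

    /* Linear scan, as in get_map_element()'s flatmap branch; the real code
     * compares immediates by word and falls back to EQ() for boxed terms. */
    static Term flatmap_get(const FlatMap *mp, Term key)
    {
        for (size_t i = 0; i < mp->size; i++)
            if (mp->keys[i] == key)
                return mp->vals[i];
        return NON_VALUE;
    }

    int main(void)
    {
        Term ks[] = {1, 2, 3}, vs[] = {10, 20, 30};
        FlatMap m = {3, ks, vs};
        printf("%lu\n", (unsigned long) flatmap_get(&m, 2));   /* prints 20 */
        return 0;
    }
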
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 41c1b5d2c2..02689e5b19 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -245,7 +245,7 @@ typedef struct {
/*
* This structure contains all information about the module being loaded.
*/
-
+#define MD5_SIZE 16
typedef struct LoaderState {
/*
* The current logical file within the binary.
@@ -292,7 +292,7 @@ typedef struct LoaderState {
StringPatch* string_patches; /* Linked list of position into string table to patch. */
BeamInstr catches; /* Linked list of catch_yf instructions. */
unsigned loaded_size; /* Final size of code when loaded. */
- byte mod_md5[16]; /* MD5 for module code. */
+ byte mod_md5[MD5_SIZE]; /* MD5 for module code. */
int may_load_nif; /* true if NIFs may later be loaded for this module */
int on_load; /* Index in the code for the on_load function
* (or 0 if there is no on_load function)
@@ -528,6 +528,7 @@ static Eterm exported_from_module(Process* p, Eterm mod);
static Eterm functions_in_module(Process* p, Eterm mod);
static Eterm attributes_for_module(Process* p, Eterm mod);
static Eterm compilation_info_for_module(Process* p, Eterm mod);
+static Eterm md5_of_module(Process* p, Eterm mod);
static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
@@ -648,6 +649,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
stp->code[MI_COMPILE_PTR] = 0;
stp->code[MI_COMPILE_SIZE] = 0;
stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ stp->code[MI_MD5_PTR] = 0;
/*
* Read the atom table.
@@ -3319,9 +3321,10 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
{
GenOp* op;
+ GenOpArg *tmp;
int arity = Size.val + 3;
int size = Size.val / 2;
- int i;
+ int i, j, align = 0;
/*
* Verify the validity of the list.
@@ -3336,9 +3339,37 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
}
/*
+ * Use a special-cased instruction if there are only two values.
+ */
+ if (size == 2) {
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_tuple_arity2_6;
+ GENOP_ARITY(op, arity - 1);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = Rest[0].val;
+ op->a[3].type = TAG_u;
+ op->a[3].val = Rest[2].val;
+ op->a[4] = Rest[1];
+ op->a[5] = Rest[3];
+
+ return op;
+ }
+
+ /*
* Generate the generic instruction.
+ * Assumption:
+ * Few different tuple arities to select on (fewer than 20).
+ * Use linear scan approach.
*/
+ align = 1;
+
+ arity += 2*align;
+ size += align;
+
NEW_GENOP(stp, op);
op->next = NULL;
op->op = genop_i_select_tuple_arity_3;
@@ -3346,39 +3377,36 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
- op->a[2].val = Size.val / 2;
- for (i = 0; i < Size.val; i += 2) {
- op->a[i+3].type = TAG_v;
- op->a[i+3].val = make_arityval(Rest[i].val);
- op->a[i+4] = Rest[i+1];
- }
+ op->a[2].val = size;
- /*
- * Sort the values to make them useful for a binary search.
- */
+ tmp = (GenOpArg *) erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(GenOpArg)*(arity-2*align));
- qsort(op->a+3, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genopargcompare);
-#ifdef DEBUG
- for (i = 3; i < arity-2; i += 2) {
- ASSERT(op->a[i].val < op->a[i+2].val);
+ for (i = 3; i < arity - 2*align; i+=2) {
+ tmp[i-3].type = TAG_v;
+ tmp[i-3].val = make_arityval(Rest[i-3].val);
+ tmp[i-2] = Rest[i-2];
}
-#endif
/*
- * Use a special-cased instruction if there are only two values.
+ * Sort the values to make them useful for a sentinel search
*/
- if (size == 2) {
- op->op = genop_i_select_tuple_arity2_6;
- op->arity--;
- op->a[2].type = TAG_u;
- op->a[2].val = arityval(op->a[3].val);
- op->a[3] = op->a[4];
- op->a[4].type = TAG_u;
- op->a[4].val = arityval(op->a[5].val);
- op->a[5] = op->a[6];
+
+ qsort(tmp, size - align, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+
+ j = 3;
+ for (i = 3; i < arity - 2*align; i += 2) {
+ op->a[j] = tmp[i-3];
+ op->a[j + size] = tmp[i-2];
+ j++;
}
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp);
+
+ op->a[j].type = TAG_u;
+ op->a[j].val = ~((BeamInstr)0);
+ op->a[j+size] = Fail;
+
return op;
}
@@ -3600,45 +3628,109 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest)
{
GenOp* op;
+ GenOpArg *tmp;
int arity = Size.val + 3;
int size = Size.val / 2;
- int i;
+ int i, j, align = 0;
+
+ if (size == 2) {
+
+ /*
+ * Use a special-cased instruction if there are only two values.
+ */
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_val2_6;
+ GENOP_ARITY(op, arity - 1);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2] = Rest[0];
+ op->a[3] = Rest[2];
+ op->a[4] = Rest[1];
+ op->a[5] = Rest[3];
+
+ return op;
+
+ } else if (size > 10) {
+
+ /* binary search instruction */
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_val_bins_3;
+ GENOP_ARITY(op, arity);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ for (i = 3; i < arity; i++) {
+ op->a[i] = Rest[i-3];
+ }
+
+ /*
+ * Sort the values to make them useful for a binary search.
+ */
+
+ qsort(op->a+3, size, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+#ifdef DEBUG
+ for (i = 3; i < arity-2; i += 2) {
+ ASSERT(op->a[i].val < op->a[i+2].val);
+ }
+#endif
+ return op;
+ }
+
+ /* linear search instruction */
+
+ align = 1;
+
+ arity += 2*align;
+ size += align;
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_val_3;
+ op->op = genop_i_select_val_lins_3;
GENOP_ARITY(op, arity);
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
op->a[2].val = size;
- for (i = 3; i < arity; i++) {
- op->a[i] = Rest[i-3];
+
+ tmp = (GenOpArg *) erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(GenOpArg)*(arity-2*align));
+
+ for (i = 3; i < arity - 2*align; i++) {
+ tmp[i-3] = Rest[i-3];
}
/*
- * Sort the values to make them useful for a binary search.
+ * Sort the values to make them useful for a sentinel search
*/
- qsort(op->a+3, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genopargcompare);
-#ifdef DEBUG
- for (i = 3; i < arity-2; i += 2) {
- ASSERT(op->a[i].val < op->a[i+2].val);
+ qsort(tmp, size - align, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+
+ j = 3;
+ for (i = 3; i < arity - 2*align; i += 2) {
+ op->a[j] = tmp[i-3];
+ op->a[j+size] = tmp[i-2];
+ j++;
}
-#endif
- /*
- * Use a special-cased instruction if there are only two values.
- */
- if (size == 2) {
- op->op = genop_i_select_val2_6;
- op->arity--;
- op->a[2] = op->a[3];
- op->a[3] = op->a[4];
- op->a[4] = op->a[5];
- op->a[5] = op->a[6];
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp);
+
+ /* add sentinel */
+
+ op->a[j].type = TAG_u;
+ op->a[j].val = ~((BeamInstr)0);
+ op->a[j+size] = Fail;
+
+#ifdef DEBUG
+ for (i = 0; i < size - 1; i++) {
+ ASSERT(op->a[i+3].val <= op->a[i+4].val);
}
+#endif
return op;
}
@@ -4042,7 +4134,7 @@ freeze_code(LoaderState* stp)
}
size = (stp->ci * sizeof(BeamInstr)) +
(stp->total_literal_size * sizeof(Eterm)) +
- strtab_size + attr_size + compile_size + line_size;
+ strtab_size + attr_size + compile_size + MD5_SIZE + line_size;
/*
* Move the code to its final location.
@@ -4251,11 +4343,20 @@ freeze_code(LoaderState* stp)
code[MI_COMPILE_SIZE_ON_HEAP] = decoded_size;
}
CHKBLK(ERTS_ALC_T_CODE,code);
+ {
+ byte* md5_sum = str_table + strtab_size + attr_size + compile_size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ sys_memcpy(md5_sum, stp->mod_md5, MD5_SIZE);
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ code[MI_MD5_PTR] = (BeamInstr) md5_sum;
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ }
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Make sure that we have not overflowed the allocated code space.
*/
- ASSERT(str_table + strtab_size + attr_size + compile_size ==
+ ASSERT(str_table + strtab_size + attr_size + compile_size + MD5_SIZE ==
((byte *) code) + size);
/*
@@ -5108,10 +5209,11 @@ erts_module_info_0(Process* p, Eterm module)
hp += 3; \
list = CONS(hp, tup, list)
+ BUILD_INFO(am_md5);
BUILD_INFO(am_compile);
BUILD_INFO(am_attributes);
- BUILD_INFO(am_imports);
BUILD_INFO(am_exports);
+ BUILD_INFO(am_module);
#undef BUILD_INFO
return list;
}
@@ -5121,8 +5223,8 @@ erts_module_info_1(Process* p, Eterm module, Eterm what)
{
if (what == am_module) {
return module;
- } else if (what == am_imports) {
- return NIL;
+ } else if (what == am_md5) {
+ return md5_of_module(p, module);
} else if (what == am_exports) {
return exported_from_module(p, module);
} else if (what == am_functions) {
@@ -5311,7 +5413,7 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
Eterm result = NIL;
Eterm* end;
- if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
+ if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
@@ -5350,7 +5452,7 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
Eterm result = NIL;
Eterm* end;
- if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
+ if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
@@ -5373,6 +5475,33 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
}
/*
+ * Returns the MD5 checksum for a module
+ *
+ * Returns a tagged term, or 0 on error.
+ */
+
+Eterm
+md5_of_module(Process* p, /* Process whose heap to use. */
+ Eterm mod) /* Tagged atom for module. */
+{
+ Module* modp;
+ BeamInstr* code;
+ Eterm res = NIL;
+
+ if (is_not_atom(mod)) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod, erts_active_code_ix());
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+ code = modp->curr.code;
+ res = new_binary(p, (byte *) code[MI_MD5_PTR], MD5_SIZE);
+ return res;
+}
+
+/*
* Build a single {M,F,A,Loction} item to be part of
* a stack trace.
*/
@@ -5548,7 +5677,7 @@ code_module_md5_1(BIF_ALIST_1)
res = am_undefined;
goto done;
}
- res = new_binary(p, stp->mod_md5, sizeof(stp->mod_md5));
+ res = new_binary(p, stp->mod_md5, MD5_SIZE);
done:
erts_free_aligned_binary_bytes(temp_alloc);
@@ -5944,6 +6073,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code[MI_LITERALS_END] = 0;
code[MI_LITERALS_OFF_HEAP] = 0;
code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ code[MI_MD5_PTR] = 0;
ci = MI_FUNCTIONS + n + 1;
/*
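
[Note on the beam_load.c select_val changes] The rewritten gen_select_val()/gen_select_tuple_arity() above pick one of three shapes: the two-value i_select_val2, a binary-search instruction when there are more than ten values, and otherwise a linear-scan instruction whose operands are the sorted values followed by a parallel array of fail labels, padded with an all-one-bits sentinel whose label slot holds the fail label. A rough sketch of the run-time scan this layout enables is shown below; names are hypothetical, no valid term word equals the sentinel, and the real loop in beam_emu.c is unrolled two entries at a time.

    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t Term;     /* values are compared as untagged machine words */

    /* vals[] holds ascending values whose last entry is the sentinel ~(Term)0,
     * so the scan needs no bounds check at all. Returns the matching index,
     * or (size_t)-1 on a miss; the caller uses the index into a parallel
     * array of jump targets, or the fail label on a miss. */
    static size_t select_val_linear(const Term *vals, Term key)
    {
        size_t ix = 0;

        while (vals[ix] < key)              /* the sentinel guarantees termination */
            ix++;

        return vals[ix] == key ? ix : (size_t)-1;
    }
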
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index bd22b0c4de..0e3ca0bdb0 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -91,7 +91,6 @@ extern Uint erts_total_code_size;
#define MI_LITERALS_END 8
#define MI_LITERALS_OFF_HEAP 9
-
/*
* Pointer to the on_load function (or NULL if none).
*/
@@ -103,6 +102,11 @@ extern Uint erts_total_code_size;
#define MI_LINE_TABLE 11
/*
+ * Pointer to the module MD5 sum (16 bytes)
+ */
+#define MI_MD5_PTR 12
+
+/*
* Start of function pointer table. This table contains pointers to
* all functions in the module plus an additional pointer just beyond
* the end of the last function.
@@ -111,7 +115,7 @@ extern Uint erts_total_code_size;
* this table.
*/
-#define MI_FUNCTIONS 12
+#define MI_FUNCTIONS 13
/*
* Layout of the line table.
diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c
index 8613131176..b16fe6b271 100644
--- a/erts/emulator/beam/benchmark.c
+++ b/erts/emulator/beam/benchmark.c
@@ -37,37 +37,9 @@ unsigned long long major_gc;
#ifdef BM_TIMERS
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-
-#include "libperfctr.h"
-struct vperfctr *system_clock;
-double cpu_khz;
-BM_NEW_TIMER(start);
-
-static double get_hrvtime(void)
-{
- unsigned long long ticks;
- double milli_seconds;
-
- ticks = vperfctr_read_tsc(system_clock);
- milli_seconds = (double)ticks / cpu_khz;
- return milli_seconds;
-}
-
-static void stop_hrvtime(void)
-{
- if(system_clock)
- {
- vperfctr_stop(system_clock);
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
-}
-
-#else /* not perfctr, asuming Solaris */
+/* assuming Solaris */
#include <time.h>
BM_TIMER_T system_clock;
-#endif
unsigned long local_pause_times[MAX_PAUSE_TIME];
unsigned long pause_times[MAX_PAUSE_TIME];
@@ -117,40 +89,6 @@ unsigned long long message_sizes[1000];
void init_benchmarking()
{
#ifdef BM_TIMERS
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
- /* pass `--with-perfctr=/path/to/perfctr' when configuring */
- struct perfctr_info info;
- struct vperfctr_control control;
- int i;
-
- system_clock = vperfctr_open();
- if (system_clock != NULL)
- {
- if (vperfctr_info(system_clock,&info) >= 0)
- {
- cpu_khz = (double)info.cpu_khz;
- if (info.cpu_features & PERFCTR_FEATURE_RDTSC)
- {
- memset(&control,0,sizeof control);
- control.cpu_control.tsc_on = 1;
- }
- }
- if (vperfctr_control(system_clock,&control) < 0)
- {
- vperfctr_close(system_clock);
- system_clock = NULL;
- }
- }
-
- for (i = 0; i < 1000; i++)
- {
- BM_START_TIMER(system);
- BM_STOP_TIMER(system);
- }
-
- timer_time = system_time / 1000;
- start_time = 0;
-#else
int i;
for (i = 0; i < 1000; i++)
{
@@ -158,7 +96,6 @@ void init_benchmarking()
BM_STOP_TIMER(system);
}
timer_time = system_time / 1000;
-#endif
for (i = 0; i < MAX_PAUSE_TIME; i++) {
local_pause_times[i] = 0;
diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h
index 766edaac42..7fc3933f3d 100644
--- a/erts/emulator/beam/benchmark.h
+++ b/erts/emulator/beam/benchmark.h
@@ -37,10 +37,7 @@
/* BM_TIMERS keeps track of the time spent in diferent parts of the
* system. It only measures accual active time, not time spent in idle
- * mode. These timers requires hardware support. For Linux, use the
- * package perfctr from user.it.uu.se/~mikpe/linux/perfctr. If this
- * package is not specified when configuring the system
- * (--with-perfctr=PATH), the Solaris hrtime_t will be used.
+ * mode. Currently, the Solaris hrtime_t will be used.
* To add new timers look below.
*/
#define BM_TIMERS
@@ -142,43 +139,12 @@ extern unsigned long long major_gc;
* meassure (send time in shared heap for instance).
*/
-#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
-#include "libperfctr.h"
+/* (Assuming Solaris) */
-#define BM_TIMER_T double
-
-extern struct vperfctr *system_clock;
-extern double cpu_khz;
-extern BM_TIMER_T start_time;
-
-#define BM_START_TIMER(t) start_time = \
- (BM_TIMER_T)vperfctr_read_tsc(system_clock) / \
- cpu_khz;
-
-#define BM_STOP_TIMER(t) do { \
- BM_TIMER_T tmp = ((BM_TIMER_T)vperfctr_read_tsc(system_clock) / cpu_khz); \
- tmp -= (start_time + timer_time); \
- t##_time += (tmp > 0 ? tmp : 0); \
-} while(0)
-
-#define BM_TIME_PRINTER(str,time) do { \
- int min,sec,milli,micro; \
- BM_TIMER_T tmp = (time) * 1000; \
- micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
- min = (uint)tmp / 60; \
- erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
-} while(0)
-
-#else /* !USE_PERFCTR (Assuming Solaris) */
-
-#define BM_TIMER_T hrtime_t
-#define BM_START_TIMER(t) system_clock = sys_gethrtime()
+#define BM_TIMER_T ErtsMonotonicTime
+#define BM_START_TIMER(t) system_clock = ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time())
#define BM_STOP_TIMER(t) do { \
- BM_TIMER_T tmp = (sys_gethrtime() - system_clock) - timer_time; \
+ BM_TIMER_T tmp = (ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time()) - system_clock) - timer_time; \
t##_time += (tmp > 0 ? tmp : 0); \
} while(0)
@@ -196,7 +162,6 @@ extern BM_TIMER_T start_time;
} while(0)
extern BM_TIMER_T system_clock;
-#endif /* USE_PERFCTR */
extern BM_TIMER_T timer_time;
extern BM_TIMER_T system_time;
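
[Note on the benchmark.h timer changes] With perfctr support removed, the BM_TIMERS macros above are retargeted at the new OS monotonic clock: BM_START_TIMER samples ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time()) and BM_STOP_TIMER adds the elapsed time, minus the calibrated macro overhead in timer_time, to the named accumulator. The same stopwatch pattern, sketched stand-alone against POSIX clock_gettime() rather than the emulator's time API; the clock source and all names below are illustrative, not part of the patch.

    #include <stdint.h>
    #include <time.h>

    typedef int64_t BmTime;                       /* nanoseconds, cf. BM_TIMER_T */

    static BmTime monotonic_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (BmTime) ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static BmTime start_ns;                       /* cf. system_clock            */
    static BmTime timer_overhead_ns;              /* cf. timer_time              */

    #define BM_START()        (start_ns = monotonic_ns())
    #define BM_STOP(acc) do {                                                   \
            BmTime d = (monotonic_ns() - start_ns) - timer_overhead_ns;         \
            (acc) += d > 0 ? d : 0;               /* clamp calibration noise */ \
        } while (0)

Here timer_overhead_ns would be calibrated the same way init_benchmarking() does it in benchmark.c: run a thousand empty start/stop pairs and divide the accumulated time by a thousand.
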
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 42dd160e38..af02a55b69 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -28,7 +28,9 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "dist.h"
#include "erl_version.h"
@@ -40,16 +42,22 @@
#define ERTS_PTAB_WANT_BIF_IMPL__
#include "erl_ptab.h"
#include "erl_bits.h"
+#include "erl_bif_unique.h"
-static Export* flush_monitor_message_trap = NULL;
+static Export* flush_monitor_messages_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
static Export* await_port_send_result_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
+static Export dsend_continue_trap_export;
+Export *erts_convert_time_unit_trap = NULL;
static Export *await_sched_wall_time_mod_trap;
static erts_smp_atomic32_t sched_wall_time;
+static erts_smp_mtx_t ports_snapshot_mtx;
+erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/*
@@ -391,7 +399,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
return res;
}
-static int demonitor(Process *c_p, Eterm ref)
+static int demonitor(Process *c_p, Eterm ref, Eterm *multip)
{
ErtsMonitor *mon = NULL; /* The monitor entry to delete */
Process *rp; /* Local target process */
@@ -415,65 +423,73 @@ static int demonitor(Process *c_p, Eterm ref)
goto done;
}
- if (mon->type != MON_ORIGIN) {
- res = ERTS_DEMONITOR_BADARG;
- goto done;
- }
- to = mon->pid;
-
- if (is_atom(to)) {
- /* Monitoring a name at node to */
- ASSERT(is_node_name_atom(to));
- dep = erts_sysname_to_connected_dist_entry(to);
- ASSERT(dep != erts_this_dist_entry);
- if (dep)
- deref_de = 1;
- } else {
- ASSERT(is_pid(to));
- dep = pid_dist_entry(to);
- }
- if (dep != erts_this_dist_entry) {
- res = remote_demonitor(c_p, dep, ref, to);
- /* remote_demonitor() unlocks link lock on c_p */
- unlock_link = 0;
- }
- else { /* Local monitor */
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
+ switch (mon->type) {
+ case MON_TIME_OFFSET:
+ *multip = am_true;
+ erts_demonitor_time_offset(ref);
+ res = ERTS_DEMONITOR_TRUE;
+ break;
+ case MON_ORIGIN:
+ to = mon->pid;
+ *multip = am_false;
+ if (is_atom(to)) {
+ /* Monitoring a name at node to */
+ ASSERT(is_node_name_atom(to));
+ dep = erts_sysname_to_connected_dist_entry(to);
+ ASSERT(dep != erts_this_dist_entry);
+ if (dep)
+ deref_de = 1;
+ } else {
+ ASSERT(is_pid(to));
+ dep = pid_dist_entry(to);
}
- dep = NULL;
- rp = erts_pid2proc_opt(c_p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- to,
- ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
+ if (dep != erts_this_dist_entry) {
+ res = remote_demonitor(c_p, dep, ref, to);
+ /* remote_demonitor() unlocks link lock on c_p */
+ unlock_link = 0;
+ }
+ else { /* Local monitor */
+ if (deref_de) {
+ deref_de = 0;
+ erts_deref_dist_entry(dep);
+ }
+ dep = NULL;
+ rp = erts_pid2proc_opt(c_p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ to,
+ ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
#ifndef ERTS_SMP
- ASSERT(mon);
+ ASSERT(mon);
#else
- if (!mon)
- res = ERTS_DEMONITOR_FALSE;
- else
+ if (!mon)
+ res = ERTS_DEMONITOR_FALSE;
+ else
#endif
- {
- res = ERTS_DEMONITOR_TRUE;
- erts_destroy_monitor(mon);
- }
- if (rp) {
- ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- if (rp != c_p)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon != NULL)
- erts_destroy_monitor(rmon);
- }
- else {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
- }
+ {
+ res = ERTS_DEMONITOR_TRUE;
+ erts_destroy_monitor(mon);
+ }
+ if (rp) {
+ ErtsMonitor *rmon;
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ if (rp != c_p)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon != NULL)
+ erts_destroy_monitor(rmon);
+ }
+ else {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
+ }
+ }
+ break;
+ default:
+ res = ERTS_DEMONITOR_BADARG;
+ *multip = am_false;
+ break;
}
-
done:
if (unlock_link)
@@ -490,7 +506,8 @@ static int demonitor(Process *c_p, Eterm ref)
BIF_RETTYPE demonitor_1(BIF_ALIST_1)
{
- switch (demonitor(BIF_P, BIF_ARG_1)) {
+ Eterm multi;
+ switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
case ERTS_DEMONITOR_FALSE:
case ERTS_DEMONITOR_TRUE:
BIF_RET(am_true);
@@ -508,6 +525,7 @@ BIF_RETTYPE demonitor_1(BIF_ALIST_1)
BIF_RETTYPE demonitor_2(BIF_ALIST_2)
{
Eterm res = am_true;
+ Eterm multi = am_false;
int info = 0;
int flush = 0;
Eterm list = BIF_ARG_2;
@@ -530,13 +548,18 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
if (is_not_nil(list))
goto badarg;
- switch (demonitor(BIF_P, BIF_ARG_1)) {
+ switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
case ERTS_DEMONITOR_FALSE:
if (info)
res = am_false;
- if (flush)
- BIF_TRAP2(flush_monitor_message_trap, BIF_P, BIF_ARG_1, res);
+ if (flush) {
+ flush_messages:
+ BIF_TRAP3(flush_monitor_messages_trap, BIF_P,
+ BIF_ARG_1, multi, res);
+ }
case ERTS_DEMONITOR_TRUE:
+ if (multi == am_true && flush)
+ goto flush_messages;
BIF_RET(res);
case ERTS_DEMONITOR_YIELD_TRUE:
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
@@ -595,14 +618,12 @@ erts_queue_monitor_message(Process *p,
}
static BIF_RETTYPE
-local_pid_monitor(Process *p, Eterm target)
+local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int bool)
{
BIF_RETTYPE ret;
- Eterm mon_ref;
Process *rp;
ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
- mon_ref = erts_make_ref(p);
ERTS_BIF_PREP_RET(ret, mon_ref);
if (target == p->common.id) {
return ret;
@@ -615,12 +636,18 @@ local_pid_monitor(Process *p, Eterm target)
if (!rp) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
p_locks &= ~ERTS_PROC_LOCK_LINK;
- erts_queue_monitor_message(p, &p_locks,
- mon_ref, am_process, target, am_noproc);
+ if (bool)
+ ret = am_false;
+ else
+ erts_queue_monitor_message(p, &p_locks,
+ mon_ref, am_process, target, am_noproc);
}
else {
ASSERT(rp != p);
+ if (bool)
+ ret = am_true;
+
erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL);
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL);
@@ -744,13 +771,28 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
int deref_de = 0;
/* Only process monitors are implemented */
- if (BIF_ARG_1 != am_process) {
+ switch (BIF_ARG_1) {
+ case am_time_offset: {
+ Eterm ref;
+ if (BIF_ARG_2 != am_clock_service)
+ goto error;
+ ref = erts_make_ref(BIF_P);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET,
+ ref, am_clock_service, NIL);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_monitor_time_offset(BIF_P->common.id, ref);
+ BIF_RET(ref);
+ }
+ case am_process:
+ break;
+ default:
goto error;
}
if (is_internal_pid(target)) {
local_pid:
- ret = local_pid_monitor(BIF_P, target);
+ ret = local_pid_monitor(BIF_P, target, erts_make_ref(BIF_P), 0);
} else if (is_external_pid(target)) {
dep = external_pid_dist_entry(target);
if (dep == erts_this_dist_entry)
@@ -793,6 +835,25 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
return ret;
}
+BIF_RETTYPE erts_internal_monitor_process_2(BIF_ALIST_2)
+{
+ if (is_not_internal_pid(BIF_ARG_1)) {
+ if (is_external_pid(BIF_ARG_1)
+ && (external_pid_dist_entry(BIF_ARG_1)
+ == erts_this_dist_entry)) {
+ BIF_RET(am_false);
+ }
+ goto badarg;
+ }
+
+ if (is_not_internal_ref(BIF_ARG_2))
+ goto badarg;
+
+ BIF_RET(local_pid_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, 1));
+
+badarg:
+ BIF_ERROR(BIF_P, BADARG);
+}
/**********************************************************************/
/* this is a combination of the spawn and link BIFs */
@@ -1777,6 +1838,8 @@ BIF_RETTYPE whereis_1(BIF_ALIST_1)
* erlang:'!'/2
*/
+HIPE_WRAPPER_BIF_DISABLE_GC(ebif_bang, 2)
+
BIF_RETTYPE
ebif_bang_2(BIF_ALIST_2)
{
@@ -1795,34 +1858,36 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_USER_ERROR (-5)
#define SEND_INTERNAL_ERROR (-6)
#define SEND_AWAIT_RESULT (-7)
+#define SEND_YIELD_CONTINUE (-8)
+
-Sint do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp);
+Sint do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext*);
static Sint remote_send(Process *p, DistEntry *dep,
- Eterm to, Eterm full_to, Eterm msg, int suspend)
+ Eterm to, Eterm full_to, Eterm msg,
+ ErtsSendContext* ctx)
{
Sint res;
int code;
- ErtsDSigData dsd;
ASSERT(is_atom(to) || is_external_pid(to));
- code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, !suspend);
+ code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
res = SEND_TRAP;
break;
case ERTS_DSIG_PREP_WOULD_SUSPEND:
- ASSERT(!suspend);
+ ASSERT(!ctx->suspend);
res = SEND_YIELD;
break;
case ERTS_DSIG_PREP_CONNECTED: {
if (is_atom(to))
- code = erts_dsig_send_reg_msg(&dsd, to, msg);
+ code = erts_dsig_send_reg_msg(to, msg, ctx);
else
- code = erts_dsig_send_msg(&dsd, to, msg);
+ code = erts_dsig_send_msg(to, msg, ctx);
/*
* Note that reductions have been bumped on calling
* process by erts_dsig_send_reg_msg() or
@@ -1830,6 +1895,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
*/
if (code == ERTS_DSIG_SEND_YIELD)
res = SEND_YIELD_RETURN;
+ else if (code == ERTS_DSIG_SEND_CONTINUE)
+ res = SEND_YIELD_CONTINUE;
else
res = 0;
break;
@@ -1850,7 +1917,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
Sint
-do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
+do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
+{
Eterm portid;
Port *pt;
Process* rp;
@@ -1883,7 +1951,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
#endif
return 0;
}
- return remote_send(p, dep, to, to, msg, suspend);
+ return remote_send(p, dep, to, to, msg, ctx);
} else if (is_atom(to)) {
Eterm id = erts_whereis_name_to_id(p, to);
@@ -1940,7 +2008,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
ret_val = 0;
if (pt) {
- int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
+ int ps_flags = ctx->suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
*refp = NIL;
switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
@@ -1949,12 +2017,12 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
return SEND_USER_ERROR;
case ERTS_PORT_OP_BUSY:
/* Nothing has been sent */
- if (suspend)
+ if (ctx->suspend)
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
return SEND_YIELD;
case ERTS_PORT_OP_BUSY_SCHEDULED:
/* Message was sent */
- if (suspend) {
+ if (ctx->suspend) {
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
ret_val = SEND_YIELD_RETURN;
break;
@@ -2034,9 +2102,14 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
return 0;
}
- ret = remote_send(p, dep, tp[1], to, msg, suspend);
- if (dep)
- erts_deref_dist_entry(dep);
+ ret = remote_send(p, dep, tp[1], to, msg, ctx);
+ if (ret != SEND_YIELD_CONTINUE) {
+ if (dep) {
+ erts_deref_dist_entry(dep);
+ }
+ } else {
+ ctx->dep_to_deref = dep;
+ }
return ret;
} else {
 if (IS_TRACED(p))	/* XXX Is this really necessary ??? */
@@ -2067,9 +2140,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(send, 3)
BIF_RETTYPE send_3(BIF_ALIST_3)
{
+ BIF_RETTYPE retval;
Eterm ref;
Process *p = BIF_P;
Eterm to = BIF_ARG_1;
@@ -2077,34 +2152,44 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
Eterm opts = BIF_ARG_3;
int connect = !0;
- int suspend = !0;
Eterm l = opts;
Sint result;
-
+ DeclareTypedTmpHeap(ErtsSendContext, ctx, BIF_P);
+ UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
+
+ ctx->suspend = !0;
+ ctx->dep_to_deref = NULL;
+ ctx->return_term = am_ok;
+ ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
+
while (is_list(l)) {
if (CAR(list_val(l)) == am_noconnect) {
connect = 0;
} else if (CAR(list_val(l)) == am_nosuspend) {
- suspend = 0;
+ ctx->suspend = 0;
} else {
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
+ goto done;
}
l = CDR(list_val(l));
}
if(!is_nil(l)) {
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
+ goto done;
}
#ifdef DEBUG
ref = NIL;
#endif
- result = do_send(p, to, msg, suspend, &ref);
+ result = do_send(p, to, msg, &ref, ctx);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(retval, am_ok);
+ goto done;
}
switch (result) {
@@ -2112,68 +2197,127 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
/* May need to yield even though we do not bump reds here... */
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(retval, am_ok);
break;
case SEND_TRAP:
if (connect) {
- BIF_TRAP3(dsend3_trap, p, to, msg, opts);
+ ERTS_BIF_PREP_TRAP3(retval, dsend3_trap, p, to, msg, opts);
} else {
- BIF_RET(am_noconnect);
+ ERTS_BIF_PREP_RET(retval, am_noconnect);
}
break;
case SEND_YIELD:
- if (suspend) {
- ERTS_BIF_YIELD3(bif_export[BIF_send_3], p, to, msg, opts);
+ if (ctx->suspend) {
+ ERTS_BIF_PREP_YIELD3(retval,
+ bif_export[BIF_send_3], p, to, msg, opts);
} else {
- BIF_RET(am_nosuspend);
+ ERTS_BIF_PREP_RET(retval, am_nosuspend);
}
break;
case SEND_YIELD_RETURN:
- if (!suspend)
- BIF_RET(am_nosuspend);
+ if (!ctx->suspend) {
+ ERTS_BIF_PREP_RET(retval, am_nosuspend);
+ break;
+ }
yield_return:
- ERTS_BIF_YIELD_RETURN(p, am_ok);
+ ERTS_BIF_PREP_YIELD_RETURN(retval, p, am_ok);
+ break;
case SEND_AWAIT_RESULT:
ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
+ ERTS_BIF_PREP_TRAP3(retval, await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
+ break;
case SEND_BADARG:
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
case SEND_USER_ERROR:
- BIF_ERROR(p, EXC_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
case SEND_INTERNAL_ERROR:
- BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_INTERNAL_ERROR);
+ break;
+ case SEND_YIELD_CONTINUE:
+ BUMP_ALL_REDS(p);
+ erts_set_gc_state(p, 0);
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
+ erts_dsend_export_trap_context(p, ctx));
break;
default:
- ASSERT(! "Illegal send result");
+ erl_exit(ERTS_ABORT_EXIT, "send_3 invalid result %d\n", (int)result);
break;
}
- ASSERT(! "Can not arrive here");
- BIF_ERROR(p, BADARG);
+
+done:
+ UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
+ return retval;
}
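The rewritten send_3 above routes every outcome through the ERTS_BIF_PREP_* macros and a single done label, so the temporary heap backing the ErtsSendContext is released exactly once before returning. A minimal sketch of that shape, using only macros that appear in this patch (the BIF itself is hypothetical):

BIF_RETTYPE hypothetical_ctx_bif_1(BIF_ALIST_1)
{
    BIF_RETTYPE retval;
    DeclareTypedTmpHeap(ErtsSendContext, ctx, BIF_P);
    UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
    (void) ctx;                    /* would be initialized and passed to do_send() */

    if (is_not_pid(BIF_ARG_1)) {
        ERTS_BIF_PREP_ERROR(retval, BIF_P, BADARG);   /* never return past the tmp heap */
        goto done;
    }
    ERTS_BIF_PREP_RET(retval, am_ok);

done:
    UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
    return retval;
}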
+HIPE_WRAPPER_BIF_DISABLE_GC(send, 2)
+
BIF_RETTYPE send_2(BIF_ALIST_2)
{
return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
+static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
+{
+ Binary* bin = ((ProcBin*) binary_val(BIF_ARG_1))->val;
+ ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
+ int result;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dsend_context_dtor);
+
+ ctx->dss.reds = initial_reds;
+ result = erts_dsig_send(&ctx->dsd, &ctx->dss);
+
+ switch (result) {
+ case ERTS_DSIG_SEND_OK:
+ erts_set_gc_state(BIF_P, 1);
+ BIF_RET(ctx->return_term);
+ break;
+ case ERTS_DSIG_SEND_YIELD: /*SEND_YIELD_RETURN*/
+ erts_set_gc_state(BIF_P, 1);
+ if (!ctx->suspend)
+ BIF_RET(am_nosuspend);
+ ERTS_BIF_YIELD_RETURN(BIF_P, ctx->return_term);
+
+ case ERTS_DSIG_SEND_CONTINUE: { /*SEND_YIELD_CONTINUE*/
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&dsend_continue_trap_export, BIF_P, BIF_ARG_1);
+ }
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "dsend_continue_trap invalid result %d\n", (int)result);
+ break;
+ }
+ ASSERT(! "Can not arrive here");
+ BIF_ERROR(BIF_P, BADARG);
+}
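dsend_continue_trap_1 hands the encoder a fresh internal budget of ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR work units every time it is scheduled; erts_dsig_send decrements ctx->dss.reds as it works, and the consumed part, scaled back down by the same factor, is what gets charged as reductions (see the done: label in dist.c further down). A sketch of that accounting as a hypothetical helper:

static void bump_reds_for_dsig_send(Process *p,
                                    struct erts_dsig_send_context *ctx,
                                    Sint initial_reds)
{
    /* initial_reds was ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR;
       ctx->reds is what the encoder left unused, so the difference, scaled
       back down, is the reduction cost charged to the calling process. */
    BUMP_REDS(p, (initial_reds - ctx->reds) / TERM_TO_BINARY_LOOP_FACTOR);
}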
+
Eterm erl_send(Process *p, Eterm to, Eterm msg)
{
+ Eterm retval;
Eterm ref;
Sint result;
+ DeclareTypedTmpHeap(ErtsSendContext, ctx, p);
+ UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
#ifdef DEBUG
ref = NIL;
#endif
+ ctx->suspend = !0;
+ ctx->dep_to_deref = NULL;
+ ctx->return_term = msg;
+ ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
- result = do_send(p, to, msg, !0, &ref);
+ result = do_send(p, to, msg, &ref, ctx);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(msg);
+ ERTS_BIF_PREP_RET(retval, msg);
+ goto done;
}
switch (result) {
@@ -2181,35 +2325,46 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
/* May need to yield even though we do not bump reds here... */
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
- BIF_RET(msg);
+ ERTS_BIF_PREP_RET(retval, msg);
break;
case SEND_TRAP:
- BIF_TRAP2(dsend2_trap, p, to, msg);
+ ERTS_BIF_PREP_TRAP2(retval, dsend2_trap, p, to, msg);
break;
case SEND_YIELD:
- ERTS_BIF_YIELD2(bif_export[BIF_send_2], p, to, msg);
+ ERTS_BIF_PREP_YIELD2(retval, bif_export[BIF_send_2], p, to, msg);
break;
case SEND_YIELD_RETURN:
yield_return:
- ERTS_BIF_YIELD_RETURN(p, msg);
+ ERTS_BIF_PREP_YIELD_RETURN(retval, p, msg);
+ break;
case SEND_AWAIT_RESULT:
ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, p, ref, msg, msg);
+ ERTS_BIF_PREP_TRAP3(retval,
+ await_port_send_result_trap, p, ref, msg, msg);
+ break;
case SEND_BADARG:
- BIF_ERROR(p, BADARG);
+ ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
case SEND_USER_ERROR:
- BIF_ERROR(p, EXC_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
case SEND_INTERNAL_ERROR:
- BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ ERTS_BIF_PREP_ERROR(retval, p, EXC_INTERNAL_ERROR);
+ break;
+ case SEND_YIELD_CONTINUE:
+ BUMP_ALL_REDS(p);
+ erts_set_gc_state(p, 0);
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
+ erts_dsend_export_trap_context(p, ctx));
break;
default:
- ASSERT(! "Illegal send result");
+ erl_exit(ERTS_ABORT_EXIT, "invalid send result %d\n", (int)result);
break;
}
- ASSERT(! "Can not arrive here");
- BIF_ERROR(p, BADARG);
+
+done:
+ UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
+ return retval;
}
/**********************************************************************/
@@ -3446,91 +3601,6 @@ BIF_RETTYPE self_0(BIF_ALIST_0)
/**********************************************************************/
-/*
- New representation of refs in R9, see erl_term.h
-
- In the first data word, only the usual 18 bits are used. Ordinarily,
- in "long refs" all words are used (in other words, practically never
- wrap around), but for compatibility with older nodes, "short refs"
- exist. Short refs come into being by being converted from the old
- external format for refs (tag REFERENCE_EXT). Short refs are
- converted back to the old external format.
-
- When converting a long ref to the external format in the case of
- preparing for sending to an older node, the ref is truncated by only
- using the first word (with 18 significant bits), and using the old tag
- REFERENCE_EXT.
-
- When comparing refs or different size, only the parts up to the length
- of the shorter operand are used. This has the desirable effect that a
- long ref sent to an old node and back will be treated as equal to
- the original, although some of the bits have been lost.
-
- The hash value for a ref always considers only the first word, since
- in the above scenario, the original and the copy should have the same
- hash value.
-*/
-
-static Uint32 reference0; /* Initialized in erts_init_bif */
-static Uint32 reference1;
-static Uint32 reference2;
-static erts_smp_spinlock_t make_ref_lock;
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-
-void
-erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- erts_smp_spin_lock(&make_ref_lock);
-
- reference0++;
- if (reference0 >= MAX_REFERENCE) {
- reference0 = 0;
- reference1++;
- if (reference1 == 0) {
- reference2++;
- }
- }
-
- ref[0] = reference0;
- ref[1] = reference1;
- ref[2] = reference2;
-
- erts_smp_spin_unlock(&make_ref_lock);
-}
-
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
-{
- Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
-
- erts_make_ref_in_array(ref);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-Eterm erts_make_ref(Process *p)
-{
- Eterm* hp;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
-
- hp = HAlloc(p, REF_THING_SIZE);
-
- erts_make_ref_in_array(ref);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
-
- return make_internal_ref(hp);
-}
-
-BIF_RETTYPE make_ref_0(BIF_ALIST_0)
-{
- return erts_make_ref(BIF_P);
-}
-
-/**********************************************************************/
-
/* return the time of day */
BIF_RETTYPE time_0(BIF_ALIST_0)
@@ -3996,16 +4066,19 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
BIF_RETTYPE function_exported_3(BIF_ALIST_3)
{
+ int arity;
if (is_not_atom(BIF_ARG_1) ||
is_not_atom(BIF_ARG_2) ||
is_not_small(BIF_ARG_3)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3),
- erts_active_code_ix()) == NULL) {
- BIF_RET(am_false);
+ arity = signed_val(BIF_ARG_3);
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, arity,
+ erts_active_code_ix()) != NULL ||
+ erts_is_builtin(BIF_ARG_1, BIF_ARG_2, arity)) {
+ BIF_RET(am_true);
}
- BIF_RET(am_true);
+ BIF_RET(am_false);
}
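The hunk above makes function_exported/3 report true for BIFs as well: erts_find_function only covers exports of loaded code, so erts_is_builtin is consulted as a second source. The combined predicate, written out as a hypothetical helper:

static int is_exported_or_builtin(Eterm mod, Eterm fun, int arity)
{
    return erts_find_function(mod, fun, arity, erts_active_code_ix()) != NULL
        || erts_is_builtin(mod, fun, arity);
}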
/**********************************************************************/
@@ -4508,6 +4581,28 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
break;
}
#endif
+ } else if (BIF_ARG_1 == am_time_offset
+ && ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) {
+ ErtsTimeOffsetState res;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ res = erts_finalize_time_offset();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ switch (res) {
+ case ERTS_TIME_OFFSET_PRELIMINARY: {
+ DECL_AM(preliminary);
+ BIF_RET(AM_preliminary);
+ }
+ case ERTS_TIME_OFFSET_FINAL: {
+ DECL_AM(final);
+ BIF_RET(AM_final);
+ }
+ case ERTS_TIME_OFFSET_VOLATILE: {
+ DECL_AM(volatile);
+ BIF_RET(AM_volatile);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Unknown state");
+ }
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
@@ -4795,11 +4890,6 @@ void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
void erts_init_bif(void)
{
- reference0 = 0;
- reference1 = 0;
- reference2 = 0;
-
- erts_smp_spinlock_init(&make_ref_lock, "make_ref");
erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
@@ -4816,9 +4906,17 @@ void erts_init_bif(void)
#endif
, &bif_return_trap);
- flush_monitor_message_trap = erts_export_put(am_erlang,
- am_flush_monitor_message,
- 2);
+ erts_init_trap_export(&dsend_continue_trap_export,
+ am_erts_internal, am_dsend_continue_trap, 1,
+ dsend_continue_trap_1);
+
+ flush_monitor_messages_trap = erts_export_put(am_erts_internal,
+ am_flush_monitor_messages,
+ 3);
+
+ erts_convert_time_unit_trap = erts_export_put(am_erlang,
+ am_convert_time_unit,
+ 3);
set_cpu_topology_trap = erts_export_put(am_erlang,
am_set_cpu_topology,
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 72c55ccb55..d461c3f479 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -21,6 +21,7 @@
#define __BIF_H__
extern Export* erts_format_cpu_topology_trap;
+extern Export *erts_convert_time_unit_trap;
#define BIF_RETTYPE Eterm
@@ -30,10 +31,12 @@ extern Export* erts_format_cpu_topology_trap;
#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_4 Process* A__p, Eterm* BIF__ARGS
#define BIF_ARG_1 (BIF__ARGS[0])
#define BIF_ARG_2 (BIF__ARGS[1])
#define BIF_ARG_3 (BIF__ARGS[2])
+#define BIF_ARG_4 (BIF__ARGS[3])
#define ERTS_IS_PROC_OUT_OF_REDS(p) \
((p)->fcalls > 0 \
@@ -465,6 +468,8 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifdef ERL_WANT_HIPE_BIF_WRAPPER__
+
#ifndef HIPE
#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY)
@@ -509,6 +514,7 @@ BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
#endif
+#endif /* ERL_WANT_HIPE_BIF_WRAPPER__ */
#include "erl_bif_table.h"
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index e68b8e6274..471f687101 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -92,6 +92,8 @@ bif erlang:loaded/0
bif erlang:localtime/0
bif erlang:localtime_to_universaltime/2
bif erlang:make_ref/0
+bif erlang:unique_integer/0
+bif erlang:unique_integer/1
bif erlang:md5/1
bif erlang:md5_init/0
bif erlang:md5_update/2
@@ -104,6 +106,13 @@ ubif erlang:node/1
ubif erlang:node/0
bif erlang:nodes/1
bif erlang:now/0
+bif erlang:monotonic_time/0
+bif erlang:monotonic_time/1
+bif erlang:system_time/0
+bif erlang:system_time/1
+bif erlang:time_offset/0
+bif erlang:time_offset/1
+bif erlang:timestamp/0
bif erlang:open_port/2
@@ -157,6 +166,17 @@ bif erts_internal:request_system_task/3
bif erts_internal:check_process_code/2
bif erts_internal:map_to_tuple_keys/1
+bif erts_internal:map_type/1
+bif erts_internal:map_hashmap_children/1
+
+bif erts_internal:time_unit/0
+
+bif erts_internal:get_bif_timer_servers/0
+bif erts_internal:create_bif_timer/0
+bif erts_internal:access_bif_timer/1
+
+bif erts_internal:monitor_process/2
+bif erts_internal:is_system_process/1
# inet_db support
bif erlang:port_set_data/2
@@ -194,16 +214,12 @@ bif math:erf/1
bif math:erfc/1
bif math:exp/1
bif math:log/1
+bif math:log2/1
bif math:log10/1
bif math:sqrt/1
bif math:atan2/2
bif math:pow/2
-bif erlang:start_timer/3
-bif erlang:send_after/3
-bif erlang:cancel_timer/1
-bif erlang:read_timer/1
-
bif erlang:make_tuple/2
bif erlang:append_element/2
bif erlang:make_tuple/3
@@ -347,6 +363,8 @@ bif os:getenv/0
bif os:getenv/1
bif os:getpid/0
bif os:timestamp/0
+bif os:system_time/0
+bif os:system_time/1
#
# Bifs in the erl_ddll module (the module actually does not exist)
@@ -578,7 +596,7 @@ bif io:printable_range/0
bif os:unsetenv/1
#
-# New in R17A
+# New in 17.0
#
bif re:inspect/2
@@ -600,10 +618,21 @@ bif maps:values/1
bif erts_internal:cmp_term/2
+bif ets:take/2
+
#
-# New in 17.1.
+# New in 17.1
#
+
bif erlang:fun_info_mfa/1
+
+# New in 18.0
+#
+
+bif erlang:get_keys/0
+bif ets:update_counter/4
+bif erts_debug:map_info/1
+
#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index d1e46e3063..a4ea9c59ca 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1577,6 +1577,46 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
return make_big(hp);
}
+Eterm
+erts_uint64_array_to_big(Uint **hpp, int neg, int len, Uint64 *array)
+{
+ Uint *headerp;
+ int i, pot_digits, digits;
+
+ headerp = *hpp;
+
+ pot_digits = digits = 0;
+ for (i = 0; i < len; i++) {
+#if defined(ARCH_32) || HALFWORD_HEAP
+ Uint low_val = array[i] & ((Uint) 0xffffffff);
+ Uint high_val = (array[i] >> 32) & ((Uint) 0xffffffff);
+ BIG_DIGIT(headerp, pot_digits) = low_val;
+ pot_digits++;
+ if (low_val)
+ digits = pot_digits;
+ BIG_DIGIT(headerp, pot_digits) = high_val;
+ pot_digits++;
+ if (high_val)
+ digits = pot_digits;
+#else
+ Uint val = array[i];
+ BIG_DIGIT(headerp, pot_digits) = val;
+ pot_digits++;
+ if (val)
+ digits = pot_digits;
+#endif
+ }
+
+ if (neg)
+ *headerp = make_neg_bignum_header(digits);
+ else
+ *headerp = make_pos_bignum_header(digits);
+
+ *hpp = headerp + 1 + digits;
+
+ return make_big(headerp);
+}
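erts_uint64_array_to_big packs an array of 64-bit words (least significant word first) into bignum digits, two digits per word on 32-bit and halfword builds and one otherwise, trims leading zero digits, and writes the matching header. A hypothetical caller, sized with the ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ bound added to big.h below and assuming at least one nonzero word:

static Eterm make_uint128_big(Process *p, Uint64 lo, Uint64 hi)
{
    Uint64 words[2] = { lo, hi };   /* least significant 64 bits first */
    Uint *hp = (Uint *) HAlloc(p, ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(2));
    /* (a real caller would HRelease any unused heap words; omitted in this sketch) */
    return erts_uint64_array_to_big(&hp, 0 /* non-negative */, 2, words);
}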
+
/*
** Convert a bignum to a double float
*/
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index da31876d75..4e4611de16 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -104,6 +104,9 @@ typedef Uint dsize_t; /* Vector size type */
: ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
+#define ERTS_MAX_SINT64_HEAP_SIZE (1 + 2)
+#define ERTS_MAX_UINT64_HEAP_SIZE (1 + 2)
+#define ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(LEN) (2*(LEN)+1)
#else
@@ -111,6 +114,9 @@ typedef Uint dsize_t; /* Vector size type */
(IS_SSMALL((X)) ? 0 : (1 + 1))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : (1 + 1))
+#define ERTS_MAX_SINT64_HEAP_SIZE (1 + 1)
+#define ERTS_MAX_UINT64_HEAP_SIZE (1 + 1)
+#define ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(LEN) ((LEN)+1)
#endif
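These bounds follow directly from the digit layout: on 32-bit and halfword builds every 64-bit input can contribute two digit words plus one header word, hence 2*LEN+1; on 64-bit builds it is one digit per input, hence LEN+1. Restated as a hypothetical helper purely for clarity:

static Uint big_heap_bound_for_u64_array(int len)
{
#if defined(ARCH_32) || HALFWORD_HEAP
    return 2 * (Uint) len + 1;   /* two digits per 64-bit value + header word */
#else
    return (Uint) len + 1;       /* one digit per 64-bit value + header word */
#endif
}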
@@ -156,6 +162,7 @@ int term_to_Uint(Eterm, Uint*);
int term_to_UWord(Eterm, UWord*);
int term_to_Sint(Eterm, Sint*);
#if HAVE_INT64
+Eterm erts_uint64_array_to_big(Uint **, int, int, Uint64 *);
int term_to_Uint64(Eterm, Uint64*);
int term_to_Sint64(Eterm, Sint64*);
#endif
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index f50d484576..cc0b3b9b6c 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -26,7 +26,9 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
@@ -83,8 +85,6 @@ new_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
@@ -122,8 +122,6 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
@@ -177,7 +175,6 @@ erts_realloc_binary(Eterm bin, size_t size)
} else { /* REFC */
ProcBin* pb = (ProcBin *) bval;
Binary* newbin = erts_bin_realloc(pb->val, size);
- newbin->orig_size = size;
pb->val = newbin;
pb->size = size;
pb->bytes = (byte*) newbin->orig_bytes;
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 2cddfe2800..e2fa572546 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -209,25 +209,12 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "State: ");
state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- else if (state & ERTS_PSFLG_EXITING)
- erts_print(to, to_arg, "Exiting\n");
- else if (state & ERTS_PSFLG_GC) {
- garbing = 1;
- running = 1;
- erts_print(to, to_arg, "Garbing\n");
- }
- else if (state & ERTS_PSFLG_SUSPENDED)
- erts_print(to, to_arg, "Suspended\n");
- else if (state & ERTS_PSFLG_RUNNING) {
- running = 1;
- erts_print(to, to_arg, "Running\n");
- }
- else if (state & ERTS_PSFLG_ACTIVE)
- erts_print(to, to_arg, "Scheduled\n");
- else
- erts_print(to, to_arg, "Waiting\n");
+ erts_dump_process_state(to, to_arg, state);
+ if (state & ERTS_PSFLG_GC) {
+ garbing = 1;
+ running = 1;
+ } else if (state & ERTS_PSFLG_RUNNING)
+ running = 1;
/*
* If the process is registered as a global process, display the
@@ -351,6 +338,10 @@ print_process_info(int to, void *to_arg, Process *p)
#endif
erts_stack_dump(to, to_arg, p);
}
+
+ /* Display all states */
+ erts_print(to, to_arg, "Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, state);
}
static void
@@ -680,27 +671,39 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
char* dumpname;
int secs;
int env_erl_crash_dump_seconds_set = 1;
+ int i;
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
#ifdef ERTS_SMP
+    /* Order all managed threads to block; this has to be done
+       first to guarantee that this is the only thread generating
+       the crash dump. */
+ erts_thr_progress_fatal_error_block(&tpd_buf);
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
- * Wait for all managed threads to block. If all threads haven't blocked
- * after a minute, we go anyway and hope for the best...
- *
- * We do not release system again. We expect an exit() or abort() after
- * dump has been written.
+ * We suspend all scheduler threads so that we can dump some
+ * data about the currently running processes and scheduler data.
+ * We have to be very very careful when doing this as the schedulers
+ * could be anywhere.
*/
- erts_thr_progress_fatal_error_block(60000, &tpd_buf);
- /* Either worked or not... */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_suspend(tid);
+ }
+
+#endif
/* Allow us to pass certain places without locking... */
erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
-#else
+
+#else /* !ERTS_SMP */
erts_writing_erl_crash_dump = 1;
-#endif
+#endif /* ERTS_SMP */
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
@@ -753,11 +756,12 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
dumpname = "erl_crash.dump";
else
dumpname = &dumpnamebuf[0];
+
+ erts_fprintf(stderr,"\nCrash dump is being written to: %s...", dumpname);
fd = open(dumpname,O_WRONLY | O_CREAT | O_TRUNC,0640);
- if (fd < 0)
+ if (fd < 0)
return; /* Can't create the crash dump, skip it */
-
time(&now);
erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));
@@ -771,9 +775,74 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_fdprintf(fd, "System version: ");
erts_print_system_version(fd, NULL, NULL);
erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
+
erts_fdprintf(fd, "Taints: ");
erts_print_nif_taints(fd, NULL);
erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
+
+#ifdef USE_THREADS
+ /* We want to note which thread it was that called erl_exit */
+ if (erts_get_scheduler_data()) {
+ erts_fdprintf(fd, "Calling Thread: scheduler:%d\n",
+ erts_get_scheduler_data()->no);
+ } else {
+ if (!erts_thr_getname(erts_thr_self(), dumpnamebuf, MAXPATHLEN))
+ erts_fdprintf(fd, "Calling Thread: %s\n", dumpnamebuf);
+ else
+ erts_fdprintf(fd, "Calling Thread: %p\n", erts_thr_self());
+ }
+#else
+ erts_fdprintf(fd, "Calling Thread: scheduler:1\n");
+#endif
+
+#if defined(ERTS_HAVE_TRY_CATCH)
+
+ /*
+ * erts_print_scheduler_info is not guaranteed to be safe to call
+ * here for all schedulers as we may have suspended a scheduler
+ * in the middle of updating the STACK_TOP and STACK_START
+ * variables and thus when scanning the stack we could get
+ * segmentation faults. We protect against this very unlikely
+ * scenario by using the ERTS_SYS_TRY_CATCH.
+ */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i)),
+ erts_fdprintf(fd, "** crashed **\n"));
+ }
+#endif
+
+#ifdef ERTS_SMP
+
+#if defined(ERTS_THR_HAVE_SIG_FUNCS)
+
+ /* We resume all schedulers so that we are in a known safe state
+ when we write the rest of the crash dump */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
+ if (!erts_equal_tids(tid,erts_thr_self()))
+ sys_thr_resume(tid);
+ }
+#endif
+
+ /*
+ * Wait for all managed threads to block. If all threads haven't blocked
+ * after a minute, we go anyway and hope for the best...
+ *
+ * We do not release system again. We expect an exit() or abort() after
+ * dump has been written.
+ */
+ erts_thr_progress_fatal_error_wait(60000);
+ /* Either worked or not... */
+#endif
+
+#ifndef ERTS_HAVE_TRY_CATCH
+ /* This is safe to call here, as all schedulers are blocked */
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i));
+ }
+#endif
+
info(fd, NULL); /* General system info */
if (erts_ptab_initialized(&erts_proc))
process_info(fd, NULL); /* Info about each process and port */
@@ -803,7 +872,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_fdprintf(fd, "=end\n");
close(fd);
- erts_fprintf(stderr,"\nCrash dump was written to: %s\n", dumpname);
+ erts_fprintf(stderr,"done\n");
}
void
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 50548850eb..027b85b079 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -21,6 +21,8 @@
# include "config.h"
#endif
+#define ERL_WANT_GC_INTERNALS__
+
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
@@ -125,6 +127,35 @@ Uint size_object(Eterm obj)
obj = *bptr;
break;
}
+ case HASHMAP_SUBTAG:
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ {
+ Eterm *head;
+ Uint sz;
+ head = hashmap_val_rel(obj, base);
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ sum += 1 + sz + header_arity(hdr);
+ head += 1 + header_arity(hdr);
+
+ if (sz == 0) {
+ goto pop_next;
+ }
+ while(sz-- > 1) {
+ obj = head[sz];
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ obj = head[0];
+ }
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "size_object: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ break;
case SUB_BINARY_SUBTAG:
{
Eterm real_bin;
@@ -155,10 +186,10 @@ Uint size_object(Eterm obj)
case MAP_SUBTAG:
{
Uint n;
- map_t *mp;
- mp = (map_t*)map_val_rel(obj,base);
+ flatmap_t *mp;
+ mp = (flatmap_t*)flatmap_val_rel(obj,base);
ptr = (Eterm *)mp;
- n = map_get_size(mp) + 1;
+ n = flatmap_get_size(mp) + 1;
sum += n + 2;
ptr += 2; /* hdr + size words */
while (n--) {
@@ -340,8 +371,8 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
break;
case MAP_SUBTAG:
{
- i = map_get_size(objp) + 3;
- *argp = make_map_rel(htop, dst_base);
+ i = flatmap_get_size(objp) + 3;
+ *argp = make_flatmap_rel(htop, dst_base);
while (i--) {
*htop++ = *objp++;
}
@@ -457,7 +488,7 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
{
ExternalThing *etp = (ExternalThing *) htop;
- i = thing_arityval(hdr) + 1;
+ i = thing_arityval(hdr) + 1;
tp = htop;
while (i--) {
@@ -471,6 +502,21 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*argp = make_external_rel(tp, dst_base);
}
break;
+ case HASHMAP_SUBTAG:
+ tp = htop;
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
+ *htop++ = *objp++;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
+ i = 1 + hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ while (i--) { *htop++ = *objp++; }
+ *argp = make_hashmap_rel(tp, dst_base);
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "copy_struct: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
+ }
+ break;
case BIN_MATCHSTATE_SUBTAG:
erl_exit(ERTS_ABORT_EXIT,
"copy_struct: matchstate term not allowed");
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index ec07ddcd9c..bfecac1612 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -119,7 +119,7 @@ Export* dmonitor_p_trap = NULL;
/* forward declarations */
static void clear_dist_entry(DistEntry*);
-static int dsig_send(ErtsDSigData *, Eterm, Eterm, int);
+static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy);
static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
@@ -622,9 +622,7 @@ alloc_dist_obuf(Uint size)
ErtsDistOutputBuf *obuf;
Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
Binary *bin = erts_bin_drv_alloc(obuf_size);
- bin->flags = BIN_FLAG_DRV;
erts_refc_init(&bin->refc, 1);
- bin->orig_size = (SWord) obuf_size;
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
@@ -709,6 +707,55 @@ static void clear_dist_entry(DistEntry *dep)
}
}
+void erts_dsend_context_dtor(Binary* ctx_bin)
+{
+ ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ switch (ctx->dss.phase) {
+ case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
+ DESTROY_SAVED_ESTACK(&ctx->dss.u.sc.estack);
+ break;
+ case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
+ DESTROY_SAVED_WSTACK(&ctx->dss.u.ec.wstack);
+ break;
+ default:;
+ }
+ if (ctx->dss.phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->dss.obuf) {
+ free_dist_obuf(ctx->dss.obuf);
+ }
+ if (ctx->dep_to_deref)
+ erts_deref_dist_entry(ctx->dep_to_deref);
+}
+
+Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
+{
+ struct exported_ctx {
+ ErtsSendContext ctx;
+ ErtsAtomCacheMap acm;
+ };
+ Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
+ erts_dsend_context_dtor);
+ struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ Uint ctl_size = !HALFWORD_HEAP ? 0 : (arityval(ctx->ctl_heap[0]) + 1);
+ Eterm* hp = HAlloc(p, ctl_size + PROC_BIN_SIZE);
+
+ sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
+ ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
+#if !HALFWORD_HEAP
+ dst->ctx.dss.ctl = make_tuple(dst->ctx.ctl_heap);
+#else
+ /* Must put control tuple in low mem */
+ sys_memcpy(hp, ctx->ctl_heap, ctl_size*sizeof(Eterm));
+ dst->ctx.dss.ctl = make_tuple(hp);
+ hp += ctl_size;
+#endif
+ if (ctx->dss.acmp) {
+ sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
+ dst->ctx.dss.acmp = &dst->acm;
+ }
+ return erts_mk_magic_binary_term(&hp, &MSO(p), ctx_bin);
+}
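erts_dsend_export_trap_context moves the on-stack send context into a magic binary so it survives the trap; the binary's destructor (erts_dsend_context_dtor above) releases whatever the interrupted phase was still holding. The general shape of that pattern, with placeholder names for the context type and destructor:

static Eterm export_ctx_as_term(Process *p, MyCtx *src)  /* MyCtx, my_ctx_dtor are placeholders */
{
    Binary *bin = erts_create_magic_binary(sizeof(MyCtx), my_ctx_dtor);
    MyCtx *dst  = ERTS_MAGIC_BIN_DATA(bin);
    Eterm *hp   = HAlloc(p, PROC_BIN_SIZE);

    sys_memcpy(dst, src, sizeof(MyCtx));
    return erts_mk_magic_binary_term(&hp, &MSO(p), bin);  /* trap on this term */
}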
+
+
/*
 * The erts_dsig_send_*() functions implemented below send asynchronous
* distributed signals to other Erlang nodes. Before sending a distributed
@@ -731,7 +778,7 @@ erts_dsig_send_link(ErtsDSigData *dsdp, Eterm local, Eterm remote)
int res;
UseTmpHeapNoproc(4);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
@@ -744,7 +791,7 @@ erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
int res;
UseTmpHeapNoproc(4);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
@@ -772,7 +819,7 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
erts_smp_de_links_unlock(dsdp->dep);
#endif
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(6);
return res;
}
@@ -793,7 +840,7 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
make_small(DOP_MONITOR_P),
watcher, watched, ref);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -815,18 +862,17 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
make_small(DOP_DEMONITOR_P),
watcher, watched, ref);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, force);
+ res = dsig_send_ctl(dsdp, ctl, force);
UnUseTmpHeapNoproc(5);
return res;
}
int
-erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
+erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,5);
Eterm token = NIL;
- Process *sender = dsdp->proc;
+ Process *sender = ctx->dsd.proc;
int res;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
@@ -838,8 +884,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
DTRACE_CHARBUF(receiver_name, 64);
#endif
- UseTmpHeapNoproc(5);
- if (SEQ_TRACE_TOKEN(sender) != NIL
+ if (SEQ_TRACE_TOKEN(sender) != NIL
#ifdef USE_VM_PROBES
&& SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
#endif
@@ -852,7 +897,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", dsdp->dep->sysname);
+ "%T", ctx->dsd.dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
@@ -867,26 +912,28 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
#endif
if (token != NIL)
- ctl = TUPLE4(&ctl_heap[0],
+ ctl = TUPLE4(&ctx->ctl_heap[0],
make_small(DOP_SEND_TT), am_Cookie, remote, token);
else
- ctl = TUPLE3(&ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
+ ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- res = dsig_send(dsdp, ctl, message, 0);
- UnUseTmpHeapNoproc(5);
+ ctx->dss.ctl = ctl;
+ ctx->dss.msg = message;
+ ctx->dss.force_busy = 0;
+ res = erts_dsig_send(&ctx->dsd, &ctx->dss);
return res;
}
int
-erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
+erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
+ ErtsSendContext* ctx)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,6);
Eterm token = NIL;
- Process *sender = dsdp->proc;
+ Process *sender = ctx->dsd.proc;
int res;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
@@ -898,7 +945,6 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
DTRACE_CHARBUF(receiver_name, 128);
#endif
- UseTmpHeapNoproc(6);
if (SEQ_TRACE_TOKEN(sender) != NIL
#ifdef USE_VM_PROBES
&& SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
@@ -912,7 +958,7 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", dsdp->dep->sysname);
+ "%T", ctx->dsd.dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
@@ -927,17 +973,19 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
#endif
if (token != NIL)
- ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
+ ctl = TUPLE5(&ctx->ctl_heap[0], make_small(DOP_REG_SEND_TT),
sender->common.id, am_Cookie, remote_name, token);
else
- ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
+ ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_REG_SEND),
sender->common.id, am_Cookie, remote_name);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- res = dsig_send(dsdp, ctl, message, 0);
- UnUseTmpHeapNoproc(6);
+ ctx->dss.ctl = ctl;
+ ctx->dss.msg = message;
+ ctx->dss.force_busy = 0;
+ res = erts_dsig_send(&ctx->dsd, &ctx->dss);
return res;
}
@@ -994,7 +1042,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
DTRACE7(process_exit_signal_remote, sender_name, node_name,
remote_name, reason_str, tok_label, tok_lastcnt, tok_serial);
/* forced, i.e ignore busy */
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(6);
return res;
}
@@ -1010,7 +1058,7 @@ erts_dsig_send_exit(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_EXIT), local, remote, reason);
/* forced, i.e ignore busy */
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -1026,7 +1074,7 @@ erts_dsig_send_exit2(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_EXIT2), local, remote, reason);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(5);
return res;
}
@@ -1043,7 +1091,7 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
ctl = TUPLE3(&ctl_heap[0],
make_small(DOP_GROUP_LEADER), leader, remote);
- res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send_ctl(dsdp, ctl, 0);
UnUseTmpHeapNoproc(4);
return res;
}
@@ -1693,194 +1741,235 @@ int erts_net_message(Port *prt,
return -1;
}
-static int
-dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
+static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy)
{
+ struct erts_dsig_send_context ctx;
+ int ret;
+ ctx.ctl = ctl;
+ ctx.msg = THE_NON_VALUE;
+ ctx.force_busy = force_busy;
+ ctx.phase = ERTS_DSIG_SEND_PHASE_INIT;
+#ifdef DEBUG
+ ctx.reds = 1; /* provoke assert below (no reduction count without msg) */
+#endif
+ ret = erts_dsig_send(dsdp, &ctx);
+ ASSERT(ret != ERTS_DSIG_SEND_CONTINUE);
+ return ret;
+}
+
+int
+erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
+{
+ int retval;
+ Sint initial_reds = ctx->reds;
Eterm cid;
- int suspended = 0;
- int resume = 0;
- Uint32 pass_through_size;
- Uint data_size, dhdr_ext_size;
- ErtsAtomCacheMap *acmp;
- ErtsDistOutputBuf *obuf;
- DistEntry *dep = dsdp->dep;
- Uint32 flags = dep->flags;
- Process *c_p = dsdp->proc;
- if (!c_p || dsdp->no_suspend)
- force_busy = 1;
+ while (1) {
+ switch (ctx->phase) {
+ case ERTS_DSIG_SEND_PHASE_INIT:
+ ctx->flags = dsdp->dep->flags;
+ ctx->c_p = dsdp->proc;
- ERTS_SMP_LC_ASSERT(!c_p
- || (ERTS_PROC_LOCK_MAIN
- == erts_proc_lc_my_proc_locks(c_p)));
+ if (!ctx->c_p || dsdp->no_suspend)
+ ctx->force_busy = 1;
- if (!erts_is_alive)
- return ERTS_DSIG_SEND_OK;
+ ERTS_SMP_LC_ASSERT(!ctx->c_p
+ || (ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(ctx->c_p)));
- if (flags & DFLAG_DIST_HDR_ATOM_CACHE) {
- acmp = erts_get_atom_cache_map(c_p);
- pass_through_size = 0;
- }
- else {
- acmp = NULL;
- pass_through_size = 1;
- }
+ if (!erts_is_alive)
+ return ERTS_DSIG_SEND_OK;
-#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, ">>%s CTL: %T\n", pass_through_size ? "P" : " ", ctl);
- if (is_value(msg))
- erts_fprintf(stderr, " MSG: %T\n", msg);
-#endif
+ if (ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE) {
+ ctx->acmp = erts_get_atom_cache_map(ctx->c_p);
+ ctx->pass_through_size = 0;
+ }
+ else {
+ ctx->acmp = NULL;
+ ctx->pass_through_size = 1;
+ }
- data_size = pass_through_size;
- erts_reset_atom_cache_map(acmp);
- data_size += erts_encode_dist_ext_size(ctl, flags, acmp);
- if (is_value(msg))
- data_size += erts_encode_dist_ext_size(msg, flags, acmp);
- erts_finalize_atom_cache_map(acmp, flags);
+ #ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, ">>%s CTL: %T\n", ctx->pass_through_size ? "P" : " ", ctx->ctl);
+	if (is_value(ctx->msg))
+	    erts_fprintf(stderr, "    MSG: %T\n", ctx->msg);
+ #endif
+
+ ctx->data_size = ctx->pass_through_size;
+ erts_reset_atom_cache_map(ctx->acmp);
+ erts_encode_dist_ext_size(ctx->ctl, ctx->flags, ctx->acmp, &ctx->data_size);
+
+ if (is_value(ctx->msg)) {
+ ctx->u.sc.estack.start = NULL;
+ ctx->u.sc.flags = ctx->flags;
+ ctx->u.sc.level = 0;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE;
+ } else {
+ ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+ }
+ break;
- dhdr_ext_size = erts_encode_ext_dist_header_size(acmp);
- data_size += dhdr_ext_size;
+ case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
+ if (erts_encode_dist_ext_size_int(ctx->msg, ctx, &ctx->data_size)) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
- obuf = alloc_dist_obuf(data_size);
- obuf->ext_endp = &obuf->data[0] + pass_through_size + dhdr_ext_size;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+ case ERTS_DSIG_SEND_PHASE_ALLOC:
+ erts_finalize_atom_cache_map(ctx->acmp, ctx->flags);
+
+ ctx->dhdr_ext_size = erts_encode_ext_dist_header_size(ctx->acmp);
+ ctx->data_size += ctx->dhdr_ext_size;
+
+ ctx->obuf = alloc_dist_obuf(ctx->data_size);
+ ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->pass_through_size + ctx->dhdr_ext_size;
+
+ /* Encode internal version of dist header */
+ ctx->obuf->extp = erts_encode_ext_dist_header_setup(ctx->obuf->ext_endp, ctx->acmp);
+ /* Encode control message */
+ erts_encode_dist_ext(ctx->ctl, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, NULL, NULL);
+ if (is_value(ctx->msg)) {
+ ctx->u.ec.flags = ctx->flags;
+ ctx->u.ec.level = 0;
+ ctx->u.ec.wstack.wstart = NULL;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE;
+ } else {
+ ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
+ }
+ break;
- /* Encode internal version of dist header */
- obuf->extp = erts_encode_ext_dist_header_setup(obuf->ext_endp, acmp);
- /* Encode control message */
- erts_encode_dist_ext(ctl, &obuf->ext_endp, flags, acmp);
- if (is_value(msg)) {
- /* Encode message */
- erts_encode_dist_ext(msg, &obuf->ext_endp, flags, acmp);
- }
+ case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
+ if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, &ctx->u.ec, &ctx->reds)) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
- ASSERT(obuf->extp < obuf->ext_endp);
- ASSERT(&obuf->data[0] <= obuf->extp - pass_through_size);
- ASSERT(obuf->ext_endp <= &obuf->data[0] + data_size);
+ ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
+ case ERTS_DSIG_SEND_PHASE_FIN: {
+ DistEntry *dep = dsdp->dep;
+ int suspended = 0;
+ int resume = 0;
- data_size = obuf->ext_endp - obuf->extp;
+ ASSERT(ctx->obuf->extp < ctx->obuf->ext_endp);
+ ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->pass_through_size);
+ ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size);
- /*
- * Signal encoded; now verify that the connection still exists,
- * and if so enqueue the signal and schedule it for send.
- */
- obuf->next = NULL;
- erts_smp_de_rlock(dep);
- cid = dep->cid;
- if (cid != dsdp->cid
- || dep->connection_id != dsdp->connection_id
- || dep->status & ERTS_DE_SFLG_EXITING) {
- /* Not the same connection as when we started; drop message... */
- erts_smp_de_runlock(dep);
- free_dist_obuf(obuf);
- }
- else {
- ErtsProcList *plp = NULL;
- erts_smp_mtx_lock(&dep->qlock);
- dep->qsize += size_obuf(obuf);
- if (dep->qsize >= erts_dist_buf_busy_limit)
- dep->qflgs |= ERTS_DE_QFLG_BUSY;
- if (!force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) {
- erts_smp_mtx_unlock(&dep->qlock);
+ ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp;
- plp = erts_proclist_create(c_p);
- erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
- suspended = 1;
- erts_smp_mtx_lock(&dep->qlock);
- }
+ /*
+ * Signal encoded; now verify that the connection still exists,
+ * and if so enqueue the signal and schedule it for send.
+ */
+ ctx->obuf->next = NULL;
+ erts_smp_de_rlock(dep);
+ cid = dep->cid;
+ if (cid != dsdp->cid
+ || dep->connection_id != dsdp->connection_id
+ || dep->status & ERTS_DE_SFLG_EXITING) {
+ /* Not the same connection as when we started; drop message... */
+ erts_smp_de_runlock(dep);
+ free_dist_obuf(ctx->obuf);
+ }
+ else {
+ ErtsProcList *plp = NULL;
+ erts_smp_mtx_lock(&dep->qlock);
+ dep->qsize += size_obuf(ctx->obuf);
+ if (dep->qsize >= erts_dist_buf_busy_limit)
+ dep->qflgs |= ERTS_DE_QFLG_BUSY;
+ if (!ctx->force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) {
+ erts_smp_mtx_unlock(&dep->qlock);
+
+ plp = erts_proclist_create(ctx->c_p);
+ erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ suspended = 1;
+ erts_smp_mtx_lock(&dep->qlock);
+ }
- /* Enqueue obuf on dist entry */
- if (dep->out_queue.last)
- dep->out_queue.last->next = obuf;
- else
- dep->out_queue.first = obuf;
- dep->out_queue.last = obuf;
+ /* Enqueue obuf on dist entry */
+ if (dep->out_queue.last)
+ dep->out_queue.last->next = ctx->obuf;
+ else
+ dep->out_queue.first = ctx->obuf;
+ dep->out_queue.last = ctx->obuf;
+
+ if (!ctx->force_busy) {
+ if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
+ if (suspended)
+ resume = 1; /* was busy when we started, but isn't now */
+ #ifdef USE_VM_PROBES
+ if (resume && DTRACE_ENABLED(dist_port_not_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ DTRACE3(dist_port_not_busy, erts_this_node_sysname,
+ port_str, remote_str);
+ }
+ #endif
+ }
+ else {
+ /* Enqueue suspended process on dist entry */
+ ASSERT(plp);
+ erts_proclist_store_last(&dep->suspended, plp);
+ }
+ }
- if (!force_busy) {
- if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
- if (suspended)
- resume = 1; /* was busy when we started, but isn't now */
-#ifdef USE_VM_PROBES
- if (resume && DTRACE_ENABLED(dist_port_not_busy)) {
- DTRACE_CHARBUF(port_str, 64);
- DTRACE_CHARBUF(remote_str, 64);
-
- erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
- "%T", cid);
- erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", dep->sysname);
- DTRACE3(dist_port_not_busy, erts_this_node_sysname,
- port_str, remote_str);
- }
-#endif
+ erts_smp_mtx_unlock(&dep->qlock);
+ erts_schedule_dist_command(NULL, dep);
+ erts_smp_de_runlock(dep);
+
+ if (resume) {
+ erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proclist_destroy(plp);
+ /*
+		 * Note that the calling process still has to yield as if it
+ * suspended. If not, the calling process could later be
+ * erroneously scheduled when it shouldn't be.
+ */
+ }
}
- else {
- /* Enqueue suspended process on dist entry */
- ASSERT(plp);
- erts_proclist_store_last(&dep->suspended, plp);
+ ctx->obuf = NULL;
+
+ if (suspended) {
+ #ifdef USE_VM_PROBES
+ if (!resume && DTRACE_ENABLED(dist_port_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+ DTRACE_CHARBUF(pid_str, 16);
+
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
+ "%T", ctx->c_p->common.id);
+ DTRACE4(dist_port_busy, erts_this_node_sysname,
+ port_str, remote_str, pid_str);
+ }
+ #endif
+ if (!resume && erts_system_monitor_flags.busy_dist_port)
+ monitor_generic(ctx->c_p, am_busy_dist_port, cid);
+ retval = ERTS_DSIG_SEND_YIELD;
+ } else {
+ retval = ERTS_DSIG_SEND_OK;
}
+ goto done;
}
-
- erts_smp_mtx_unlock(&dep->qlock);
- erts_schedule_dist_command(NULL, dep);
- erts_smp_de_runlock(dep);
-
- if (resume) {
- erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
- erts_proclist_destroy(plp);
- /*
- * Note that the calling process still have to yield as if it
- * suspended. If not, the calling process could later be
- * erroneously scheduled when it shouldn't be.
- */
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "dsig_send invalid phase (%d)\n", (int)ctx->phase);
}
}
- if (c_p) {
- int reds;
- /*
- * Bump reductions on calling process.
- *
- * This is the reduction cost: Always a base cost of 8 reductions
- * plus 16 reductions per kilobyte generated external data.
- */
-
- data_size >>= (10-4);
-#if defined(ARCH_64) && !HALFWORD_HEAP
- data_size &= 0x003fffffffffffff;
-#elif defined(ARCH_32) || HALFWORD_HEAP
- data_size &= 0x003fffff;
-#else
-# error "Ohh come on ... !?!"
-#endif
- reds = 8 + ((int) data_size > 1000000 ? 1000000 : (int) data_size);
- BUMP_REDS(c_p, reds);
- }
-
- if (suspended) {
-#ifdef USE_VM_PROBES
- if (!resume && DTRACE_ENABLED(dist_port_busy)) {
- DTRACE_CHARBUF(port_str, 64);
- DTRACE_CHARBUF(remote_str, 64);
- DTRACE_CHARBUF(pid_str, 16);
-
- erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
- erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
- "%T", c_p->common.id);
- DTRACE4(dist_port_busy, erts_this_node_sysname,
- port_str, remote_str, pid_str);
- }
-#endif
- if (!resume && erts_system_monitor_flags.busy_dist_port)
- monitor_generic(c_p, am_busy_dist_port, cid);
- return ERTS_DSIG_SEND_YIELD;
+done:
+ if (ctx->msg && ctx->c_p) {
+ BUMP_REDS(ctx->c_p, (initial_reds - ctx->reds) / TERM_TO_BINARY_LOOP_FACTOR);
}
- return ERTS_DSIG_SEND_OK;
+ return retval;
}
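dsig_send has thus become erts_dsig_send, a resumable state machine: each call works until the budget in ctx->reds runs out, records where it stopped in ctx->phase, and returns ERTS_DSIG_SEND_CONTINUE so the BIF layer can trap and resume later. Stripped of the dist-specific details, the pattern is roughly the following sketch (all names hypothetical):

enum work_phase { PH_SIZE, PH_ALLOC, PH_ENCODE, PH_FIN };
struct work_ctx { enum work_phase phase; /* plus saved stacks, budget, buffers... */ };

#define WORK_OK       0
#define WORK_CONTINUE 2

int compute_size(struct work_ctx*); int encode(struct work_ctx*);
void allocate_buffer(struct work_ctx*); void enqueue_and_schedule(struct work_ctx*);

int resumable_send(struct work_ctx *ctx)
{
    switch (ctx->phase) {
    case PH_SIZE:
        if (compute_size(ctx))        /* nonzero: budget exhausted, come back later */
            return WORK_CONTINUE;
        ctx->phase = PH_ALLOC;        /* fall through */
    case PH_ALLOC:
        allocate_buffer(ctx);
        ctx->phase = PH_ENCODE;       /* fall through */
    case PH_ENCODE:
        if (encode(ctx))
            return WORK_CONTINUE;
        ctx->phase = PH_FIN;          /* fall through */
    case PH_FIN:
        enqueue_and_schedule(ctx);
        return WORK_OK;
    }
    return WORK_OK;                   /* not reached */
}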
-
static Uint
dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
{
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index f32b999198..2a2ba0c83f 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -22,6 +22,7 @@
#include "erl_process.h"
#include "erl_node_tables.h"
+#include "zlib.h"
#define DFLAG_PUBLISHED 0x01
#define DFLAG_ATOM_CACHE 0x02
@@ -264,17 +265,105 @@ erts_destroy_dist_link(ErtsDistLinkData *dldp)
#endif
+
+
+/* Define for testing */
+/* #define EXTREME_TTB_TRAPPING 1 */
+
+#ifndef EXTREME_TTB_TRAPPING
+#define TERM_TO_BINARY_LOOP_FACTOR 32
+#else
+#define TERM_TO_BINARY_LOOP_FACTOR 1
+#endif
+
+typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
+typedef struct TTBSizeContext_ {
+ Uint flags;
+ int level;
+ Uint result;
+ Eterm obj;
+ ErtsEStack estack;
+} TTBSizeContext;
+
+typedef struct TTBEncodeContext_ {
+ Uint flags;
+ int level;
+ byte* ep;
+ Eterm obj;
+ ErtsWStack wstack;
+ Binary *result_bin;
+} TTBEncodeContext;
+
+typedef struct {
+ Uint real_size;
+ Uint dest_len;
+ byte *dbytes;
+ Binary *result_bin;
+ Binary *destination_bin;
+ z_stream stream;
+} TTBCompressContext;
+
+typedef struct {
+ int alive;
+ TTBState state;
+ union {
+ TTBSizeContext sc;
+ TTBEncodeContext ec;
+ TTBCompressContext cc;
+ } s;
+} TTBContext;
+
+enum erts_dsig_send_phase {
+ ERTS_DSIG_SEND_PHASE_INIT,
+ ERTS_DSIG_SEND_PHASE_MSG_SIZE,
+ ERTS_DSIG_SEND_PHASE_ALLOC,
+ ERTS_DSIG_SEND_PHASE_MSG_ENCODE,
+ ERTS_DSIG_SEND_PHASE_FIN
+};
+
+struct erts_dsig_send_context {
+ enum erts_dsig_send_phase phase;
+ Sint reds;
+
+ Eterm ctl;
+ Eterm msg;
+ int force_busy;
+ Uint32 pass_through_size;
+ Uint data_size, dhdr_ext_size;
+ ErtsAtomCacheMap *acmp;
+ ErtsDistOutputBuf *obuf;
+ Uint32 flags;
+ Process *c_p;
+ union {
+ TTBSizeContext sc;
+ TTBEncodeContext ec;
+ }u;
+};
+
+typedef struct {
+ int suspend;
+
+ Eterm ctl_heap[6];
+ ErtsDSigData dsd;
+ DistEntry* dep_to_deref;
+ struct erts_dsig_send_context dss;
+
+ Eterm return_term;
+}ErtsSendContext;
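Callers in bif.c are expected to fill this context in before handing it to do_send(); the sketch below mirrors what send_3 and erl_send do earlier in this patch:

static void init_send_context(Process *p, ErtsSendContext *ctx, Eterm return_term)
{
    ctx->suspend      = !0;
    ctx->dep_to_deref = NULL;
    ctx->return_term  = return_term;  /* am_ok for send/3, the message itself for '!' */
    ctx->dss.reds     = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
    ctx->dss.phase    = ERTS_DSIG_SEND_PHASE_INIT;
}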
+
+
/*
* erts_dsig_send_* return values.
*/
#define ERTS_DSIG_SEND_OK 0
#define ERTS_DSIG_SEND_YIELD 1
+#define ERTS_DSIG_SEND_CONTINUE 2
extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_msg(Eterm, Eterm, ErtsSendContext*);
extern int erts_dsig_send_exit_tt(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
extern int erts_dsig_send_unlink(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_reg_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_reg_msg(Eterm, Eterm, ErtsSendContext*);
extern int erts_dsig_send_group_leader(ErtsDSigData *, Eterm, Eterm);
extern int erts_dsig_send_exit(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_exit2(ErtsDSigData *, Eterm, Eterm, Eterm);
@@ -282,6 +371,10 @@ extern int erts_dsig_send_demonitor(ErtsDSigData *, Eterm, Eterm, Eterm, int);
extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
+extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
+extern void erts_dsend_context_dtor(Binary*);
+extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
+
extern int erts_dist_command(Port *prt, int reds);
extern void erts_dist_port_not_busy(Port *prt);
extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 90cd227fae..f2bceff4eb 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3939,7 +3939,7 @@ static Uint
install_debug_functions(void)
{
int i;
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 21434eb117..e2f8da38b9 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -269,6 +269,7 @@ type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
+type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
@@ -364,6 +365,7 @@ type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD_LOW SYSTEM port_data_heap
+type BIF_TIMER_DATA LONG_LIVED_LOW SYSTEM bif_timer_data
+else # "fullword"
@@ -384,6 +386,7 @@ type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+type BIF_TIMER_DATA LONG_LIVED SYSTEM bif_timer_data
+endif
@@ -415,6 +418,8 @@ type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
type ENVIRONMENT TEMPORARY SYSTEM environment
type PUTENV_STR SYSTEM SYSTEM putenv_string
type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+type SYS_BLOCKING STANDARD SYSTEM sys_blocking
+type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf
+endif
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index e3172dc4fb..2f277690e4 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1442,7 +1442,7 @@ get_pref_allctr(void *extra)
pref_ix = ERTS_ALC_GET_THR_IX();
- ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
+ ERTS_CT_ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(0 <= pref_ix && pref_ix < tspec->size);
return tspec->allctr[pref_ix];
@@ -1861,7 +1861,7 @@ handle_delayed_dealloc(Allctr_t *allctr,
* if this carrier is pulled from dc_list by cpool_fetch()
*/
ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
- ERTS_ALC_CPOOL_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
+ ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
#ifdef MBC_ABLK_OFFSET_BITS
blk->u.carrier = crr;
#else
@@ -5942,7 +5942,7 @@ erts_alcu_init(AlcUInit_t *init)
erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
}
#endif
- ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
+ ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
max_mseg_carriers = init->mmc;
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index decae6b2ca..bc06d41720 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -176,7 +176,7 @@ erts_init_async(void)
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
#endif
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
- char *ptr;
+ char *ptr, thr_name[16];
size_t tot_size = 0;
int i;
@@ -227,23 +227,16 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
-#endif
+ thr_opts.name = thr_name;
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(thr_opts.name, "async_%d", i+1);
-#endif
+ erts_snprintf(thr_opts.name, 16, "async_%d", i+1);
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(thr_opts.name);
-#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 3bf78adce7..934904d58e 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -32,10 +32,13 @@
#include "global.h"
#include "erl_process.h"
#include "error.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_bif_unique.h"
/*
@@ -2424,8 +2427,6 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
if trapping */
- cbs->result->flags = 0;
- cbs->result->orig_size = target_size;
erts_refc_init(&(cbs->result->refc), 1);
t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
pos = 0;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 56cd2ba04f..fc4f819f56 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -45,6 +45,7 @@
#include "big.h"
#include "dist.h"
#include "erl_version.h"
+#include "erl_bif_unique.h"
#include "dtrace-wrapper.h"
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index bbd8aa31d9..e7d84ebda1 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -459,23 +459,25 @@ Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live)
Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live)
{
Eterm arg = reg[live];
- if (is_map(arg)) {
- map_t *mp = (map_t*)map_val(arg);
- Uint size = map_get_size(mp);
- if (IS_USMALL(0, size)) {
- return make_small(size);
- } else {
- Eterm* hp;
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(size, hp);
- }
+ Eterm* hp;
+ Uint size;
+ if (is_flatmap(arg)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(arg);
+ size = flatmap_get_size(mp);
+ } else if (is_hashmap(arg)) {
+ size = hashmap_size(arg);
} else {
BIF_ERROR(p, BADARG);
}
+ if (IS_USMALL(0, size)) {
+ return make_small(size);
+ }
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(size, hp);
}
Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index b90362d82c..2eeebab9a3 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -42,6 +42,7 @@
#include "erl_cpu_topology.h"
#include "erl_async.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
#ifdef HIPE
@@ -116,6 +117,9 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ERTS_ENABLE_LOCK_COUNT
" [lock-counting]"
#endif
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ " [instruction-counting]"
+#endif
#ifdef PURIFY
" [purify-compiled]"
#endif
@@ -537,6 +541,7 @@ pi_locks(Eterm info)
switch (info) {
case am_status:
case am_priority:
+ case am_trap_exit:
return ERTS_PROC_LOCK_STATUS;
case am_links:
case am_monitors:
@@ -589,7 +594,7 @@ static Eterm pi_args[] = {
am_min_bin_vheap_size,
am_current_location,
am_current_stacktrace,
-};
+};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -2099,6 +2104,46 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_opt);
#endif
BIF_RET(res);
+ } else if (BIF_ARG_1 == am_time_offset) {
+ switch (erts_time_offset_state()) {
+ case ERTS_TIME_OFFSET_PRELIMINARY: {
+ ERTS_DECL_AM(preliminary);
+ BIF_RET(AM_preliminary);
+ }
+ case ERTS_TIME_OFFSET_FINAL: {
+ ERTS_DECL_AM(final);
+ BIF_RET(AM_final);
+ }
+ case ERTS_TIME_OFFSET_VOLATILE: {
+ ERTS_DECL_AM(volatile);
+ BIF_RET(AM_volatile);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time offset state");
+ }
+ } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
+ BIF_RET(erts_monotonic_time_source(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
+ BIF_RET(erts_has_time_correction() ? am_true : am_false);
+ } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
+ BIF_RET(erts_get_monotonic_start_time(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
+ switch (erts_time_warp_mode()) {
+ case ERTS_NO_TIME_WARP_MODE: {
+ ERTS_DECL_AM(no_time_warp);
+ BIF_RET(AM_no_time_warp);
+ }
+ case ERTS_SINGLE_TIME_WARP_MODE: {
+ ERTS_DECL_AM(single_time_warp);
+ BIF_RET(AM_single_time_warp);
+ }
+ case ERTS_MULTI_TIME_WARP_MODE: {
+ ERTS_DECL_AM(multi_time_warp);
+ BIF_RET(AM_multi_time_warp);
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ }
} else if (BIF_ARG_1 == am_allocated_areas) {
res = erts_allocated_areas(NULL, NULL, BIF_P);
BIF_RET(res);
@@ -2301,7 +2346,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
for (i = num_instructions-1; i >= 0; i--) {
res = erts_bld_cons(hpp, hszp,
erts_bld_tuple(hpp, hszp, 2,
- erts_atom_put(opc[i].name,
+ erts_atom_put((byte *)opc[i].name,
strlen(opc[i].name),
ERTS_ATOM_ENC_LATIN1,
1),
@@ -2700,9 +2745,11 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(erts_db_get_max_tabs()));
}
else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
- BIF_RET(erts_disable_tolerant_timeofday
- ? am_disabled
- : am_enabled);
+ if (erts_has_time_correction()
+ && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
+ BIF_RET(am_enabled);
+ }
+ BIF_RET(am_disabled);
}
else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
BIF_RET(erts_eager_check_io ? am_true : am_false);
@@ -3400,6 +3447,29 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
BIF_RET(erts_mmap_debug_info(BIF_P));
}
+ else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
+ BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
+ Sint64 value = erts_get_min_unique_monotonic_integer();
+ if (IS_SSMALL(value))
+ BIF_RET(make_small(value));
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_sint64_to_big(value, &hp));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
+ Sint64 value = erts_get_min_unique_integer();
+ if (IS_SSMALL(value))
+ BIF_RET(make_small(value));
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_sint64_to_big(value, &hp));
+ }
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3594,6 +3664,58 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
}
+ else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
+ Uint hash = (Uint) make_internal_hash(tp[2]);
+ Uint hsz = 0;
+ Eterm* hp;
+ erts_bld_uint(NULL, &hsz, hash);
+ hp = HAlloc(BIF_P,hsz);
+ return erts_bld_uint(&hp, NULL, hash);
+ }
+ else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
+ Uint ix;
+ if (!term_to_Uint(tp[2], &ix))
+ BIF_ERROR(BIF_P, BADARG);
+ while (ix >= atom_table_size()) {
+ char tmp[20];
+ erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
+ erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ return make_atom(ix);
+ }
+
+ break;
+ }
+ case 3: {
+ if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
+ int res, time_correction;
+ ErtsTimeWarpMode time_warp_mode;
+ if (tp[2] == am_true)
+ time_correction = !0;
+ else if (tp[2] == am_false)
+ time_correction = 0;
+ else
+ break;
+ if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
+ time_warp_mode = ERTS_NO_TIME_WARP_MODE;
+ else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
+ time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
+ else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
+ time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
+ else
+ break;
+ res = erts_check_time_adj_support(time_correction,
+ time_warp_mode);
+ BIF_RET(res ? am_true : am_false);
+ }
+ else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
+ Eterm res = erts_debug_make_unique_integer(BIF_P,
+ tp[2],
+ tp[3]);
+ if (is_non_value(res))
+ break;
+ BIF_RET(res);
+ }
break;
}
default:
@@ -3603,6 +3725,24 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
+{
+ if (is_internal_pid(BIF_ARG_1)) {
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
+ BIF_RET(am_true);
+ BIF_RET(am_false);
+ }
+
+ if (is_external_pid(BIF_ARG_1)
+ && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
+ BIF_RET(am_false);
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
static erts_smp_atomic_t hipe_test_reschedule_flag;
@@ -3897,6 +4037,17 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
}
+ else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
+ /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
+#if defined(ERTS_HAVE_TRY_CATCH)
+ erts_get_scheduler_data()->run_queue = NULL;
+#endif
+ erl_exit(ERTS_DUMP_EXIT, "%T", BIF_ARG_2);
+ }
+ else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
+ int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
+ BIF_RET(res ? am_true : am_false);
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index 03ac97283c..0bd8d20c34 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -27,6 +27,7 @@
#include "error.h"
#include "big.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
/****************************************************************************
** BIF Timer support
@@ -480,7 +481,7 @@ setup_bif_timer(Uint32 xflags,
tab_insert(btm);
ASSERT(btm == tab_find(ref));
- btm->tm.active = 0; /* MUST be initalized */
+ erts_init_timer(&btm->tm);
erts_set_timer(&btm->tm,
(ErlTimeoutProc) bif_timer_timeout,
(ErlCancelProc) bif_timer_cleanup,
@@ -489,8 +490,9 @@ setup_bif_timer(Uint32 xflags,
return ref;
}
+BIF_RETTYPE old_send_after_3(BIF_ALIST_3);
/* send_after(Time, Pid, Message) -> Ref */
-BIF_RETTYPE send_after_3(BIF_ALIST_3)
+BIF_RETTYPE old_send_after_3(BIF_ALIST_3)
{
Eterm res;
@@ -510,8 +512,9 @@ BIF_RETTYPE send_after_3(BIF_ALIST_3)
}
}
+BIF_RETTYPE old_start_timer_3(BIF_ALIST_3);
/* start_timer(Time, Pid, Message) -> Ref */
-BIF_RETTYPE start_timer_3(BIF_ALIST_3)
+BIF_RETTYPE old_start_timer_3(BIF_ALIST_3)
{
Eterm res;
@@ -531,8 +534,9 @@ BIF_RETTYPE start_timer_3(BIF_ALIST_3)
}
}
+BIF_RETTYPE old_cancel_timer_1(BIF_ALIST_1);
/* cancel_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
+BIF_RETTYPE old_cancel_timer_1(BIF_ALIST_1)
{
Eterm res;
ErtsBifTimer *btm;
@@ -569,8 +573,9 @@ BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
BIF_RET(res);
}
+BIF_RETTYPE old_read_timer_1(BIF_ALIST_1);
/* read_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE read_timer_1(BIF_ALIST_1)
+BIF_RETTYPE old_read_timer_1(BIF_ALIST_1)
{
Eterm res;
ErtsBifTimer *btm;
@@ -652,7 +657,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_smp_btm_rwunlock();
}
-void erts_bif_timer_init(void)
+static void erts_old_bif_timer_init(void)
{
int i;
no_bif_timers = 0;
@@ -703,3 +708,146 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
}
}
}
+
+typedef struct {
+ Uint ref_heap[REF_THING_SIZE];
+ Eterm pid[1];
+} ErtsBifTimerServers;
+
+static ErtsBifTimerServers *bif_timer_servers;
+
+void erts_bif_timer_init(void)
+{
+ erts_old_bif_timer_init();
+}
+
+void
+erts_bif_timer_start_servers(Eterm parent)
+{
+ Process *parent_proc;
+ Eterm *hp, btr_ref, arg_list_end;
+ ErlSpawnOpts so;
+ int i;
+
+ bif_timer_servers = erts_alloc(ERTS_ALC_T_BIF_TIMER_DATA,
+ (sizeof(ErtsBifTimerServers)
+ + (sizeof(Eterm)*(erts_no_schedulers-1))));
+
+ so.flags = SPO_USE_ARGS|SPO_SYSTEM_PROC|SPO_PREFER_SCHED|SPO_OFF_HEAP_MSGS;
+ so.min_heap_size = H_MIN_SIZE;
+ so.min_vheap_size = BIN_VH_MIN_SIZE;
+ so.priority = PRIORITY_MAX;
+ so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+
+ /*
+ * Parent is "init" and schedulers have not yet been started, so it
+ * *should* be alive and well...
+ */
+ ASSERT(is_internal_pid(parent));
+ parent_proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(parent));
+ ASSERT(parent_proc);
+ ASSERT(parent_proc->common.id == parent);
+ ASSERT(!ERTS_PROC_IS_EXITING(parent_proc));
+
+ erts_smp_proc_lock(parent_proc, ERTS_PROC_LOCK_MAIN);
+
+ hp = HAlloc(parent_proc, 2*erts_no_schedulers + 2 + REF_THING_SIZE);
+
+ btr_ref = erts_make_ref_in_buffer(hp);
+ hp += REF_THING_SIZE;
+
+ arg_list_end = CONS(hp, btr_ref, NIL);
+ hp += 2;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ int sched = i+1;
+ Eterm arg_list = CONS(hp, make_small(i+1), arg_list_end);
+ hp += 2;
+
+ so.scheduler = sched; /* Preferred scheduler */
+
+ bif_timer_servers->pid[i] = erl_create_process(parent_proc,
+ am_erts_internal,
+ am_bif_timer_server,
+ arg_list,
+ &so);
+ }
+
+ erts_smp_proc_unlock(parent_proc, ERTS_PROC_LOCK_MAIN);
+
+ hp = internal_ref_val(btr_ref);
+ for (i = 0; i < REF_THING_SIZE; i++)
+ bif_timer_servers->ref_heap[i] = hp[i];
+}
+
+BIF_RETTYPE
+erts_internal_get_bif_timer_servers_0(BIF_ALIST_0)
+{
+ int i;
+ Eterm *hp, res = NIL;
+
+ hp = HAlloc(BIF_P, erts_no_schedulers*2);
+ for (i = erts_no_schedulers-1; i >= 0; i--) {
+ res = CONS(hp, bif_timer_servers->pid[i], res);
+ hp += 2;
+ }
+ BIF_RET(res);
+}
+
+BIF_RETTYPE
+erts_internal_access_bif_timer_1(BIF_ALIST_1)
+{
+ int ix;
+ Uint32 *rdp;
+ Eterm ref, pid, *hp, res;
+
+ if (is_not_internal_ref(BIF_ARG_1)) {
+ if (is_not_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ BIF_RET(am_undefined);
+ }
+
+ rdp = internal_ref_numbers(BIF_ARG_1);
+ ix = (int) erts_get_ref_numbers_thr_id(rdp);
+ if (ix < 1 || erts_no_schedulers < ix)
+ BIF_RET(am_undefined);
+
+ pid = bif_timer_servers->pid[ix-1];
+ ASSERT(is_internal_pid(pid));
+
+ hp = HAlloc(BIF_P, 3 /* 2-tuple */ + REF_THING_SIZE);
+ for (ix = 0; ix < REF_THING_SIZE; ix++)
+ hp[ix] = bif_timer_servers->ref_heap[ix];
+ ref = make_internal_ref(&hp[0]);
+ hp += REF_THING_SIZE;
+
+ res = TUPLE2(hp, ref, pid);
+ BIF_RET(res);
+}
+
+BIF_RETTYPE
+erts_internal_create_bif_timer_0(BIF_ALIST_0)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(BIF_P);
+ Eterm *hp, btr_ref, t_ref, pid, res;
+ int ix;
+
+ hp = HAlloc(BIF_P, 4 /* 3-tuple */ + 2*REF_THING_SIZE);
+ for (ix = 0; ix < REF_THING_SIZE; ix++)
+ hp[ix] = bif_timer_servers->ref_heap[ix];
+ btr_ref = make_internal_ref(&hp[0]);
+ hp += REF_THING_SIZE;
+
+ t_ref = erts_sched_make_ref_in_buffer(esdp, hp);
+ hp += REF_THING_SIZE;
+
+ ASSERT(erts_get_ref_numbers_thr_id(internal_ref_numbers(t_ref))
+ == (Uint32) esdp->no);
+
+ pid = bif_timer_servers->pid[((int) esdp->no) - 1];
+
+ res = TUPLE3(hp, btr_ref, pid, t_ref);
+
+ BIF_RET(res);
+}
diff --git a/erts/emulator/beam/erl_bif_timer.h b/erts/emulator/beam/erl_bif_timer.h
index 1197c176f5..c2f5dfd3c3 100644
--- a/erts/emulator/beam/erl_bif_timer.h
+++ b/erts/emulator/beam/erl_bif_timer.h
@@ -33,4 +33,5 @@ void erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks);
void erts_bif_timer_init(void);
void erts_bif_timer_foreach(void (*func)(Eterm,Eterm,ErlHeapFragment *,void *),
void *arg);
+void erts_bif_timer_start_servers(Eterm);
#endif
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 06fbbea123..ac57205c47 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -38,6 +38,7 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -651,7 +652,7 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == am_all) {
if (on) {
if (!erts_cpu_timestamp) {
-#ifdef HAVE_CLOCK_GETTIME
+#ifdef HAVE_CLOCK_GETTIME_CPU_TIME
/*
Perhaps clock_gettime was found during config
on a different machine than this. We check
@@ -678,7 +679,7 @@ Eterm trace_3(BIF_ALIST_3)
if (erts_start_now_cpu() < 0) {
goto error;
}
-#endif /* HAVE_CLOCK_GETTIME */
+#endif /* HAVE_CLOCK_GETTIME_CPU_TIME */
erts_cpu_timestamp = !0;
}
}
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
new file mode 100644
index 0000000000..57b0bab72f
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -0,0 +1,556 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "erl_alloc.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Reference *
+\* */
+
+static union {
+ erts_atomic64_t count;
+ char align__[ERTS_CACHE_LINE_SIZE];
+} global_reference erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+
+/*
+ * ref[0] indicates which thread created the reference, as follows:
+ *
+ * - ref[0] == 0 => Non-scheduler thread;
+ * - else; ref[0] <= erts_no_schedulers =>
+ * ordinary scheduler with id == ref[0];
+ * - else; ref[0] <= erts_no_schedulers
+ * + erts_no_dirty_cpu_schedulers =>
+ * dirty cpu scheduler with id == 'ref[0] - erts_no_schedulers';
+ * - else =>
+ * dirty io scheduler with id == 'ref[0]
+ * - erts_no_schedulers
+ * - erts_no_dirty_cpu_schedulers'
+ */
+
+#ifdef DEBUG
+static Uint32 max_thr_id;
+#endif
+
+static void
+init_reference(void)
+{
+#ifdef DEBUG
+ max_thr_id = (Uint32) erts_no_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers;
+ max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
+#endif
+#endif
+ erts_atomic64_init_nob(&global_reference.count, 0);
+}
+
+static ERTS_INLINE void
+global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Uint64 value;
+
+ value = (Uint64) erts_atomic64_inc_read_mb(&global_reference.count);
+
+ erts_set_ref_numbers(ref, thr_id, value);
+}
+
+static ERTS_INLINE void
+make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ erts_sched_make_ref_in_array(esdp, ref);
+ else
+ global_make_ref_in_array(0, ref);
+}
+
+void
+erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+}
+
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+
+ make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+Eterm erts_make_ref(Process *c_p)
+{
+ Eterm* hp;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ hp = HAlloc(c_p, REF_THING_SIZE);
+
+ make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+
+ return make_internal_ref(hp);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Unique Integer *
+\* */
+
+static struct {
+ union {
+ struct {
+ int left_shift;
+ int right_shift;
+ Uint64 mask;
+ Uint64 val0_max;
+ } o;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } r;
+ union {
+ erts_atomic64_t val1;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } w;
+} unique_data erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static void
+init_unique_integer(void)
+{
+ int bits;
+ unique_data.r.o.val0_max = (Uint64) erts_no_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers;
+ unique_data.r.o.val0_max += (Uint64) erts_no_dirty_io_schedulers;
+#endif
+ bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max);
+ unique_data.r.o.left_shift = bits;
+ unique_data.r.o.right_shift = 64 - bits;
+ unique_data.r.o.mask = (((Uint64) 1) << bits) - 1;
+ erts_atomic64_init_nob(&unique_data.w.val1, -1);
+}
+
+#define ERTS_MAX_UNIQUE_INT_HEAP_SIZE ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(2)
+
+static ERTS_INLINE Eterm
+bld_unique_integer_term(Eterm **hpp, Uint *szp,
+ Uint64 val0, Uint64 val1,
+ int positive)
+{
+ Uint hsz;
+ Uint64 unique_val[2];
+
+ unique_val[0] = ((Uint64) val0);
+ unique_val[0] |= ((Uint64) val1) << unique_data.r.o.left_shift;
+ unique_val[1] = ((Uint64) val1) >> unique_data.r.o.right_shift;
+ unique_val[1] &= unique_data.r.o.mask;
+
+ if (positive) {
+ unique_val[0]++;
+ if (unique_val[0] == 0)
+ unique_val[1]++;
+ }
+ else {
+ ASSERT(MIN_SMALL < 0);
+ if (unique_val[1] == 0
+ && unique_val[0] < ((Uint64) -1*((Sint64) MIN_SMALL))) {
+ Sint64 s_unique_val = (Sint64) unique_val[0];
+ s_unique_val += MIN_SMALL;
+ ASSERT(MIN_SMALL <= s_unique_val && s_unique_val < 0);
+ if (szp)
+ *szp = 0;
+ if (!hpp)
+ return THE_NON_VALUE;
+ return make_small((Sint) s_unique_val);
+ }
+ if (unique_val[0] < ((Uint64) -1*((Sint64) MIN_SMALL))) {
+ ASSERT(unique_val[1] != 0);
+ unique_val[1] -= 1;
+ }
+ unique_val[0] += MIN_SMALL;
+ }
+
+ if (!unique_val[1]) {
+ if (unique_val[0] <= MAX_SMALL) {
+ if (szp)
+ *szp = 0;
+ if (!hpp)
+ return THE_NON_VALUE;
+ return make_small((Uint) unique_val[0]);
+ }
+
+ if (szp)
+ *szp = ERTS_UINT64_HEAP_SIZE(unique_val[0]);
+ if (!hpp)
+ return THE_NON_VALUE;
+ return erts_uint64_to_big(unique_val[0], hpp);
+ }
+ else {
+ Eterm tmp, *tmp_hp, res;
+ DeclareTmpHeapNoproc(local_heap, 2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ UseTmpHeapNoproc(2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ tmp_hp = local_heap;
+
+ tmp = erts_uint64_array_to_big(&tmp_hp, 0, 2, unique_val);
+ ASSERT(is_big(tmp));
+
+ hsz = big_arity(tmp) + 1;
+
+ ASSERT(hsz <= ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ if (szp)
+ *szp = hsz;
+
+ if (!hpp)
+ res = THE_NON_VALUE;
+ else {
+ int hix;
+ Eterm *hp = *hpp;
+ tmp_hp = big_val(tmp);
+ for (hix = 0; hix < hsz; hix++)
+ hp[hix] = tmp_hp[hix];
+
+ *hpp = hp + hsz;
+ res = make_big(hp);
+ }
+
+ UnUseTmpHeapNoproc(2*ERTS_MAX_UNIQUE_INT_HEAP_SIZE);
+
+ return res;
+ }
+}
+
+static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive)
+{
+ ErtsSchedulerData *esdp;
+ Uint64 thr_id, unique;
+ Uint hsz;
+ Eterm *hp;
+
+ esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ thr_id = (Uint64) esdp->thr_id;
+ unique = esdp->unique++;
+ bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive);
+ hp = hsz ? HAlloc(c_p, hsz) : NULL;
+ return bld_unique_integer_term(&hp, NULL, thr_id, unique, positive);
+}
+
+Uint
+erts_raw_unique_integer_heap_size(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES])
+{
+ Uint sz;
+ bld_unique_integer_term(NULL, &sz, val[0], val[1], 0);
+ return sz;
+}
+
+Eterm
+erts_raw_make_unique_integer(Eterm **hpp, Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES])
+{
+ return bld_unique_integer_term(hpp, NULL, val[0], val[1], 0);
+}
+
+void
+erts_raw_get_unique_integer(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES])
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp) {
+ val[0] = (Uint64) esdp->thr_id;
+ val[1] = esdp->unique++;
+ }
+ else {
+ val[0] = (Uint64) 0;
+ val[1] = (Uint64) erts_atomic64_inc_read_nob(&unique_data.w.val1);
+ }
+}
+
+
+Sint64
+erts_get_min_unique_integer(void)
+{
+ return (Sint64) MIN_SMALL;
+}
+
+/* --- Debug --- */
+
+Eterm
+erts_debug_make_unique_integer(Process *c_p, Eterm etval0, Eterm etval1)
+{
+ Uint64 val0, val1;
+ Uint hsz;
+ Eterm res, *hp, *end_hp;
+
+ if (!term_to_Uint64(etval0, &val0))
+ return THE_NON_VALUE;
+
+ if (!term_to_Uint64(etval1, &val1))
+ return THE_NON_VALUE;
+
+ bld_unique_integer_term(NULL, &hsz, val0, val1, 0);
+
+ hp = HAlloc(c_p, hsz);
+ end_hp = hp + hsz;
+
+ res = bld_unique_integer_term(&hp, NULL, val0, val1, 0);
+ if (hp != end_hp)
+ ERTS_INTERNAL_ERROR("Heap allocation error");
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Strict Monotonic Counter *
+\* */
+
+static struct {
+ union {
+ erts_atomic64_t value;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } w;
+} raw_unique_monotonic_integer erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+#if defined(ARCH_32) || HALFWORD_HEAP
+# define ERTS_UNIQUE_MONOTONIC_OFFSET ERTS_SINT64_MIN
+#else
+# define ERTS_UNIQUE_MONOTONIC_OFFSET MIN_SMALL
+#endif
+
+static void
+init_unique_monotonic_integer(void)
+{
+ erts_atomic64_init_nob(&raw_unique_monotonic_integer.w.value,
+ (erts_aint64_t) -1);
+}
+
+static ERTS_INLINE Uint64
+get_raw_unique_monotonic_integer(void)
+{
+ return (Uint64) erts_atomic64_inc_read_mb(&raw_unique_monotonic_integer.w.value);
+}
+
+static ERTS_INLINE Uint
+get_unique_monotonic_integer_heap_size(Uint64 raw, int positive)
+{
+ if (positive) {
+ Uint64 value = raw+1;
+ return ERTS_UINT64_HEAP_SIZE(value);
+ }
+ else {
+ Sint64 value = ((Sint64) raw) + ERTS_UNIQUE_MONOTONIC_OFFSET;
+ if (IS_SSMALL(value))
+ return 0;
+#if defined(ARCH_32) || HALFWORD_HEAP
+ return ERTS_SINT64_HEAP_SIZE(value);
+#else
+ return ERTS_UINT64_HEAP_SIZE((Uint64) value);
+#endif
+ }
+}
+
+static ERTS_INLINE Eterm
+make_unique_monotonic_integer_value(Eterm *hp, Uint hsz, Uint64 raw, int positive)
+{
+ Eterm res;
+#ifdef DEBUG
+ Eterm *end_hp = hp + hsz;
+#endif
+
+ if (positive) {
+ Uint64 value = raw+1;
+ res = hsz ? erts_uint64_to_big(value, &hp) : make_small(value);
+ }
+ else {
+ Sint64 value = ((Sint64) raw) + ERTS_UNIQUE_MONOTONIC_OFFSET;
+ if (hsz == 0)
+ res = make_small(value);
+ else {
+#if defined(ARCH_32) || HALFWORD_HEAP
+ res = erts_sint64_to_big(value, &hp);
+#else
+ res = erts_uint64_to_big((Uint64) value, &hp);
+#endif
+ }
+ }
+
+ ASSERT(end_hp == hp);
+
+ return res;
+}
+
+static ERTS_INLINE Eterm
+unique_monotonic_integer_bif(Process *c_p, int positive)
+{
+ Uint64 raw;
+ Uint hsz;
+ Eterm *hp;
+
+ raw = get_raw_unique_monotonic_integer();
+ hsz = get_unique_monotonic_integer_heap_size(raw, positive);
+ hp = hsz ? HAlloc(c_p, hsz) : NULL;
+ return make_unique_monotonic_integer_value(hp, hsz, raw, positive);
+}
+
+Sint64
+erts_raw_get_unique_monotonic_integer(void)
+{
+ return get_raw_unique_monotonic_integer();
+}
+
+Uint
+erts_raw_unique_monotonic_integer_heap_size(Sint64 raw)
+{
+ return get_unique_monotonic_integer_heap_size(raw, 0);
+}
+
+Eterm
+erts_raw_make_unique_monotonic_integer_value(Eterm **hpp, Sint64 raw)
+{
+ Uint hsz = get_unique_monotonic_integer_heap_size(raw, 0);
+ Eterm res = make_unique_monotonic_integer_value(*hpp, hsz, raw, 0);
+ *hpp += hsz;
+ return res;
+}
+
+Sint64
+erts_get_min_unique_monotonic_integer(void)
+{
+ return ERTS_UNIQUE_MONOTONIC_OFFSET;
+}
+
+/* --- Debug --- */
+
+int
+erts_debug_set_unique_monotonic_integer_state(Eterm et_value)
+{
+ Sint64 value;
+
+ if (!term_to_Sint64(et_value, &value)) {
+ Uint64 uvalue;
+ if (!term_to_Uint64(et_value, &uvalue))
+ return 0;
+ value = (Sint64) uvalue;
+ }
+
+ erts_atomic64_set_mb(&raw_unique_monotonic_integer.w.value,
+ (erts_aint64_t) value);
+ return 1;
+}
+
+Eterm
+erts_debug_get_unique_monotonic_integer_state(Process *c_p)
+{
+ Uint64 value;
+ Eterm hsz, *hp;
+
+ value = (Uint64) erts_atomic64_read_mb(&raw_unique_monotonic_integer.w.value);
+
+ if (IS_USMALL(0, value))
+ return make_small(value);
+ hsz = ERTS_UINT64_HEAP_SIZE(value);
+ hp = HAlloc(c_p, hsz);
+ return erts_uint64_to_big(value, &hp);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Initialization *
+\* */
+
+void
+erts_bif_unique_init(void)
+{
+ init_reference();
+ init_unique_monotonic_integer();
+ init_unique_integer();
+}
+
+void
+erts_sched_bif_unique_init(ErtsSchedulerData *esdp)
+{
+ esdp->unique = (Uint64) 0;
+ esdp->ref = (Uint64) 0;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The BIFs *
+\* */
+
+
+BIF_RETTYPE make_ref_0(BIF_ALIST_0)
+{
+ BIF_RETTYPE res;
+ Eterm* hp;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+
+ hp = HAlloc(BIF_P, REF_THING_SIZE);
+
+ res = erts_sched_make_ref_in_buffer(ERTS_PROC_GET_SCHDATA(BIF_P), hp);
+
+ BIF_RET(res);
+}
+
+BIF_RETTYPE unique_integer_0(BIF_ALIST_0)
+{
+ BIF_RET(unique_integer_bif(BIF_P, 0));
+}
+
+BIF_RETTYPE unique_integer_1(BIF_ALIST_1)
+{
+ Eterm modlist = BIF_ARG_1;
+ int monotonic = 0;
+ int positive = 0;
+ BIF_RETTYPE res;
+
+ while (is_list(modlist)) {
+ Eterm *consp = list_val(modlist);
+ switch (CAR(consp)) {
+ case am_monotonic:
+ monotonic = 1;
+ break;
+ case am_positive:
+ positive = 1;
+ break;
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ modlist = CDR(consp);
+ }
+
+ if (is_not_nil(modlist))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (monotonic)
+ res = unique_monotonic_integer_bif(BIF_P, positive);
+ else
+ res = unique_integer_bif(BIF_P, positive);
+
+ BIF_RET(res);
+}
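The left/right shift widths set up in init_unique_integer() above determine how unique_integer/0,1 spreads a (thread id, per-scheduler counter) pair over two 64-bit words. Below is a minimal standalone sketch of that packing, using <stdint.h> types and made-up inputs (8 schedulers, an arbitrary counter) instead of the emulator's Uint64 and live scheduler data; fit_in_bits64() stands in for erts_fit_in_bits_int64().

    #include <stdint.h>
    #include <stdio.h>

    static int fit_in_bits64(uint64_t v)   /* analogue of erts_fit_in_bits_int64() */
    {
        int bits = 0;
        do { bits++; v >>= 1; } while (v);
        return bits;
    }

    int main(void)
    {
        uint64_t val0_max = 8;                 /* assumed: 8 schedulers, no dirty schedulers */
        int bits = fit_in_bits64(val0_max);    /* "left shift" in init_unique_integer() */
        uint64_t mask = (((uint64_t) 1) << bits) - 1;

        uint64_t val0 = 3;                     /* thread id of the creating scheduler */
        uint64_t val1 = 0x123456789abcdef0;    /* that scheduler's monotonic counter */

        /* val0 <= val0_max, so it fits below the shifted counter bits */
        uint64_t lo = val0 | (val1 << bits);          /* low 64 bits of the integer  */
        uint64_t hi = (val1 >> (64 - bits)) & mask;   /* remaining high 'bits' bits  */

        printf("unique integer words: hi=%llx lo=%llx\n",
               (unsigned long long) hi, (unsigned long long) lo);
        return 0;
    }

Distinct (thread id, counter) pairs map to distinct packed values, which is what lets each scheduler hand out unique integers without any shared counter.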
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
new file mode 100644
index 0000000000..cd001172a1
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -0,0 +1,131 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_BIF_UNIQUE_H__
+#define ERTS_BIF_UNIQUE_H__
+
+#include "erl_process.h"
+#include "big.h"
+
+void erts_bif_unique_init(void);
+void erts_sched_bif_unique_init(ErtsSchedulerData *esdp);
+
+/* reference */
+Eterm erts_make_ref(Process *);
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+/* strict monotonic counter */
+
+#define ERTS_MAX_UNIQUE_MONOTONIC_INTEGER_HEAP_SIZE ERTS_MAX_UINT64_HEAP_SIZE
+
+/*
+ * Note that a raw value is an intermediate value that does
+ * not necessarily correspond to the end result.
+ */
+Sint64 erts_raw_get_unique_monotonic_integer(void);
+Uint erts_raw_unique_monotonic_integer_heap_size(Sint64 raw);
+Eterm erts_raw_make_unique_monotonic_integer_value(Eterm **hpp, Sint64 raw);
+
+Sint64 erts_get_min_unique_monotonic_integer(void);
+
+int erts_debug_set_unique_monotonic_integer_state(Eterm et_value);
+Eterm erts_debug_get_unique_monotonic_integer_state(Process *c_p);
+
+/* unique integer */
+#define ERTS_UNIQUE_INT_RAW_VALUES 2
+#define ERTS_MAX_UNIQUE_INT_HEAP_SIZE ERTS_UINT64_ARRAY_TO_BIG_MAX_HEAP_SZ(2)
+
+Uint erts_raw_unique_integer_heap_size(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES]);
+Eterm erts_raw_make_unique_integer(Eterm **hpp, Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES]);
+void erts_raw_get_unique_integer(Uint64 val[ERTS_UNIQUE_INT_RAW_VALUES]);
+Sint64 erts_get_min_unique_integer(void);
+
+Eterm erts_debug_make_unique_integer(Process *c_p,
+ Eterm etval0,
+ Eterm etval1);
+
+
+ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS],
+ Uint32 thr_id, Uint64 value);
+ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE Eterm erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
+ Eterm buffer[REF_THING_SIZE]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 value)
+{
+ /*
+ * We cannot use thread id in the first 18-bit word since
+ * the hash/phash/phash2 BIFs only hash on this word. If
+ * we did, we would get really poor hash values. Instead
+ * we have to shuffle the bits a bit.
+ */
+ ASSERT(thr_id == (thr_id & ((Uint32) 0x3ffff)));
+ ref[0] = (Uint32) (value & ((Uint64) 0x3ffff));
+ ref[1] = (((Uint32) (value & ((Uint64) 0xfffc0000)))
+ | (thr_id & ((Uint32) 0x3ffff)));
+ ref[2] = (Uint32) ((value >> 32) & ((Uint64) 0xffffffff));
+}
+
+ERTS_GLB_INLINE Uint32
+erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ return ref[1] & ((Uint32) 0x3ffff);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ return (((((Uint64) ref[2]) & ((Uint64) 0xffffffff)) << 32)
+ | (((Uint64) ref[1]) & ((Uint64) 0xfffc0000))
+ | (((Uint64) ref[0]) & ((Uint64) 0x3ffff)));
+}
+
+ERTS_GLB_INLINE void
+erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Uint64 value;
+
+ ASSERT(esdp);
+ value = esdp->ref++;
+ erts_set_ref_numbers(ref, (Uint32) esdp->thr_id, value);
+}
+
+ERTS_GLB_INLINE Eterm
+erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
+ Eterm buffer[REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+
+ erts_sched_make_ref_in_array(esdp, ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_BIF_UNIQUE_H__ */
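The inline functions above shuffle the 64-bit per-scheduler counter and the (at most 18-bit) thread id across the three 32-bit ref words so that the first word still carries counter bits for the hash BIFs. A standalone round-trip sketch of that shuffling, using <stdint.h> types and made-up thr_id/counter inputs rather than the emulator's Uint32/Uint64:

    #include <assert.h>
    #include <stdint.h>

    static void set_ref_numbers(uint32_t ref[3], uint32_t thr_id, uint64_t value)
    {
        /* counter bits 0..17 -> ref[0]; thread id plus counter bits 18..31
         * share ref[1]; counter bits 32..63 -> ref[2] */
        ref[0] = (uint32_t) (value & UINT64_C(0x3ffff));
        ref[1] = ((uint32_t) (value & UINT64_C(0xfffc0000)))
                 | (thr_id & UINT32_C(0x3ffff));
        ref[2] = (uint32_t) ((value >> 32) & UINT64_C(0xffffffff));
    }

    static uint64_t get_ref_numbers_value(const uint32_t ref[3])
    {
        return (((uint64_t) ref[2]) << 32)
               | (((uint64_t) ref[1]) & UINT64_C(0xfffc0000))
               | (((uint64_t) ref[0]) & UINT64_C(0x3ffff));
    }

    int main(void)
    {
        uint32_t ref[3];
        uint64_t counter = UINT64_C(0x0123456789abcdef);  /* made-up counter value */
        uint32_t thr_id  = 5;                             /* made-up scheduler id  */

        set_ref_numbers(ref, thr_id, counter);
        assert((ref[1] & UINT32_C(0x3ffff)) == thr_id);   /* thread id recoverable */
        assert(get_ref_numbers_value(ref) == counter);    /* counter recoverable   */
        return 0;
    }

The asserts mirror the two invariants the new BIF timer code relies on: the creating scheduler can be read back from ref[1], and the full 64-bit counter can be reassembled from all three words.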
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 06dfeb1260..8d264d166e 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -231,41 +231,58 @@ erts_free_aligned_binary_bytes(byte* buf)
# define CHICKEN_PAD (sizeof(void*) - 1)
#endif
+/* Caller must initialize 'refc'
+*/
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc_fnf(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
if (bsize < size) /* overflow */
return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ if (res) {
+ res->orig_size = size;
+ res->flags = BIN_FLAG_DRV;
+ }
+ return res;
}
+/* Caller must initialize 'refc'
+*/
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
if (bsize < size) /* overflow */
erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ res->orig_size = size;
+ res->flags = BIN_FLAG_DRV;
+ return res;
}
+/* Caller must initialize 'refc'
+*/
ERTS_GLB_INLINE Binary *
erts_bin_nrml_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- void *res;
+ Binary *res;
+
if (bsize < size) /* overflow */
erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
- return (Binary *) res;
+ res->orig_size = size;
+ res->flags = 0;
+ return res;
}
ERTS_GLB_INLINE Binary *
@@ -280,6 +297,8 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
return NULL;
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
+ if (nbp)
+ nbp->orig_size = size;
return nbp;
}
@@ -297,6 +316,7 @@ erts_bin_realloc(Binary *bp, Uint size)
if (!nbp)
erts_realloc_enomem(type, bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
+ nbp->orig_size = size;
return nbp;
}
@@ -329,4 +349,4 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif
+#endif /* !__ERL_BINARY_H */
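With orig_size and flags now initialized inside the erts_bin_*_alloc() helpers, a call site only has to set up the reference count, which is why the explicit field assignments are removed from erl_bif_binary.c and erl_bits.c elsewhere in this diff. A hedged sketch of the resulting calling pattern inside the emulator (alloc_heap_binary is a hypothetical helper name, not a function added by this patch):

    static Binary *alloc_heap_binary(Uint size)
    {
        Binary *bptr = erts_bin_nrml_alloc(size);  /* orig_size and flags set inside */
        erts_refc_init(&bptr->refc, 1);            /* refc is still the caller's job */
        return bptr;                               /* bptr->orig_bytes is ready for use */
    }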
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 53c21c40e1..5cc0a23dc9 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1302,7 +1302,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
if (binp->orig_size < pb->size) {
Uint new_size = 2*pb->size;
binp = erts_bin_realloc(binp, new_size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
}
@@ -1374,8 +1373,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- bptr->flags = 0;
- bptr->orig_size = bin_size;
erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
@@ -1478,7 +1475,6 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* is safe to reallocate it.
*/
binp = erts_bin_realloc(binp, new_size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
} else {
@@ -1491,8 +1487,6 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* binary and copy the contents of the old binary into it.
*/
Binary* bptr = erts_bin_nrml_alloc(new_size);
- bptr->flags = 0;
- bptr->orig_size = new_size;
erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
@@ -1540,8 +1534,6 @@ erts_bs_init_writable(Process* p, Eterm sz)
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- bptr->flags = 0;
- bptr->orig_size = bin_size;
erts_refc_init(&bptr->refc, 1);
/*
@@ -1588,9 +1580,7 @@ erts_emasculate_writable_binary(ProcBin* pb)
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
if (unused >= 8) {
- Uint new_size = pb->size;
binp = erts_bin_realloc(binp, pb->size);
- binp->orig_size = new_size;
pb->val = binp;
pb->bytes = (byte *) binp->orig_bytes;
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 8f246ffa07..fff892ae54 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -753,6 +753,31 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RET(ret);
}
+/*
+** take(Tab, Key)
+*/
+BIF_RETTYPE ets_take_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+#ifdef DEBUG
+ int cret;
+#endif
+ Eterm ret;
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+#ifdef DEBUG
+ cret =
+#endif
+ tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+ ASSERT(cret == DB_ERROR_NONE);
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_RET(ret);
+}
+
/*
** update_element(Tab, Key, {Pos, Value})
** update_element(Tab, Key, [{Pos, Value}])
@@ -780,7 +805,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
list = BIF_ARG_3;
}
- if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ if (!tb->common.meth->db_lookup_dbterm(BIF_P, tb, BIF_ARG_2, THE_NON_VALUE, &handle)) {
cret = DB_ERROR_BADKEY;
goto bail_out;
}
@@ -819,7 +844,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
}
finalize:
- tb->common.meth->db_finalize_dbterm(&handle);
+ tb->common.meth->db_finalize_dbterm(cret, &handle);
bail_out:
UnUseTmpHeap(2,BIF_P);
@@ -838,14 +863,8 @@ bail_out:
}
}
-/*
-** update_counter(Tab, Key, Incr)
-** update_counter(Tab, Key, {Upop})
-** update_counter(Tab, Key, [{Upop}])
-** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
-** Returns new value(s) (integer or [integer])
-*/
-BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
+static BIF_RETTYPE
+do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
{
DbTable* tb;
int cret = DB_ERROR_BADITEM;
@@ -855,7 +874,7 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
Eterm* ret_list_currp = NULL;
Eterm* ret_list_prevp = NULL;
Eterm iter;
- DeclareTmpHeap(cell,5,BIF_P);
+ DeclareTmpHeap(cell, 5, p);
Eterm *tuple = cell+2;
DbUpdateHandle handle;
Uint halloc_size = 0; /* overestimated heap usage */
@@ -863,28 +882,29 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
Eterm* hstart;
Eterm* hend;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- UseTmpHeap(5,BIF_P);
+ UseTmpHeap(5, p);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
}
- if (is_integer(BIF_ARG_3)) { /* Incr */
- upop_list = CONS(cell, TUPLE2(tuple, make_small(tb->common.keypos+1),
- BIF_ARG_3), NIL);
+ if (is_integer(arg3)) { /* Incr */
+ upop_list = CONS(cell,
+ TUPLE2(tuple, make_small(tb->common.keypos+1), arg3),
+ NIL);
}
- else if (is_tuple(BIF_ARG_3)) { /* {Upop} */
- upop_list = CONS(cell, BIF_ARG_3, NIL);
+ else if (is_tuple(arg3)) { /* {Upop} */
+ upop_list = CONS(cell, arg3, NIL);
}
else { /* [{Upop}] (probably) */
- upop_list = BIF_ARG_3;
+ upop_list = arg3;
ret_list_prevp = &ret;
}
- if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ if (!tb->common.meth->db_lookup_dbterm(p, tb, arg2, arg4, &handle)) {
goto bail_out; /* key not found */
}
@@ -957,13 +977,13 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
if (ret_list_prevp) { /* Prepare to return a list */
ret = NIL;
halloc_size += list_size;
- hstart = HAlloc(BIF_P, halloc_size);
+ hstart = HAlloc(p, halloc_size);
ret_list_currp = hstart;
htop = hstart + list_size;
hend = hstart + halloc_size;
}
else {
- hstart = htop = HAlloc(BIF_P, halloc_size);
+ hstart = htop = HAlloc(p, halloc_size);
}
hend = hstart + halloc_size;
@@ -1010,26 +1030,54 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
(is_list(ret) && (list_val(ret)+list_size)==ret_list_currp));
ASSERT(htop <= hend);
- HRelease(BIF_P,hend,htop);
+ HRelease(p, hend, htop);
finalize:
- tb->common.meth->db_finalize_dbterm(&handle);
+ tb->common.meth->db_finalize_dbterm(cret, &handle);
bail_out:
- UnUseTmpHeap(5,BIF_P);
+ UnUseTmpHeap(5, p);
db_unlock(tb, LCK_WRITE_REC);
switch (cret) {
case DB_ERROR_NONE:
BIF_RET(ret);
case DB_ERROR_SYSRES:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ BIF_ERROR(p, SYSTEM_LIMIT);
default:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
break;
}
}
+/*
+** update_counter(Tab, Key, Incr)
+** update_counter(Tab, Key, Upop)
+** update_counter(Tab, Key, [{Upop}])
+** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
+** Returns new value(s) (integer or [integer])
+*/
+BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
+{
+ return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
+}
+
+/*
+** update_counter(Tab, Key, Incr, Default)
+** update_counter(Tab, Key, Upop, Default)
+** update_counter(Tab, Key, [{Upop}], Default)
+** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
+** Returns new value(s) (integer or [integer])
+*/
+BIF_RETTYPE ets_update_counter_4(BIF_ALIST_4)
+{
+ if (is_not_tuple(BIF_ARG_4)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+}
+
+
/*
** The put BIF
*/
@@ -2643,7 +2691,9 @@ BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
BIF_RETTYPE ets_info_1(BIF_ALIST_1)
{
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
- am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed};
+ am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
+ am_write_concurrency,
+ am_read_concurrency};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
@@ -3670,6 +3720,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_protected;
else if (tb->common.status & DB_PUBLIC)
ret = am_public;
+ } else if (What == am_write_concurrency) {
+ ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
+ } else if (What == am_read_concurrency) {
+ ret = tb->common.status & DB_FREQ_READ ? am_true : am_false;
} else if (What == am_name) {
ret = tb->common.the_name;
} else if (What == am_keypos) {
@@ -3752,7 +3806,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
make_small(stats.max_chain_len),
- make_small(db_kept_items_hash(&tb->hash)));
+ make_small(stats.kept_items));
}
else {
ret = am_false;
@@ -3774,6 +3828,11 @@ static void print_table(int to, void *to_arg, int show, DbTable* tb)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
+ erts_print(to, to_arg, "Type: %T\n", table_info(NULL, tb, am_type));
+ erts_print(to, to_arg, "Protection: %T\n", table_info(NULL, tb, am_protection));
+ erts_print(to, to_arg, "Compressed: %T\n", table_info(NULL, tb, am_compressed));
+ erts_print(to, to_arg, "Write Concurrency: %T\n", table_info(NULL, tb, am_write_concurrency));
+ erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
void db_info(int to, void *to_arg, int show) /* Called by break handler */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 06dac8f161..045c8ae135 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -174,7 +174,7 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_hash2(term)) % MAX_HASH)
+ make_internal_hash(term)) % MAX_HASH)
#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
@@ -382,7 +382,7 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
static void shrink(DbTableHash* tb, int nactive);
static void grow(DbTableHash* tb, int nactive);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash*);
+ Uint sz, DbTableHash*);
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
struct mp_info *mpi);
@@ -426,6 +426,7 @@ static int db_select_count_continue_hash(Process *p, DbTable *tbl,
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
static void db_print_hash(int to,
void *to_arg,
int show,
@@ -443,8 +444,11 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
-static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle);
-static void db_finalize_dbterm_hash(DbUpdateHandle* handle);
+static int
+db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle);
+static void
+db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
@@ -536,6 +540,7 @@ DbTableMethod db_hash =
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
+ db_take_hash,
db_delete_all_objects_hash,
db_free_table_hash,
db_free_table_continue_hash,
@@ -646,25 +651,6 @@ restart:
/* ToDo: Maybe try grow/shrink the table as well */
}
-/* Only used by tests
-*/
-Uint db_kept_items_hash(DbTableHash *tb)
-{
- Uint kept_items = 0;
- Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
- HashDbTerm* b;
- do {
- for (b = BUCKET(tb, ix); b != NULL; b = b->next) {
- if (b->hvalue == INVALID_HASH) {
- ++kept_items;
- }
- }
- ix = next_slot(tb, ix, &lck);
- }while (ix);
- return kept_items;
-}
-
int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
@@ -879,34 +865,49 @@ Ldone:
return ret;
}
+static Eterm
+get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
+ HashDbTerm *b1, HashDbTerm **bend)
+{
+ HashDbTerm* b2 = b1->next;
+ Eterm copy;
+ Uint sz = b1->dbterm.size + 2;
+
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ while (b2 && has_key(tb, b2, key, hval)) {
+ if (b2->hvalue != INVALID_HASH)
+ sz += b2->dbterm.size + 2;
+
+ b2 = b2->next;
+ }
+ }
+ copy = build_term_list(p, b1, b2, sz, tb);
+ CHECK_TABLES();
+ if (bend) {
+ *bend = b2;
+ }
+ return copy;
+}
+
int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
int ix;
- HashDbTerm* b1;
+ HashDbTerm* b;
erts_smp_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- HashDbTerm* b2 = b1->next;
- Eterm copy;
+ b = BUCKET(tb, ix);
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- while(b2 != NULL && has_key(tb,b2,key,hval))
- b2 = b2->next;
- }
- copy = build_term_list(p, b1, b2, tb);
- CHECK_TABLES();
- *ret = copy;
+ while(b != 0) {
+ if (has_live_key(tb, b, key, hval)) {
+ *ret = get_term_list(p, tb, key, hval, b, NULL);
goto done;
}
- b1 = b1->next;
+ b = b->next;
}
*ret = NIL;
done:
@@ -1240,7 +1241,7 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
lck = RLOCK_HASH(tb, slot);
nactive = NACTIVE(tb);
if (slot < nactive) {
- *ret = build_term_list(p, BUCKET(tb, slot), 0, tb);
+ *ret = build_term_list(p, BUCKET(tb, slot), NULL, 0, tb);
retval = DB_ERROR_NONE;
}
else if (slot == nactive) {
@@ -2069,6 +2070,46 @@ trap:
}
+static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm **bp, *b;
+ HashValue hval = MAKE_HASH(key);
+ erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+ int ix = hash_to_ix(tb, hval);
+ int nitems_diff = 0;
+
+ *ret = NIL;
+ for (bp = &BUCKET(tb, ix), b = *bp; b; bp = &b->next, b = b->next) {
+ if (has_live_key(tb, b, key, hval)) {
+ HashDbTerm *bend;
+
+ *ret = get_term_list(p, tb, key, hval, b, &bend);
+ while (b != bend) {
+ --nitems_diff;
+ if (nitems_diff == -1 && IS_FIXED(tb)) {
+ /* Pseudo remove (no need to keep several of same key) */
+ add_fixed_deletion(tb, ix);
+ bp = &b->next;
+ b->hvalue = INVALID_HASH;
+ b = b->next;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ }
+ }
+ break;
+ }
+ }
+ WUNLOCK_HASH(lck);
+ if (nitems_diff) {
+ erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ try_shrink(tb);
+ }
+ return DB_ERROR_NONE;
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2104,10 +2145,38 @@ int db_mark_all_deleted_hash(DbTable *tbl)
static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
+ DbHashStats stats;
int i;
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-
+
+#ifdef ERTS_SMP
+ i = tbl->common.is_thread_safe;
+ /* If crash dumping we set table to thread safe in order to
+ avoid taking any locks */
+ if (ERTS_IS_CRASH_DUMPING)
+ tbl->common.is_thread_safe = 1;
+
+ db_calc_stats_hash(&tbl->hash, &stats);
+
+ tbl->common.is_thread_safe = i;
+#else
+ db_calc_stats_hash(&tbl->hash, &stats);
+#endif
+
+ erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
+ erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
+ erts_print(to, to_arg, "Chain Length Min: %d\n", stats.min_chain_len);
+ erts_print(to, to_arg, "Chain Length Std Dev: %f\n",
+ stats.std_dev_chain_len);
+ erts_print(to, to_arg, "Chain Length Expected Std Dev: %f\n",
+ stats.std_dev_expected);
+
+ if (IS_FIXED(tb))
+ erts_print(to, to_arg, "Fixed: %d\n", stats.kept_items);
+ else
+ erts_print(to, to_arg, "Fixed: false\n");
+
if (show) {
for (i = 0; i < NACTIVE(tb); i++) {
HashDbTerm* list = BUCKET(tb,i);
@@ -2483,23 +2552,23 @@ static int free_seg(DbTableHash *tb, int free_records)
** Copy terms from ptr1 until ptr2
** works for ptr1 == ptr2 == 0 => []
** or ptr2 == 0
+** sz is either precalculated heap size or 0 if not known
*/
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
- DbTableHash* tb)
+ Uint sz, DbTableHash* tb)
{
- int sz = 0;
HashDbTerm* ptr;
Eterm list = NIL;
Eterm copy;
Eterm *hp, *hend;
- ptr = ptr1;
- while(ptr != ptr2) {
-
- if (ptr->hvalue != INVALID_HASH)
- sz += ptr->dbterm.size + 2;
-
- ptr = ptr->next;
+ if (!sz) {
+ ptr = ptr1;
+ while(ptr != ptr2) {
+ if (ptr->hvalue != INVALID_HASH)
+ sz += ptr->dbterm.size + 2;
+ ptr = ptr->next;
+ }
}
hp = HAlloc(p, sz);
@@ -2730,59 +2799,129 @@ static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
return NULL;
}
-static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+static int
+db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
DbTableHash *tb = &tbl->hash;
- HashDbTerm* b;
- HashDbTerm** prevp;
- int ix;
HashValue hval;
+ HashDbTerm **bp, *b;
erts_smp_rwmtx_t* lck;
+ int flags = 0;
+
+ ASSERT(tb->common.status & DB_SET);
hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- prevp = &BUCKET(tb, ix);
- b = *prevp;
+ lck = WLOCK_HASH(tb, hval);
+ bp = &BUCKET(tb, hash_to_ix(tb, hval));
+ b = *bp;
- while (b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- handle->tb = tbl;
- handle->bp = (void**) prevp;
- handle->dbterm = &b->dbterm;
- handle->mustResize = 0;
- handle->new_size = b->dbterm.size;
- #if HALFWORD_HEAP
- handle->abs_vec = NULL;
- #endif
- handle->lck = lck;
- /* KEEP hval WLOCKED, db_finalize_dbterm_hash will WUNLOCK */
- return 1;
- }
- prevp = &b->next;
- b = *prevp;
+ for (;;) {
+ if (b == NULL) {
+ break;
+ }
+ if (has_key(tb, b, key, hval)) {
+ if (b->hvalue != INVALID_HASH) {
+ goto Ldone;
+ }
+ break;
+ }
+ bp = &b->next;
+ b = *bp;
}
- WUNLOCK_HASH(lck);
- return 0;
+
+ if (obj == THE_NON_VALUE) {
+ WUNLOCK_HASH(lck);
+ return 0;
+ }
+
+ {
+ Eterm *objp = tuple_val(obj);
+ int arity = arityval(*objp);
+ Eterm *htop, *hend;
+
+ ASSERT(arity >= tb->common.keypos);
+ htop = HAlloc(p, arity + 1);
+ hend = htop + arity + 1;
+ sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
+ htop[tb->common.keypos] = key;
+ obj = make_tuple(htop);
+
+ if (b == NULL) {
+ HashDbTerm *q = new_dbterm(tb, obj);
+
+ q->hvalue = hval;
+ q->next = NULL;
+ *bp = b = q;
+
+ {
+ int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ int nactive = NACTIVE(tb);
+
+ if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) {
+ grow(tb, nactive);
+ }
+ }
+ } else {
+ HashDbTerm *q, *next = b->next;
+
+ ASSERT(b->hvalue == INVALID_HASH);
+ q = replace_dbterm(tb, b, obj);
+ q->next = next;
+ q->hvalue = hval;
+ *bp = b = q;
+ erts_smp_atomic_inc_nob(&tb->common.nitems);
+ }
+
+ HRelease(p, hend, htop);
+ flags |= DB_NEW_OBJECT;
+ }
+
+Ldone:
+ handle->tb = tbl;
+ handle->bp = (void **)bp;
+ handle->dbterm = &b->dbterm;
+ handle->flags = flags;
+ handle->new_size = b->dbterm.size;
+#if HALFWORD_HEAP
+ handle->abs_vec = NULL;
+#endif
+ handle->lck = lck;
+ return 1;
}
/* Must be called after call to db_lookup_dbterm
*/
-static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
+static void
+db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
{
DbTable* tbl = handle->tb;
- HashDbTerm* oldp = (HashDbTerm*) *(handle->bp);
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm **bp = (HashDbTerm **) handle->bp;
+ HashDbTerm *b = *bp;
erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck;
- ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(&tbl->hash,lck)); /* locked by db_lookup_dbterm_hash */
+ ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
- ASSERT((&oldp->dbterm == handle->dbterm) == !(tbl->common.compress && handle->mustResize));
+ ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE));
+
+ if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
+ if (IS_FIXED(tb)) {
+ add_fixed_deletion(tb, hash_to_ix(tb, b->hvalue));
+ b->hvalue = INVALID_HASH;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ }
- if (handle->mustResize) {
+ WUNLOCK_HASH(lck);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
+ try_shrink(tb);
+ } else if (handle->flags & DB_MUST_RESIZE) {
db_finalize_resize(handle, offsetof(HashDbTerm,dbterm));
WUNLOCK_HASH(lck);
- free_term(&tbl->hash, oldp);
+ free_term(tb, b);
}
else {
WUNLOCK_HASH(lck);
@@ -2833,6 +2972,7 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
erts_smp_rwmtx_t* lck;
int sum = 0;
int sq_sum = 0;
+ int kept_items = 0;
int ix;
int len;
@@ -2844,6 +2984,8 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
+ if (b->hvalue == INVALID_HASH)
+ ++kept_items;
}
sum += len;
sq_sum += len*len;
@@ -2855,7 +2997,8 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_chain_len = sqrt((sq_sum - stats->avg_chain_len*sum) / NACTIVE(tb));
/* Expected standard deviation from a good uniform hash function,
i.e. binomial distribution (not taking the linear hashing into account) */
- stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+ stats->kept_items = kept_items;
}
#ifdef HARDDEBUG
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 908cec11d4..f12cd363b0 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,7 +42,7 @@ typedef struct hash_db_term {
typedef struct db_table_hash_fine_locks {
union {
erts_smp_rwmtx_t lck;
- byte _cache_line_alignment[64];
+ byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_rwmtx_t))];
}lck_vec[DB_HASH_LOCK_CNT];
} DbTableHashFineLocks;
@@ -104,6 +104,7 @@ typedef struct {
float std_dev_expected;
int max_chain_len;
int min_chain_len;
+ int kept_items;
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index a62a83a928..577da35b75 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -383,6 +383,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
static void db_print_tree(int to, void *to_arg,
int show, DbTable *tbl);
static int db_free_table_tree(DbTable *tbl);
@@ -398,8 +399,11 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl);
#ifdef HARDDEBUG
static void db_check_table_tree(DbTable *tbl);
#endif
-static int db_lookup_dbterm_tree(DbTable *, Eterm key, DbUpdateHandle*);
-static void db_finalize_dbterm_tree(DbUpdateHandle*);
+static int
+db_lookup_dbterm_tree(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle*);
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *);
/*
** Static variables
@@ -431,6 +435,7 @@ DbTableMethod db_tree =
db_select_delete_continue_tree,
db_select_count_tree,
db_select_count_continue_tree,
+ db_take_tree,
db_delete_all_objects_tree,
db_free_table_tree,
db_free_table_continue_tree,
@@ -1722,6 +1727,28 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
+static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *this;
+
+ *ret = NIL;
+ this = linkout_tree(tb, key, NULL);
+ if (this) {
+ Eterm copy, *hp, *hend;
+
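+ /* copy the stored object and hand it back to the caller wrapped in a
+ * one-element list */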
+ hp = HAlloc(p, this->dbterm.size + 2);
+ hend = hp + this->dbterm.size + 2;
+ copy = db_copy_object_from_ets(&tb->common,
+ &this->dbterm, &hp, &MSO(p));
+ *ret = CONS(hp, copy, NIL);
+ hp += 2;
+ HRelease(p, hend, hp);
+ free_term(tb, this);
+ }
+ return DB_ERROR_NONE;
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2522,16 +2549,43 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
return this;
}
-static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+static int
+db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
DbTableTree *tb = &tbl->tree;
TreeDbTerm **pp = find_node2(tb, key);
-
- if (pp == NULL) return 0;
+ int flags = 0;
+
+ if (pp == NULL) {
+ if (obj == THE_NON_VALUE) {
+ return 0;
+ } else {
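+ /* the key is missing: insert the supplied object (with the key
+ * written into keypos) and look the new node up again */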
+ Eterm *objp = tuple_val(obj);
+ int arity = arityval(*objp);
+ Eterm *htop, *hend;
+
+ ASSERT(arity >= tb->common.keypos);
+ htop = HAlloc(p, arity + 1);
+ hend = htop + arity + 1;
+ sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
+ htop[tb->common.keypos] = key;
+ obj = make_tuple(htop);
+
+ if (db_put_tree(tbl, obj, 1) != DB_ERROR_NONE) {
+ return 0;
+ }
+
+ pp = find_node2(tb, key);
+ ASSERT(pp != NULL);
+ HRelease(p, hend, htop);
+ flags |= DB_NEW_OBJECT;
+ }
+ }
handle->tb = tbl;
handle->dbterm = &(*pp)->dbterm;
- handle->mustResize = 0;
+ handle->flags = flags;
handle->bp = (void**) pp;
handle->new_size = (*pp)->dbterm.size;
#if HALFWORD_HEAP
@@ -2540,15 +2594,21 @@ static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle
return 1;
}
-static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
{
- if (handle->mustResize) {
- TreeDbTerm* oldp = (TreeDbTerm*) *handle->bp;
+ DbTable *tbl = handle->tb;
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *bp = (TreeDbTerm *) *handle->bp;
+ if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
+ Eterm ret;
+ db_erase_tree(tbl, GETKEY(tb, bp->dbterm.tpl), &ret);
+ } else if (handle->flags & DB_MUST_RESIZE) {
db_finalize_resize(handle, offsetof(TreeDbTerm,dbterm));
- reset_static_stack(&handle->tb->tree);
+ reset_static_stack(tb);
- free_term(&handle->tb->tree, oldp);
+ free_term(tb, bp);
}
#ifdef DEBUG
handle->dbterm = 0;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 3927615e04..9d699d4b22 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -198,11 +198,6 @@ set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
return ret;
}
-
-/* Type checking... */
-
-#define BOXED_IS_TUPLE(Boxed) is_arity_value(*boxed_val((Boxed)))
-
/*
**
** Types and enum's (compiled matches)
@@ -218,7 +213,9 @@ typedef enum {
matchTuple,
matchPushT,
matchPushL,
+ matchPushM,
matchPop,
+ matchSwap,
matchBind,
matchCmp,
matchEqBin,
@@ -227,11 +224,15 @@ typedef enum {
matchEqRef,
matchEq,
matchList,
+ matchMap,
+ matchKey,
matchSkip,
matchPushC,
matchConsA, /* Car is below Cdr */
matchConsB, /* Cdr is below Car (unusual) */
matchMkTuple,
+ matchMkFlatMap,
+ matchMkHashMap,
matchCall0,
matchCall1,
matchCall2,
@@ -856,6 +857,13 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info);
static Uint my_size_object(Eterm t);
static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap);
+/* Guard subroutines */
+static void
+dmc_rearrange_constants(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
+ int textpos, Eterm *p, Uint nelems);
+static DMCRet
+dmc_array(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm *p, Uint nelems, int *constant);
/* Guard compilation */
static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
Eterm t);
@@ -869,6 +877,9 @@ static DMCRet dmc_tuple(DMCContext *context,
DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
+static DMCRet
+dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant);
static DMCRet dmc_variable(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
@@ -888,12 +899,14 @@ static DMCRet compile_guard_expr(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
Eterm t);
-/* match expression subroutine */
+/* match expression subroutines */
static DMCRet dmc_one_term(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(Eterm) *stack,
DMC_STACK_TYPE(UWord) *text,
Eterm c);
+static Eterm
+dmc_private_copy(DMCContext *context, Eterm c);
#ifdef DMC_DEBUG
@@ -1364,7 +1377,112 @@ restart:
for (;;) {
switch (t & _TAG_PRIMARY_MASK) {
case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(t)) {
+ if (is_flatmap(t)) {
+ num_iters = flatmap_get_size(flatmap_val(t));
+ if (!structure_checked) {
+ DMC_PUSH(text, matchMap);
+ DMC_PUSH(text, num_iters);
+ }
+ structure_checked = 0;
+ for (i = 0; i < num_iters; ++i) {
+ Eterm key = flatmap_get_keys(flatmap_val(t))[i];
+ if (db_is_variable(key) >= 0) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Variable found in map key.",
+ -1, 0UL, dmcError);
+ }
+ goto error;
+ } else if (key == am_Underscore) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Underscore found in map key.",
+ -1, 0UL, dmcError);
+ }
+ goto error;
+ }
+ DMC_PUSH(text, matchKey);
+ DMC_PUSH(text, dmc_private_copy(&context, key));
+ {
+ int old_stack = ++(context.stack_used);
+ Eterm value = flatmap_get_values(flatmap_val(t))[i];
+ res = dmc_one_term(&context, &heap, &stack, &text,
+ value);
+ ASSERT(res != retFail);
+ if (res == retRestart) {
+ goto restart;
+ }
+ if (old_stack != context.stack_used) {
+ ASSERT(old_stack + 1 == context.stack_used);
+ DMC_PUSH(text, matchSwap);
+ }
+ if (context.stack_used > context.stack_need) {
+ context.stack_need = context.stack_used;
+ }
+ DMC_PUSH(text, matchPop);
+ --(context.stack_used);
+ }
+ }
+ break;
+ }
+ if (is_hashmap(t)) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv;
+ num_iters = hashmap_size(t);
+ if (!structure_checked) {
+ DMC_PUSH(text, matchMap);
+ DMC_PUSH(text, num_iters);
+ }
+ structure_checked = 0;
+
+ hashmap_iterator_init(&wstack, t, 0);
+
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ Eterm key = CAR(kv);
+ Eterm value = CDR(kv);
+ if (db_is_variable(key) >= 0) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Variable found in map key.",
+ -1, 0UL, dmcError);
+ }
+ DESTROY_WSTACK(wstack);
+ goto error;
+ } else if (key == am_Underscore) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Underscore found in map key.",
+ -1, 0UL, dmcError);
+ }
+ DESTROY_WSTACK(wstack);
+ goto error;
+ }
+ DMC_PUSH(text, matchKey);
+ DMC_PUSH(text, dmc_private_copy(&context, key));
+ {
+ int old_stack = ++(context.stack_used);
+ res = dmc_one_term(&context, &heap, &stack, &text,
+ value);
+ ASSERT(res != retFail);
+ if (res == retRestart) {
+ DESTROY_WSTACK(wstack);
+ goto restart;
+ }
+ if (old_stack != context.stack_used) {
+ ASSERT(old_stack + 1 == context.stack_used);
+ DMC_PUSH(text, matchSwap);
+ }
+ if (context.stack_used > context.stack_need) {
+ context.stack_need = context.stack_used;
+ }
+ DMC_PUSH(text, matchPop);
+ --(context.stack_used);
+ }
+ }
+ DESTROY_WSTACK(wstack);
+ break;
+ }
+ if (!is_tuple(t)) {
goto simple_term;
}
num_iters = arityval(*tuple_val(t));
@@ -1715,10 +1833,8 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Uint32 *return_flags)
{
MatchProg *prog = Binary2MatchProg(bprog);
- Eterm *ep;
- Eterm *tp;
+ const Eterm *ep, *tp, **sp;
Eterm t;
- Eterm **sp;
Eterm *esp;
MatchVariable* variables;
BeamInstr *cp;
@@ -1808,7 +1924,7 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
restart:
ep = &term;
esp = (Eterm*)((char*)mpsp->u.heap + prog->stack_offset);
- sp = (Eterm **) esp;
+ sp = (const Eterm **)esp;
ret = am_true;
do_catch = 0;
fail_label = -1;
@@ -1887,9 +2003,57 @@ restart:
*sp++ = list_val_rel(*ep,base);
++ep;
break;
+ case matchMap:
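+ /* the subject must be a map with at least n entries */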
+ if (!is_map_rel(*ep, base)) {
+ FAIL();
+ }
+ n = *pc++;
+ if (is_flatmap_rel(*ep,base)) {
+ if (flatmap_get_size(flatmap_val_rel(*ep, base)) < n) {
+ FAIL();
+ }
+ } else {
+ ASSERT(is_hashmap_rel(*ep,base));
+ if (hashmap_size_rel(*ep, base) < n) {
+ FAIL();
+ }
+ }
+ ep = flatmap_val_rel(*ep, base);
+ break;
+ case matchPushM:
+ if (!is_map_rel(*ep, base)) {
+ FAIL();
+ }
+ n = *pc++;
+ if (is_flatmap_rel(*ep,base)) {
+ if (flatmap_get_size(flatmap_val_rel(*ep, base)) < n) {
+ FAIL();
+ }
+ } else {
+ ASSERT(is_hashmap_rel(*ep,base));
+ if (hashmap_size_rel(*ep, base) < n) {
+ FAIL();
+ }
+ }
+ *sp++ = flatmap_val_rel(*ep++, base);
+ break;
+ case matchKey:
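+ /* look up a constant key in the current map; fail if it is absent,
+ * otherwise descend into the associated value */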
+ t = (Eterm) *pc++;
+ tp = erts_maps_get_rel(t, make_flatmap_rel(ep, base), base);
+ if (!tp) {
+ FAIL();
+ }
+ *sp++ = ep;
+ ep = tp;
+ break;
case matchPop:
ep = *(--sp);
break;
+ case matchSwap:
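+ /* exchange the two topmost entries on the pointer stack */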
+ tp = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = tp;
+ break;
case matchBind:
n = *pc++;
variables[n].term = *ep++;
@@ -1987,6 +2151,38 @@ restart:
}
*esp++ = t;
break;
+ case matchMkFlatMap:
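+ /* pop the keys tuple and n values from the expression stack and
+ * build a flatmap on the heap */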
+ n = *pc++;
+ ehp = HAllocX(build_proc, 1 + MAP_HEADER_SIZE + n, HEAP_XTRA);
+ t = *ehp++ = *--esp;
+ {
+ flatmap_t *m = (flatmap_t *)ehp;
+ m->thing_word = MAP_HEADER;
+ m->size = n;
+ m->keys = t;
+ }
+ t = make_flatmap(ehp);
+ ehp += MAP_HEADER_SIZE;
+ while (n--) {
+ *ehp++ = *--esp;
+ }
+ *esp++ = t;
+ break;
+ case matchMkHashMap:
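+ /* pop 2*n key/value terms from the expression stack, copy them to
+ * the heap and build a hashmap from the array */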
+ n = *pc++;
+ esp -= 2*n;
+ ehp = HAllocX(build_proc, 2*n, HEAP_XTRA);
+ {
+ ErtsHeapFactory factory;
+ Uint ix;
+ factory.p = build_proc;
+ for (ix = 0; ix < 2*n; ix++){
+ ehp[ix] = esp[ix];
+ }
+ t = erts_hashmap_from_array(&factory, ehp, n, 0);
+ }
+ *esp++ = t;
+ break;
case matchCall0:
bif = (Eterm (*)(Process*, ...)) *pc++;
t = (*bif)(build_proc, bif_args);
@@ -2601,10 +2797,10 @@ Wterm db_do_read_element(DbUpdateHandle* handle, Sint position)
}
ASSERT(((DbTableCommon*)handle->tb)->compress);
- ASSERT(!handle->mustResize);
+ ASSERT(!(handle->flags & DB_MUST_RESIZE));
handle->dbterm = db_alloc_tmp_uncompressed(&handle->tb->common,
handle->dbterm);
- handle->mustResize = 1;
+ handle->flags |= DB_MUST_RESIZE;
return handle->dbterm->tpl[position];
}
@@ -2637,11 +2833,11 @@ void db_do_update_element(DbUpdateHandle* handle,
#endif
return;
}
- if (!handle->mustResize) {
+ if (!(handle->flags & DB_MUST_RESIZE)) {
if (handle->tb->common.compress) {
handle->dbterm = db_alloc_tmp_uncompressed(&handle->tb->common,
handle->dbterm);
- handle->mustResize = 1;
+ handle->flags |= DB_MUST_RESIZE;
oldval = handle->dbterm->tpl[position];
#if HALFWORD_HEAP
old_base = NULL;
@@ -2701,7 +2897,7 @@ both_size_set:
/* write new value in old dbterm, finalize will make a flat copy */
handle->dbterm->tpl[position] = newval;
- handle->mustResize = 1;
+ handle->flags |= DB_MUST_RESIZE;
#if HALFWORD_HEAP
if (old_base && newval_sz > 0) {
@@ -3156,34 +3352,45 @@ int db_is_variable(Eterm obj)
/* return 1 if obj contains a variable or underscore */
/* return 0 if obj is fully ground */
-int db_has_variable(Eterm obj)
-{
- switch(obj & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_LIST: {
- while (is_list(obj)) {
- if (db_has_variable(CAR(list_val(obj))))
+int db_has_variable(Eterm node) {
+ DECLARE_ESTACK(s);
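+ /* walk the term with an explicit stack instead of C recursion */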
+
+ ESTACK_PUSH(s,node);
+ while (!ESTACK_ISEMPTY(s)) {
+ node = ESTACK_POP(s);
+ switch(node & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ while (is_list(node)) {
+ ESTACK_PUSH(s,CAR(list_val(node)));
+ node = CDR(list_val(node));
+ }
+ ESTACK_PUSH(s,node); /* Non wellformed list or [] */
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(node)) {
+ Eterm *tuple = tuple_val(node);
+ int arity = arityval(*tuple);
+ while(arity--) {
+ ESTACK_PUSH(s,*(++tuple));
+ }
+ } else if (is_flatmap(node)) {
+ Eterm *values = flatmap_get_values(flatmap_val(node));
+ Uint size = flatmap_get_size(flatmap_val(node));
+ ESTACK_PUSH(s, ((flatmap_t *) flatmap_val(node))->keys);
+ while (size--) {
+ ESTACK_PUSH(s, *(values++));
+ }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ if (node == am_Underscore || db_is_variable(node) >= 0) {
+ DESTROY_ESTACK(s);
return 1;
- obj = CDR(list_val(obj));
- }
- return(db_has_variable(obj)); /* Non wellformed list or [] */
- }
- case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(obj)) {
- return 0;
- } else {
- Eterm *tuple = tuple_val(obj);
- int arity = arityval(*tuple++);
- while(arity--) {
- if (db_has_variable(*tuple))
- return 1;
- tuple++;
}
- return(0);
+ break;
}
- case TAG_PRIMARY_IMMED1:
- if (obj == am_Underscore || db_is_variable(obj) >= 0)
- return 1;
}
+ DESTROY_ESTACK(s);
return 0;
}
@@ -3243,11 +3450,9 @@ static DMCRet dmc_one_term(DMCContext *context,
{
Sint n;
Eterm *hp;
- ErlHeapFragment *tmp_mb;
Uint sz, sz2, sz3;
Uint i, j;
-
switch (c & _TAG_PRIMARY_MASK) {
case TAG_PRIMARY_IMMED1:
if ((n = db_is_variable(c)) >= 0) { /* variable */
@@ -3334,6 +3539,20 @@ static DMCRet dmc_one_term(DMCContext *context,
DMC_PUSH(*text, n);
DMC_PUSH(*stack, c);
break;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE):
+ n = flatmap_get_size(flatmap_val(c));
+ DMC_PUSH(*text, matchPushM);
+ ++(context->stack_used);
+ DMC_PUSH(*text, n);
+ DMC_PUSH(*stack, c);
+ break;
+ case (_TAG_HEADER_HASHMAP >> _TAG_PRIMARY_SIZE):
+ n = hashmap_size(c);
+ DMC_PUSH(*text, matchPushM);
+ ++(context->stack_used);
+ DMC_PUSH(*text, n);
+ DMC_PUSH(*stack, c);
+ break;
case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
{
Eterm* ref_val = internal_ref_val(c);
@@ -3415,16 +3634,8 @@ static DMCRet dmc_one_term(DMCContext *context,
#endif
break;
default: /* BINARY, FUN, VECTOR, or EXTERNAL */
- /*
- ** Make a private copy...
- */
- n = size_object(c);
- tmp_mb = new_message_buffer(n);
- hp = tmp_mb->mem;
DMC_PUSH(*text, matchEqBin);
- DMC_PUSH(*text, copy_struct(c, n, &hp, &(tmp_mb->off_heap)));
- tmp_mb->next = context->save;
- context->save = tmp_mb;
+ DMC_PUSH(*text, dmc_private_copy(context, c));
break;
}
break;
@@ -3437,6 +3648,22 @@ static DMCRet dmc_one_term(DMCContext *context,
}
/*
+** Make a private copy of a term in a context.
+*/
+
+static Eterm
+dmc_private_copy(DMCContext *context, Eterm c)
+{
+ Uint n = size_object(c);
+ ErlHeapFragment *tmp_mb = new_message_buffer(n);
+ Eterm *hp = tmp_mb->mem;
+ Eterm copy = copy_struct(c, n, &hp, &(tmp_mb->off_heap));
+ tmp_mb->next = context->save;
+ context->save = tmp_mb;
+ return copy;
+}
+
+/*
** Match guard compilation
*/
@@ -3527,57 +3754,78 @@ static DMCRet dmc_list(DMCContext *context,
return retOk;
}
-static DMCRet dmc_tuple(DMCContext *context,
- DMCHeap *heap,
- DMC_STACK_TYPE(UWord) *text,
- Eterm t,
- int *constant)
+static void
+dmc_rearrange_constants(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
+ int textpos, Eterm *p, Uint nelems)
{
DMC_STACK_TYPE(UWord) instr_save;
+ Uint i;
+
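+ /* pop the instructions emitted since textpos, emit the deferred
+ * constants, then push the saved instructions back on top */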
+ DMC_INIT_STACK(instr_save);
+ while (DMC_STACK_NUM(*text) > textpos) {
+ DMC_PUSH(instr_save, DMC_POP(*text));
+ }
+ for (i = nelems; i--;) {
+ do_emit_constant(context, text, p[i]);
+ }
+ while(!DMC_EMPTY(instr_save)) {
+ DMC_PUSH(*text, DMC_POP(instr_save));
+ }
+ DMC_FREE(instr_save);
+}
+
+static DMCRet
+dmc_array(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm *p, Uint nelems, int *constant)
+{
int all_constant = 1;
int textpos = DMC_STACK_NUM(*text);
- Eterm *p = tuple_val(t);
- Uint nelems = arityval(*p);
Uint i;
- int c;
- DMCRet ret;
/*
- ** We remember where we started to layout code,
+ ** We remember where we started to lay out code,
** assume all is constant and back up and restart if not so.
- ** The tuple should be laid out with the last element first,
- ** so we can memcpy the tuple to the eheap.
+ ** The array should be laid out with the last element first,
+ ** so we can memcpy it to the eheap.
*/
- for (i = nelems; i > 0; --i) {
- if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
- return ret;
- if (!c && all_constant) {
- all_constant = 0;
- if (i < nelems) {
- Uint j;
+ for (i = nelems; i--;) {
+ DMCRet ret;
+ int c;
- /*
- * Oops, we need to relayout the constants.
- * Save the already laid out instructions.
- */
- DMC_INIT_STACK(instr_save);
- while (DMC_STACK_NUM(*text) > textpos)
- DMC_PUSH(instr_save, DMC_POP(*text));
- for (j = nelems; j > i; --j)
- do_emit_constant(context, text, p[j]);
- while(!DMC_EMPTY(instr_save))
- DMC_PUSH(*text, DMC_POP(instr_save));
- DMC_FREE(instr_save);
- }
- } else if (c && !all_constant) {
- /* push a constant */
- do_emit_constant(context, text, p[i]);
- }
+ ret = dmc_expr(context, heap, text, p[i], &c);
+ if (ret != retOk) {
+ return ret;
+ }
+ if (!c && all_constant) {
+ all_constant = 0;
+ if (i < nelems - 1) {
+ dmc_rearrange_constants(context, text, textpos,
+ p + i + 1, nelems - i - 1);
+ }
+ } else if (c && !all_constant) {
+ do_emit_constant(context, text, p[i]);
+ }
+ }
+ *constant = all_constant;
+ return retOk;
+}
+
+static DMCRet
+dmc_tuple(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant)
+{
+ int all_constant;
+ Eterm *p = tuple_val(t);
+ Uint nelems = arityval(*p);
+ DMCRet ret;
+
+ ret = dmc_array(context, heap, text, p + 1, nelems, &all_constant);
+ if (ret != retOk) {
+ return ret;
}
-
if (all_constant) {
- *constant = 1;
- return retOk;
+ *constant = 1;
+ return retOk;
}
DMC_PUSH(*text, matchMkTuple);
DMC_PUSH(*text, nelems);
@@ -3586,6 +3834,93 @@ static DMCRet dmc_tuple(DMCContext *context,
return retOk;
}
+static DMCRet
+dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
+ Eterm t, int *constant)
+{
+ int nelems;
+ int constant_values;
+ DMCRet ret;
+ if (is_flatmap(t)) {
+ flatmap_t *m = (flatmap_t *)flatmap_val(t);
+ Eterm *values = flatmap_get_values(m);
+
+ nelems = flatmap_get_size(m);
+ ret = dmc_array(context, heap, text, values, nelems, &constant_values);
+
+ if (ret != retOk) {
+ return ret;
+ }
+ if (constant_values) {
+ *constant = 1;
+ return retOk;
+ }
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, dmc_private_copy(context, m->keys));
+ if (++context->stack_used > context->stack_need) {
+ context->stack_need = context->stack_used;
+ }
+ DMC_PUSH(*text, matchMkFlatMap);
+ DMC_PUSH(*text, nelems);
+ context->stack_used -= nelems;
+ *constant = 0;
+ return retOk;
+ } else {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv;
+ int c;
+
+ ASSERT(is_hashmap(t));
+
+ hashmap_iterator_init(&wstack, t, 1);
+ constant_values = 1;
+ nelems = hashmap_size(t);
+
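+ /* first pass: check whether every value in the map is constant */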
+ while ((kv=hashmap_iterator_prev(&wstack)) != NULL) {
+ if ((ret = dmc_expr(context, heap, text, CDR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (!c)
+ constant_values = 0;
+ }
+
+ if (constant_values) {
+ *constant = 1;
+ DESTROY_WSTACK(wstack);
+ return retOk;
+ }
+
+ *constant = 0;
+
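+ /* second pass: emit code for each key and value, emitting constants
+ * explicitly where needed */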
+ hashmap_iterator_init(&wstack, t, 1);
+
+ while ((kv=hashmap_iterator_prev(&wstack)) != NULL) {
+ /* push key */
+ if ((ret = dmc_expr(context, heap, text, CAR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, CAR(kv));
+ }
+ /* push value */
+ if ((ret = dmc_expr(context, heap, text, CDR(kv), &c)) != retOk) {
+ DESTROY_WSTACK(wstack);
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, CDR(kv));
+ }
+ }
+ DMC_PUSH(*text, matchMkHashMap);
+ DMC_PUSH(*text, nelems);
+ context->stack_used -= nelems;
+ DESTROY_WSTACK(wstack);
+ return retOk;
+ }
+}
+
static DMCRet dmc_whole_expression(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(UWord) *text,
@@ -4580,7 +4915,10 @@ static DMCRet dmc_expr(DMCContext *context,
return ret;
break;
case TAG_PRIMARY_BOXED:
- if (!BOXED_IS_TUPLE(t)) {
+ if (is_map(t)) {
+ return dmc_map(context, heap, text, t, constant);
+ }
+ if (!is_tuple(t)) {
goto simple_term;
}
p = tuple_val(t);
@@ -4855,7 +5193,7 @@ static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap)
*hp += 2;
break;
case TAG_PRIMARY_BOXED:
- if (BOXED_IS_TUPLE(t)) {
+ if (is_tuple(t)) {
if (arityval(*tuple_val(t)) == 1 &&
is_tuple(a = tuple_val(t)[1])) {
Uint i,n;
@@ -5126,6 +5464,18 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("Tuple\t%beu\n", n);
break;
+ case matchMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Map\t%beu\n", n);
+ break;
+ case matchKey:
+ ++t;
+ p = (Eterm) *t;
+ ++t;
+ erts_printf("Key\t%p (%T)\n", t, p);
+ break;
case matchPushT:
++t;
n = *t;
@@ -5136,10 +5486,20 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("PushL\n");
break;
+ case matchPushM:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("PushM\t%beu\n", n);
+ break;
case matchPop:
++t;
erts_printf("Pop\n");
break;
+ case matchSwap:
+ ++t;
+ erts_printf("Swap\n");
+ break;
case matchBind:
++t;
n = *t;
@@ -5252,6 +5612,18 @@ void db_match_dis(Binary *bp)
++t;
erts_printf("MkTuple\t%beu\n", n);
break;
+ case matchMkFlatMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("MkFlatMap\t%beu\n", n);
+ break;
+ case matchMkHashMap:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("MkHashMap\t%beu\n", n);
+ break;
case matchOr:
++t;
n = *t;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 328b19dfc9..ca206c7f58 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -76,6 +76,9 @@ typedef struct db_term {
union db_table;
typedef union db_table DbTable;
+#define DB_MUST_RESIZE 1
+#define DB_NEW_OBJECT 2
+
/* Info about a database entry while it's being updated
* (by update_counter or update_element)
*/
@@ -84,7 +87,7 @@ typedef struct {
DbTerm* dbterm;
void** bp; /* {Hash|Tree}DbTerm** */
Uint new_size;
- int mustResize;
+ int flags;
void* lck;
#if HALFWORD_HEAP
unsigned char* abs_vec; /* [i] true if dbterm->tpl[i] is absolute Eterm */
@@ -165,6 +168,7 @@ typedef struct db_table_method
DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
+ int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
int (*db_delete_all_objects)(Process* p,
DbTable* db /* [in out] */ );
@@ -182,15 +186,14 @@ typedef struct db_table_method
void *arg);
void (*db_check_table)(DbTable* tb);
- /* Lookup a dbterm for updating. Return false if not found.
- */
- int (*db_lookup_dbterm)(DbTable*, Eterm key,
- DbUpdateHandle* handle); /* [out] */
+ /* Lookup a dbterm for updating. Return false if not found. */
+ int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle* handle);
- /* Must be called for each db_lookup_dbterm that returned true,
- ** even if dbterm was not updated.
- */
- void (*db_finalize_dbterm)(DbUpdateHandle* handle);
+ /* Must be called for each db_lookup_dbterm that returned true, even if
+ ** dbterm was not updated. If the handle refers to a newly inserted object
+ ** and cret is not DB_ERROR_NONE, the object is removed from the table. */
+ void (*db_finalize_dbterm)(int cret, DbUpdateHandle* handle);
} DbTableMethod;
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 147249f751..31b05d22af 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -601,17 +601,14 @@ erl_drv_thread_create(char *name,
#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid;
- ethr_thr_opts ethr_opts;
+ ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
ethr_thr_opts *use_opts;
- ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
if (!opts)
use_opts = NULL;
else {
- sys_memcpy((void *) &ethr_opts,
- (void *) &def_ethr_opts,
- sizeof(ethr_thr_opts));
ethr_opts.suggested_stack_size = opts->suggested_stack_size;
+ ethr_opts.name = name;
use_opts = &ethr_opts;
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0db42d4325..4a116c0740 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -20,6 +20,8 @@
# include "config.h"
#endif
+#define ERL_WANT_GC_INTERNALS__
+
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
@@ -37,6 +39,7 @@
#include "hipe_mode_switch.h"
#endif
#include "dtrace-wrapper.h"
+#include "erl_bif_unique.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -94,10 +97,10 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static Uint combined_message_size(Process* p);
+static Uint combined_message_size(Process* p, int off_heap_msgs);
static void remove_message_buffers(Process* p);
-static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
+static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs);
+static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs);
static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
@@ -173,15 +176,15 @@ erts_init_gc(void)
int i = 0, ix;
Sint max_heap_size = 0;
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
- ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
- ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
- ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
- ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
- ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ERTS_CT_ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ERTS_CT_ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
erts_test_long_gc_sleep = 0;
@@ -400,7 +403,9 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
Uint reclaimed_now = 0;
int done = 0;
+ int off_heap_msgs;
Uint ms1, s1, us1;
+ erts_aint32_t state;
ErtsSchedulerData *esdp;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
@@ -417,7 +422,8 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_start);
}
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ state = erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ off_heap_msgs = state & ERTS_PSFLG_OFF_HEAP_MSGS;
if (erts_system_monitor_long_gc != 0) {
get_now(&ms1, &s1, &us1);
}
@@ -443,11 +449,11 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
while (!done) {
if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
DTRACE2(gc_major_start, pidbuf, need);
- done = major_collection(p, need, objv, nobj, &reclaimed_now);
+ done = major_collection(p, need, objv, nobj, &reclaimed_now, off_heap_msgs);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
} else {
DTRACE2(gc_minor_start, pidbuf, need);
- done = minor_collection(p, need, objv, nobj, &reclaimed_now);
+ done = minor_collection(p, need, objv, nobj, &reclaimed_now, off_heap_msgs);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
@@ -830,7 +836,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
}
static int
-minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs)
{
Uint mature = HIGH_WATER(p) - HEAP_START(p);
@@ -869,20 +875,22 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
Uint size_after;
Uint need_after;
Uint stack_size = STACK_SZ_ON_HEAP(p);
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p, off_heap_msgs);
Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0);
do_minor(p, new_sz, objv, nobj);
- /*
- * Copy newly received message onto the end of the new heap.
- */
- ErtsGcQuickSanityCheck(p);
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
- ErtsGcQuickSanityCheck(p);
+ if (!off_heap_msgs) {
+ /*
+ * Copy newly received message onto the end of the new heap.
+ */
+ ErtsGcQuickSanityCheck(p);
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+ ErtsGcQuickSanityCheck(p);
+ }
}
}
ErtsGcQuickSanityCheck(p);
@@ -1208,7 +1216,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
static int
-major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs)
{
Rootset rootset;
Roots* roots;
@@ -1221,8 +1229,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
Uint oh_size = (char *) OLD_HTOP(p) - oh;
Uint n;
Uint new_sz;
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
- ErlMessage *msgp;
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p, off_heap_msgs);
size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
@@ -1432,13 +1439,16 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
ErtsGcQuickSanityCheck(p);
- /*
- * Copy newly received message onto the end of the new heap.
- */
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
- ErtsGcQuickSanityCheck(p);
+ if (!off_heap_msgs) {
+ ErlMessage *msgp;
+ /*
+ * Copy newly received message onto the end of the new heap.
+ */
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+ ErtsGcQuickSanityCheck(p);
+ }
}
}
@@ -1499,15 +1509,17 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
* mbuf list.
*/
static Uint
-combined_message_size(Process* p)
+combined_message_size(Process* p, int off_heap_msgs)
{
- Uint sz = 0;
+ Uint sz;
ErlMessage *msgp;
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
+ if (off_heap_msgs)
+ return 0;
+
+ for (sz = 0, msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached)
sz += erts_msg_attached_data_size(msgp);
- }
}
return sz;
}
@@ -2400,7 +2412,6 @@ sweep_off_heap(Process *p, int fullsweep)
}
pb->val = erts_bin_realloc(pb->val, new_size);
- pb->val->orig_size = new_size;
pb->bytes = (byte *) pb->val->orig_bytes;
}
}
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 5203dda263..8afcb060a1 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -20,10 +20,12 @@
#ifndef __ERL_GC_H__
#define __ERL_GC_H__
-#include "erl_map.h"
+#if defined(ERL_WANT_GC_INTERNALS__) || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
+#include "erl_map.h"
+
#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
# define HARDDEBUG 1
#endif
@@ -53,7 +55,9 @@ do { \
nelts = header_arity(HDR); \
switch ((HDR) & _HEADER_SUBTAG_MASK) { \
case SUB_BINARY_SUBTAG: nelts++; break; \
- case MAP_SUBTAG: nelts+=map_get_size(PTR) + 1; break; \
+ case MAP_SUBTAG: nelts+=flatmap_get_size(PTR) + 1; break; \
+ case HASHMAP_SUBTAG: nelts=hashmap_bitcount(MAP_HEADER_VAL(HDR)); \
+ nelts += is_hashmap_header_head(HDR) ? 1 : 0; break; \
case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
} \
gval = make_boxed(HTOP); \
@@ -61,14 +65,11 @@ do { \
*HTOP++ = HDR; \
*PTR++ = gval; \
while (nelts--) *HTOP++ = *PTR++; \
- \
} while(0)
#define in_area(ptr,start,nbytes) \
((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
-extern Uint erts_test_long_gc_sleep;
-
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
int within(Eterm *ptr, Process *p);
#endif
@@ -97,4 +98,33 @@ ERTS_GLB_INLINE Eterm follow_moved(Eterm term)
}
#endif
+#endif /* ERL_GC_C__ || HIPE_GC_C__ */
+
+/*
+ * Global exported
+ */
+
+extern Uint erts_test_long_gc_sleep;
+
+typedef struct {
+ Uint64 reclaimed;
+ Uint64 garbage_cols;
+} ErtsGCInfo;
+
+void erts_gc_info(ErtsGCInfo *gcip);
+void erts_init_gc(void);
+int erts_garbage_collect(struct process*, int, Eterm*, int);
+void erts_garbage_collect_hibernate(struct process* p);
+Eterm erts_gc_after_bif_call(struct process* p, Eterm result, Eterm* regs, Uint arity);
+void erts_garbage_collect_literals(struct process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh);
+Uint erts_next_heap_size(Uint, Uint);
+Eterm erts_heap_sizes(struct process* p);
+
+void erts_offset_off_heap(struct erl_off_heap*, Sint, Eterm*, Eterm*);
+void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_free_heap_frags(struct process* p);
+
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 61f8385efc..86d3416423 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -45,6 +45,7 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "erl_ptab.h"
+#include "erl_bif_unique.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -134,7 +135,9 @@ static void erl_init(int ncpu,
int legacy_proc_tab,
int port_tab_sz,
int port_tab_sz_ignore_files,
- int legacy_port_tab);
+ int legacy_port_tab,
+ int time_correction,
+ ErtsTimeWarpMode time_warp_mode);
static erts_atomic_t exiting;
@@ -161,9 +164,6 @@ int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
Uint32 erts_debug_flags; /* Debug flags. */
-#ifdef ERTS_OPCODE_COUNTER_SUPPORT
-int count_instructions;
-#endif
int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
@@ -188,12 +188,10 @@ static int no_dirty_io_schedulers;
Uint32 verbose; /* See erl_debug.h for information about verbose */
#endif
-int erts_disable_tolerant_timeofday; /* Time correction can be disabled it is
- * not and/or it is too slow.
- */
-
int erts_atom_table_size = ATOM_LIMIT; /* Maximum number of atoms */
+int erts_pd_initial_size = 10;
+
int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
@@ -269,6 +267,19 @@ this_rel_num(void)
return this_rel;
}
+static ERTS_INLINE void
+set_default_time_adj(int *time_correction_p, ErtsTimeWarpMode *time_warp_mode_p)
+{
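+ /* default to time correction in no-time-warp mode, falling back to
+ * no correction if the platform cannot support it */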
+ *time_correction_p = 1;
+ *time_warp_mode_p = ERTS_NO_TIME_WARP_MODE;
+ if (!erts_check_time_adj_support(*time_correction_p,
+ *time_warp_mode_p)) {
+ *time_correction_p = 0;
+ ASSERT(erts_check_time_adj_support(*time_correction_p,
+ *time_warp_mode_p));
+ }
+}
+
/*
* Common error printout function, all error messages
* that don't go to the error logger go through here.
@@ -284,13 +295,22 @@ static int early_init(int *argc, char **argv);
void
erts_short_init(void)
{
- int ncpu = early_init(NULL, NULL);
+
+ int ncpu;
+ int time_correction;
+ ErtsTimeWarpMode time_warp_mode;
+
+ set_default_time_adj(&time_correction,
+ &time_warp_mode);
+ ncpu = early_init(NULL, NULL);
erl_init(ncpu,
ERTS_DEFAULT_MAX_PROCESSES,
0,
ERTS_DEFAULT_MAX_PORTS,
0,
- 0);
+ 0,
+ time_correction,
+ time_warp_mode);
erts_initialized = 1;
}
@@ -300,12 +320,15 @@ erl_init(int ncpu,
int legacy_proc_tab,
int port_tab_sz,
int port_tab_sz_ignore_files,
- int legacy_port_tab)
+ int legacy_port_tab,
+ int time_correction,
+ ErtsTimeWarpMode time_warp_mode)
{
init_benchmarking();
+ erts_bif_unique_init();
erts_init_monitors();
- erts_init_time();
+ erts_init_time(time_correction, time_warp_mode);
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
@@ -316,6 +339,7 @@ erl_init(int ncpu,
no_dirty_io_schedulers
#endif
);
+ erts_late_init_time_sup();
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_init_gc(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -365,12 +389,13 @@ erl_init(int ncpu,
erl_nif_init();
}
-static void
+static Eterm
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
int i;
Eterm start_mod;
Eterm args;
+ Eterm res;
Eterm* hp;
Process parent;
ErlSpawnOpts so;
@@ -400,10 +425,11 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
hp += 2;
args = CONS(hp, env, args);
- so.flags = 0;
- (void) erl_create_process(&parent, start_mod, am_start, args, &so);
+ so.flags = SPO_SYSTEM_PROC;
+ res = erl_create_process(&parent, start_mod, am_start, args, &so);
erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
erts_cleanup_empty_process(&parent);
+ return res;
}
Eterm
@@ -509,9 +535,9 @@ void erts_usage(void)
/* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
- erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
- erts_fprintf(stderr, " respect to uptime\n");
-
+ erts_fprintf(stderr, "-c bool enable or disable time correction\n");
+ erts_fprintf(stderr, "-C mode set time warp mode; valid modes are:\n");
+ erts_fprintf(stderr, " no_time_warp|single_time_warp|multi_time_warp\n");
erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n");
@@ -519,6 +545,8 @@ void erts_usage(void)
H_DEFAULT_SIZE);
erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
VH_DEFAULT_SIZE);
+ erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
+ erts_pd_initial_size);
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
@@ -681,7 +709,6 @@ early_init(int *argc, char **argv) /*
erts_sched_compact_load = 1;
erts_printf_eterm_func = erts_printf_term;
- erts_disable_tolerant_timeofday = 0;
display_items = 200;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS;
@@ -1187,7 +1214,11 @@ erl_start(int argc, char **argv)
int port_tab_sz_ignore_files = 0;
int legacy_proc_tab = 0;
int legacy_port_tab = 0;
+ int time_correction;
+ ErtsTimeWarpMode time_warp_mode;
+ set_default_time_adj(&time_correction,
+ &time_warp_mode);
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -1408,6 +1439,7 @@ erl_start(int argc, char **argv)
*
* h|ms - min_heap_size
* h|mbs - min_bin_vheap_size
+ * h|pds - erts_pd_initial_size
*
*/
if (has_prefix("mbs", sub_param)) {
@@ -1425,6 +1457,14 @@ erl_start(int argc, char **argv)
erts_usage();
}
VERBOSE(DEBUG_SYSTEM, ("using minimum heap size %d\n", H_MIN_SIZE));
+ } else if (has_prefix("pds", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if ((erts_pd_initial_size = atoi(arg)) <= 0) {
+ erts_fprintf(stderr, "bad initial process dictionary size %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using initial process dictionary size %d\n",
+ erts_pd_initial_size));
} else {
/* backward compatibility */
arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1896,15 +1936,56 @@ erl_start(int argc, char **argv)
}
break;
}
- case 'c':
- if (argv[i][2] == 0) { /* -c: documented option */
- erts_disable_tolerant_timeofday = 1;
+ case 'C':
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (sys_strcmp(arg, "no_time_warp") == 0)
+ time_warp_mode = ERTS_NO_TIME_WARP_MODE;
+ else if (sys_strcmp(arg, "single_time_warp") == 0)
+ time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
+ else if (sys_strcmp(arg, "multi_time_warp") == 0)
+ time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
+ else {
+ erts_fprintf(stderr,
+ "Invalid time warp mode: %s\n", arg);
+ erts_usage();
}
+ break;
+ case 'c':
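+ /* -c [bool]: enable or disable time correction; a bare -c (the old
+ * flag) still means disabled, and -ci keeps its old meaning */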
+ if (sys_strcmp(argv[i]+2, "false") == 0)
+ goto time_correction_false;
+ else if (sys_strcmp(argv[i]+2, "true") == 0)
+ goto time_correction_true;
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
else if (argv[i][2] == 'i') { /* -ci: undocumented option */
count_instructions = 1;
}
#endif
+ else if (argv[i][2] == '\0') {
+ if (i + 1 >= argc)
+ goto time_correction_false;
+ else {
+ if (sys_strcmp(argv[i+1], "false") == 0) {
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
+ goto time_correction_false;
+ }
+ else if (sys_strcmp(argv[i+1], "true") == 0) {
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
+ time_correction_true:
+ time_correction = 1;
+ break;
+ }
+ else {
+ time_correction_false:
+ time_correction = 0;
+ break;
+ }
+ }
+ }
+ else {
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ erts_fprintf(stderr, "Invalid time correction value: %s\n", arg);
+ erts_usage();
+ }
break;
case 'W':
arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1950,6 +2031,30 @@ erl_start(int argc, char **argv)
i++;
}
+ if (!erts_check_time_adj_support(time_correction, time_warp_mode)) {
+ char *time_correction_str = time_correction ? "Enabled" : "Disabled";
+ char *time_warp_str = "undefined";
+ switch (time_warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ time_warp_str = "no";
+ break;
+ case ERTS_SINGLE_TIME_WARP_MODE:
+ time_warp_str = "single";
+ break;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ time_warp_str = "multi";
+ break;
+ default:
+ time_warp_str = "undefined";
+ break;
+ }
+ erts_fprintf(stderr, "%s time correction with %s time warp mode "
+ "is not supported on this platform\n",
+ time_correction_str,
+ time_warp_str);
+ erts_usage();
+ }
+
/* Output format on windows for sprintf defaults to three exponents.
* We use two-exponent to mimic normal sprintf behaviour.
*/
@@ -1983,7 +2088,9 @@ erl_start(int argc, char **argv)
legacy_proc_tab,
port_tab_sz,
port_tab_sz_ignore_files,
- legacy_port_tab);
+ legacy_port_tab,
+ time_correction,
+ time_warp_mode);
load_preloaded();
erts_end_staging_code_ix();
@@ -1991,7 +2098,11 @@ erl_start(int argc, char **argv)
erts_initialized = 1;
- erl_first_process_otp("otp_ring0", NULL, 0, boot_argc, boot_argv);
+ {
+ Eterm init = erl_first_process_otp("otp_ring0", NULL, 0,
+ boot_argc, boot_argv);
+ erts_bif_timer_start_servers(init);
+ }
#ifdef ERTS_SMP
erts_start_schedulers();
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index df7c443387..da85b86c87 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1226,7 +1226,7 @@ erts_instr_init(int stat, int map_stat)
mem_anchor = NULL;
/* Install instrumentation functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index b105ece6f1..261460d054 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -140,7 +140,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
{ "atom_tab", NULL },
- { "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
{ "message_pre_alloc_lock", "address" },
{ "ptimer_pre_alloc_lock", "address", },
@@ -168,6 +167,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "timer_wheel", NULL },
{ "system_block", NULL },
{ "timeofday", NULL },
+ { "get_time", NULL },
+ { "get_corrected_time", NULL },
{ "breakpoints", NULL },
{ "pollsets_lock", NULL },
{ "pix_lock", "address" },
@@ -184,10 +185,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "efile_drv dtrace mutex", NULL },
#endif
{ "mtrace_buf", NULL },
-#ifdef __WIN32__
#ifdef ERTS_SMP
- { "sys_gethrtime", NULL },
-#endif
+ { "os_monotonic_time", NULL },
#endif
{ "erts_alloc_hard_debug", NULL },
{ "hard_dbg_mseg", NULL },
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index cf6996ea06..ddeb56a6be 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -104,11 +104,10 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
}
static void lcnt_time(erts_lcnt_time_t *time) {
-#if 0 || defined(HAVE_GETHRTIME)
- SysHrTime hr_time;
- hr_time = sys_gethrtime();
- time->s = (unsigned long)(hr_time / 1000000000LL);
- time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
+#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
+ ErtsMonotonicTime mtime = ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time());
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
#else
SysTimeval tv;
sys_gettimeofday(&tv);
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index ffbb93da1b..09fadd7e9e 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -76,7 +76,7 @@
/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
-#if 0 || defined(HAVE_GETHRTIME)
+#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
#else
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 5e740aacdd..bcbda65da0 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -16,6 +16,9 @@
*
* %CopyrightEnd%
*
+ * hashmaps are an adaptation of Rich Hickey's Persistent HashMaps,
+ * which were an adaptation of Phil Bagwell's Hash Array Mapped Tries
+ *
* Author: Björn-Egil Dahlberg
*/
@@ -62,39 +65,78 @@
* - erts_internal:map_to_tuple_keys/1
*/
+#ifndef DECL_AM
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+#endif
+
+/* for hashmap_from_list/1 */
+typedef struct {
+ Uint32 hx;
+ Uint32 skip;
+ Uint i;
+ Eterm val;
+} hxnode_t;
+
+
+static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB);
+static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args);
+static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB);
+static Eterm hashmap_to_list(Process *p, Eterm map);
+static Eterm hashmap_keys(Process *p, Eterm map);
+static Eterm hashmap_values(Process *p, Eterm map);
+static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node);
+static Eterm map_from_validated_list(Process *p, Eterm list, Uint size);
+static Eterm hashmap_from_validated_list(Process *p, Eterm list, Uint size);
+static Eterm hashmap_from_unsorted_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, int reject_dupkeys);
+static Eterm hashmap_from_sorted_unique_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, int is_root);
+static Eterm hashmap_from_chunked_array(ErtsHeapFactory*, hxnode_t *hxns, Uint n, int is_root);
+static Eterm hashmap_info(Process *p, Eterm node);
+static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]);
+static int hxnodecmp(hxnode_t* a, hxnode_t* b);
+static int hxnodecmpkey(hxnode_t* a, hxnode_t* b);
+
/* erlang:map_size/1
* the corresponding instruction is implemented in:
* beam/erl_bif_guard.c
*/
BIF_RETTYPE map_size_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Eterm *hp;
Uint hsz = 0;
- map_t *mp = (map_t*)map_val(BIF_ARG_1);
- Uint n = map_get_size(mp);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ Uint n = flatmap_get_size(mp);
erts_bld_uint(NULL, &hsz, n);
hp = HAlloc(BIF_P, hsz);
BIF_RET(erts_bld_uint(&hp, NULL, n));
+ } else if (is_hashmap(BIF_ARG_1)) {
+ Eterm *head, *hp, res;
+ Uint size, hsz=0;
+
+ head = hashmap_val(BIF_ARG_1);
+ size = head[1];
+ (void) erts_bld_uint(NULL, &hsz, size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, size);
+ BIF_RET(res);
}
BIF_ERROR(BIF_P, BADARG);
}
-/* maps:to_list/1
- */
+/* maps:to_list/1 */
BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Uint n;
Eterm* hp;
Eterm *ks,*vs, res, tup;
- map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
- n = map_get_size(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
hp = HAlloc(BIF_P, (2 + 3) * n);
res = NIL;
@@ -104,6 +146,8 @@ BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ return hashmap_to_list(BIF_P, BIF_ARG_1);
}
BIF_ERROR(BIF_P, BADARG);
@@ -113,89 +157,84 @@ BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
* return value if key *matches* a key in the map
*/
-int erts_maps_find(Eterm key, Eterm map, Eterm *value) {
+const Eterm *
+#if HALFWORD_HEAP
+erts_maps_get_rel(Eterm key, Eterm map, Eterm *map_base)
+#else
+erts_maps_get(Eterm key, Eterm map)
+#endif
+{
+ Uint32 hx;
+ if (is_flatmap_rel(map, map_base)) {
+ Eterm *ks, *vs;
+ flatmap_t *mp;
+ Uint n, i;
- Eterm *ks,*vs;
- map_t *mp;
- Uint n,i;
+ mp = (flatmap_t *)flatmap_val_rel(map, map_base);
+ n = flatmap_get_size(mp);
- mp = (map_t*)map_val(map);
- n = map_get_size(mp);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ if (n == 0) {
+ return NULL;
+ }
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- *value = vs[i];
- return 1;
+ ks = (Eterm *)tuple_val_rel(mp->keys, map_base) + 1;
+ vs = flatmap_get_values(mp);
+
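+ /* immediate keys can be compared with ==; otherwise fall back to a
+ * full structural comparison */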
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return &vs[i];
+ }
+ }
+ }
+
+ for (i = 0; i < n; i++) {
+ if (eq_rel(ks[i], map_base, key, NULL)) {
+ return &vs[i];
+ }
}
+ return NULL;
}
- return 0;
+ ASSERT(is_hashmap_rel(map, map_base));
+ hx = hashmap_make_hash(key);
+
+ return erts_hashmap_get_rel(hx, key, map, map_base);
}
BIF_RETTYPE maps_find_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
- Eterm *hp, value,res;
+ Eterm *hp, res;
+ const Eterm *value;
- if (erts_maps_find(BIF_ARG_1, BIF_ARG_2, &value)) {
+ value = erts_maps_get(BIF_ARG_1, BIF_ARG_2);
+ if (value) {
hp = HAlloc(BIF_P, 3);
res = make_tuple(hp);
*hp++ = make_arityval(2);
*hp++ = am_ok;
- *hp++ = value;
+ *hp++ = *value;
BIF_RET(res);
}
-
BIF_RET(am_error);
}
BIF_ERROR(BIF_P, BADARG);
}
+
/* maps:get/2
* return value if key *matches* a key in the map
* exception bad_key if none matches
*/
-
-int erts_maps_get(Eterm key, Eterm map, Eterm *value) {
- Eterm *ks,*vs;
- map_t *mp;
- Uint n,i;
-
- mp = (map_t*)map_val(map);
- n = map_get_size(mp);
-
- if (n == 0)
- return 0;
-
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
-
- if (is_immed(key)) {
- for( i = 0; i < n; i++) {
- if (ks[i] == key) {
- *value = vs[i];
- return 1;
- }
- }
- }
-
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- *value = vs[i];
- return 1;
- }
- }
- return 0;
-}
-
BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
Eterm *hp;
- Eterm value, error;
+ Eterm error;
+ const Eterm *value;
char *s_error;
- if (erts_maps_get(BIF_ARG_1, BIF_ARG_2, &value)) {
- BIF_RET(value);
+ value = erts_maps_get(BIF_ARG_1, BIF_ARG_2);
+ if (value) {
+ BIF_RET(*value);
}
s_error = "bad_key";
@@ -213,13 +252,8 @@ BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
*/
BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) {
- Eterm *kv, item = BIF_ARG_1;
- Eterm *hp, *thp,*vs, *ks, keys, res;
- map_t *mp;
- Uint size = 0, unused_size = 0;
- Sint c = 0;
- Sint idx = 0;
-
+ Eterm item = BIF_ARG_1, res, *kv;
+ Uint size = 0;
if (is_list(item) || is_nil(item)) {
/* Calculate size and check validity */
@@ -240,368 +274,1091 @@ BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) {
if (is_not_nil(item))
goto error;
- hp = HAlloc(BIF_P, 3 + 1 + (2 * size));
- thp = hp;
+ if (size > MAP_SMALL_MAP_LIMIT) {
+ BIF_RET(hashmap_from_validated_list(BIF_P, BIF_ARG_1, size));
+ } else {
+ BIF_RET(map_from_validated_list(BIF_P, BIF_ARG_1, size));
+ }
+ }
+
+error:
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static Eterm map_from_validated_list(Process *p, Eterm list, Uint size) {
+ Eterm *kv, item = list;
+ Eterm *hp, *thp,*vs, *ks, keys, res;
+ flatmap_t *mp;
+ Uint unused_size = 0;
+ Sint c = 0;
+ Sint idx = 0;
+
+
+ hp = HAlloc(p, 3 + 1 + (2 * size));
+ thp = hp;
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ ks = hp;
+ hp += size;
+ mp = (flatmap_t*)hp;
+ res = make_flatmap(mp);
+ hp += MAP_HEADER_SIZE;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER;
+ mp->size = size; /* set later, might shrink */
+ mp->keys = keys;
+
+ if (size == 0)
+ return res;
+
+ /* first entry */
+ kv = tuple_val(CAR(list_val(item)));
+ ks[0] = kv[1];
+ vs[0] = kv[2];
+ size = 1;
+ item = CDR(list_val(item));
+
+ /* insert sort key/value pairs */
+ while(is_list(item)) {
+
+ kv = tuple_val(CAR(list_val(item)));
+
+ /* compare ks backwards
+ * idx represents the word index to be written (the hole position).
+ * We cannot copy the elements when searching since we might
+ * have an equal key. So we search for just the index first =(
+ *
+ * It is perhaps faster to move the values in the first pass.
+ * Check for uniqueness during insert phase and then have a
+ * second phase compacting the map if duplicates are found
+ * during insert, or do some other sort; shell-sort perhaps.
+ */
+
+ idx = size;
+
+ while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; }
+
+ if (c == 0) {
+ /* last compare was equal,
+ * i.e. we have to release memory
+ * and overwrite that key/value
+ */
+ ks[idx-1] = kv[1];
+ vs[idx-1] = kv[2];
+ unused_size++;
+ } else {
+ Uint i = size;
+ while(i > idx) {
+ ks[i] = ks[i-1];
+ vs[i] = vs[i-1];
+ i--;
+ }
+ ks[idx] = kv[1];
+ vs[idx] = kv[2];
+ size++;
+ }
+ item = CDR(list_val(item));
+ }
+
+ if (unused_size) {
+ /* the key tuple is embedded in the heap;
+ * write a bignum header to skip the unused key slots.
+ */
+ /* release values as normal since they are on the top of the heap */
+
+ ks[size] = make_pos_bignum_header(unused_size - 1);
+ HRelease(p, vs + size + unused_size, vs + size);
+ }
+
+ *thp = make_arityval(size);
+ mp->size = size;
+ return res;
+}
+
+#define swizzle32(D,S) \
+ do { \
+ (D) = ((S) & 0x0000000f) << 28 | ((S) & 0x000000f0) << 20 \
+ | ((S) & 0x00000f00) << 12 | ((S) & 0x0000f000) << 4 \
+ | ((S) & 0x000f0000) >> 4 | ((S) & 0x00f00000) >> 12 \
+ | ((S) & 0x0f000000) >> 20 | ((S) & 0xf0000000) >> 28; \
+ } while(0)
+
+#define maskval(V,L) (((V) >> ((7 - (L))*4)) & 0xf)
+#define cdepth(V1,V2) (hashmap_clz((V1) ^ (V2)) >> 2)
+
+static Eterm hashmap_from_validated_list(Process *p, Eterm list, Uint size) {
+ Eterm item = list;
+ Eterm *hp;
+ Eterm *kv, res;
+ Eterm tmp[2];
+ Uint32 sw, hx;
+ Uint ix = 0;
+ hxnode_t *hxns;
+ ErtsHeapFactory factory;
+
+ ASSERT(size > 0);
+
+ hp = HAlloc(p, (2 * size));
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, size * sizeof(hxnode_t));
+
+ while(is_list(item)) {
+ res = CAR(list_val(item));
+ kv = tuple_val(res);
+ hx = hashmap_restore_hash(tmp,0,kv[1]);
+ swizzle32(sw,hx);
+ hxns[ix].hx = sw;
+ hxns[ix].val = CONS(hp, kv[1], kv[2]); hp += 2;
+ hxns[ix].skip = 1; /* will be reassigned in from_array */
+ hxns[ix].i = ix;
+ ix++;
+ item = CDR(list_val(item));
+ }
+
+ factory.p = p;
+ res = hashmap_from_unsorted_array(&factory, hxns, size, 0);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ if (hashmap_size(res) <= MAP_SMALL_MAP_LIMIT) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv, *ks, *vs;
+ flatmap_t *mp;
+ Eterm keys;
+ Uint n = hashmap_size(res);
+
+ /* build flat structure */
+ hp = HAlloc(p, 3 + 1 + (2 * n));
keys = make_tuple(hp);
- *hp++ = make_arityval(size);
+ *hp++ = make_arityval(n);
ks = hp;
- hp += size;
- mp = (map_t*)hp;
- res = make_map(mp);
+ hp += n;
+ mp = (flatmap_t*)hp;
hp += MAP_HEADER_SIZE;
vs = hp;
mp->thing_word = MAP_HEADER;
- mp->size = size; /* set later, might shrink*/
+ mp->size = n;
mp->keys = keys;
- if (size == 0)
- BIF_RET(res);
+ hashmap_iterator_init(&wstack, res, 0);
- item = BIF_ARG_1;
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ *ks++ = CAR(kv);
+ *vs++ = CDR(kv);
+ }
- /* first entry */
- kv = tuple_val(CAR(list_val(item)));
- ks[0] = kv[1];
- vs[0] = kv[2];
- size = 1;
- item = CDR(list_val(item));
+ /* it cannot have multiple keys */
+ erts_validate_and_sort_flatmap(mp);
- /* insert sort key/value pairs */
- while(is_list(item)) {
+ DESTROY_WSTACK(wstack);
+ return make_flatmap(mp);
+ }
- kv = tuple_val(CAR(list_val(item)));
-
- /* compare ks backwards
- * idx represent word index to be written (hole position).
- * We cannot copy the elements when searching since we might
- * have an equal key. So we search for just the index first =(
- *
- * It is perhaps faster to move the values in the first pass.
- * Check for uniqueness during insert phase and then have a
- * second phace compacting the map if duplicates are found
- * during insert. .. or do someother sort .. shell-sort perhaps.
- */
+ return res;
+}
- idx = size;
+Eterm erts_hashmap_from_array(ErtsHeapFactory* factory, Eterm *leafs, Uint n,
+ int reject_dupkeys) {
+ Uint32 sw, hx;
+ Uint ix;
+ hxnode_t *hxns;
+ Eterm res;
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(hxnode_t));
+
+ for (ix = 0; ix < n; ix++) {
+ hx = hashmap_make_hash(*leafs);
+ swizzle32(sw,hx);
+ hxns[ix].hx = sw;
+ hxns[ix].val = make_list(leafs);
+ hxns[ix].skip = 1;
+ hxns[ix].i = ix;
+ leafs += 2;
+ }
- while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; }
+ res = hashmap_from_unsorted_array(factory, hxns, n, reject_dupkeys);
- if (c == 0) {
- /* last compare was equal,
- * i.e. we have to release memory
- * and overwrite that key/value
- */
- ks[idx-1] = kv[1];
- vs[idx-1] = kv[2];
- unused_size++;
- } else {
- Uint i = size;
- while(i > idx) {
- ks[i] = ks[i-1];
- vs[i] = vs[i-1];
- i--;
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+
+ return res;
+}
+
+
+Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n,
+ Eterm key, Eterm value) {
+ Uint32 sw, hx;
+ Uint i,sz;
+ hxnode_t *hxns;
+ ErtsHeapFactory factory;
+ Eterm *hp, res;
+
+ sz = (key == THE_NON_VALUE) ? n : (n + 1);
+ ASSERT(sz > MAP_SMALL_MAP_LIMIT);
+ hp = HAlloc(p, 2 * sz);
+
+ /* create tmp hx values and leaf ptrs */
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, sz * sizeof(hxnode_t));
+
+ for(i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1; /* will be reassigned in from_array */
+ hxns[i].i = i;
+ }
+
+ if (key != THE_NON_VALUE) {
+ hx = hashmap_make_hash(key);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, key, value); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
+ }
+
+ factory.p = p;
+ res = hashmap_from_unsorted_array(&factory, hxns, sz, 0);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ return res;
+}
+
+static Eterm hashmap_from_unsorted_array(ErtsHeapFactory* factory,
+ hxnode_t *hxns, Uint n,
+ int reject_dupkeys) {
+ Uint jx = 0, ix = 0, lx, cx;
+ Eterm res;
+
+ if (n == 0) {
+ Eterm *hp;
+ hp = erts_produce_heap(factory, 2, 0);
+ hp[0] = MAP_HEADER_HAMT_HEAD_BITMAP(0);
+ hp[1] = 0;
+
+ return make_hashmap(hp);
+ }
+
+ /* sort and compact array (remove non-unique entries) */
+ qsort(hxns, n, sizeof(hxnode_t), (int (*)(const void *, const void *)) hxnodecmp);
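+
+ /* e.g. if the same key appears with original indices i = 0 and i = 2,
+ * only the entry with the highest index survives the compaction below,
+ * i.e. the last occurrence in the input list wins. */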
+
+ ix = 0, cx = 0;
+ while(ix < n - 1) {
+ if (hxns[ix].hx == hxns[ix+1].hx) {
+
+ /* find region of equal hash values */
+ jx = ix + 1;
+ while(jx < n && hxns[ix].hx == hxns[jx].hx) { jx++; }
+ /* find the correct key for each group of equal hashes in the region
+ * (the last occurrence in the input list wins; the region is now hash
+ * sorted, so we pick the entry with the highest original index instead) */
+
+ /* re-sort the region by key instead of by hash value */
+
+ qsort(&hxns[ix], jx - ix, sizeof(hxnode_t),
+ (int (*)(const void *, const void *)) hxnodecmpkey);
+
+ while(ix < jx) {
+ lx = ix;
+ while(ix < jx && EQ(CAR(list_val(hxns[ix].val)), CAR(list_val(hxns[lx].val)))) {
+ if (reject_dupkeys)
+ return THE_NON_VALUE;
+
+ if (hxns[ix].i > hxns[lx].i) {
+ lx = ix;
+ }
+ ix++;
}
- ks[idx] = kv[1];
- vs[idx] = kv[2];
- size++;
+ hxns[cx].hx = hxns[lx].hx;
+ hxns[cx].val = hxns[lx].val;
+ cx++;
}
- item = CDR(list_val(item));
+ ix = jx;
+ continue;
+ }
+ if (ix > cx) {
+ hxns[cx].hx = hxns[ix].hx;
+ hxns[cx].val = hxns[ix].val;
}
+ cx++;
+ ix++;
+ }
- if (unused_size) {
- /* the key tuple is embedded in the heap
- * write a bignum to clear it.
- */
- /* release values as normal since they are on the top of the heap */
+ if (ix < n) {
+ hxns[cx].hx = hxns[ix].hx;
+ hxns[cx].val = hxns[ix].val;
+ cx++;
+ }
- ks[size] = make_pos_bignum_header(unused_size - 1);
- HRelease(BIF_P, vs + size + unused_size, vs + size);
- }
+ if (cx > 1) {
+ /* recursive decompose array */
+ res = hashmap_from_sorted_unique_array(factory, hxns, cx, 0);
+ } else {
+ Eterm *hp;
- *thp = make_arityval(size);
- mp->size = size;
- BIF_RET(res);
+ /* we only have one item, either because n was 1 or
+ * because we had multiples of the same key.
+ *
+ * hash value has been swizzled, need to drag it down to get the
+ * correct slot. */
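+ /* e.g. a swizzled hx of 0x87654321 gives (hx >> 0x1c) & 0xf == 0x8,
+ * which is nibble 0 of the original hash, i.e. the root slot. */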
+
+ hp = erts_produce_heap(factory, HAMT_HEAD_BITMAP_SZ(1), 0);
+ hp[0] = MAP_HEADER_HAMT_HEAD_BITMAP(1 << ((hxns[0].hx >> 0x1c) & 0xf));
+ hp[1] = 1;
+ hp[2] = hxns[0].val;
+ res = make_hashmap(hp);
}
-error:
+ return res;
+}
- BIF_ERROR(BIF_P, BADARG);
+static Eterm hashmap_from_sorted_unique_array(ErtsHeapFactory* factory,
+ hxnode_t *hxns, Uint n, int lvl) {
+ Eterm res = NIL;
+ Uint i,ix,jx,elems;
+ Uint32 sw, hx;
+ Eterm val;
+ Eterm th[2];
+ hxnode_t *tmp;
+
+ ASSERT(lvl < 32);
+ ix = 0;
+ elems = 1;
+ while (ix < n - 1) {
+ if (hxns[ix].hx == hxns[ix+1].hx) {
+ jx = ix + 1;
+ while (jx < n && hxns[ix].hx == hxns[jx].hx) { jx++; }
+ tmp = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, ((jx - ix)) * sizeof(hxnode_t));
+
+ for(i = 0; i < jx - ix; i++) {
+ val = hxns[i + ix].val;
+ hx = hashmap_restore_hash(th, lvl + 8, CAR(list_val(val)));
+ swizzle32(sw,hx);
+ tmp[i].hx = sw;
+ tmp[i].val = val;
+ tmp[i].i = i;
+ tmp[i].skip = 1;
+ }
+
+ qsort(tmp, jx - ix, sizeof(hxnode_t), (int (*)(const void *, const void *)) hxnodecmp);
+
+ hxns[ix].skip = jx - ix;
+ hxns[ix].val = hashmap_from_sorted_unique_array(factory, tmp, jx - ix, lvl + 8);
+ erts_free(ERTS_ALC_T_TMP, (void *) tmp);
+ ix = jx;
+ if (ix < n) { elems++; }
+ continue;
+ }
+ hxns[ix].skip = 1;
+ elems++;
+ ix++;
+ }
+
+ res = hashmap_from_chunked_array(factory, hxns, elems, !lvl);
+
+ ERTS_FACTORY_HOLE_CHECK(factory);
+
+ return res;
}
-/* maps:is_key/2
- */
+#define HALLOC_EXTRA 200
+static Eterm hashmap_from_chunked_array(ErtsHeapFactory *factory,
+ hxnode_t *hxns, Uint n, int is_root) {
+ Uint ix, d, dn, dc, slot, elems;
+ Uint32 v, vp, vn, hdr;
+ Uint bp, sz;
+ DECLARE_ESTACK(stack);
+ Eterm res = NIL, *hp = NULL, *nhp;
-BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
- if (is_map(BIF_ARG_2)) {
- Eterm *ks, key;
- map_t *mp;
- Uint n,i;
+ ASSERT(n > 1);
- mp = (map_t*)map_val(BIF_ARG_2);
- key = BIF_ARG_1;
- n = map_get_size(mp);
- ks = map_get_keys(mp);
+ /* push initial nodes on the stack,
+ * this is the starting depth */
- if (n == 0)
- BIF_RET(am_false);
+ ix = 0;
+ d = 0;
+ vp = hxns[ix].hx;
+ v = hxns[ix + hxns[ix].skip].hx;
- if (is_immed(key)) {
- for( i = 0; i < n; i++) {
- if (ks[i] == key) {
- BIF_RET(am_true);
- }
+ ASSERT(vp > v);
+ slot = maskval(vp,d);
+
+ while(slot == maskval(v,d)) {
+ ESTACK_PUSH(stack, 1 << slot);
+ d++;
+ slot = maskval(vp,d);
+ }
+
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ dc = 7;
+ /* build collision nodes */
+ while (dc > d) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(vp,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
+ }
+ }
+
+ ESTACK_PUSH(stack, res);
+ ESTACK_PUSH(stack, 1 << slot);
+
+ /* all of the other nodes .. */
+ elems = n - 2; /* remove first and last elements */
+ while(elems--) {
+ hdr = ESTACK_POP(stack);
+ ix = ix + hxns[ix].skip;
+
+ /* determine if node or subtree should be built by looking
+ * at the next value. */
+
+ vn = hxns[ix + hxns[ix].skip].hx;
+ dn = cdepth(v,vn);
+ ASSERT(v > vn);
+
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ int wat = (d > dn) ? d : dn;
+ dc = 7;
+ /* build collision nodes */
+ while (dc > wat) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(v,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
}
}
- for( i = 0; i < n; i++) {
- if (EQ(ks[i], key)) {
- BIF_RET(am_true);
+ /* next depth is higher (implies collision) */
+ if (d < dn) {
+ /* hdr is the popped one initially */
+ while(d < dn) {
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ ESTACK_PUSH(stack, hdr | bp);
+ d++;
+ hdr = 0; /* clear hdr for all other collisions */
}
+
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ /* no more collisions */
+ ESTACK_PUSH(stack,res);
+ ESTACK_PUSH(stack,bp);
+ } else if (d == dn) {
+ /* no collisions at all */
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ ESTACK_PUSH(stack,res);
+ ESTACK_PUSH(stack,hdr | bp);
+ } else {
+ /* dn < d, we have a drop and we are done
+ * build nodes and subtree */
+ while (dn != d) {
+ slot = maskval(v, d);
+ bp = 1 << slot;
+ /* OR bitposition before sz calculation to handle
+ * redundant collisions */
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(sz), HALLOC_EXTRA);
+ nhp = hp;
+ *hp++ = (hdr == 0xffff) ? MAP_HEADER_HAMT_NODE_ARRAY : MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ *hp++ = res; sz--;
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+ ASSERT((hp - nhp) < 18);
+ res = make_hashmap(nhp);
+
+ /* we need to pop the next hdr and push it back if we don't consume it */
+
+ hdr = ESTACK_POP(stack);
+ d--;
+ }
+ ESTACK_PUSH(stack, res);
+ ESTACK_PUSH(stack, hdr);
}
- BIF_RET(am_false);
+
+ vp = v;
+ v = vn;
+ d = dn;
+ ERTS_FACTORY_HOLE_CHECK(factory);
+ }
+
+ /* v and vp are reused from above */
+ dn = cdepth(vp,v);
+ ix = ix + hxns[ix].skip;
+ res = hxns[ix].val;
+
+ if (hxns[ix].skip > 1) {
+ dc = 7;
+ /* build collision nodes */
+ while (dc > dn) {
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(1), HALLOC_EXTRA);
+ hp[0] = MAP_HEADER_HAMT_NODE_BITMAP(1 << maskval(v,dc));
+ hp[1] = res;
+ res = make_hashmap(hp);
+ dc--;
+ }
+ }
+
+ hdr = ESTACK_POP(stack);
+ /* pop remaining subtree if any */
+ while (dn) {
+ slot = maskval(v, dn);
+ bp = 1 << slot;
+ /* OR bitposition before sz calculation to handle
+ * redundant collisions */
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, HAMT_NODE_BITMAP_SZ(sz), HALLOC_EXTRA);
+ nhp = hp;
+ *hp++ = (hdr == 0xffff) ? MAP_HEADER_HAMT_NODE_ARRAY : MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ *hp++ = res; sz--;
+
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+ res = make_hashmap(nhp);
+ hdr = ESTACK_POP(stack);
+ dn--;
+ }
+
+ /* and finally the root .. */
+
+ slot = maskval(v, dn);
+ bp = 1 << slot;
+ hdr |= bp;
+ sz = hashmap_bitcount(hdr);
+ hp = erts_produce_heap(factory, sz + /* hdr + item */ (is_root ? 2 : 1), 0);
+ nhp = hp;
+
+ if (is_root) {
+ *hp++ = (hdr == 0xffff) ? MAP_HEADER_HAMT_HEAD_ARRAY : MAP_HEADER_HAMT_HEAD_BITMAP(hdr);
+ *hp++ = n;
+ } else {
+ *hp++ = (hdr == 0xffff) ? MAP_HEADER_HAMT_NODE_ARRAY : MAP_HEADER_HAMT_NODE_BITMAP(hdr);
+ }
+
+ *hp++ = res; sz--;
+ while (sz--) { *hp++ = ESTACK_POP(stack); }
+
+ res = make_hashmap(nhp);
+
+ ASSERT(ESTACK_COUNT(stack) == 0);
+ DESTROY_ESTACK(stack);
+ ERTS_FACTORY_HOLE_CHECK(factory);
+ return res;
+}
+#undef HALLOC_EXTRA
+
+static int hxnodecmpkey(hxnode_t *a, hxnode_t *b) {
+ return CMP_TERM(CAR(list_val(a->val)), CAR(list_val(b->val)));
+}
+
+static int hxnodecmp(hxnode_t *a, hxnode_t *b) {
+ if (a->hx < b->hx)
+ return 1;
+ else if (a->hx == b->hx)
+ return 0;
+ else
+ return -1;
+}
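+
+/* Note: hxnodecmp sorts by descending swizzled hash (it returns 1 when
+ * a->hx < b->hx), which hashmap_from_chunked_array relies on (see the
+ * ASSERT(vp > v) above), while hxnodecmpkey orders by key term order. */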
+
+/* maps:is_key/2 */
+
+BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ BIF_RET(erts_maps_get(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
}
BIF_ERROR(BIF_P, BADARG);
}
-/* maps:keys/1
- */
+/* maps:keys/1 */
BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Eterm *hp, *ks, res = NIL;
- map_t *mp;
+ flatmap_t *mp;
Uint n;
- mp = (map_t*)map_val(BIF_ARG_1);
- n = map_get_size(mp);
+ mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ n = flatmap_get_size(mp);
if (n == 0)
BIF_RET(res);
hp = HAlloc(BIF_P, (2 * n));
- ks = map_get_keys(mp);
+ ks = flatmap_get_keys(mp);
while(n--) {
res = CONS(hp, ks[n], res); hp += 2;
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_keys(BIF_P, BIF_ARG_1));
}
BIF_ERROR(BIF_P, BADARG);
}
-/* maps:merge/2
- */
+/* maps:merge/2 */
BIF_RETTYPE maps_merge_2(BIF_ALIST_2) {
- if (is_map(BIF_ARG_1) && is_map(BIF_ARG_2)) {
- Eterm *hp,*thp;
- Eterm tup;
- Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2;
- map_t *mp1,*mp2,*mp_new;
- Uint n1,n2,i1,i2,need,unused_size=0;
- int c = 0;
-
- mp1 = (map_t*)map_val(BIF_ARG_1);
- mp2 = (map_t*)map_val(BIF_ARG_2);
- n1 = map_get_size(mp1);
- n2 = map_get_size(mp2);
-
- need = MAP_HEADER_SIZE + 1 + 2*(n1 + n2);
-
- hp = HAlloc(BIF_P, need);
- thp = hp;
- tup = make_tuple(thp);
- ks = hp + 1; hp += 1 + n1 + n2;
- mp_new = (map_t*)hp; hp += MAP_HEADER_SIZE;
- vs = hp; hp += n1 + n2;
-
- mp_new->thing_word = MAP_HEADER;
- mp_new->size = 0;
- mp_new->keys = tup;
-
- i1 = 0; i2 = 0;
- ks1 = map_get_keys(mp1);
- vs1 = map_get_values(mp1);
- ks2 = map_get_keys(mp2);
- vs2 = map_get_values(mp2);
-
- while(i1 < n1 && i2 < n2) {
- c = CMP_TERM(ks1[i1],ks2[i2]);
- if ( c == 0) {
- /* use righthand side arguments map value,
- * but advance both maps */
- *ks++ = ks2[i2];
- *vs++ = vs2[i2];
- i1++, i2++, unused_size++;
- } else if ( c < 0) {
- *ks++ = ks1[i1];
- *vs++ = vs1[i1];
- i1++;
- } else {
- *ks++ = ks2[i2];
- *vs++ = vs2[i2];
- i2++;
- }
+ if (is_flatmap(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_2)) {
+ BIF_RET(flatmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2));
+ } else if (is_hashmap(BIF_ARG_2)) {
+ /* Will always become a tree */
+ BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0));
+ }
+ } else if (is_hashmap(BIF_ARG_1)) {
+ if (is_hashmap(BIF_ARG_2)) {
+ BIF_RET(hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2));
+ } else if (is_flatmap(BIF_ARG_2)) {
+ /* Will always become a tree */
+ BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1));
}
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB) {
+ Eterm *hp,*thp;
+ Eterm tup;
+ Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2;
+ flatmap_t *mp1,*mp2,*mp_new;
+ Uint n,n1,n2,i1,i2,need,unused_size=0;
+ int c = 0;
+
+ mp1 = (flatmap_t*)flatmap_val(nodeA);
+ mp2 = (flatmap_t*)flatmap_val(nodeB);
+ n1 = flatmap_get_size(mp1);
+ n2 = flatmap_get_size(mp2);
+
+ need = MAP_HEADER_SIZE + 1 + 2*(n1 + n2);
- /* copy remaining */
- while (i1 < n1) {
+ hp = HAlloc(p, need);
+ thp = hp;
+ tup = make_tuple(thp);
+ ks = hp + 1; hp += 1 + n1 + n2;
+ mp_new = (flatmap_t*)hp; hp += MAP_HEADER_SIZE;
+ vs = hp; hp += n1 + n2;
+
+ mp_new->thing_word = MAP_HEADER;
+ mp_new->size = 0;
+ mp_new->keys = tup;
+
+ i1 = 0; i2 = 0;
+ ks1 = flatmap_get_keys(mp1);
+ vs1 = flatmap_get_values(mp1);
+ ks2 = flatmap_get_keys(mp2);
+ vs2 = flatmap_get_values(mp2);
+
+ while(i1 < n1 && i2 < n2) {
+ c = CMP_TERM(ks1[i1],ks2[i2]);
+ if (c == 0) {
+ /* use righthand side arguments map value,
+ * but advance both maps */
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i1++, i2++, unused_size++;
+ } else if (c < 0) {
*ks++ = ks1[i1];
*vs++ = vs1[i1];
i1++;
- }
-
- while (i2 < n2) {
+ } else {
*ks++ = ks2[i2];
*vs++ = vs2[i2];
i2++;
}
+ }
- if (unused_size) {
- /* the key tuple is embedded in the heap, write a bignum to clear it.
- *
- * release values as normal since they are on the top of the heap
- * size = n1 + n1 - unused_size
- */
+ /* copy remaining */
+ while (i1 < n1) {
+ *ks++ = ks1[i1];
+ *vs++ = vs1[i1];
+ i1++;
+ }
- *ks = make_pos_bignum_header(unused_size - 1);
- HRelease(BIF_P, vs + unused_size, vs);
- }
+ while (i2 < n2) {
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i2++;
+ }
- mp_new->size = n1 + n2 - unused_size;
- *thp = make_arityval(n1 + n2 - unused_size);
+ if (unused_size) {
+ /* the key tuple is embedded in the heap, write a bignum to clear it.
+ *
+ * release values as normal since they are on the top of the heap
+ * size = n1 + n2 - unused_size
+ */
- BIF_RET(make_map(mp_new));
+ *ks = make_pos_bignum_header(unused_size - 1);
+ HRelease(p, vs + unused_size, vs);
}
- BIF_ERROR(BIF_P, BADARG);
-}
-/* maps:new/2
- */
-BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
- Eterm* hp;
- Eterm tup;
- map_t *mp;
+ n = n1 + n2 - unused_size;
+ *thp = make_arityval(n);
- hp = HAlloc(BIF_P, (MAP_HEADER_SIZE + 1));
- tup = make_tuple(hp);
- *hp++ = make_arityval(0);
+ /* Reshape map to a hashmap if the map exceeds the limit */
- mp = (map_t*)hp;
- mp->thing_word = MAP_HEADER;
- mp->size = 0;
- mp->keys = tup;
+ if (n > MAP_SMALL_MAP_LIMIT) {
+ Uint32 hx,sw;
+ Uint i;
+ Eterm res;
+ hxnode_t *hxns;
+ ErtsHeapFactory factory;
- BIF_RET(make_map(mp));
-}
+ ks = flatmap_get_keys(mp_new);
+ vs = flatmap_get_values(mp_new);
-/* maps:put/3
- */
+ hp = HAlloc(p, 2 * n);
-Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
- Sint n,i;
- Sint c = 0;
- Eterm* hp, *shp;
- Eterm *ks,*vs, res, tup;
- map_t *mp = (map_t*)map_val(map);
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP,n * sizeof(hxnode_t));
- n = map_get_size(mp);
+ for (i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
+ }
- if (n == 0) {
- hp = HAlloc(p, MAP_HEADER_SIZE + 1 + 2);
- tup = make_tuple(hp);
- *hp++ = make_arityval(1);
- *hp++ = key;
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = 1;
- *hp++ = tup;
- *hp++ = value;
+ factory.p = p;
+ res = hashmap_from_unsorted_array(&factory, hxns, n, 0);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
return res;
}
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ mp_new->size = n;
+
+ return make_flatmap(mp_new);
+}
+
+static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args) {
+ Eterm *ks, *vs, *hp, res;
+ flatmap_t *mp;
+ Uint n, i;
+ hxnode_t *hxns;
+ Uint32 sw, hx;
+ ErtsHeapFactory factory;
+
+ /* convert flat to tree */
+
+ ASSERT(is_flatmap(flat));
+ ASSERT(is_hashmap(tree));
+
+ mp = (flatmap_t*)flatmap_val(flat);
+ n = flatmap_get_size(mp);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
- /* only allocate for values,
- * assume key-tuple will be intact
+ hp = HAlloc(p, 2 * n);
+
+ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(hxnode_t));
+
+ for (i = 0; i < n; i++) {
+ hx = hashmap_make_hash(ks[i]);
+ swizzle32(sw,hx);
+ hxns[i].hx = sw;
+ hxns[i].val = CONS(hp, ks[i], vs[i]); hp += 2;
+ hxns[i].skip = 1;
+ hxns[i].i = i;
+ }
+
+ factory.p = p;
+ res = hashmap_from_unsorted_array(&factory, hxns, n, 0);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) hxns);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+
+ return swap_args ? hashmap_merge(p, tree, res) : hashmap_merge(p, res, tree);
+}
+
+#define HALLOC_EXTRA 200
+
+static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB) {
+#define PSTACK_TYPE struct HashmapMergePStackType
+ struct HashmapMergePStackType {
+ Eterm *srcA, *srcB;
+ Uint32 abm, bbm, rbm; /* node bitmaps */
+ int keepA;
+ int ix;
+ Eterm array[16];
+ };
+ PSTACK_DECLARE(s, 4);
+ struct HashmapMergePStackType* sp = PSTACK_PUSH(s);
+ Eterm *hp, *nhp;
+ Eterm hdrA, hdrB;
+ Eterm th[2];
+ Uint32 ahx, bhx;
+ Uint size; /* total key-value counter */
+ int keepA = 0;
+ unsigned lvl = 0;
+ Eterm res = THE_NON_VALUE;
+
+ /*
+ * Strategy: Do depth-first traversal of both trees (at the same time)
+ * and merge each pair of nodes.
*/
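+
+    /* As a sketch: when two bitmap nodes meet, the result bitmap is
+     * abm | bbm; slots present only in A are copied from A, slots present
+     * only in B from B, and slots present in both are merged recursively
+     * (the "bit clash" case below). */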
- hp = HAlloc(p, MAP_HEADER_SIZE + n);
- shp = hp; /* save hp, used if optimistic update fails */
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = n;
- *hp++ = mp->keys;
-
- if (is_immed(key)) {
- for( i = 0; i < n; i ++) {
- if (ks[i] == key) {
- *hp++ = value;
- vs++;
- c = 1;
+ {
+ hashmap_head_t* a = (hashmap_head_t*) hashmap_val(nodeA);
+ hashmap_head_t* b = (hashmap_head_t*) hashmap_val(nodeB);
+ size = a->size + b->size;
+ }
+
+recurse:
+
+ if (primary_tag(nodeA) == TAG_PRIMARY_BOXED &&
+ primary_tag(nodeB) == TAG_PRIMARY_LIST) {
+ /* Avoid implementing this combination by switching places */
+ Eterm tmp = nodeA;
+ nodeA = nodeB;
+ nodeB = tmp;
+ keepA = !keepA;
+ }
+
+ switch (primary_tag(nodeA)) {
+ case TAG_PRIMARY_LIST: {
+ sp->srcA = list_val(nodeA);
+ switch (primary_tag(nodeB)) {
+ case TAG_PRIMARY_LIST: { /* LEAF + LEAF */
+ sp->srcB = list_val(nodeB);
+
+ if (EQ(CAR(sp->srcA), CAR(sp->srcB))) {
+ --size;
+ res = keepA ? nodeA : nodeB;
} else {
- *hp++ = *vs++;
+ ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA));
+ bhx = hashmap_restore_hash(th, lvl, CAR(sp->srcB));
+ sp->abm = 1 << hashmap_index(ahx);
+ sp->bbm = 1 << hashmap_index(bhx);
+
+ sp->srcA = &nodeA;
+ sp->srcB = &nodeB;
}
+ break;
}
- } else {
- for( i = 0; i < n; i ++) {
- if (EQ(ks[i], key)) {
- *hp++ = value;
- vs++;
- c = 1;
- } else {
- *hp++ = *vs++;
+ case TAG_PRIMARY_BOXED: { /* LEAF + NODE */
+ sp->srcB = boxed_val(nodeB);
+ ASSERT(is_header(*sp->srcB));
+ hdrB = *sp->srcB++;
+
+ ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA));
+ sp->abm = 1 << hashmap_index(ahx);
+ sp->srcA = &nodeA;
+ switch(hdrB & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: sp->srcB++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sp->bbm = 0xffff;
+ break;
+
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sp->bbm = MAP_HEADER_VAL(hdrB);
+ break;
+
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
+ break;
}
+ break;
+ }
+ default:
+ erl_exit(1, "bad primary tag %ld\r\n", nodeB);
}
+ break;
}
+ case TAG_PRIMARY_BOXED: { /* NODE + NODE */
+ sp->srcA = boxed_val(nodeA);
+ hdrA = *sp->srcA++;
+ ASSERT(is_header(hdrA));
+ switch (hdrA & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: sp->srcA++;
+ case HAMT_SUBTAG_NODE_ARRAY: {
+ ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED);
+ sp->abm = 0xffff;
+ sp->srcB = boxed_val(nodeB);
+ hdrB = *sp->srcB++;
+ ASSERT(is_header(hdrB));
+ switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: sp->srcB++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sp->bbm = 0xffff;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sp->bbm = MAP_HEADER_VAL(hdrB);
+ break;
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
+ }
+ break;
+ }
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++;
+ case HAMT_SUBTAG_NODE_BITMAP: {
+ ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED);
+ sp->abm = MAP_HEADER_VAL(hdrA);
+ sp->srcB = boxed_val(nodeB);
+ hdrB = *sp->srcB++;
+ ASSERT(is_header(hdrB));
+ switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: sp->srcB++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sp->bbm = 0xffff;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sp->bbm = MAP_HEADER_VAL(hdrB);
+ break;
- if (c)
- return res;
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
+ }
+ break;
+ }
+ default:
+ erl_exit(1, "bad primary tag %ld\r\n", nodeA);
+ }
+ break;
+ }
+ default:
+ erl_exit(1, "bad primary tag %ld\r\n", nodeA);
+ }
- /* need to make a new tuple,
- * use old hp since it needs to be recreated anyway.
- */
- tup = make_tuple(shp);
- *shp++ = make_arityval(n+1);
+ for (;;) {
+ if (is_value(res)) { /* We have a complete (sub-)tree or leaf */
+ if (lvl == 0)
+ break;
- hp = HAlloc(p, 3 + n + 1);
- res = make_map(hp);
- *hp++ = MAP_HEADER;
- *hp++ = n + 1;
- *hp++ = tup;
+ /* Pop from stack and continue build parent node */
+ lvl--;
+ sp = PSTACK_POP(s);
+ sp->array[sp->ix++] = res;
+ res = THE_NON_VALUE;
+ if (sp->rbm) {
+ sp->srcA++;
+ sp->srcB++;
+ keepA = sp->keepA;
+ }
+ } else { /* Start build a node */
+ sp->ix = 0;
+ sp->rbm = sp->abm | sp->bbm;
+ ASSERT(!(sp->rbm == 0 && lvl > 0));
+ }
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ while (sp->rbm) {
+ Uint32 next = sp->rbm & (sp->rbm-1);
+ Uint32 bit = sp->rbm ^ next;
+ sp->rbm = next;
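+	    /* e.g. rbm = 0x0034 gives bit = 0x0004 and next = 0x0030;
+	     * each turn peels off the lowest set bit of rbm. */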
+ if (sp->abm & bit) {
+ if (sp->bbm & bit) {
+ /* Bit clash. Push and resolve by recursive merge */
+ if (sp->rbm) {
+ sp->keepA = keepA;
+ }
+ nodeA = *sp->srcA;
+ nodeB = *sp->srcB;
+ lvl++;
+ sp = PSTACK_PUSH(s);
+ goto recurse;
+ } else {
+ sp->array[sp->ix++] = *sp->srcA++;
+ }
+ } else {
+ ASSERT(sp->bbm & bit);
+ sp->array[sp->ix++] = *sp->srcB++;
+ }
+ }
+
+ ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm));
+ if (lvl == 0) {
+ nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY
+ : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm));
+ *hp++ = size;
+ } else {
+ nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = (sp->ix == 16 ? make_arityval(16)
+ : MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm));
+ }
+ memcpy(hp, sp->array, sp->ix * sizeof(Eterm));
+ res = make_boxed(nhp);
+ }
+ PSTACK_DESTROY(s);
+ return res;
+}
- ASSERT(n >= 0);
+static int hash_cmp(Uint32 ha, Uint32 hb)
+{
+ int i;
+ for (i=0; i<8; i++) {
+ int cmp = (int)(ha & 0xF) - (int)(hb & 0xF);
+ if (cmp)
+ return cmp;
+ ha >>= 4;
+ hb >>= 4;
+ }
+ return 0;
+}
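+
+/* hash_cmp compares the two hash values nibble by nibble starting at the
+ * low end, i.e. in the order the nibbles are consumed as slot indices when
+ * walking down the tree; e.g. hash_cmp(0x21, 0x31) sees equal low nibbles
+ * first and then decides on 2 vs 3. */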
- /* copy map in order */
- while (n && ((c = CMP_TERM(*ks, key)) < 0)) {
- *shp++ = *ks++;
- *hp++ = *vs++;
- n--;
+int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp)
+{
+ Eterm th[2];
+ unsigned lvl = 0;
+
+ if (ap && bp) {
+ ASSERT(CMP_TERM(CAR(ap), CAR(bp)) != 0);
+ for (;;) {
+ Uint32 ha = hashmap_restore_hash(th, lvl, CAR(ap));
+ Uint32 hb = hashmap_restore_hash(th, lvl, CAR(bp));
+ int cmp = hash_cmp(ha, hb);
+ if (cmp)
+ return cmp;
+ lvl += 8;
+ }
}
+ return ap ? -1 : 1;
+}
- *shp++ = key;
- *hp++ = value;
+/* maps:new/0 */
- ASSERT(n >= 0);
+BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
+ Eterm* hp;
+ Eterm tup;
+ flatmap_t *mp;
- while(n--) {
- *shp++ = *ks++;
- *hp++ = *vs++;
- }
- /* we have one word remaining
- * this will work out fine once we get the size word
- * in the header.
- */
- *shp = make_pos_bignum_header(0);
- return res;
+ hp = HAlloc(BIF_P, (MAP_HEADER_SIZE + 1));
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(0);
+
+ mp = (flatmap_t*)hp;
+ mp->thing_word = MAP_HEADER;
+ mp->size = 0;
+ mp->keys = tup;
+
+ BIF_RET(make_flatmap(mp));
}
+/* maps:put/3 */
+
BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
if (is_map(BIF_ARG_3)) {
BIF_RET(erts_maps_put(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
@@ -609,81 +1366,87 @@ BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
BIF_ERROR(BIF_P, BADARG);
}
-/* maps:remove/3
- */
+/* maps:remove/2 */
int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) {
- Sint n;
- Uint need;
- Eterm *hp_start;
- Eterm *thp, *mhp;
- Eterm *ks, *vs, tup;
- map_t *mp = (map_t*)map_val(map);
+ Uint32 hx;
+ if (is_flatmap(map)) {
+ Sint n;
+ Uint need;
+ Eterm *hp_start;
+ Eterm *thp, *mhp;
+ Eterm *ks, *vs, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
+
+ n = flatmap_get_size(mp);
+
+ if (n == 0) {
+ *res = map;
+ return 1;
+ }
- n = map_get_size(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
- if (n == 0) {
- *res = map;
- return 1;
- }
+ /* Assume the key exists.
+ * Release the allocated heap if it does not.
+ * Allocate the key tuple first.
+ */
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ need = n + 1 - 1 + 3 + n - 1; /* tuple - 1 + map - 1 */
+ hp_start = HAlloc(p, need);
+ thp = hp_start;
+ mhp = thp + n; /* offset with tuple heap size */
- /* Assume key exists.
- * Release allocated if it didn't.
- * Allocate key tuple first.
- */
+ tup = make_tuple(thp);
+ *thp++ = make_arityval(n - 1);
- need = n + 1 - 1 + 3 + n - 1; /* tuple - 1 + map - 1 */
- hp_start = HAlloc(p, need);
- thp = hp_start;
- mhp = thp + n; /* offset with tuple heap size */
+ *res = make_flatmap(mhp);
+ *mhp++ = MAP_HEADER;
+ *mhp++ = n - 1;
+ *mhp++ = tup;
- tup = make_tuple(thp);
- *thp++ = make_arityval(n - 1);
-
- *res = make_map(mhp);
- *mhp++ = MAP_HEADER;
- *mhp++ = n - 1;
- *mhp++ = tup;
-
- if (is_immed(key)) {
- while (1) {
- if (*ks == key) {
- goto found_key;
- } else if (--n) {
- *mhp++ = *vs++;
- *thp++ = *ks++;
- } else
- break;
- }
- } else {
- while(1) {
- if (EQ(*ks, key)) {
- goto found_key;
- } else if (--n) {
- *mhp++ = *vs++;
- *thp++ = *ks++;
- } else
- break;
+ if (is_immed(key)) {
+ while (1) {
+ if (*ks == key) {
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
+ } else {
+ while(1) {
+ if (EQ(*ks, key)) {
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
}
- }
- /* Not found, remove allocated memory
- * and return previous map.
- */
- HRelease(p, hp_start + need, hp_start);
+ /* Not found, remove allocated memory
+ * and return previous map.
+ */
+ HRelease(p, hp_start + need, hp_start);
- *res = map;
- return 1;
+ *res = map;
+ return 1;
found_key:
- /* Copy rest of keys and values */
- if (--n) {
- sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
- sys_memcpy(thp, ks+1, n*sizeof(Eterm));
+ /* Copy rest of keys and values */
+ if (--n) {
+ sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
+ sys_memcpy(thp, ks+1, n*sizeof(Eterm));
+ }
+ return 1;
}
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ *res = hashmap_delete(p, hx, key, map);
return 1;
}
@@ -697,21 +1460,20 @@ BIF_RETTYPE maps_remove_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADARG);
}
-/* maps:update/3
- */
-
int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res) {
+ Uint32 hx;
+ if (is_flatmap(map)) {
Sint n,i;
Eterm* hp,*shp;
Eterm *ks,*vs;
- map_t *mp = (map_t*)map_val(map);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
- if ((n = map_get_size(mp)) == 0) {
+ if ((n = flatmap_get_size(mp)) == 0) {
return 0;
}
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
/* only allocate for values,
* assume key-tuple will be intact
@@ -749,10 +1511,147 @@ found_key:
vs++;
if (++i < n)
sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
- *res = make_map(shp);
+ *res = make_flatmap(shp);
return 1;
+ }
+
+ ASSERT(is_hashmap(map));
+ hx = hashmap_make_hash(key);
+ *res = erts_hashmap_insert(p, hx, key, value, map, 1);
+ if (is_value(*res))
+ return 1;
+
+ return 0;
}
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
+ Uint32 hx;
+ Eterm res;
+ if (is_flatmap(map)) {
+ Sint n,i;
+ Sint c = 0;
+ Eterm* hp, *shp;
+ Eterm *ks, *vs, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
+
+ n = flatmap_get_size(mp);
+
+ if (n == 0) {
+ hp = HAlloc(p, MAP_HEADER_SIZE + 1 + 2);
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(1);
+ *hp++ = key;
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = 1;
+ *hp++ = tup;
+ *hp++ = value;
+
+ return res;
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ /* only allocate for values,
+ * assume key-tuple will be intact
+ */
+
+ hp = HAlloc(p, MAP_HEADER_SIZE + n);
+ shp = hp; /* save hp, used if optimistic update fails */
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = n;
+ *hp++ = mp->keys;
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i ++) {
+ if (ks[i] == key) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ } else {
+ for( i = 0; i < n; i ++) {
+ if (EQ(ks[i], key)) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ }
+
+ if (c)
+ return res;
+
+ /* the map will grow */
+
+ if (n >= MAP_SMALL_MAP_LIMIT) {
+ HRelease(p, shp + MAP_HEADER_SIZE + n, shp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ res = erts_hashmap_from_ks_and_vs_extra(p,ks,vs,n,key,value);
+
+ return res;
+ }
+
+ /* still a small map. need to make a new tuple,
+ * use old hp since it needs to be recreated anyway. */
+
+ tup = make_tuple(shp);
+ *shp++ = make_arityval(n+1);
+
+ hp = HAlloc(p, 3 + n + 1);
+ res = make_flatmap(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = n + 1;
+ *hp++ = tup;
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ ASSERT(n >= 0);
+
+ /* copy map in order */
+ while (n && ((c = CMP_TERM(*ks, key)) < 0)) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ n--;
+ }
+
+ *shp++ = key;
+ *hp++ = value;
+
+ ASSERT(n >= 0);
+
+ while(n--) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ }
+ /* we have one word remaining
+ * this will work out fine once we get the size word
+ * in the header.
+ */
+ *shp = make_pos_bignum_header(0);
+ return res;
+ }
+ ASSERT(is_hashmap(map));
+
+ hx = hashmap_make_hash(key);
+ res = erts_hashmap_insert(p, hx, key, value, map, 0);
+ ASSERT(is_hashmap(res));
+
+ return res;
+}
+
+/* maps:update/3 */
+
BIF_RETTYPE maps_update_3(BIF_ALIST_3) {
if (is_map(BIF_ARG_3)) {
Eterm res;
@@ -764,38 +1663,845 @@ BIF_RETTYPE maps_update_3(BIF_ALIST_3) {
}
-/* maps:values/1
- */
+/* maps:values/1 */
BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
+ if (is_flatmap(BIF_ARG_1)) {
Eterm *hp, *vs, res = NIL;
- map_t *mp;
+ flatmap_t *mp;
Uint n;
- mp = (map_t*)map_val(BIF_ARG_1);
- n = map_get_size(mp);
+ mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+ n = flatmap_get_size(mp);
if (n == 0)
BIF_RET(res);
hp = HAlloc(BIF_P, (2 * n));
- vs = map_get_values(mp);
+ vs = flatmap_get_values(mp);
while(n--) {
res = CONS(hp, vs[n], res); hp += 2;
}
BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_values(BIF_P, BIF_ARG_1));
}
BIF_ERROR(BIF_P, BADARG);
}
-int erts_validate_and_sort_map(map_t* mp)
+static Eterm hashmap_to_list(Process *p, Eterm node) {
+ DECLARE_WSTACK(stack);
+ Eterm *hp, *kv;
+ Eterm res = NIL;
+
+ hp = HAlloc(p, hashmap_size(node) * (2 + 3));
+ hashmap_iterator_init(&stack, node, 0);
+ while ((kv=hashmap_iterator_next(&stack)) != NULL) {
+ Eterm tup = TUPLE2(hp, CAR(kv), CDR(kv));
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) {
+ Eterm hdr = *hashmap_val(node);
+ Uint sz;
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erl_exit(1, "bad header");
+ }
+
+ WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */
+ (UWord)(!reverse ? 0 : sz+1),
+ (UWord)node);
+}
+
+Eterm* hashmap_iterator_next(ErtsWStack* s) {
+ Eterm node, *ptr, hdr;
+ Uint32 sz;
+ Uint idx;
+
+ for (;;) {
+ ASSERT(!WSTACK_ISEMPTY((*s)));
+ node = (Eterm) WSTACK_POP((*s));
+ if (is_non_value(node)) {
+ return NULL;
+ }
+ idx = (Uint) WSTACK_POP((*s));
+ for (;;) {
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ptr++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ break;
+ default:
+ erl_exit(1, "bad header");
+ }
+
+ idx++;
+
+ if (idx <= sz) {
+ WSTACK_PUSH2((*s), (UWord)idx, (UWord)node);
+
+ if (is_list(ptr[idx])) {
+ return list_val(ptr[idx]);
+ }
+ ASSERT(is_boxed(ptr[idx]));
+ node = ptr[idx];
+ idx = 0;
+ }
+ else
+ break; /* and pop parent node */
+ }
+ }
+}
+
+Eterm* hashmap_iterator_prev(ErtsWStack* s) {
+ Eterm node, *ptr, hdr;
+ Uint32 sz;
+ Uint idx;
+
+ for (;;) {
+ ASSERT(!WSTACK_ISEMPTY((*s)));
+ node = (Eterm) WSTACK_POP((*s));
+ if (is_non_value(node)) {
+ return NULL;
+ }
+ idx = (Uint) WSTACK_POP((*s));
+ for (;;) {
+ ASSERT(is_boxed(node));
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ptr++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ break;
+ default:
+ erl_exit(1, "bad header");
+ }
+
+ if (idx > sz)
+ idx = sz;
+ else
+ idx--;
+
+ if (idx >= 1) {
+ WSTACK_PUSH2((*s), (UWord)idx, (UWord)node);
+
+ if (is_list(ptr[idx])) {
+ return list_val(ptr[idx]);
+ }
+ ASSERT(is_boxed(ptr[idx]));
+ node = ptr[idx];
+ idx = 17;
+ }
+ else
+ break; /* and pop parent node */
+ }
+ }
+}
+
+const Eterm *
+#if HALFWORD_HEAP
+erts_hashmap_get_rel(Uint32 hx, Eterm key, Eterm node, Eterm *map_base)
+#else
+erts_hashmap_get(Uint32 hx, Eterm key, Eterm node)
+#endif
+{
+ Eterm *ptr, hdr;
+ Uint ix,slot, lvl = 0;
+ Uint32 hval,bp;
+ DeclareTmpHeapNoproc(th,2);
+ UseTmpHeapNoproc(2);
+
+ for (;;) {
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST: /* LEAF NODE [K|V] */
+ ptr = list_val(node);
+ UnUseTmpHeapNoproc(2);
+
+ if (eq_rel(CAR(ptr), map_base, key, NULL)) {
+ return &(CDR(ptr));
+ }
+ return NULL;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[ix+1];
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[ix+2];
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+1];
+ break;
+ }
+ /* not occupied */
+ UnUseTmpHeapNoproc(2);
+ return NULL;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+2];
+ break;
+ }
+ /* not occupied */
+ UnUseTmpHeapNoproc(2);
+ return NULL;
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erl_exit(1, "bad primary tag %p\r\n", node);
+ break;
+ }
+ }
+ UnUseTmpHeapNoproc(2);
+ return NULL;
+}
+
+Eterm erts_hashmap_insert(Process *p, Uint32 hx, Eterm key, Eterm value,
+ Eterm map, int is_update) {
+ Uint size, upsz;
+ Eterm *hp, res = THE_NON_VALUE;
+ DECLARE_ESTACK(stack);
+ if (erts_hashmap_insert_down(hx, key, map, &size, &upsz, &stack, is_update)) {
+ hp = HAlloc(p, size);
+ res = erts_hashmap_insert_up(hp, key, value, &upsz, &stack);
+ }
+
+ DESTROY_ESTACK(stack);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+ ERTS_HOLE_CHECK(p);
+
+ return res;
+}
+
+
+int erts_hashmap_insert_down(Uint32 hx, Eterm key, Eterm node, Uint *sz,
+ Uint *update_size, ErtsEStack *sp, int is_update) {
+ Eterm *ptr;
+ Eterm hdr, ckey;
+ Eterm th[2];
+ Uint32 ix, cix, bp, hval, chx;
+ Uint slot, lvl = 0, clvl;
+ Uint size = 0, n = 0;
+
+ *update_size = 1;
+
+ for (;;) {
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST: /* LEAF NODE [K|V] */
+ ptr = list_val(node);
+ ckey = CAR(ptr);
+ if (EQ(ckey, key)) {
+ *update_size = 0;
+ goto unroll;
+ }
+ if (is_update) {
+ return 0;
+ }
+ goto insert_subnodes;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_NODE_ARRAY_SZ;
+ ESTACK_PUSH2(*sp, ix, node);
+ node = ptr[ix+1];
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_HEAD_ARRAY_SZ;
+ ESTACK_PUSH2(*sp, ix, node);
+ node = ptr[ix+2];
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH(*sp, n);
+ ESTACK_PUSH3(*sp, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+1];
+ ASSERT(HAMT_NODE_BITMAP_SZ(n) <= 17);
+ size += HAMT_NODE_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ if (is_update) {
+ return 0;
+ }
+ size += HAMT_NODE_BITMAP_SZ(n+1);
+ goto unroll;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH(*sp, n);
+ ESTACK_PUSH3(*sp, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+2];
+ ASSERT(HAMT_HEAD_BITMAP_SZ(n) <= 18);
+ size += HAMT_HEAD_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ if (is_update) {
+ return 0;
+ }
+ size += HAMT_HEAD_BITMAP_SZ(n+1);
+ goto unroll;
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erl_exit(1, "bad primary tag %p\r\n", node);
+ break;
+ }
+ }
+insert_subnodes:
+ clvl = lvl;
+ chx = hashmap_restore_hash(th,clvl,ckey);
+ size += HAMT_NODE_BITMAP_SZ(2);
+ ix = hashmap_index(hx);
+ cix = hashmap_index(chx);
+
+ while (cix == ix) {
+ ESTACK_PUSH(*sp, 0);
+ ESTACK_PUSH3(*sp, 1 << ix, 0, MAP_HEADER_HAMT_NODE_BITMAP(0));
+ size += HAMT_NODE_BITMAP_SZ(1);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ chx = hashmap_shift_hash(th,chx,clvl,ckey);
+ ix = hashmap_index(hx);
+ cix = hashmap_index(chx);
+ }
+ ESTACK_PUSH3(*sp, cix, ix, node);
+
+unroll:
+ *sz = size + /* res cons */ 2;
+ return 1;
+}
+
+Eterm erts_hashmap_insert_up(Eterm *hp, Eterm key, Eterm value,
+ Uint *update_size, ErtsEStack *sp) {
+ Eterm node, *ptr, hdr;
+ Eterm res;
+ Eterm *nhp = NULL;
+ Uint32 ix, cix, bp, hval;
+ Uint slot, n;
+ /* Needed for halfword */
+ DeclareTmpHeapNoproc(fake,1);
+ UseTmpHeapNoproc(1);
+
+ res = CONS(hp, key, value); hp += 2;
+
+ do {
+ node = ESTACK_POP(*sp);
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ ix = (Uint32) ESTACK_POP(*sp);
+ cix = (Uint32) ESTACK_POP(*sp);
+
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP((1 << ix) | (1 << cix));
+ if (ix < cix) {
+ *hp++ = res;
+ *hp++ = node;
+ } else {
+ *hp++ = node;
+ *hp++ = res;
+ }
+ res = make_hashmap(nhp);
+ break;
+ case TAG_PRIMARY_HEADER:
+ /* subnodes, fake it */
+ *fake = node;
+ node = make_boxed(fake);
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ slot = (Uint) ESTACK_POP(*sp);
+ nhp = hp;
+ n = HAMT_NODE_ARRAY_SZ;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[slot+1] = res;
+ res = make_hashmap(nhp);
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ slot = (Uint) ESTACK_POP(*sp);
+ nhp = hp;
+ n = HAMT_HEAD_ARRAY_SZ - 2;
+ *hp++ = MAP_HEADER_HAMT_HEAD_ARRAY; ptr++;
+ *hp++ = (*ptr++) + *update_size;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[slot+2] = res;
+ res = make_hashmap(nhp);
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ slot = (Uint) ESTACK_POP(*sp);
+ bp = (Uint32) ESTACK_POP(*sp);
+ n = (Uint32) ESTACK_POP(*sp);
+ hval = MAP_HEADER_VAL(hdr);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval | bp); ptr++;
+
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ if (hval & bp) { ptr++; n--; }
+ while(n--) { *hp++ = *ptr++; }
+
+ if ((hval | bp) == 0xffff) {
+ *nhp = make_arityval(16);
+ }
+ res = make_hashmap(nhp);
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ slot = (Uint) ESTACK_POP(*sp);
+ bp = (Uint32) ESTACK_POP(*sp);
+ n = (Uint32) ESTACK_POP(*sp);
+ hval = MAP_HEADER_VAL(hdr);
+ nhp = hp;
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(hval | bp); ptr++;
+ *hp++ = (*ptr++) + *update_size;
+
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ if (hval & bp) { ptr++; n--; }
+ while(n--) { *hp++ = *ptr++; }
+
+ if ((hval | bp) == 0xffff) {
+ *nhp = MAP_HEADER_HAMT_HEAD_ARRAY;
+ }
+ res = make_hashmap(nhp);
+ break;
+ default:
+ erl_exit(1, "bad header tag %x\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erl_exit(1, "bad primary tag %x\r\n", primary_tag(node));
+ break;
+ }
+
+ } while(!ESTACK_ISEMPTY(*sp));
+
+ UnUseTmpHeapNoproc(1);
+ return res;
+}
+
+static Eterm hashmap_keys(Process* p, Eterm node) {
+ DECLARE_WSTACK(stack);
+ hashmap_head_t* root;
+ Eterm *hp, *kv;
+ Eterm res = NIL;
+
+ root = (hashmap_head_t*) boxed_val(node);
+ hp = HAlloc(p, root->size * 2);
+ hashmap_iterator_init(&stack, node, 0);
+ while ((kv=hashmap_iterator_next(&stack)) != NULL) {
+ res = CONS(hp, CAR(kv), res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+static Eterm hashmap_values(Process* p, Eterm node) {
+ DECLARE_WSTACK(stack);
+ hashmap_head_t* root;
+ Eterm *hp, *kv;
+ Eterm res = NIL;
+
+ root = (hashmap_head_t*) boxed_val(node);
+ hp = HAlloc(p, root->size * 2);
+ hashmap_iterator_init(&stack, node, 0);
+ while ((kv=hashmap_iterator_next(&stack)) != NULL) {
+ res = CONS(hp, CDR(kv), res);
+ hp += 2;
+ }
+ DESTROY_WSTACK(stack);
+ return res;
+}
+
+static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm map) {
+ Eterm *hp = NULL, *nhp = NULL, *hp_end = NULL;
+ Eterm th[2];
+ Eterm *ptr;
+ Eterm hdr, res = map, node = map;
+ Uint32 ix, bp, hval;
+ Uint slot, lvl = 0;
+ Uint size = 0, n = 0;
+ DECLARE_ESTACK(stack);
+
+ for (;;) {
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ if (EQ(CAR(list_val(node)), key)) {
+ goto unroll;
+ }
+ goto not_found;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_NODE_ARRAY_SZ;
+ ESTACK_PUSH2(stack, ix, node);
+ node = ptr[ix+1];
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = hashmap_index(hx);
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ size += HAMT_HEAD_ARRAY_SZ;
+ ESTACK_PUSH2(stack, ix, node);
+ node = ptr[ix+2];
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH(stack, n);
+ ESTACK_PUSH3(stack, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+1];
+ ASSERT(HAMT_NODE_BITMAP_SZ(n) <= 17);
+ size += HAMT_NODE_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ goto not_found;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ hval = MAP_HEADER_VAL(hdr);
+ ix = hashmap_index(hx);
+ bp = 1 << ix;
+ slot = hashmap_bitcount(hval & (bp - 1));
+ n = hashmap_bitcount(hval);
+
+ ESTACK_PUSH(stack, n);
+ ESTACK_PUSH3(stack, bp, slot, node);
+
+ /* occupied */
+ if (bp & hval) {
+ hx = hashmap_shift_hash(th,hx,lvl,key);
+ node = ptr[slot+2];
+ ASSERT(HAMT_HEAD_BITMAP_SZ(n) <= 18);
+ size += HAMT_HEAD_BITMAP_SZ(n);
+ break;
+ }
+ /* not occupied */
+ goto not_found;
+ default:
+ erl_exit(1, "bad header tag %ld\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ break;
+ default:
+ erl_exit(1, "bad primary tag %p\r\n", node);
+ break;
+ }
+ }
+
+unroll:
+ /* the size is bounded and at least one less than the previous size */
+ size -= 1;
+ n = hashmap_size(map) - 1;
+
+ if (n <= MAP_SMALL_MAP_LIMIT) {
+ DECLARE_WSTACK(wstack);
+ Eterm *kv, *ks, *vs;
+ flatmap_t *mp;
+ Eterm keys;
+
+ DESTROY_ESTACK(stack);
+
+ /* build flat structure */
+ hp = HAlloc(p, 3 + 1 + (2 * n));
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ ks = hp;
+ hp += n;
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_SIZE;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER;
+ mp->size = n;
+ mp->keys = keys;
+
+ hashmap_iterator_init(&wstack, map, 0);
+
+ while ((kv=hashmap_iterator_next(&wstack)) != NULL) {
+ if (EQ(CAR(kv),key))
+ continue;
+ *ks++ = CAR(kv);
+ *vs++ = CDR(kv);
+ }
+
+ /* it cannot have multiple keys */
+ erts_validate_and_sort_flatmap(mp);
+
+ DESTROY_WSTACK(wstack);
+ return make_flatmap(mp);
+ }
+
+ hp = HAlloc(p, size);
+ hp_end = hp + size;
+ res = THE_NON_VALUE;
+
+ do {
+ node = ESTACK_POP(stack);
+
+ /* all nodes are things */
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ ix = (Uint) ESTACK_POP(stack);
+ nhp = hp;
+ if (res == THE_NON_VALUE) {
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(0xffff ^ (1 << ix)); ptr++;
+ n = 16;
+ n -= ix;
+ while(ix--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ } else {
+ n = HAMT_NODE_ARRAY_SZ;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[ix+1] = res;
+ res = make_hashmap(nhp);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ ix = (Uint) ESTACK_POP(stack);
+ nhp = hp;
+ if (res == THE_NON_VALUE) {
+ n = 16;
+ n -= ix;
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(0xffff ^ (1 << ix)); ptr++;
+ *hp++ = (*ptr++) - 1;
+ while(ix--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ } else {
+ n = 16;
+ *hp++ = MAP_HEADER_HAMT_HEAD_ARRAY; ptr++;
+ *hp++ = (*ptr++) - 1;
+ while(n--) { *hp++ = *ptr++; }
+ nhp[ix+2] = res;
+ res = make_hashmap(nhp);
+ }
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ slot = (Uint) ESTACK_POP(stack);
+ bp = (Uint32) ESTACK_POP(stack);
+ n = (Uint32) ESTACK_POP(stack);
+ nhp = hp;
+
+ /* bitmap change matrix
+ * res | none leaf bitmap
+ * ----------------------------
+ * n=1 | remove remove keep
+ * n=2 | other keep keep
+ * n>2 | shrink keep keep
+ *
+ * other: (remember, n is 2)
+ * shrink if the other bitmap value is a bitmap node
+ * remove if the other bitmap value is a leaf
+ *
+ * remove:
+ * this bitmap node is removed, res is moved up in tree (could be none)
+ * this is a special case of shrink
+ *
+ * keep:
+ * the current path index is still used down in the tree, need to keep it
+ * copy as usual with the updated res
+ *
+ * shrink:
+ * the current path index is no longer used down in the tree, remove it (shrink)
+ */
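+            /* e.g. with bitmap 0x0011 (slots 0 and 4) and n == 2, deleting
+             * the entry at slot 0 while slot 4 holds a leaf is the
+             * "other -> remove" case: the leaf is pulled up and this
+             * bitmap node disappears. */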
+ if (res == THE_NON_VALUE) {
+ if (n == 1) {
+ break;
+ } else if (n == 2) {
+ if (slot == 0) {
+ ix = 2; /* off by one because of the hdr word */
+ } else {
+ ix = 1; /* off by one because of the hdr word */
+ }
+ if (primary_tag(ptr[ix]) == TAG_PRIMARY_LIST) {
+ res = ptr[ix];
+ } else {
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval ^ bp);
+ *hp++ = ptr[ix];
+ res = make_hashmap(nhp);
+ }
+ } else {
+ /* n > 2 */
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(hval ^ bp); ptr++;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ }
+ } else if (primary_tag(res) == TAG_PRIMARY_LIST && n == 1) {
+ break;
+ } else {
+ /* res is bitmap or leaf && n > 1, keep */
+ n -= slot;
+ *hp++ = *ptr++;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ res = make_hashmap(nhp);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ slot = (Uint) ESTACK_POP(stack);
+ bp = (Uint32) ESTACK_POP(stack);
+ n = (Uint32) ESTACK_POP(stack);
+ nhp = hp;
+
+ if (res != THE_NON_VALUE) {
+ *hp++ = *ptr++;
+ *hp++ = (*ptr++) - 1;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ *hp++ = res;
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ } else {
+ hval = MAP_HEADER_VAL(hdr);
+ *hp++ = MAP_HEADER_HAMT_HEAD_BITMAP(hval ^ bp); ptr++;
+ *hp++ = (*ptr++) - 1;
+ n -= slot;
+ while(slot--) { *hp++ = *ptr++; }
+ ptr++; n--;
+ while(n--) { *hp++ = *ptr++; }
+ }
+ res = make_hashmap(nhp);
+ break;
+ default:
+ erl_exit(1, "bad header tag %x\r\n", hdr & _HEADER_MAP_SUBTAG_MASK);
+ break;
+ }
+ } while(!ESTACK_ISEMPTY(stack));
+ HRelease(p, hp_end, hp);
+not_found:
+ DESTROY_ESTACK(stack);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
+ ERTS_HOLE_CHECK(p);
+ return res;
+}
+
+
+int erts_validate_and_sort_flatmap(flatmap_t* mp)
{
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
- Uint sz = map_get_size(mp);
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
+ Uint sz = flatmap_get_size(mp);
Uint ix,jx;
Eterm tmp;
int c;
@@ -822,6 +2528,55 @@ int erts_validate_and_sort_map(map_t* mp)
return 1;
}
+/* Really rough estimate of sqrt(x)
+ * Guaranteed not to be less than sqrt(x)
+ */
+static int int_sqrt_ceiling(Uint x)
+{
+ int n;
+
+ if (x <= 2)
+ return x;
+
+ n = erts_fit_in_bits_uint(x-1);
+ if (n & 1) {
+ /* Calc: sqrt(2^n) = 2^(n/2) * sqrt(2) ~= 2^(n/2) * 3 / 2 */
+ return (1 << (n/2 - 1)) * 3;
+ }
+ else {
+ /* Calc: sqrt(2^n) = 2^(n/2) */
+ return 1 << (n / 2);
+ }
+}
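+
+/* A quick sanity check of the estimate, assuming erts_fit_in_bits_uint()
+ * returns the number of significant bits: x = 10 gives n = 4 and
+ * 1 << 2 = 4 >= sqrt(10); x = 100 gives n = 7 and (1 << 2) * 3 = 12
+ * >= sqrt(100). */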
+
+Uint hashmap_over_estimated_heap_size(Uint k)
+{
+ /* k is nr of key-value pairs.
+ N(k) is expected nr of nodes in hamt.
+
+ Observation:
+ For uniformly distributed hash values, average of N varies between
+ 0.3*k and 0.4*k (with a beautiful sine curve)
+ and standard deviation of N is about sqrt(k)/3.
+
+ Assuming normal probability distribution, we overestimate nr of nodes
+ by 15 std.devs above the average, which gives a probability for overrun
+ less than 1.0e-49 (same magnitude as a git SHA1 collision).
+ */
+ Uint max_nodes = 2*k/5 + (15/3)*int_sqrt_ceiling(k);
+ return (k*2 + /* leaf cons cells */
+ k + /* leaf list terms */
+ max_nodes*2); /* headers + parent boxed terms */
+}
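+
+/* Rough example: for k = 10000 pairs int_sqrt_ceiling(10000) is 128, so
+ * max_nodes = 4000 + 5*128 = 4640 and the returned overestimate is
+ * 20000 + 10000 + 2*4640 = 39280 heap words. */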
+
+
+BIF_RETTYPE erts_debug_map_info_1(BIF_ALIST_1) {
+ if (is_hashmap(BIF_ARG_1)) {
+ BIF_RET(hashmap_info(BIF_P,BIF_ARG_1));
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
/*
* erts_internal:map_to_tuple_keys/1
*
@@ -829,9 +2584,233 @@ int erts_validate_and_sort_map(map_t* mp)
*/
BIF_RETTYPE erts_internal_map_to_tuple_keys_1(BIF_ALIST_1) {
- if (is_map(BIF_ARG_1)) {
- map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ if (is_flatmap(BIF_ARG_1)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
BIF_RET(mp->keys);
}
BIF_ERROR(BIF_P, BADARG);
}
+
+/*
+ * erts_internal:map_type/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_type_1(BIF_ALIST_1) {
+ DECL_AM(hashmap);
+ DECL_AM(hashmap_node);
+ DECL_AM(flatmap);
+ if (is_flatmap(BIF_ARG_1)) {
+ BIF_RET(AM_flatmap);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ Eterm hdr = *(boxed_val(BIF_ARG_1));
+ ASSERT(is_header(hdr));
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ BIF_RET(AM_hashmap);
+ case HAMT_SUBTAG_NODE_ARRAY:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ BIF_RET(AM_hashmap_node);
+ default:
+ erl_exit(1, "bad header");
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/*
+ * erts_internal:map_hashmap_children/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_hashmap_children_1(BIF_ALIST_1) {
+ if (is_hashmap(BIF_ARG_1)) {
+ Eterm node = BIF_ARG_1;
+ Eterm *ptr, hdr, *hp, res = NIL;
+ Uint sz = 0;
+ ptr = boxed_val(node);
+ hdr = *ptr;
+
+ ASSERT(is_header(hdr));
+
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sz = 16;
+ ptr += 1;
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ptr += 1;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ptr += 2;
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ sz = 16;
+ ptr += 2;
+ break;
+ default:
+ erl_exit(1, "bad header\r\n");
+ break;
+ }
+ ASSERT(sz < 17);
+ hp = HAlloc(BIF_P, 2*sz);
+ while(sz--) { res = CONS(hp, *ptr++, res); hp += 2; }
+ BIF_RET(res);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+static Eterm hashmap_info(Process *p, Eterm node) {
+ Eterm *hp;
+ Eterm res = NIL, info = NIL;
+ Eterm *ptr, tup, hdr;
+ Uint sz;
+ DECL_AM(depth);
+ DECL_AM(leafs);
+ DECL_AM(bitmaps);
+ DECL_AM(arrays);
+ Uint nleaf=0, nbitmap=0, narray=0;
+ Uint bitmap_usage[16], leaf_usage[16];
+ Uint lvl = 0, clvl;
+ DECLARE_ESTACK(stack);
+
+ for (sz = 0; sz < 16; sz++) {
+ bitmap_usage[sz] = 0;
+ leaf_usage[sz] = 0;
+ }
+
+ ptr = boxed_val(node);
+ ESTACK_PUSH(stack, 0);
+ ESTACK_PUSH(stack, node);
+ do {
+ node = ESTACK_POP(stack);
+ clvl = ESTACK_POP(stack);
+ if (lvl < clvl)
+ lvl = clvl;
+ switch(primary_tag(node)) {
+ case TAG_PRIMARY_LIST:
+ nleaf++;
+ leaf_usage[clvl] += 1;
+ break;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(node);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_NODE_ARRAY:
+ narray++;
+ sz = 16;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+1]);
+ }
+ break;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ nbitmap++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
+ bitmap_usage[sz-1] += 1;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+1]);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ nbitmap++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ bitmap_usage[sz-1] += 1;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+2]);
+ }
+ break;
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ narray++;
+ sz = 16;
+ while(sz--) {
+ ESTACK_PUSH(stack, clvl + 1);
+ ESTACK_PUSH(stack, ptr[sz+2]);
+ }
+ break;
+ default:
+ erl_exit(1, "bad header\r\n");
+ break;
+ }
+ }
+ } while(!ESTACK_ISEMPTY(stack));
+
+
+ /* size */
+ sz = 0;
+ hashmap_bld_tuple_uint(NULL,&sz,16,leaf_usage);
+ hashmap_bld_tuple_uint(NULL,&sz,16,bitmap_usage);
+
+ /* alloc */
+ hp = HAlloc(p, 2+3 + 3*(2+4) + sz);
+
+ info = hashmap_bld_tuple_uint(&hp,NULL,16,leaf_usage);
+ tup = TUPLE3(hp, AM_leafs, make_small(nleaf),info); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ info = hashmap_bld_tuple_uint(&hp,NULL,16,bitmap_usage);
+ tup = TUPLE3(hp, AM_bitmaps, make_small(nbitmap), info); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ tup = TUPLE3(hp, AM_arrays, make_small(narray),NIL); hp += 4;
+ res = CONS(hp, tup, res); hp += 2;
+
+ tup = TUPLE2(hp, AM_depth, make_small(lvl)); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
+ DESTROY_ESTACK(stack);
+ ERTS_HOLE_CHECK(p);
+ return res;
+}
+
+static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]) {
+ Eterm res = THE_NON_VALUE;
+ Eterm *ts = (Eterm *)erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
+ Uint i;
+
+ for (i = 0; i < n; i++) {
+ ts[i] = erts_bld_uint(hpp, szp, nums[i]);
+ }
+ res = erts_bld_tuplev(hpp, szp, n, ts);
+ erts_free(ERTS_ALC_T_TMP, (void *) ts);
+ return res;
+}
+
+
+/* implementation of builtin emulations */
+
+#if !ERTS_AT_LEAST_GCC_VSN__(3, 4, 0)
+/* Count leading zeros emulation */
+Uint32 hashmap_clz(Uint32 x) {
+ Uint32 y;
+ int n = 32;
+ y = x >>16; if (y != 0) {n = n -16; x = y;}
+ y = x >> 8; if (y != 0) {n = n - 8; x = y;}
+ y = x >> 4; if (y != 0) {n = n - 4; x = y;}
+ y = x >> 2; if (y != 0) {n = n - 2; x = y;}
+ y = x >> 1; if (y != 0) return n - 2;
+ return n - x;
+}
+
+const Uint32 SK5 = 0x55555555, SK3 = 0x33333333;
+const Uint32 SKF0 = 0xF0F0F0F, SKFF = 0xFF00FF;
+
+/* CTPOP emulation */
+Uint32 hashmap_bitcount(Uint32 x) {
+ x -= ((x >> 1 ) & SK5);
+ x = (x & SK3 ) + ((x >> 2 ) & SK3 );
+ x = (x & SKF0) + ((x >> 4 ) & SKF0);
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+#endif
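The emulations above are only compiled when the GCC builtins are unavailable (see the #if guard added to erl_map.h below). A quick standalone check of the SWAR popcount, exhaustive over the 16-bit bitmaps a hashmap node carries (sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t swar_bitcount(uint32_t x)   /* mirrors hashmap_bitcount above */
    {
        const uint32_t SK5 = 0x55555555, SK3 = 0x33333333, SKF0 = 0x0F0F0F0F;
        x -= (x >> 1) & SK5;
        x  = (x & SK3) + ((x >> 2) & SK3);
        x  = (x & SKF0) + ((x >> 4) & SKF0);
        x += x >> 8;
        return (x + (x >> 16)) & 0x3F;
    }

    int main(void)
    {
        uint32_t x;
        for (x = 0; x < 0x10000; x++) {          /* every possible 16-bit bitmap */
            uint32_t n = 0, y = x;
            while (y) { n += y & 1; y >>= 1; }   /* naive reference count */
            assert(swar_bitcount(x) == n);
        }
        return 0;
    }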
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
index cfacb2ec28..1333a734a8 100644
--- a/erts/emulator/beam/erl_map.h
+++ b/erts/emulator/beam/erl_map.h
@@ -22,13 +22,23 @@
#define __ERL_MAP_H__
#include "sys.h"
+
+/* intrinsic wrappers */
+#if ERTS_AT_LEAST_GCC_VSN__(3, 4, 0)
+#define hashmap_clz(x) ((Uint32) __builtin_clz((unsigned int)(x)))
+#define hashmap_bitcount(x) ((Uint32) __builtin_popcount((unsigned int) (x)))
+#else
+Uint32 hashmap_clz(Uint32 x);
+Uint32 hashmap_bitcount(Uint32 x);
+#endif
+
/* MAP */
-typedef struct map_s {
+typedef struct flatmap_s {
Eterm thing_word;
Uint size;
Eterm keys; /* tuple */
-} map_t;
+} flatmap_t;
/* map node
*
* -----------
@@ -42,31 +52,152 @@ typedef struct map_s {
* -----------
*/
+/* the head-node is a bitmap or array with an untagged size */
+
+
+#define hashmap_size(x) (((hashmap_head_t*) hashmap_val(x))->size)
+#define hashmap_size_rel(RTERM, BASE) hashmap_size(rterm2wterm(RTERM, BASE))
+#define hashmap_make_hash(Key) make_internal_hash(Key)
+
+#define hashmap_restore_hash(Heap,Lvl,Key) \
+ (((Lvl) < 8) ? hashmap_make_hash(Key) >> (4*(Lvl)) : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), (Key))) >> (4*((Lvl) & 7)))
+#define hashmap_shift_hash(Heap,Hx,Lvl,Key) \
+ (((++(Lvl)) & 7) ? (Hx) >> 4 : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), Key)))
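The two macros above consume a key's 32-bit hash four bits per tree level; every eighth level a fresh hash is derived by hashing a cons cell [Lvl div 8 | Key], so nodes can keep splitting even when keys collide on their first 32 hash bits. A sketch of the level arithmetic only (fake_hash is a hypothetical stand-in for make_internal_hash; the real macros build the cons on the process heap):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-in for make_internal_hash on a [counter | key] cons */
    static uint32_t fake_hash(uint32_t counter) { return (counter + 1) * 2654435761u; }

    int main(void)
    {
        uint32_t hx = fake_hash(0);       /* hashmap_make_hash(Key) at level 0 */
        int lvl;
        for (lvl = 0; lvl < 12; lvl++) {
            printf("level %2d -> child index 0x%x\n", lvl, hx & 0xf); /* hashmap_index */
            /* hashmap_shift_hash: advance 4 bits, or reseed every 8th level */
            hx = ((lvl + 1) & 7) ? (hx >> 4) : fake_hash((uint32_t)((lvl + 1) >> 3));
        }
        return 0;
    }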
/* erl_term.h stuff */
-#define make_map(x) make_boxed((Eterm*)(x))
-#define make_map_rel(x, BASE) make_boxed_rel((Eterm*)(x),(BASE))
-#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val((x))))
-#define is_map_rel(RTERM,BASE) is_map(rterm2wterm(RTERM,BASE))
-#define is_not_map(x) (!is_map((x)))
-#define is_map_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP)
-#define header_is_map(x) ((((x) & (_HEADER_SUBTAG_MASK)) == MAP_SUBTAG))
-#define map_val(x) (_unchecked_boxed_val((x)))
-#define map_val_rel(RTERM, BASE) map_val(rterm2wterm(RTERM, BASE))
-
-#define map_get_values(x) (((Eterm *)(x)) + 3)
-#define map_get_keys(x) (((Eterm *)tuple_val(((map_t *)(x))->keys)) + 1)
-#define map_get_size(x) (((map_t*)(x))->size)
+#define make_flatmap(x) make_boxed((Eterm*)(x))
+#define make_flatmap_rel(x, BASE) make_boxed_rel((Eterm*)(x),(BASE))
+#define is_flatmap(x) (is_boxed((x)) && is_flatmap_header(*boxed_val((x))))
+#define is_flatmap_rel(RTERM,BASE) is_flatmap(rterm2wterm(RTERM,BASE))
+#define is_not_flatmap(x) (!is_flatmap((x)))
+#define is_flatmap_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP)
+#define header_is_flatmap(x) ((((x) & (_HEADER_SUBTAG_MASK)) == MAP_SUBTAG))
+#define flatmap_val(x) (_unchecked_boxed_val((x)))
+#define flatmap_val_rel(RTERM, BASE) flatmap_val(rterm2wterm(RTERM, BASE))
+#define flatmap_get_values(x) (((Eterm *)(x)) + 3)
+#define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1)
+#define flatmap_get_size(x) (((flatmap_t*)(x))->size)
+
+#ifdef DEBUG
+#define MAP_SMALL_MAP_LIMIT (3)
+#else
+#define MAP_SMALL_MAP_LIMIT (32)
+#endif
#define MAP_HEADER _make_header(1,_TAG_HEADER_MAP)
-#define MAP_HEADER_SIZE (sizeof(map_t) / sizeof(Eterm))
-
-Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map);
-int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res);
-int erts_maps_find(Eterm key, Eterm map, Eterm *value);
-int erts_maps_get(Eterm key, Eterm map, Eterm *value);
-int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res);
-int erts_validate_and_sort_map(map_t* map);
+#define MAP_HEADER_SIZE (sizeof(flatmap_t) / sizeof(Eterm))
+
+struct ErtsWStack_;
+struct ErtsEStack_;
+
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map);
+int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res);
+int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res);
+
+Eterm erts_hashmap_insert(Process *p, Uint32 hx, Eterm key, Eterm value,
+ Eterm node, int is_update);
+int erts_hashmap_insert_down(Uint32 hx, Eterm key, Eterm node, Uint *sz,
+ Uint *upsz, struct ErtsEStack_ *sp, int is_update);
+Eterm erts_hashmap_insert_up(Eterm *hp, Eterm key, Eterm value,
+ Uint *upsz, struct ErtsEStack_ *sp);
+
+int erts_validate_and_sort_flatmap(flatmap_t* map);
+Uint hashmap_over_estimated_heap_size(Uint n);
+void hashmap_iterator_init(struct ErtsWStack_* s, Eterm node, int reverse);
+Eterm* hashmap_iterator_next(struct ErtsWStack_* s);
+Eterm* hashmap_iterator_prev(struct ErtsWStack_* s);
+int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp);
+Eterm erts_hashmap_from_array(ErtsHeapFactory*, Eterm *leafs, Uint n, int reject_dupkeys);
+
+#define erts_hashmap_from_ks_and_vs(P, KS, VS, N) \
+ erts_hashmap_from_ks_and_vs_extra((P), (KS), (VS), (N), THE_NON_VALUE, THE_NON_VALUE);
+
+Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n,
+ Eterm k, Eterm v);
+
+const Eterm *
+#if HALFWORD_HEAP
+erts_maps_get_rel(Eterm key, Eterm map, Eterm *map_base);
+# define erts_maps_get(A, B) erts_maps_get_rel(A, B, NULL)
+#else
+erts_maps_get(Eterm key, Eterm map);
+# define erts_maps_get_rel(A, B, B_BASE) erts_maps_get(A, B)
+#endif
+
+const Eterm *
+#if HALFWORD_HEAP
+erts_hashmap_get_rel(Uint32 hx, Eterm key, Eterm node, Eterm *map_base);
+# define erts_hashmap_get(Hx, K, M) erts_hashmap_get_rel(Hx, K, M, NULL)
+#else
+erts_hashmap_get(Uint32 hx, Eterm key, Eterm map);
+# define erts_hashmap_get_rel(Hx, K, M, M_BASE) erts_hashmap_get(Hx, K, M)
#endif
+/* hamt nodes v2.0
+ *
+ * node :: leaf | array | bitmap
+ * head
+ */
+typedef struct hashmap_head_s {
+ Eterm thing_word;
+ Uint size;
+ Eterm items[1];
+} hashmap_head_t;
+
+/* thing_word tagscheme
+ * Need two bits for map subtags
+ *
+ * Original HEADER representation:
+ *
+ * aaaaaaaaaaaaaaaa aaaaaaaaaatttt00 arity:26, tag:4
+ *
+ * For maps we have:
+ *
+ * vvvvvvvvvvvvvvvv aaaaaaaamm111100 val:16, arity:8, mtype:2
+ *
+ * unsure about trailing zeros
+ *
+ * map-tag:
+ * 00 - flat map tag (non-hamt) -> val:16 = #items
+ * 01 - map-node bitmap tag -> val:16 = bitmap
+ * 10 - map-head (array-node) -> val:16 = 0xffff
+ * 11 - map-head (bitmap-node) -> val:16 = bitmap
+ */
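As an illustration of the layout documented above (sketch only: the field widths are taken from the comment, while the shift constants and the 0x3C header-tag value are assumptions, not the real erl_term.h definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_BITS    6   /* "111100": 4-bit subtag + 2-bit primary header tag */
    #define MTYPE_BITS  2   /* flat / node-bitmap / head-array / head-bitmap */
    #define ARITY_BITS  8

    static uintptr_t make_map_header(unsigned type, unsigned arity, unsigned val)
    {
        return ((uintptr_t)val   << (ARITY_BITS + MTYPE_BITS + TAG_BITS))
             | ((uintptr_t)arity << (MTYPE_BITS + TAG_BITS))
             | ((uintptr_t)type  << TAG_BITS)
             | 0x3C;                    /* assumed fixed "1111 00" header tag bits */
    }

    int main(void)
    {
        /* e.g. a HAMT head-bitmap node with bitmap 0x00b6 (5 children) */
        uintptr_t hdr = make_map_header(/*type=*/3, /*arity=*/1, /*val=*/0x00b6);
        printf("header word:     0x%lx\n", (unsigned long)hdr);
        printf("bitmap (val:16): 0x%lx\n", (unsigned long)(hdr >> 16));
        return 0;
    }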
+
+/* erl_map.h stuff */
+
+#define is_hashmap_header_head(x) ((MAP_HEADER_TYPE(x) & (0x2)))
+
+#define MAKE_MAP_HEADER(Type,Arity,Val) \
+ (_make_header(((((Uint16)(Val)) << MAP_HEADER_ARITY_SZ) | (Arity)) << MAP_HEADER_TAG_SZ | (Type) , _TAG_HEADER_HASHMAP))
+
+#define MAP_HEADER_HAMT_HEAD_ARRAY \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_HEAD_ARRAY,0x1,0xffff)
+
+#define MAP_HEADER_HAMT_HEAD_BITMAP(Bmp) \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_HEAD_BITMAP,0x1,Bmp)
+
+#define MAP_HEADER_HAMT_NODE_ARRAY \
+ make_arityval(16)
+
+#define MAP_HEADER_HAMT_NODE_BITMAP(Bmp) \
+ MAKE_MAP_HEADER(MAP_HEADER_TAG_HAMT_NODE_BITMAP,0x0,Bmp)
+
+#define HAMT_HEAD_EMPTY_SZ (2)
+#define HAMT_NODE_ARRAY_SZ (17)
+#define HAMT_HEAD_ARRAY_SZ (18)
+#define HAMT_NODE_BITMAP_SZ(n) (1 + n)
+#define HAMT_HEAD_BITMAP_SZ(n) (2 + n)
+
+#define _HEADER_MAP_SUBTAG_MASK (0xfc) /* 2 bits maps tag + 4 bits subtag + 2 ignore bits */
+/* SUBTAG_NODE_ARRAY is in fact a tuple with 16 elements */
+#define HAMT_SUBTAG_NODE_ARRAY (((16 << _HEADER_ARITY_OFFS) | ARITYVAL_SUBTAG) & _HEADER_MAP_SUBTAG_MASK)
+#define HAMT_SUBTAG_NODE_BITMAP ((MAP_HEADER_TAG_HAMT_NODE_BITMAP << _HEADER_ARITY_OFFS) | HASHMAP_SUBTAG)
+#define HAMT_SUBTAG_HEAD_ARRAY ((MAP_HEADER_TAG_HAMT_HEAD_ARRAY << _HEADER_ARITY_OFFS) | HASHMAP_SUBTAG)
+#define HAMT_SUBTAG_HEAD_BITMAP ((MAP_HEADER_TAG_HAMT_HEAD_BITMAP << _HEADER_ARITY_OFFS) | HASHMAP_SUBTAG)
+
+#define hashmap_index(hash) (((Uint32)hash) & 0xf)
+
+
+#endif
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index 16d4fdc09c..9b864628db 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -207,6 +207,24 @@ BIF_RETTYPE math_log_1(BIF_ALIST_1)
return math_call_1(BIF_P, log, BIF_ARG_1);
}
+#ifdef HAVE_LOG2
+static double
+log2_wrapper(double x)
+{
+ return log2(x);
+}
+#else
+static double
+log2_wrapper(double x)
+{
+ return log(x) / 0.6931471805599453; /* log(2.0); */
+}
+#endif
+
+BIF_RETTYPE math_log2_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, log2_wrapper, BIF_ARG_1);
+}
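The fallback branch divides the natural logarithm by ln 2 = 0.6931471805599453. A quick standalone check that the fallback agrees with a native log2 (sketch, not part of the patch; link with -lm and a C99 libm for the comparison):

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        double x;
        for (x = 0.5; x < 1.0e6; x *= 3.7) {
            double a = log(x) / 0.6931471805599453;  /* the fallback expression */
            assert(fabs(a - log2(x)) < 1.0e-9);
        }
        return 0;
    }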
BIF_RETTYPE math_log10_1(BIF_ALIST_1)
{
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 8870fac7d9..22cbae10d1 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -994,7 +994,7 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
- } else if (sender == receiver) {
+ } else if (sender == receiver && !(sender->flags & F_OFF_HEAP_MSGS)) {
/* Drop message if receiver has a pending exit ... */
#ifdef ERTS_SMP
ErtsProcLocks need_locks = (~(*receiver_locks)
@@ -1146,3 +1146,15 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
}
}
+Eterm* erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+{
+ Eterm* res;
+ if (factory->p) {
+ res = HAllocX(factory->p, need, xtra);
+ } else {
+ res = factory->hp;
+ factory->hp += need;
+ }
+ return res;
+}
+
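erts_produce_heap gives callers one allocation entry point for two situations: a live process (p set, words come from HAllocX and are subject to GC) or a caller-owned buffer (p NULL, hp is simply bumped), matching the ErtsHeapFactory introduced in erl_message.h below. A rough analogy with stand-in types (sketch only; the real factory uses Process and Eterm pointers):

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned long Word;

    typedef struct {
        void *proc;   /* non-NULL: allocate from the process heap (HAllocX) */
        Word *hp;     /* NULL proc: bump-allocate from a caller-owned buffer */
    } Factory;

    static Word *produce(Factory *f, size_t need)
    {
        Word *res;
        if (f->proc) {
            res = NULL;   /* would be HAllocX(proc, need, xtra) in the emulator */
        } else {
            res = f->hp;  /* bump the preallocated buffer, no GC interaction */
            f->hp += need;
        }
        return res;
    }

    int main(void)
    {
        Word buf[8];
        Factory f = { NULL, buf };
        Word *a = produce(&f, 3);      /* first 3 words */
        Word *b = produce(&f, 2);      /* next 2 words  */
        assert(b == a + 3 && f.hp == buf + 5);
        return 0;
    }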
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 0f3bb8d281..8713941769 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -68,6 +68,21 @@ struct erl_heap_fragment {
Eterm mem[1]; /* Data */
};
+typedef struct {
+ Process* p;
+ Eterm* hp;
+} ErtsHeapFactory;
+
+Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+#ifdef CHECK_FOR_HOLES
+# define ERTS_FACTORY_HOLE_CHECK(f) do { \
+ if ((f)->p) erts_check_for_holes((f)->p); \
+ } while (0)
+#else
+# define ERTS_FACTORY_HOLE_CHECK(p)
+#endif
+
+
typedef struct erl_mesg {
struct erl_mesg* next; /* Next message */
union {
@@ -198,15 +213,25 @@ do { \
if ((M)->data.attached) { \
Uint need__ = erts_msg_attached_data_size((M)); \
if ((ST) - (HT) >= need__) { \
- Uint *htop__ = (HT); \
+ Uint *htop__; \
+ move__attached__msg__data____: \
+ htop__ = (HT); \
erts_move_msg_attached_data_to_heap(&htop__, &MSO((P)), (M));\
ASSERT(htop__ - (HT) <= need__); \
(HT) = htop__; \
} \
else { \
+ int off_heap_msgs__ = (int) (P)->flags & F_OFF_HEAP_MSGS; \
+ if (!off_heap_msgs__) \
+ need__ = 0; \
{ SWPO ; } \
- (FC) -= erts_garbage_collect((P), 0, NULL, 0); \
+ (FC) -= erts_garbage_collect((P), need__, NULL, 0); \
{ SWPI ; } \
+ if (off_heap_msgs__) { \
+ ASSERT((M)->data.attached); \
+ ASSERT((ST) - (HT) >= need__); \
+ goto move__attached__msg__data____; \
+ } \
} \
ASSERT(!(M)->data.attached); \
} \
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index fb11dbbd22..9972890db7 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -82,6 +82,7 @@
/* Type tags for monitors */
#define MON_ORIGIN 1
#define MON_TARGET 3
+#define MON_TIME_OFFSET 7
/* Type tags for links */
#define LINK_PID 1 /* ...Or port */
@@ -103,7 +104,7 @@ typedef struct erts_monitor_or_link {
typedef struct erts_monitor {
struct erts_monitor *left, *right;
Sint16 balance;
- Uint16 type; /* MON_ORIGIN | MON_TARGET */
+ Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_TIME_OFFSET */
Eterm ref;
Eterm pid; /* In case of distributed named monitor, this is the
nodename atom in MON_ORIGIN process, otherwise a pid or
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index c8bb126687..fa1bde1c87 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -627,7 +627,7 @@ erts_mtrace_install_wrapper_functions(void)
if (erts_mtrace_enabled) {
int i;
/* Install trace functions */
- ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+ ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
sys_memcpy((void *) real_allctrs,
(void *) erts_allctrs,
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index adc3520ebb..c7c8b3fee3 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -36,6 +36,7 @@
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
#include "erl_process.h"
+#include "erl_bif_unique.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -551,9 +552,7 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
if (refbin == NULL) {
return 0; /* The NIF must take action */
}
- refbin->flags = BIN_FLAG_DRV; /* BUGBUG: Flag? */
erts_refc_init(&refbin->refc, 1);
- refbin->orig_size = (SWord) size;
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
@@ -573,7 +572,6 @@ int enif_realloc_binary(ErlNifBinary* bin, size_t size)
if (!newbin) {
return 0;
}
- newbin->orig_size = size;
bin->ref_bin = newbin;
bin->data = (unsigned char*) newbin->orig_bytes;
bin->size = size;
@@ -1913,12 +1911,16 @@ int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term)
int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)
{
- if (is_map(term)) {
- map_t *mp;
- mp = (map_t*)map_val(term);
- *size = map_get_size(mp);
+ if (is_flatmap(term)) {
+ flatmap_t *mp;
+ mp = (flatmap_t*)flatmap_val(term);
+ *size = flatmap_get_size(mp);
return 1;
}
+ else if (is_hashmap(term)) {
+ *size = hashmap_size(term);
+ return 1;
+ }
return 0;
}
@@ -1926,16 +1928,16 @@ ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
{
Eterm* hp = alloc_heap(env,MAP_HEADER_SIZE+1);
Eterm tup;
- map_t *mp;
+ flatmap_t *mp;
tup = make_tuple(hp);
*hp++ = make_arityval(0);
- mp = (map_t*)hp;
+ mp = (flatmap_t*)hp;
mp->thing_word = MAP_HEADER;
mp->size = 0;
mp->keys = tup;
- return make_map(mp);
+ return make_flatmap(mp);
}
int enif_make_map_put(ErlNifEnv* env,
@@ -1944,7 +1946,7 @@ int enif_make_map_put(ErlNifEnv* env,
Eterm value,
Eterm *map_out)
{
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
flush_env(env);
@@ -1958,10 +1960,16 @@ int enif_get_map_value(ErlNifEnv* env,
Eterm key,
Eterm *value)
{
- if (is_not_map(map)) {
+ const Eterm *ret;
+ if (!is_map(map)) {
return 0;
}
- return erts_maps_get(key, map, value);
+ ret = erts_maps_get(key, map);
+ if (ret) {
+ *value = *ret;
+ return 1;
+ }
+ return 0;
}
int enif_make_map_update(ErlNifEnv* env,
@@ -1971,7 +1979,7 @@ int enif_make_map_update(ErlNifEnv* env,
Eterm *map_out)
{
int res;
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
@@ -1987,7 +1995,7 @@ int enif_make_map_remove(ErlNifEnv* env,
Eterm *map_out)
{
int res;
- if (is_not_map(map_in)) {
+ if (!is_map(map_in)) {
return 0;
}
flush_env(env);
@@ -2001,13 +2009,13 @@ int enif_map_iterator_create(ErlNifEnv *env,
ErlNifMapIterator *iter,
ErlNifMapIteratorEntry entry)
{
- if (is_map(map)) {
- map_t *mp = (map_t*)map_val(map);
+ if (is_flatmap(map)) {
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
size_t offset;
switch (entry) {
case ERL_NIF_MAP_ITERATOR_HEAD: offset = 0; break;
- case ERL_NIF_MAP_ITERATOR_TAIL: offset = map_get_size(mp) - 1; break;
+ case ERL_NIF_MAP_ITERATOR_TAIL: offset = flatmap_get_size(mp) - 1; break;
default: goto error;
}
@@ -2016,14 +2024,37 @@ int enif_map_iterator_create(ErlNifEnv *env,
*/
iter->map = map;
- iter->ks = ((Eterm *)map_get_keys(mp)) + offset;
- iter->vs = ((Eterm *)map_get_values(mp)) + offset;
- iter->t_limit = map_get_size(mp) + 1;
+ iter->u.flat.ks = ((Eterm *)flatmap_get_keys(mp)) + offset;
+ iter->u.flat.vs = ((Eterm *)flatmap_get_values(mp)) + offset;
+ iter->size = flatmap_get_size(mp);
iter->idx = offset + 1;
return 1;
}
-
+ else if (is_hashmap(map)) {
+ iter->map = map;
+ iter->size = hashmap_size(map);
+ iter->u.hash.wstack = erts_alloc(ERTS_ALC_T_NIF, sizeof(ErtsDynamicWStack));
+ WSTACK_INIT(iter->u.hash.wstack, ERTS_ALC_T_NIF);
+
+ switch (entry) {
+ case ERL_NIF_MAP_ITERATOR_HEAD:
+ iter->idx = 1;
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 0);
+ iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
+ break;
+ case ERL_NIF_MAP_ITERATOR_TAIL:
+ iter->idx = hashmap_size(map);
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 1);
+ iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
+ break;
+ default:
+ goto error;
+ }
+ ASSERT(!!iter->u.hash.kv == (iter->idx >= 1 &&
+ iter->idx <= iter->size));
+ return 1;
+ }
error:
#ifdef DEBUG
iter->map = THE_NON_VALUE;
@@ -2033,48 +2064,97 @@ error:
void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- /* not used */
+ if (is_hashmap(iter->map)) {
+ WSTACK_DESTROY(iter->u.hash.wstack->ws);
+ erts_free(ERTS_ALC_T_NIF, iter->u.hash.wstack);
+ }
+ else
+ ASSERT(is_flatmap(iter->map));
+
#ifdef DEBUG
iter->map = THE_NON_VALUE;
#endif
-
}
int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
- return (iter->t_limit == 1 || iter->idx == iter->t_limit);
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ ASSERT(iter->idx >= 0);
+ ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
+ return (iter->size == 0 || iter->idx > iter->size);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ return iter->idx > iter->size;
+ }
}
int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
- return (iter->t_limit == 1 || iter->idx == 0);
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ ASSERT(iter->idx >= 0);
+ ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
+ return (iter->size == 0 || iter->idx == 0);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ return iter->idx == 0;
+ }
}
int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx < iter->t_limit) {
- iter->idx++;
- iter->ks++;
- iter->vs++;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx <= iter->size) {
+ iter->idx++;
+ iter->u.flat.ks++;
+ iter->u.flat.vs++;
+ }
+ return (iter->idx <= iter->size);
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+
+ if (iter->idx <= hashmap_size(iter->map)) {
+ if (iter->idx < 1) {
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 0);
+ }
+ iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
+ iter->idx++;
+ ASSERT(!!iter->u.hash.kv == (iter->idx <= iter->size));
+ }
+ return iter->idx <= iter->size;
}
- return (iter->idx != iter->t_limit);
}
int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx > 0) {
- iter->idx--;
- iter->ks--;
- iter->vs--;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx > 0) {
+ iter->idx--;
+ iter->u.flat.ks--;
+ iter->u.flat.vs--;
+ }
+ return iter->idx > 0;
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+
+ if (iter->idx > 0) {
+ if (iter->idx > iter->size) {
+ hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 1);
+ }
+ iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
+ iter->idx--;
+ ASSERT(!!iter->u.hash.kv == (iter->idx > 0));
+ }
+ return iter->idx > 0;
}
- return (iter->idx > 0);
}
int enif_map_iterator_get_pair(ErlNifEnv *env,
@@ -2082,15 +2162,25 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
Eterm *key,
Eterm *value)
{
- ASSERT(iter && is_map(iter->map));
- if (iter->idx > 0 && iter->idx < iter->t_limit) {
- ASSERT(iter->ks >= map_get_keys(map_val(iter->map)) &&
- iter->ks < (map_get_keys(map_val(iter->map)) + map_get_size(map_val(iter->map))));
- ASSERT(iter->vs >= map_get_values(map_val(iter->map)) &&
- iter->vs < (map_get_values(map_val(iter->map)) + map_get_size(map_val(iter->map))));
- *key = *(iter->ks);
- *value = *(iter->vs);
- return 1;
+ ASSERT(iter);
+ if (is_flatmap(iter->map)) {
+ if (iter->idx > 0 && iter->idx <= iter->size) {
+ ASSERT(iter->u.flat.ks >= flatmap_get_keys(flatmap_val(iter->map)) &&
+ iter->u.flat.ks < (flatmap_get_keys(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
+ ASSERT(iter->u.flat.vs >= flatmap_get_values(flatmap_val(iter->map)) &&
+ iter->u.flat.vs < (flatmap_get_values(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
+ *key = *(iter->u.flat.ks);
+ *value = *(iter->u.flat.vs);
+ return 1;
+ }
+ }
+ else {
+ ASSERT(is_hashmap(iter->map));
+ if (iter->idx > 0 && iter->idx <= iter->size) {
+ *key = CAR(iter->u.hash.kv);
+ *value = CDR(iter->u.hash.kv);
+ return 1;
+ }
}
return 0;
}
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 849024453c..9b2b90c82d 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -201,10 +201,18 @@ typedef enum
typedef struct /* All fields are internal and may change */
{
ERL_NIF_TERM map;
- ERL_NIF_UINT t_limit;
+ ERL_NIF_UINT size;
ERL_NIF_UINT idx;
- ERL_NIF_TERM *ks;
- ERL_NIF_TERM *vs;
+ union {
+ struct {
+ ERL_NIF_TERM *ks;
+ ERL_NIF_TERM *vs;
+ }flat;
+ struct {
+ struct ErtsDynamicWStack_* wstack;
+ ERL_NIF_TERM* kv;
+ }hash;
+ }u;
void* __spare__[2]; /* for future additions to be ABI compatible (same struct size) */
} ErlNifMapIterator;
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index d18760dc43..ac5b139f8d 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,6 +25,7 @@
#include "sys.h"
#include "big.h"
#include "erl_map.h"
+#include "erl_binary.h"
#define PRINT_CHAR(CNT, FN, ARG, C) \
do { \
@@ -138,6 +139,25 @@ is_printable_string(Eterm list, Eterm* base)
return 0;
}
+static int is_printable_ascii(byte* bytep, Uint bytesize, Uint bitoffs)
+{
+ if (!bitoffs) {
+ while (bytesize--) {
+ if (*bytep < ' ' || *bytep >= 127)
+ return 0;
+ bytep++;
+ }
+ } else {
+ while (bytesize--) {
+ byte octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ if (octet < ' ' || octet >= 127)
+ return 0;
+ bytep++;
+ }
+ }
+ return 1;
+}
+
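The bitoffs branch above reassembles each logical octet from two storage bytes when the binary does not start on a byte boundary. A standalone sketch of that extraction (not part of the patch):

    #include <assert.h>

    typedef unsigned char byte;

    int main(void)
    {
        /* "AB" (0x41 0x42) stored shifted right by 3 bits across three bytes */
        byte buf[3] = { 0x41 >> 3, (byte)((0x41 << 5) | (0x42 >> 3)), (byte)(0x42 << 5) };
        unsigned bitoffs = 3;
        byte o0 = (byte)((buf[0] << bitoffs) | (buf[1] >> (8 - bitoffs)));
        byte o1 = (byte)((buf[1] << bitoffs) | (buf[2] >> (8 - bitoffs)));
        assert(o0 == 'A' && o1 == 'B');
        return 0;
    }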
/* print an atom, doing whatever quoting is necessary */
static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
{
@@ -227,6 +247,17 @@ static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
#define PRT_PATCH_FUN_SIZE ((Eterm) 7)
#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 8) /* Note! Must be last... */
+#if 0
+static char *format_binary(Uint16 x, char *b) {
+ int z;
+ b[16] = '\0';
+ for (z = 0; z < 16; z++) {
+ b[15-z] = ((x>>z) & 0x1) ? '1' : '0';
+ }
+ return b;
+}
+#endif
+
static int
print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
Eterm* obj_base) /* ignored if !HALFWORD_HEAP */
@@ -283,13 +314,9 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
tl = CDR(cons);
if (is_not_nil(tl)) {
if (is_list(tl)) {
- WSTACK_PUSH(s, tl);
- WSTACK_PUSH(s, PRT_ONE_CONS);
- WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH3(s, tl, PRT_ONE_CONS, PRT_COMMA);
} else {
- WSTACK_PUSH(s, tl);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_BAR);
+ WSTACK_PUSH3(s, tl, PRT_TERM, PRT_BAR);
}
}
}
@@ -299,9 +326,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
break;
default: /* PRT_LAST_ARRAY_ELEMENT+1 and upwards */
obj = *popped.ptr;
- WSTACK_PUSH(s, (UWord) (popped.ptr + 1));
- WSTACK_PUSH(s, val-1);
- WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH3(s, (UWord) (popped.ptr + 1), val-1, PRT_COMMA);
break;
}
break;
@@ -431,8 +456,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
++nobj;
if (i > 0) {
- WSTACK_PUSH(s, (UWord) nobj);
- WSTACK_PUSH(s, PRT_LAST_ARRAY_ELEMENT+i-1);
+ WSTACK_PUSH2(s, (UWord) nobj, PRT_LAST_ARRAY_ELEMENT+i-1);
}
break;
case FLOAT_DEF: {
@@ -446,13 +470,65 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
PRINT_STRING(res, fn, arg, "#MatchState");
}
else {
- ProcBin* pb = (ProcBin *) binary_val(wobj);
- if (pb->size == 1)
- PRINT_STRING(res, fn, arg, "<<1 byte>>");
- else {
+ byte* bytep;
+ Uint bytesize = binary_size_rel(obj,obj_base);
+ Uint bitoffs;
+ Uint bitsize;
+ byte octet;
+ ERTS_GET_BINARY_BYTES_REL(obj, bytep, bitoffs, bitsize, obj_base);
+
+ if (bitsize || !bytesize
+ || !is_printable_ascii(bytep, bytesize, bitoffs)) {
+ int is_first = 1;
PRINT_STRING(res, fn, arg, "<<");
- PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) pb->size);
- PRINT_STRING(res, fn, arg, " bytes>>");
+ while (bytesize) {
+ if (is_first)
+ is_first = 0;
+ else
+ PRINT_CHAR(res, fn, arg, ',');
+ if (bitoffs)
+ octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ else
+ octet = bytep[0];
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, octet);
+ ++bytep;
+ --bytesize;
+ }
+ if (bitsize) {
+ Uint bits = bitoffs + bitsize;
+ octet = bytep[0];
+ if (bits < 8)
+ octet >>= 8 - bits;
+ else if (bits > 8) {
+ bits -= 8; /* bits in last byte */
+ octet <<= bits;
+ octet |= bytep[1] >> (8 - bits);
+ }
+ octet &= (1 << bitsize) - 1;
+ if (is_first)
+ is_first = 0;
+ else
+ PRINT_CHAR(res, fn, arg, ',');
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, octet);
+ PRINT_CHAR(res, fn, arg, ':');
+ PRINT_UWORD(res, fn, arg, 'u', 0, 1, bitsize);
+ }
+ PRINT_STRING(res, fn, arg, ">>");
+ }
+ else {
+ PRINT_STRING(res, fn, arg, "<<\"");
+ while (bytesize) {
+ if (bitoffs)
+ octet = (bytep[0] << bitoffs) | (bytep[1] >> (8-bitoffs));
+ else
+ octet = bytep[0];
+ if (octet == '"')
+ PRINT_CHAR(res, fn, arg, '\\');
+ PRINT_CHAR(res, fn, arg, octet);
+ ++bytep;
+ --bytesize;
+ }
+ PRINT_STRING(res, fn, arg, "\">>");
}
}
break;
@@ -492,33 +568,73 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
{
Uint n;
Eterm *ks, *vs;
- map_t *mp = (map_t *)map_val(wobj);
- n = map_get_size(mp);
- ks = map_get_keys(mp);
- vs = map_get_values(mp);
+ flatmap_t *mp = (flatmap_t *)flatmap_val(wobj);
+ n = flatmap_get_size(mp);
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
PRINT_CHAR(res, fn, arg, '#');
PRINT_CHAR(res, fn, arg, '{');
WSTACK_PUSH(s, PRT_CLOSE_TUPLE);
if (n > 0) {
n--;
- WSTACK_PUSH(s, vs[n]);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_ASSOC);
- WSTACK_PUSH(s, ks[n]);
- WSTACK_PUSH(s, PRT_TERM);
-
+ WSTACK_PUSH5(s, vs[n], PRT_TERM, PRT_ASSOC, ks[n], PRT_TERM);
while (n--) {
- WSTACK_PUSH(s, PRT_COMMA);
- WSTACK_PUSH(s, vs[n]);
- WSTACK_PUSH(s, PRT_TERM);
- WSTACK_PUSH(s, PRT_ASSOC);
- WSTACK_PUSH(s, ks[n]);
- WSTACK_PUSH(s, PRT_TERM);
+ WSTACK_PUSH6(s, PRT_COMMA, vs[n], PRT_TERM, PRT_ASSOC,
+ ks[n], PRT_TERM);
}
}
}
break;
+ case HASHMAP_DEF:
+ {
+ Uint n,mapval;
+ Eterm *head;
+ head = hashmap_val(wobj);
+ mapval = MAP_HEADER_VAL(*head);
+ switch (MAP_HEADER_TYPE(*head)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ PRINT_STRING(res, fn, arg, "#<");
+ PRINT_UWORD(res, fn, arg, 'x', 0, 1, mapval);
+ PRINT_STRING(res, fn, arg, ">{");
+ WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
+ n = hashmap_bitcount(mapval);
+ ASSERT(n < 17);
+ head += 2;
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ while (n--) {
+ WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ }
+ }
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ n = hashmap_bitcount(mapval);
+ head++;
+ PRINT_CHAR(res, fn, arg, '<');
+ PRINT_UWORD(res, fn, arg, 'x', 0, 1, mapval);
+ PRINT_STRING(res, fn, arg, ">{");
+ WSTACK_PUSH(s,PRT_CLOSE_TUPLE);
+ ASSERT(n < 17);
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ while (n--) {
+ WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH(s, head[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ }
+ }
+ break;
+ }
+ }
+ break;
default:
PRINT_STRING(res, fn, arg, "<unknown:");
PRINT_POINTER(res, fn, arg, wobj);
@@ -528,17 +644,17 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
L_done:
-
DESTROY_WSTACK(s);
return res;
}
+
int
erts_printf_term(fmtfn_t fn, void* arg, ErlPfEterm term, long precision,
ErlPfEterm* term_base)
{
int res;
- ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
+ ERTS_CT_ASSERT(sizeof(ErlPfEterm) == sizeof(Eterm));
res = print_term(fn, arg, (Eterm)term, &precision, (Eterm*)term_base);
if (res < 0)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ea63d20dfa..f74a2ee54c 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -43,6 +43,7 @@
#include "erl_async.h"
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
+#include "erl_bif_unique.h"
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -152,7 +153,7 @@ extern BeamInstr beam_continue_exit[];
/* Eager check I/O not supported on OSE yet. */
int erts_eager_check_io = 0;
#else
-int erts_eager_check_io = 0;
+int erts_eager_check_io = 1;
#endif
int erts_sched_compact_load;
int erts_sched_balance_util = 0;
@@ -457,8 +458,7 @@ do { \
static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
- int yreg);
+static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
@@ -702,8 +702,8 @@ init_sched_wall_time(ErtsSchedWallTime *swtp)
static ERTS_INLINE Uint64
sched_wall_time_ts(void)
{
-#ifdef HAVE_GETHRTIME
- return (Uint64) sys_gethrtime();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return (Uint64) erts_os_monotonic_time();
#else
Uint64 res;
SysTimeval tv;
@@ -2186,7 +2186,7 @@ aux_work_timeout_late_init(void)
{
aux_work_tmo->initialized = 1;
if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
- aux_work_tmo->timer.data.active = 0;
+ erts_init_timer(&aux_work_tmo->timer.data);
erts_set_timer(&aux_work_tmo->timer.data,
aux_work_timeout,
NULL,
@@ -2219,7 +2219,6 @@ aux_work_timeout(void *unused)
if (refc != 1
|| 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
/* Setup next timeout... */
- aux_work_tmo->timer.data.active = 0;
erts_set_timer(&aux_work_tmo->timer.data,
aux_work_timeout,
NULL,
@@ -2238,7 +2237,7 @@ setup_aux_work_timer(void)
else
#endif
{
- aux_work_tmo->timer.data.active = 0;
+ erts_init_timer(&aux_work_tmo->timer.data);
erts_set_timer(&aux_work_tmo->timer.data,
aux_work_timeout,
NULL,
@@ -2318,7 +2317,6 @@ erts_active_schedulers(void)
ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
- ASSERT(as >= 0);
return as;
}
@@ -2640,6 +2638,13 @@ thr_prgr_fin_wait(void *vssi)
static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
+void
+erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time)
+{
+ /* TODO only poke when needed (based on timeout_time) */
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
+}
+
static void *
aux_thread(void *unused)
{
@@ -2648,6 +2653,11 @@ aux_thread(void *unused)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
+ ErtsTimerWheel *timer_wheel = erts_default_timer_wheel;
+ ErtsNextTimeoutRef nxt_tmo_ref = erts_get_next_timeout_reference(timer_wheel);
+
+ if (!timer_wheel)
+ ERTS_INTERNAL_ERROR("Missing aux timer wheel");
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -2671,6 +2681,7 @@ aux_thread(void *unused)
sched_prep_spin_wait(ssi);
while (1) {
+ ErtsMonotonicTime current_time;
erts_aint32_t flgs;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -2682,28 +2693,56 @@ aux_thread(void *unused)
erts_thr_progress_leader_update(NULL);
}
- if (!aux_work) {
- if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
- erts_thr_progress_prepare_wait(NULL);
+ if (aux_work) {
+ current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(nxt_tmo_ref)) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_bump_timers(timer_wheel, current_time);
+ }
+ }
+ else {
+ ErtsMonotonicTime timeout_time;
+ timeout_time = erts_check_next_timeout_time(timer_wheel,
+ ERTS_SEC_TO_MONOTONIC(10*60));
+ current_time = erts_get_monotonic_time();
+ if (current_time >= timeout_time) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ }
+ else {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(NULL);
- ERTS_SCHED_FAIR_YIELD();
+ ERTS_SCHED_FAIR_YIELD();
- flgs = sched_spin_wait(ssi, 0);
+ flgs = sched_spin_wait(ssi, 0);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ current_time = erts_get_monotonic_time();
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ res = erts_tse_twait(ssi->event, timeout);
+ current_time = erts_get_monotonic_time();
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(NULL);
}
- erts_thr_progress_finalize_wait(NULL);
+ if (current_time >= timeout_time)
+ erts_bump_timers(timer_wheel, current_time);
}
flgs = sched_prep_spin_wait(ssi);
@@ -2770,6 +2809,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, thr_prgr_active);
while (1) {
+ ErtsMonotonicTime current_time;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
@@ -2783,34 +2823,65 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_leader_update(esdp);
}
- if (aux_work)
+ if (aux_work) {
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
else {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
+ ErtsMonotonicTime timeout_time;
+ timeout_time = erts_check_next_timeout_time(esdp->timer_wheel,
+ ERTS_SEC_TO_MONOTONIC(10*60));
+ current_time = erts_get_monotonic_time();
+ if (current_time >= timeout_time) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
}
- erts_thr_progress_prepare_wait(esdp);
}
+ else {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
- ERTS_SCHED_FAIR_YIELD();
+ ERTS_SCHED_FAIR_YIELD();
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ current_time = erts_get_monotonic_time();
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ res = erts_tse_twait(ssi->event, timeout);
+ current_time = erts_get_monotonic_time();
+ } while (res == EINTR);
+ }
}
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ erts_thr_progress_finalize_wait(esdp);
}
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
+ if (current_time >= timeout_time)
+ erts_bump_timers(esdp->timer_wheel, current_time);
}
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -2843,7 +2914,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
else
#endif
{
- erts_aint_t dt;
erts_smp_atomic32_set_relb(&function_calls, 0);
*fcalls = 0;
@@ -2868,6 +2938,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
goto sys_aux_work;
while (spincount-- > 0) {
+ ErtsMonotonicTime current_time;
sys_poll_aux_work:
@@ -2877,8 +2948,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
erl_sys_schedule(1); /* Might give us something to do */
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+ current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
sys_aux_work:
#ifndef ERTS_SMP
@@ -2993,8 +3065,11 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erl_sys_schedule(0);
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+ {
+ ErtsMonotonicTime current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
#ifndef ERTS_SMP
if (rq->len == 0 && !rq->misc.start)
@@ -5264,6 +5339,10 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
#else
esdp->no = (Uint) num;
#endif
+
+ esdp->timer_wheel = erts_default_timer_wheel;
+ esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
+
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -5276,6 +5355,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->run_queue = runq;
esdp->run_queue->scheduler = esdp;
+ esdp->thr_id = (Uint32) num;
+ erts_sched_bif_unique_init(esdp);
+
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
#ifdef ERTS_SMP
@@ -5836,6 +5918,13 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
int check_emigration_need;
#endif
+#ifdef ERTS_SMP
+ if ((p->static_flags & ERTS_STC_FLG_PREFER_SCHED)
+ && p->preferred_run_queue != RUNQ_READ_RQ(&p->run_queue)) {
+ RUNQ_SET_RQ(&p->run_queue, p->preferred_run_queue);
+ }
+#endif
+
a = state;
while (1) {
@@ -5874,6 +5963,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
free_proxy_proc(proxy);
erts_smp_runq_lock(c_rq);
+
return 0;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -6586,7 +6676,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
int res;
do {
- res = erts_tse_wait(ssi->event);
+ res = erts_tse_twait(ssi->event, -1);
} while (res == EINTR);
}
}
@@ -6749,6 +6839,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
while (1) {
+ ErtsMonotonicTime current_time;
erts_aint32_t qmask;
erts_aint32_t flgs;
@@ -6773,30 +6864,64 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
- if (!aux_work) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
+ if (aux_work) {
+ current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
}
- erts_thr_progress_prepare_wait(esdp);
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
+ }
+ else {
+ ErtsMonotonicTime timeout_time;
+ timeout_time = erts_check_next_timeout_time(esdp->timer_wheel,
+ ERTS_SEC_TO_MONOTONIC(60*60));
+ current_time = erts_get_monotonic_time();
+
+ if (current_time >= timeout_time) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+ else {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED)) {
- int res;
-
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ current_time = erts_get_monotonic_time();
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ res = erts_tse_twait(ssi->event, timeout);
+ current_time = erts_get_monotonic_time();
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(esdp);
}
- erts_thr_progress_finalize_wait(esdp);
+
+ if (current_time >= timeout_time)
+ erts_bump_timers(esdp->timer_wheel, current_time);
}
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
@@ -7615,6 +7740,9 @@ sched_thread_func(void *vesdp)
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
Uint no = esdp->no;
+
+ esdp->timer_wheel = erts_create_timer_wheel((int) no);
+ esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
#ifdef ERTS_SMP
ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -7717,6 +7845,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ esdp->thr_id += erts_no_schedulers;
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -7778,6 +7908,8 @@ sched_dirty_io_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -7835,23 +7967,17 @@ erts_start_schedulers(void)
Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
+ char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
opts.detached = 1;
-#ifdef ETHR_HAVE_THREAD_NAMES
- opts.name = malloc(80);
- if (!opts.name) {
- ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
- }
-#endif
+ opts.name = name;
#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "runq_supervisor");
-#endif
+ erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -7878,9 +8004,7 @@ erts_start_schedulers(void)
ASSERT(actual == esdp->no - 1);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "scheduler_%d", actual + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
#ifdef __OSE__
/* This should be done in the bind strategy */
@@ -7902,18 +8026,14 @@ erts_start_schedulers(void)
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
-#endif
+ erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -7924,9 +8044,7 @@ erts_start_schedulers(void)
ERTS_THR_MEMORY_BARRIER;
-#ifdef ETHR_HAVE_THREAD_NAMES
- sprintf(opts.name, "aux");
-#endif
+ erts_snprintf(opts.name, 16, "aux");
#ifdef __OSE__
opts.coreNo = 0;
@@ -7952,9 +8070,6 @@ erts_start_schedulers(void)
erts_send_error_to_logger_nogl(dsbufp);
}
-#ifdef ETHR_HAVE_THREAD_NAMES
- free(opts.name);
-#endif
}
#endif /* ERTS_SMP */
@@ -8897,7 +9012,6 @@ Process *schedule(Process *p, int calls)
{
Process *proxy_p = NULL;
ErtsRunQueue *rq;
- erts_aint_t dt;
ErtsSchedulerData *esdp;
int context_reds;
int fcalls;
@@ -9027,11 +9141,13 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- dt = erts_do_time_read_and_reset();
- if (dt) {
- erts_smp_runq_unlock(rq);
- erts_bump_timer(dt);
- erts_smp_runq_lock(rq);
+ {
+ ErtsMonotonicTime current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ erts_smp_runq_unlock(rq);
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ erts_smp_runq_lock(rq);
+ }
}
BM_STOP_TIMER(system);
@@ -9177,6 +9293,7 @@ Process *schedule(Process *p, int calls)
else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
(fcalls > input_reductions &&
prepare_for_sys_schedule(esdp, !0))) {
+ ErtsMonotonicTime current_time;
/*
* Schedule system-level activities.
*/
@@ -9189,8 +9306,10 @@ Process *schedule(Process *p, int calls)
#endif
erts_smp_runq_unlock(rq);
erl_sys_schedule(1);
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+
+ current_time = erts_get_monotonic_time();
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
#ifdef ERTS_SMP
erts_smp_runq_lock(rq);
@@ -10436,7 +10555,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
init_arg.run_queue = rq;
init_arg.state = state;
- ASSERT(((char *) p) == ((char *) &p->common));
+ ERTS_CT_ASSERT(offsetof(Process,common) == 0);
if (!erts_ptab_new_element(&erts_proc,
&p->common,
@@ -10495,7 +10614,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
int ix = so->scheduler-1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
rq = ERTS_RUNQ_IX(ix);
- state |= ERTS_PSFLG_BOUND;
+ if (!(so->flags & SPO_PREFER_SCHED)) {
+ /* Unsupported feature... */
+ state |= ERTS_PSFLG_BOUND;
+ }
}
prio = (erts_aint32_t) so->priority;
}
@@ -10503,6 +10625,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
| ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
+ if (so->flags & SPO_OFF_HEAP_MSGS)
+ state |= ERTS_PSFLG_OFF_HEAP_MSGS;
+
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -10526,11 +10651,25 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
heap_need = arg_size;
p->flags = erts_default_process_flags;
+ if (so->flags & SPO_OFF_HEAP_MSGS)
+ p->flags |= F_OFF_HEAP_MSGS;
+#ifdef ERTS_SMP
+ p->preferred_run_queue = NULL;
+#endif
+ p->static_flags = 0;
+ if (so->flags & SPO_SYSTEM_PROC)
+ p->static_flags |= ERTS_STC_FLG_SYSTEM_PROC;
if (so->flags & SPO_USE_ARGS) {
p->min_heap_size = so->min_heap_size;
p->min_vheap_size = so->min_vheap_size;
p->max_gen_gcs = so->max_gen_gcs;
+ if (so->flags & SPO_PREFER_SCHED) {
+#ifdef ERTS_SMP
+ p->preferred_run_queue = rq;
+#endif
+ p->static_flags |= ERTS_STC_FLG_PREFER_SCHED;
+ }
} else {
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
@@ -10608,7 +10747,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef ERTS_SMP
p->common.u.alive.ptimer = NULL;
#else
- sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
+ erts_init_timer(&p->common.u.alive.tm);
#endif
p->common.u.alive.reg = NULL;
@@ -10801,7 +10940,7 @@ void erts_init_empty_process(Process *p)
#ifdef ERTS_SMP
p->common.u.alive.ptimer = NULL;
#else
- memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
+ erts_init_timer(&p->common.u.alive.tm);
#endif
p->next = NULL;
p->off_heap.first = NULL;
@@ -10851,6 +10990,8 @@ void erts_init_empty_process(Process *p)
p->parent = NIL;
p->approx_started = 0;
+ p->static_flags = 0;
+
p->common.u.alive.started_interval = 0;
#ifdef HIPE
@@ -10876,6 +11017,7 @@ void erts_init_empty_process(Process *p)
p->pending_suspenders = NULL;
p->pending_exit.reason = THE_NON_VALUE;
p->pending_exit.bp = NULL;
+ p->preferred_run_queue = NULL;
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
@@ -11501,7 +11643,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ErtsMonitor *rmon;
Process *rp;
- if (mon->type == MON_ORIGIN) {
+ switch (mon->type) {
+ case MON_ORIGIN:
/* We are monitoring someone else, we need to demonitor that one.. */
if (is_atom(mon->pid)) { /* remote by name */
ASSERT(is_node_name_atom(mon->pid));
@@ -11564,7 +11707,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
}
- } else { /* type == MON_TARGET */
+ break;
+ case MON_TARGET:
ASSERT(mon->type == MON_TARGET);
ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
if (is_internal_port(mon->pid)) {
@@ -11623,6 +11767,12 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
}
+ break;
+ case MON_TIME_OFFSET:
+ erts_demonitor_time_offset(mon->ref);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid monitor type");
}
done:
/* As the monitors are previously removed from the process,
@@ -11781,6 +11931,9 @@ erts_do_exit_process(Process* p, Eterm reason)
}
#endif
+ if (p->static_flags & ERTS_STC_FLG_SYSTEM_PROC)
+ erl_exit(1, "System process %T terminated: %T\n", p->common.id, reason);
+
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
@@ -12155,7 +12308,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
}
erts_program_counter_info(to, to_arg, p);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
}
@@ -12212,7 +12365,7 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -12241,6 +12394,214 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
/*
+ * Print scheduler information
+ */
+void
+erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
+ int i;
+ erts_aint32_t flg;
+ Process *p;
+
+ erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+
+#ifdef ERTS_SMP
+ flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
+ erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
+ for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case ERTS_SSI_FLG_SLEEPING:
+ erts_print(to, to_arg, "SLEEPING"); break;
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_print(to, to_arg, "POLL_SLEEPING"); break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_print(to, to_arg, "TSE_SLEEPING"); break;
+ case ERTS_SSI_FLG_WAITING:
+ erts_print(to, to_arg, "WAITING"); break;
+ case ERTS_SSI_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+#endif
+
+ flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
+ erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
+ for (i = 0; i < ERTS_SSI_AUX_WORK_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP:
+ erts_print(to, to_arg, "DELAYED_AW_WAKEUP"); break;
+ case ERTS_SSI_AUX_WORK_DD:
+ erts_print(to, to_arg, "DELAYED_DEALLOC"); break;
+ case ERTS_SSI_AUX_WORK_DD_THR_PRGR:
+ erts_print(to, to_arg, "DELAYED_DEALLOC_THR_PRGR"); break;
+ case ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC:
+ erts_print(to, to_arg, "FIX_ALLOC_DEALLOC"); break;
+ case ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM:
+ erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break;
+ case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP:
+ erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break;
+ case ERTS_SSI_AUX_WORK_ASYNC_READY:
+ erts_print(to, to_arg, "ASYNC_READY"); break;
+ case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN:
+ erts_print(to, to_arg, "ASYNC_READY_CLEAN"); break;
+ case ERTS_SSI_AUX_WORK_MISC_THR_PRGR:
+ erts_print(to, to_arg, "MISC_THR_PRGR"); break;
+ case ERTS_SSI_AUX_WORK_MISC:
+ erts_print(to, to_arg, "MISC"); break;
+ case ERTS_SSI_AUX_WORK_CHECK_CHILDREN:
+ erts_print(to, to_arg, "CHECK_CHILDREN"); break;
+ case ERTS_SSI_AUX_WORK_SET_TMO:
+ erts_print(to, to_arg, "SET_TMO"); break;
+ case ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK:
+ erts_print(to, to_arg, "MSEG_CACHE_CHECK"); break;
+ case ERTS_SSI_AUX_WORK_REAP_PORTS:
+ erts_print(to, to_arg, "REAP_PORTS"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+
+ erts_print(to, to_arg, "Current Port: ");
+ if (esdp->current_port)
+ erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+ erts_print(to, to_arg, "\n");
+
+ p = esdp->current_process;
+ erts_print(to, to_arg, "Current Process: ");
+ if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
+ flg = erts_smp_atomic32_read_dirty(&p->state);
+ erts_print(to, to_arg, "%T\n", p->common.id);
+
+ erts_print(to, to_arg, "Current Process State: ");
+ erts_dump_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Internal State: ");
+ erts_dump_extended_process_state(to, to_arg, flg);
+
+ erts_print(to, to_arg, "Current Process Program counter: %p (", p->i);
+ print_function_from_pc(to, to_arg, p->i);
+ erts_print(to, to_arg, ")\n");
+ erts_print(to, to_arg, "Current Process CP: %p (", p->cp);
+ print_function_from_pc(to, to_arg, p->cp);
+ erts_print(to, to_arg, ")\n");
+
+	/* Getting this stacktrace can segfault if we are very
+	   unlucky and call it while the process is being garbage
+	   collected. Therefore we only call this on other schedulers
+	   if we either have protection against segfaults, or we know
+	   that the process is not garbage collecting. It *should*
+	   always be safe to call on a process owned by us, even if it
+	   is currently being garbage collected.
+	*/
+ erts_print(to, to_arg, "Current Process Limited Stack Trace:\n");
+ erts_limited_stack_trace(to, to_arg, p);
+ } else
+ erts_print(to, to_arg, "\n");
+
+ for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
+ erts_print(to, to_arg, "Run Queue ");
+ switch (i) {
+ case PRIORITY_MAX:
+ erts_print(to, to_arg, "Max ");
+ break;
+ case PRIORITY_HIGH:
+ erts_print(to, to_arg, "High ");
+ break;
+ case PRIORITY_NORMAL:
+ erts_print(to, to_arg, "Normal ");
+ break;
+ case PRIORITY_LOW:
+ erts_print(to, to_arg, "Low ");
+ break;
+ default:
+ erts_print(to, to_arg, "Unknown ");
+ break;
+ }
+ erts_print(to, to_arg, "Length: %d\n",
+ erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+ }
+ erts_print(to, to_arg, "Run Queue Port Length: %d\n",
+ erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+
+ flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags);
+ erts_print(to, to_arg, "Run Queue Flags: ");
+ for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (flg & chk) {
+ switch (chk) {
+ case (1 << PRIORITY_MAX):
+ erts_print(to, to_arg, "NONEMPTY_MAX"); break;
+ case (1 << PRIORITY_HIGH):
+ erts_print(to, to_arg, "NONEMPTY_HIGH"); break;
+ case (1 << PRIORITY_NORMAL):
+ erts_print(to, to_arg, "NONEMPTY_NORMAL"); break;
+ case (1 << PRIORITY_LOW):
+ erts_print(to, to_arg, "NONEMPTY_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+ erts_print(to, to_arg, "EMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+ erts_print(to, to_arg, "IMMIGRATE_LOW"); break;
+ case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_MAX"); break;
+ case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_HIGH"); break;
+ case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_NORMAL"); break;
+ case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+ erts_print(to, to_arg, "EVACUATE_LOW"); break;
+ case ERTS_RUNQ_FLG_OUT_OF_WORK:
+ erts_print(to, to_arg, "OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK:
+ erts_print(to, to_arg, "HALFTIME_OUT_OF_WORK"); break;
+ case ERTS_RUNQ_FLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_RUNQ_FLG_CHK_CPU_BIND:
+ erts_print(to, to_arg, "CHK_CPU_BIND"); break;
+ case ERTS_RUNQ_FLG_INACTIVE:
+ erts_print(to, to_arg, "INACTIVE"); break;
+ case ERTS_RUNQ_FLG_NONEMPTY:
+ erts_print(to, to_arg, "NONEMPTY"); break;
+ case ERTS_RUNQ_FLG_PROTECTED:
+ erts_print(to, to_arg, "PROTECTED"); break;
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+ }
+ if (flg > chk)
+ erts_print(to, to_arg, " | ");
+ flg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+}
+
+/*
* A nice system halt closing all open port goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
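
The new erts_print_scheduler_info() above decodes several flag words (scheduler sleep-info flags, aux-work flags, run-queue flags) with one idiom: walk bit positions from 0 up to the matching *_MAX limit added in erl_process.h, print a symbolic name for each set bit, emit " | " while set bits remain, and clear each printed bit from the working copy. A minimal standalone sketch of that idiom, with made-up flag names and values standing in for the ERTS constants:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical flag bits -- stand-ins for e.g. the ERTS_SSI_FLG_* values. */
    #define FLG_SLEEPING  (1u << 0)
    #define FLG_WAITING   (1u << 1)
    #define FLG_SUSPENDED (1u << 2)
    #define FLG_MAX       3            /* number of known bit positions */

    static void print_flags(uint32_t flg)
    {
        for (int i = 0; i < FLG_MAX && flg; i++) {
            uint32_t chk = 1u << i;
            if (flg & chk) {
                switch (chk) {
                case FLG_SLEEPING:  printf("SLEEPING");  break;
                case FLG_WAITING:   printf("WAITING");   break;
                case FLG_SUSPENDED: printf("SUSPENDED"); break;
                default:            printf("UNKNOWN(%u)", chk); break;
                }
                flg -= chk;            /* clear the bit just printed */
                if (flg)               /* more set bits => separator */
                    printf(" | ");
            }
        }
        printf("\n");
    }

    int main(void)
    {
        print_flags(FLG_SLEEPING | FLG_SUSPENDED); /* SLEEPING | SUSPENDED */
        return 0;
    }

Bits at or above the *_MAX limit are never reached by this loop, which is why the patch also introduces ERTS_SSI_FLGS_MAX, ERTS_SSI_AUX_WORK_MAX, ERTS_RUNQ_FLG_MAX and ERTS_PSFLG_MAX alongside the new flag bits.
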
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index f50b217d4a..743711cc3b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -58,6 +58,7 @@ typedef struct process Process;
#include "external.h"
#include "erl_mseg.h"
#include "erl_async.h"
+#include "erl_gc.h"
#ifdef HIPE
#include "hipe_process.h"
@@ -169,6 +170,8 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLG_PROTECTED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 7)
+
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
| ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
@@ -251,6 +254,8 @@ typedef enum {
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLGS_MAX 5
+
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -282,6 +287,8 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
+#define ERTS_SSI_AUX_WORK_MAX 14
+
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -341,7 +348,7 @@ typedef struct {
} ErtsRunQueueInfo;
-#ifdef HAVE_GETHRTIME
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
# undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
#endif
@@ -483,11 +490,6 @@ typedef struct {
} ErtsSchedWallTime;
typedef struct {
- Uint64 reclaimed;
- Uint64 garbage_cols;
-} ErtsGCInfo;
-
-typedef struct {
int sched;
erts_aint32_t aux_work;
} ErtsDelayedAuxWorkWakeupJob;
@@ -562,6 +564,8 @@ struct ErtsSchedulerData_ {
Eterm* x_reg_array; /* X registers */
FloatDef* f_reg_array; /* Floating point registers. */
+ ErtsTimerWheel *timer_wheel;
+ ErtsNextTimeoutRef next_tmo_ref;
#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
@@ -588,6 +592,10 @@ struct ErtsSchedulerData_ {
ErtsAuxWorkData aux_work_data;
ErtsAtomCacheMap atom_cache_map;
+ Uint32 thr_id;
+ Uint64 unique;
+ Uint64 ref;
+
ErtsSchedAllocData alloc_data;
Uint64 reductions;
@@ -934,6 +942,8 @@ struct process {
Eterm parent; /* Pid of process that created this process. */
erts_approx_time_t approx_started; /* Time when started. */
+ Uint32 static_flags; /* Flags that do *not* change */
+
/* This is the place, where all fields that differs between memory
* architectures, have gone to.
*/
@@ -965,6 +975,7 @@ struct process {
ErtsSchedulerData *scheduler_data;
Eterm suspendee;
ErtsPendingSuspend *pending_suspenders;
+ ErtsRunQueue *preferred_run_queue;
erts_smp_atomic_t run_queue;
#ifdef HIPE
struct hipe_process_state_smp hipe_smp;
@@ -1074,11 +1085,15 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
+#define ERTS_PSFLG_OFF_HEAP_MSGS ERTS_PSFLG_BIT(18)
#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(18)
-#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19)
-#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20)
-#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21)
+#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(19)
+#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(20)
+#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(21)
+#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(22)
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 23)
+#else
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 19)
#endif
#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
@@ -1093,6 +1108,12 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+/*
+ * Static flags that do not change after process creation.
+ */
+#define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0)
+#define ERTS_STC_FLG_PREFER_SCHED (((Uint32) 1) << 1)
+
/* The sequential tracing token is a tuple of size 5:
*
* {Flags, Label, Serial, Sender}
@@ -1120,6 +1141,9 @@ void erts_check_for_holes(Process* p);
#define SPO_LINK 1
#define SPO_USE_ARGS 2
#define SPO_MONITOR 4
+#define SPO_OFF_HEAP_MSGS 8
+#define SPO_SYSTEM_PROC 16
+#define SPO_PREFER_SCHED 32
/*
* The following struct contains options for a process to be spawned.
@@ -1207,6 +1231,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC */
+#define F_OFF_HEAP_MSGS (1 << 12)
/* process trace_flags */
#define F_SENSITIVE (1 << 0)
@@ -1614,7 +1639,11 @@ void erts_cleanup_empty_process(Process* p);
void erts_debug_verify_clean_empty_process(Process* p);
#endif
void erts_stack_dump(int to, void *to_arg, Process *);
+void erts_limited_stack_trace(int to, void *to_arg, Process *);
void erts_program_counter_info(int to, void *to_arg, Process *);
+void erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg);
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg);
Eterm erts_get_process_priority(Process *p);
Eterm erts_set_process_priority(Process *p, Eterm prio);
@@ -2219,6 +2248,8 @@ extern int erts_disable_proc_not_running_opt;
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
+void erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time);
+
#ifdef ERTS_SMP
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 23e5bf737f..00761f2d0e 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -47,7 +47,7 @@
/* Hash constant macros */
#define MAX_HASH 1342177280UL
-#define INITIAL_SIZE 10
+#define INITIAL_SIZE (erts_pd_initial_size)
/* Hash utility macros */
#define HASH_RANGE(PDict) ((PDict)->homeSize + (PDict)->splitPosition)
@@ -82,6 +82,7 @@
static void pd_hash_erase(Process *p, Eterm id, Eterm *ret);
static void pd_hash_erase_all(Process *p);
static Eterm pd_hash_get_keys(Process *p, Eterm value);
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd);
static Eterm pd_hash_get_all(Process *p, ProcDict *pd);
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value);
@@ -275,6 +276,16 @@ BIF_RETTYPE get_1(BIF_ALIST_1)
BIF_RET(ret);
}
+BIF_RETTYPE get_keys_0(BIF_ALIST_0)
+{
+ Eterm ret;
+
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_all_keys(BIF_P,BIF_P->dictionary);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
BIF_RETTYPE get_keys_1(BIF_ALIST_1)
{
Eterm ret;
@@ -412,6 +423,47 @@ Eterm erts_pd_hash_get(Process *p, Eterm id)
return am_undefined;
}
+#define PD_GET_TKEY(Dst,Src) \
+do { \
+ ASSERT(is_tuple((Src))); \
+ ASSERT(arityval(*((Eterm*)tuple_val((Src)))) == 2); \
+ (Dst) = ((Eterm*)tuple_val((Src)))[1]; \
+} while(0)
+
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd) {
+ Eterm* hp;
+ Eterm res = NIL;
+ Eterm tmp, tmp2;
+ unsigned int i;
+ unsigned int num;
+
+ if (pd == NULL) {
+ return res;
+ }
+
+ num = HASH_RANGE(pd);
+ hp = HAlloc(p, pd->numElements * 2);
+
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ PD_GET_TKEY(tmp,tmp);
+ res = CONS(hp, tmp, res);
+ hp += 2;
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ PD_GET_TKEY(tmp2,tmp2);
+ res = CONS(hp, tmp2, res);
+ hp += 2;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ return res;
+}
+#undef PD_GET_TKEY
+
static Eterm pd_hash_get_keys(Process *p, Eterm value)
{
Eterm *hp;
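
For orientation: each bucket of the process-dictionary hash table holds either nothing, a single {Key,Value} tuple, or a list of such tuples, and the new pd_hash_get_all_keys() above visits every bucket and conses only the keys onto the result list (two heap words per cons cell, which is what the HAlloc of numElements * 2 pays for). A simplified standalone model of that traversal; plain C structs stand in for Erlang terms, and the single-tuple and list cases are collapsed into one chain shape:

    #include <stdio.h>

    /* One key/value pair; chained pairs model a bucket holding several keys. */
    struct pair { const char *key, *val; struct pair *next; };
    struct slot { struct pair *chain; };     /* NULL means an empty slot */

    /* Visit every slot and emit only the keys, like pd_hash_get_all_keys(). */
    static void collect_keys(struct slot *slots, int nslots)
    {
        for (int i = 0; i < nslots; i++) {
            struct pair *p;
            for (p = slots[i].chain; p != NULL; p = p->next)
                printf("%s\n", p->key);      /* ERTS conses the key onto a list */
        }
    }

    int main(void)
    {
        struct pair b = { "b", "2", NULL };
        struct pair a = { "a", "1", &b };    /* slot 0 holds two pairs */
        struct pair c = { "c", "3", NULL };  /* slot 2 holds one pair  */
        struct slot slots[3] = { { &a }, { NULL }, { &c } };

        collect_keys(slots, 3);
        return 0;
    }

The get_keys/0 BIF wired up above simply returns this key list for the calling process's own dictionary.
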
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 2f3cf23b00..36bb6b2f0e 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -43,8 +43,9 @@ static void dump_process_info(int to, void *to_arg, Process *p);
static void dump_element(int to, void *to_arg, Eterm x);
static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
static void dump_element_nl(int to, void *to_arg, Eterm x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
+static int stack_element_dump(int to, void *to_arg, Eterm* sp,
int yreg);
+static void stack_trace_dump(int to, void *to_arg, Eterm* sp);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static void heap_dump(int to, void *to_arg, Eterm x);
static void dump_binaries(int to, void *to_arg, Binary* root);
@@ -148,7 +149,7 @@ dump_process_info(int to, void *to_arg, Process *p)
if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
}
erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
@@ -243,9 +244,65 @@ dump_element_nl(int to, void *to_arg, Eterm x)
erts_putc(to, to_arg, '\n');
}
+static void
+stack_trace_dump(int to, void *to_arg, Eterm *sp) {
+ Eterm x = *sp;
+ if (is_CP(x)) {
+ erts_print(to, to_arg, "%p:", sp);
+ erts_print(to, to_arg, "SReturn addr 0x%X (", cp_val(x));
+ print_function_from_pc(to, to_arg, cp_val(x));
+ erts_print(to, to_arg, ")\n");
+ }
+}
+
+void
+erts_limited_stack_trace(int to, void *to_arg, Process *p)
+{
+ Eterm* sp;
+
+
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
+ return;
+ }
+
+ if (STACK_START(p) < STACK_TOP(p)) {
+ return;
+ }
+
+ if ((STACK_START(p) - STACK_TOP(p)) < 512) {
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)STACK_START(p)))
+ for (sp = STACK_TOP(p); sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_START(p));
+ } else {
+ sp = STACK_TOP(p);
+ if (erts_sys_is_area_readable((char*)STACK_TOP(p),
+ (char*)(STACK_TOP(p) + 25)))
+ for (; sp < (STACK_TOP(p) + 256); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_TOP(p), STACK_TOP(p) + 256);
+
+ erts_print(to, to_arg, "%p: skipping %d frames\n",
+ sp, STACK_START(p) - STACK_TOP(p) - 512);
+
+ if (erts_sys_is_area_readable((char*)(STACK_START(p) - 256),
+ (char*)STACK_START(p)))
+ for (sp = STACK_START(p) - 256; sp < STACK_START(p); sp++)
+ stack_trace_dump(to, to_arg, sp);
+ else
+ erts_print(to, to_arg, "Could not read from stack memory: %p - %p\n",
+ STACK_START(p) - 256, STACK_START(p));
+ }
+
+}
static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -508,3 +565,114 @@ dump_externally(int to, void *to_arg, Eterm term)
erts_print(to, to_arg, "%02X", *s++);
}
}
+
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) {
+ if (psflg & ERTS_PSFLG_FREE)
+ erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
+ else if (psflg & ERTS_PSFLG_EXITING)
+ erts_print(to, to_arg, "Exiting\n");
+ else if (psflg & ERTS_PSFLG_GC) {
+ erts_print(to, to_arg, "Garbing\n");
+ }
+ else if (psflg & ERTS_PSFLG_SUSPENDED)
+ erts_print(to, to_arg, "Suspended\n");
+ else if (psflg & ERTS_PSFLG_RUNNING) {
+ erts_print(to, to_arg, "Running\n");
+ }
+ else if (psflg & ERTS_PSFLG_ACTIVE)
+ erts_print(to, to_arg, "Scheduled\n");
+ else
+ erts_print(to, to_arg, "Waiting\n");
+}
+
+void
+erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
+
+ int i;
+
+ switch (ERTS_PSFLGS_GET_ACT_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "ACT_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "ACT_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "ACT_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "ACT_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_USR_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "USR_PRIO_MAX | "); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "USR_PRIO_HIGH | "); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "USR_PRIO_NORMAL | "); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "USR_PRIO_LOW | "); break;
+ }
+ switch (ERTS_PSFLGS_GET_PRQ_PRIO(psflg)) {
+ case PRIORITY_MAX: erts_print(to, to_arg, "PRQ_PRIO_MAX"); break;
+ case PRIORITY_HIGH: erts_print(to, to_arg, "PRQ_PRIO_HIGH"); break;
+ case PRIORITY_NORMAL: erts_print(to, to_arg, "PRQ_PRIO_NORMAL"); break;
+ case PRIORITY_LOW: erts_print(to, to_arg, "PRQ_PRIO_LOW"); break;
+ }
+
+ psflg &= ~(ERTS_PSFLGS_ACT_PRIO_MASK |
+ ERTS_PSFLGS_USR_PRIO_MASK |
+ ERTS_PSFLGS_PRQ_PRIO_MASK);
+
+ if (psflg)
+ erts_print(to, to_arg, " | ");
+
+ for (i = 0; i < ERTS_PSFLG_MAX && psflg; i++) {
+ erts_aint32_t chk = (1 << i);
+ if (psflg & chk) {
+ switch (chk) {
+ case ERTS_PSFLG_IN_PRQ_MAX:
+ erts_print(to, to_arg, "IN_PRQ_MAX"); break;
+ case ERTS_PSFLG_IN_PRQ_HIGH:
+ erts_print(to, to_arg, "IN_PRQ_HIGH"); break;
+ case ERTS_PSFLG_IN_PRQ_NORMAL:
+ erts_print(to, to_arg, "IN_PRQ_NORMAL"); break;
+ case ERTS_PSFLG_IN_PRQ_LOW:
+ erts_print(to, to_arg, "IN_PRQ_LOW"); break;
+ case ERTS_PSFLG_FREE:
+ erts_print(to, to_arg, "FREE"); break;
+ case ERTS_PSFLG_EXITING:
+ erts_print(to, to_arg, "EXITING"); break;
+ case ERTS_PSFLG_PENDING_EXIT:
+ erts_print(to, to_arg, "PENDING_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE:
+ erts_print(to, to_arg, "ACTIVE"); break;
+ case ERTS_PSFLG_IN_RUNQ:
+ erts_print(to, to_arg, "IN_RUNQ"); break;
+ case ERTS_PSFLG_RUNNING:
+ erts_print(to, to_arg, "RUNNING"); break;
+ case ERTS_PSFLG_SUSPENDED:
+ erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_PSFLG_GC:
+ erts_print(to, to_arg, "GC"); break;
+ case ERTS_PSFLG_BOUND:
+ erts_print(to, to_arg, "BOUND"); break;
+ case ERTS_PSFLG_TRAP_EXIT:
+ erts_print(to, to_arg, "TRAP_EXIT"); break;
+ case ERTS_PSFLG_ACTIVE_SYS:
+ erts_print(to, to_arg, "ACTIVE_SYS"); break;
+ case ERTS_PSFLG_RUNNING_SYS:
+ erts_print(to, to_arg, "RUNNING_SYS"); break;
+ case ERTS_PSFLG_PROXY:
+ erts_print(to, to_arg, "PROXY"); break;
+ case ERTS_PSFLG_DELAYED_SYS:
+ erts_print(to, to_arg, "DELAYED_SYS"); break;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ case ERTS_PSFLG_DIRTY_CPU_PROC:
+ erts_print(to, to_arg, "DIRTY_CPU_PROC"); break;
+ case ERTS_PSFLG_DIRTY_IO_PROC:
+ erts_print(to, to_arg, "DIRTY_IO_PROC"); break;
+ case ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q:
+ erts_print(to, to_arg, "DIRTY_CPU_PROC_IN_Q"); break;
+ case ERTS_PSFLG_DIRTY_IO_PROC_IN_Q:
+ erts_print(to, to_arg, "DIRTY_IO_PROC_IN_Q"); break;
+#endif
+ default:
+ erts_print(to, to_arg, "UNKNOWN(%d)", chk); break;
+ }
+ if (psflg > chk)
+ erts_print(to, to_arg, " | ");
+ psflg -= chk;
+ }
+ }
+ erts_print(to, to_arg, "\n");
+}
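
erts_limited_stack_trace() above bounds how much possibly unsafe memory it touches: if the stack holds fewer than 512 words it dumps every slot, otherwise it dumps the first 256 words, prints a "skipping N frames" marker, and dumps the last 256 words, checking each window with erts_sys_is_area_readable() before reading it. A minimal sketch of the same head/tail windowing over an ordinary array (the window size, cutoff and helper names here are illustrative only):

    #include <stdio.h>
    #include <stddef.h>

    #define WINDOW 4          /* stands in for the 256-word windows */
    #define LIMIT  (2*WINDOW) /* stands in for the 512-word cutoff  */

    static void dump_slot(const int *base, size_t i)
    {
        printf("[%zu] = %d\n", i, base[i]);
    }

    static void limited_dump(const int *stk, size_t n)
    {
        size_t i;
        if (n < LIMIT) {                  /* small enough: dump everything */
            for (i = 0; i < n; i++)
                dump_slot(stk, i);
            return;
        }
        for (i = 0; i < WINDOW; i++)      /* head window */
            dump_slot(stk, i);
        printf("... skipping %zu entries ...\n", n - 2*WINDOW);
        for (i = n - WINDOW; i < n; i++)  /* tail window */
            dump_slot(stk, i);
    }

    int main(void)
    {
        int stk[12];
        for (int i = 0; i < 12; i++) stk[i] = 100 + i;
        limited_dump(stk, 12);
        return 0;
    }
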
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 28cbe7004f..d6fb88ea61 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -86,11 +86,13 @@ unsigned tag_val_def(Wterm x)
case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF;
case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF;
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF;
case (_TAG_HEADER_REFC_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
case (_TAG_HEADER_HEAP_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
case (_TAG_HEADER_SUB_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
- case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF;
+ case (_TAG_HEADER_HASHMAP >> _TAG_PRIMARY_SIZE): return HASHMAP_DEF;
}
+
break;
}
case TAG_PRIMARY_IMMED1: {
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 37014ccf94..1625a4ec15 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -141,6 +141,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define HEAP_BINARY_SUBTAG (0x9 << _TAG_PRIMARY_SIZE) /* BINARY */
#define SUB_BINARY_SUBTAG (0xA << _TAG_PRIMARY_SIZE) /* BINARY */
/* _BINARY_XXX_MASK depends on 0xB being unused */
+#define HASHMAP_SUBTAG (0xB << _TAG_PRIMARY_SIZE) /* HASHMAP */
#define EXTERNAL_PID_SUBTAG (0xC << _TAG_PRIMARY_SIZE) /* EXTERNAL_PID */
#define EXTERNAL_PORT_SUBTAG (0xD << _TAG_PRIMARY_SIZE) /* EXTERNAL_PORT */
#define EXTERNAL_REF_SUBTAG (0xE << _TAG_PRIMARY_SIZE) /* EXTERNAL_REF */
@@ -162,6 +163,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
#define _TAG_HEADER_BIN_MATCHSTATE (TAG_PRIMARY_HEADER|BIN_MATCHSTATE_SUBTAG)
#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
+#define _TAG_HEADER_HASHMAP (TAG_PRIMARY_HEADER|HASHMAP_SUBTAG)
#define _TAG_HEADER_MASK 0x3F
@@ -296,9 +298,11 @@ _ET_DECLARE_CHECKED(Uint,atom_val,Eterm)
#define atom_val(x) _ET_APPLY(atom_val,(x))
/* header (arityval or thing) access methods */
-#define _make_header(sz,tag) ((Uint)(((sz) << _HEADER_ARITY_OFFS) + (tag)))
+#define _make_header(sz,tag) ((Uint)(((Uint)(sz) << _HEADER_ARITY_OFFS) + (tag)))
#define is_header(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_HEADER)
-#define _unchecked_header_arity(x) ((x) >> _HEADER_ARITY_OFFS)
+//#define _unchecked_header_arity(x) ((x) >> _HEADER_ARITY_OFFS)
+#define _unchecked_header_arity(x) \
+ (is_hashmap_header(x) ? MAP_HEADER_ARITY(x) : ((x) >> _HEADER_ARITY_OFFS))
_ET_DECLARE_CHECKED(Uint,header_arity,Eterm)
#define header_arity(x) _ET_APPLY(header_arity,(x))
@@ -361,6 +365,7 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
((((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_REFC_BIN) || \
(((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_HEAP_BIN) || \
(((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_SUB_BIN))
+
#define make_binary(x) make_boxed((Eterm*)(x))
#define is_binary(x) (is_boxed((x)) && is_binary_header(*boxed_val((x))))
#define is_not_binary(x) (!is_binary((x)))
@@ -990,6 +995,33 @@ _ET_DECLARE_CHECKED(Uint32*,external_ref_data,Wterm)
_ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#define external_ref_node(x) _ET_APPLY(external_ref_node,(x))
+/* maps */
+
+#define MAP_HEADER_TAG_SZ (2)
+#define MAP_HEADER_ARITY_SZ (8)
+#define MAP_HEADER_VAL_SZ (16)
+
+#define MAP_HEADER_TAG_FLAT (0x0)
+#define MAP_HEADER_TAG_HAMT_NODE_BITMAP (0x1)
+#define MAP_HEADER_TAG_HAMT_HEAD_ARRAY (0x2)
+#define MAP_HEADER_TAG_HAMT_HEAD_BITMAP (0x3)
+
+#define MAP_HEADER_TYPE(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS)) & (0x3))
+#define MAP_HEADER_ARITY(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS + MAP_HEADER_TAG_SZ)) & (0xff))
+#define MAP_HEADER_VAL(Hdr) (((Hdr) >> (_HEADER_ARITY_OFFS + MAP_HEADER_TAG_SZ + MAP_HEADER_ARITY_SZ)) & (0xffff))
+
+#define make_hashmap(x) make_boxed((Eterm*)(x))
+#define make_hashmap_rel make_boxed_rel
+#define is_hashmap(x) (is_boxed((x)) && is_hashmap_header(*boxed_val((x))))
+#define is_not_hashmap(x) (!is_hashmap(x))
+#define is_hashmap_rel(RTERM,BASE) is_hashmap(rterm2wterm(RTERM,BASE))
+#define is_hashmap_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_HASHMAP)
+#define hashmap_val(x) _unchecked_boxed_val((x))
+#define hashmap_val_rel(RTERM, BASE) hashmap_val(rterm2wterm(RTERM, BASE))
+
+#define is_map(x) (is_flatmap(x) || is_hashmap(x))
+#define is_map_rel(x,BASE) (is_flatmap_rel(x,BASE) || is_hashmap_rel(x,BASE))
+
/* number tests */
#define is_integer(x) (is_small(x) || is_big(x))
@@ -1081,20 +1113,23 @@ _ET_DECLARE_CHECKED(Uint,y_reg_index,Uint)
#define BINARY_DEF 0x0
#define LIST_DEF 0x1
#define NIL_DEF 0x2
-#define MAP_DEF 0x3
-#define TUPLE_DEF 0x4
-#define PID_DEF 0x5
-#define EXTERNAL_PID_DEF 0x6
-#define PORT_DEF 0x7
-#define EXTERNAL_PORT_DEF 0x8
-#define EXPORT_DEF 0x9
-#define FUN_DEF 0xa
-#define REF_DEF 0xb
-#define EXTERNAL_REF_DEF 0xc
-#define ATOM_DEF 0xd
-#define FLOAT_DEF 0xe
-#define BIG_DEF 0xf
-#define SMALL_DEF 0x10
+#define HASHMAP_DEF 0x3
+#define MAP_DEF 0x4
+#define TUPLE_DEF 0x5
+#define PID_DEF 0x6
+#define EXTERNAL_PID_DEF 0x7
+#define PORT_DEF 0x8
+#define EXTERNAL_PORT_DEF 0x9
+#define EXPORT_DEF 0xa
+#define FUN_DEF 0xb
+#define REF_DEF 0xc
+#define EXTERNAL_REF_DEF 0xd
+#define ATOM_DEF 0xe
+#define FLOAT_DEF 0xf
+#define BIG_DEF 0x10
+#define SMALL_DEF 0x11
+
+#define FIRST_VACANT_TAG_DEF 0x12
#if ET_DEBUG
extern unsigned tag_val_def_debug(Wterm, const char*, unsigned);
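
The MAP_HEADER_* macros added to erl_term.h pack three fields above the header's arity offset: a 2-bit map type tag, an 8-bit arity and a 16-bit value (the HAMT bitmap), which MAP_HEADER_TYPE/ARITY/VAL then mask back out. A small standalone sketch of that packing and unpacking; the 6-bit offset is an assumption standing in for _HEADER_ARITY_OFFS, and the subtag bits of a real header word are omitted:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* Assumed layout: 6 low header-tag bits, then 2 bits type, 8 bits arity,
     * 16 bits value -- mirroring MAP_HEADER_TAG_SZ, MAP_HEADER_ARITY_SZ and
     * MAP_HEADER_VAL_SZ above. The offset 6 is an assumption standing in for
     * _HEADER_ARITY_OFFS. */
    #define ARITY_OFFS 6

    #define MK_MAP_HDR(type, arity, val)                 \
        ((uint64_t)(type)  << ARITY_OFFS |               \
         (uint64_t)(arity) << (ARITY_OFFS + 2) |         \
         (uint64_t)(val)   << (ARITY_OFFS + 2 + 8))

    #define HDR_TYPE(h)  (((h) >> ARITY_OFFS) & 0x3)
    #define HDR_ARITY(h) (((h) >> (ARITY_OFFS + 2)) & 0xff)
    #define HDR_VAL(h)   (((h) >> (ARITY_OFFS + 2 + 8)) & 0xffff)

    int main(void)
    {
        /* e.g. type 2 (HAMT head array), arity 3, 16-bit bitmap 0xBEEF */
        uint64_t hdr = MK_MAP_HDR(2, 3, 0xBEEF);
        assert(HDR_TYPE(hdr) == 2);
        assert(HDR_ARITY(hdr) == 3);
        assert(HDR_VAL(hdr) == 0xBEEF);
        printf("type=%llu arity=%llu val=0x%llX\n",
               (unsigned long long)HDR_TYPE(hdr),
               (unsigned long long)HDR_ARITY(hdr),
               (unsigned long long)HDR_VAL(hdr));
        return 0;
    }
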
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 664c479eb6..4c9b00d2ee 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -1335,25 +1335,10 @@ erts_thr_progress_block(void)
thr_progress_block(tmp_thr_prgr_data(NULL), 1);
}
-void
-erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp)
+int
+erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp)
{
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
- erts_aint32_t bc;
- SWord time_left = timeout;
- SysTimeval to;
-
- /*
- * Counting poll intervals may give us a too long timeout
- * if cpu is busy. If we got tolerant time of day we use it
- * to prevent this.
- */
- if (!erts_disable_tolerant_timeofday) {
- erts_get_timeval(&to);
- to.tv_sec += timeout / 1000;
- to.tv_sec += timeout % 1000;
- }
if (!tpd) {
/*
@@ -1366,9 +1351,24 @@ erts_thr_progress_fatal_error_block(SWord timeout,
init_tmp_thr_prgr_data(tpd);
}
- bc = thr_progress_block(tpd, 0);
- if (bc == 0)
- return; /* Succefully blocked all managed threads */
+    /* Returns the number of threads that have not yet been blocked */
+ return thr_progress_block(tpd, 0);
+}
+
+void
+erts_thr_progress_fatal_error_wait(SWord timeout) {
+ erts_aint32_t bc;
+ SWord time_left = timeout;
+ ErtsMonotonicTime timeout_time;
+
+ /*
+     * Counting poll intervals may give us too long a timeout
+     * if the cpu is busy. We use the timeout time to try to prevent
+     * this. In case we haven't got time correction this may,
+     * however, fail too...
+ */
+ timeout_time = erts_get_monotonic_time();
+ timeout_time += ERTS_MSEC_TO_MONOTONIC((ErtsMonotonicTime) timeout);
while (1) {
if (erts_milli_sleep(ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL) == 0)
@@ -1378,14 +1378,8 @@ erts_thr_progress_fatal_error_block(SWord timeout,
break; /* Succefully blocked all managed threads */
if (time_left <= 0)
break; /* Timeout */
- if (!erts_disable_tolerant_timeofday) {
- SysTimeval now;
- erts_get_timeval(&now);
- if (now.tv_sec > to.tv_sec)
- break; /* Timeout */
- if (now.tv_sec == to.tv_sec && now.tv_usec >= to.tv_usec)
- break; /* Timeout */
- }
+ if (timeout_time <= erts_get_monotonic_time())
+ break; /* Timeout */
}
}
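
The reworked fatal-error path above separates blocking from waiting: erts_thr_progress_fatal_error_block() only attempts the block and returns how many managed threads are still unblocked, while erts_thr_progress_fatal_error_wait() polls at a fixed interval until the block succeeds or the timeout expires, with the deadline computed once up front from the monotonic clock. A minimal sketch of the same poll-until-deadline pattern using POSIX CLOCK_MONOTONIC; the check() callback is a stand-in for the "all managed threads blocked" test:

    #include <stdio.h>
    #include <time.h>

    /* Milliseconds on the monotonic clock. */
    static long long mono_msec(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll check() every poll_ms until it returns 0 or timeout_ms has passed.
     * Returns 0 on success, -1 on timeout. */
    static int poll_until(int (*check)(void), long poll_ms, long timeout_ms)
    {
        long long deadline = mono_msec() + timeout_ms;   /* computed once */
        for (;;) {
            if (check() == 0)
                return 0;            /* e.g. all managed threads blocked */
            if (mono_msec() >= deadline)
                return -1;           /* timeout */
            struct timespec ts = { poll_ms / 1000, (poll_ms % 1000) * 1000000 };
            nanosleep(&ts, NULL);
        }
    }

    static int pending = 3;
    static int still_pending(void) { return pending-- > 0; }

    int main(void)
    {
        printf("result: %d\n", poll_until(still_pending, 10, 1000));
        return 0;
    }
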
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 03ddbd467c..cf11c4e114 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -83,8 +83,8 @@ typedef struct {
ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
-void erts_thr_progress_fatal_error_block(SWord timeout,
- ErtsThrPrgrData *tmp_tpd_bufp);
+int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp);
+void erts_thr_progress_fatal_error_wait(SWord timeout);
#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 7214f3ea33..dc20ac207f 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -476,6 +476,7 @@ ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid);
ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
+ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
int enable_lcnt);
@@ -651,16 +652,24 @@ ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo);
+ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo);
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
ERTS_GLB_INLINE int erts_thr_get_main_status(void);
ERTS_GLB_INLINE void erts_thr_yield(void);
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
sigset_t *oset);
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
+
+#ifdef USE_THREADS
+ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig);
+#endif
+
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
#ifdef USE_THREADS
@@ -2129,6 +2138,16 @@ erts_thr_self(void)
#endif
}
+ERTS_GLB_INLINE int
+erts_thr_getname(erts_tid_t tid, char *buf, size_t len)
+{
+#ifdef USE_THREADS
+ return ethr_getname(tid, buf, len);
+#else
+ return -1;
+#endif
+}
+
ERTS_GLB_INLINE int
erts_equal_tids(erts_tid_t x, erts_tid_t y)
@@ -3473,6 +3492,27 @@ ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
#endif
}
+ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
+{
+#ifdef USE_THREADS
+ return ethr_event_twait(&((ethr_ts_event *) ep)->event,
+ (ethr_sint64_t) tmo);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
+{
+#ifdef USE_THREADS
+ return ethr_event_stwait(&((ethr_ts_event *) ep)->event,
+ spincount,
+ (ethr_sint64_t) tmo);
+#else
+ return ENOTSUP;
+#endif
+}
+
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
#ifdef USE_THREADS
@@ -3517,6 +3557,15 @@ ERTS_GLB_INLINE void erts_thr_yield(void)
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
+erts_thr_kill(erts_tid_t tid, int sig) {
+#ifdef USE_THREADS
+ int res = ethr_kill((ethr_tid)tid, sig);
+ if (res)
+ erts_thr_fatal_error(res, "killing thread");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
#ifdef USE_THREADS
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 4bbdcaa3e3..c9cda4d10e 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -20,11 +20,16 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-#define ERTS_SHORT_TIME_T_MAX ERTS_AINT32_T_MAX
-#define ERTS_SHORT_TIME_T_MIN ERTS_AINT32_T_MIN
-typedef erts_aint32_t erts_short_time_t;
+#if defined(DEBUG) || 0
+#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
+#else
+#define ERTS_TIME_ASSERT(B) ((void) 1)
+#endif
+
+typedef struct ErtsTimerWheel_ ErtsTimerWheel;
+typedef erts_atomic64_t * ErtsNextTimeoutRef;
+extern ErtsTimerWheel *erts_default_timer_wheel;
-extern erts_smp_atomic32_t do_time; /* set at clock interrupt */
extern SysTimeval erts_first_emu_time;
/*
@@ -34,8 +39,8 @@ typedef struct erl_timer {
struct erl_timer* next; /* next entry tiw slot or chain */
struct erl_timer* prev; /* prev entry tiw slot or chain */
Uint slot; /* slot in timer wheel */
- Uint count; /* number of loops remaining */
- int active; /* 1=activated, 0=deactivated */
+ erts_smp_atomic_t wheel;
+ ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
/* called when timeout */
void (*timeout)(void*);
/* called when cancel (may be NULL) */
@@ -62,7 +67,6 @@ union ErtsSmpPTimer_ {
ErtsSmpPTimer *next;
};
-
void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
Eterm id,
ErlTimeoutProc timeout_func,
@@ -70,36 +74,42 @@ void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
#endif
+void erts_monitor_time_offset(Eterm id, Eterm ref);
+int erts_demonitor_time_offset(Eterm ref);
+
+void erts_late_init_time_sup(void);
+
/* timer-wheel api */
-void erts_init_time(void);
+ErtsTimerWheel *erts_create_timer_wheel(int);
+ErtsNextTimeoutRef erts_get_next_timeout_reference(ErtsTimerWheel *);
+void erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode);
void erts_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
void erts_cancel_timer(ErlTimer*);
-void erts_bump_timer(erts_short_time_t);
-Uint erts_timer_wheel_memory_size(void);
Uint erts_time_left(ErlTimer *);
-erts_short_time_t erts_next_time(void);
+void erts_bump_timers(ErtsTimerWheel *, ErtsMonotonicTime);
+Uint erts_timer_wheel_memory_size(void);
#ifdef DEBUG
void erts_p_slpq(void);
#endif
-ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void);
-ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t);
+ErtsMonotonicTime erts_check_next_timeout_time(ErtsTimerWheel *,
+ ErtsMonotonicTime);
+
+ERTS_GLB_INLINE void erts_init_timer(ErlTimer *p);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void)
+ERTS_GLB_INLINE void erts_init_timer(ErlTimer *p)
{
- erts_short_time_t time = erts_smp_atomic32_xchg_acqb(&do_time, 0);
- if (time < 0)
- erl_exit(ERTS_ABORT_EXIT, "Internal time management error\n");
- return time;
+ erts_smp_atomic_init_nob(&p->wheel, (erts_aint_t) NULL);
}
-ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
{
- erts_smp_atomic32_add_relb(&do_time, elapsed);
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb((erts_atomic64_t *) nxt_tmo_ref);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -107,7 +117,7 @@ ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
/* time_sup */
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
# ifndef HAVE_ERTS_NOW_CPU
# define HAVE_ERTS_NOW_CPU
# ifdef HAVE_GETHRVTIME
@@ -121,25 +131,228 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
typedef UWord erts_approx_time_t;
erts_approx_time_t erts_get_approx_time(void);
-void erts_get_timeval(SysTimeval *tv);
-erts_time_t erts_get_time(void);
+int erts_has_time_correction(void);
+int erts_check_time_adj_support(int time_correction,
+ ErtsTimeWarpMode time_warp_mode);
+
+ErtsTimeWarpMode erts_time_warp_mode(void);
+
+typedef enum {
+ ERTS_TIME_OFFSET_PRELIMINARY,
+ ERTS_TIME_OFFSET_FINAL,
+ ERTS_TIME_OFFSET_VOLATILE
+} ErtsTimeOffsetState;
+
+ErtsTimeOffsetState erts_time_offset_state(void);
+ErtsTimeOffsetState erts_finalize_time_offset(void);
+struct process;
+Eterm erts_get_monotonic_start_time(struct process *c_p);
+Eterm erts_monotonic_time_source(struct process*c_p);
+
+#ifdef SYS_CLOCK_RESOLUTION
+#define ERTS_CLKTCK_RESOLUTION ((ErtsMonotonicTime) (SYS_CLOCK_RESOLUTION*1000))
+#else
+#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
+#endif
+
+struct erts_time_sup_read_only__ {
+ ErtsMonotonicTime monotonic_time_unit;
+#ifndef SYS_CLOCK_RESOLUTION
+ ErtsMonotonicTime clktck_resolution;
+#endif
+};
+
+typedef struct {
+ union {
+ struct erts_time_sup_read_only__ o;
+ char align__[(((sizeof(struct erts_time_sup_read_only__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+} ErtsTimeSupData;
+
+extern ErtsTimeSupData erts_time_sup__;
-ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
+ERTS_GLB_INLINE Uint64
+erts_time_unit_conversion(Uint64 value,
+ Uint32 from_time_unit,
+ Uint32 to_time_unit);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE int
-erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p)
+ERTS_GLB_INLINE Uint64
+erts_time_unit_conversion(Uint64 value,
+ Uint32 from_time_unit,
+ Uint32 to_time_unit)
{
- if (t1p->tv_sec == t2p->tv_sec) {
- if (t1p->tv_usec < t2p->tv_usec)
- return -1;
- else if (t1p->tv_usec > t2p->tv_usec)
- return 1;
- return 0;
- }
- return t1p->tv_sec < t2p->tv_sec ? -1 : 1;
+ Uint64 high, low, result;
+ if (value <= ~((Uint64) 0)/to_time_unit)
+ return (value*to_time_unit)/from_time_unit;
+
+ low = value & ((Uint64) 0xffffffff);
+ high = (value >> 32) & ((Uint64) 0xffffffff);
+
+ low *= to_time_unit;
+ high *= to_time_unit;
+
+ high += (low >> 32) & ((Uint64) 0xffffffff);
+ low &= ((Uint64) 0xffffffff);
+
+ result = high % from_time_unit;
+ high /= from_time_unit;
+ high <<= 32;
+
+ result <<= 32;
+ result += low;
+ result /= from_time_unit;
+ result += high;
+
+ return result;
}
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+
+/*
+ * If the monotonic time unit is a compile time constant,
+ * it is assumed (and needs) to be a power of 10.
+ */
+
+#define ERTS_MONOTONIC_TIME_UNIT \
+ ((ErtsMonotonicTime) ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT)
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000*1000*1000
+/* Nano-second time unit */
+
+#define ERTS_MONOTONIC_TO_SEC__(NSEC) ((NSEC) / (1000*1000*1000))
+#define ERTS_MONOTONIC_TO_MSEC__(NSEC) ((NSEC) / (1000*1000))
+#define ERTS_MONOTONIC_TO_USEC__(NSEC) ((NSEC) / 1000)
+#define ERTS_MONOTONIC_TO_NSEC__(NSEC) (NSEC)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) (((ErtsMonotonicTime) (SEC))*(1000*1000*1000))
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) (((ErtsMonotonicTime) (MSEC))*(1000*1000))
+#define ERTS_USEC_TO_MONOTONIC__(USEC) (((ErtsMonotonicTime) (USEC))*1000)
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) ((ErtsMonotonicTime) (NSEC))
+
+#elif ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000*1000
+/* Micro-second time unit */
+
+#define ERTS_MONOTONIC_TO_SEC__(USEC) ((USEC) / (1000*1000))
+#define ERTS_MONOTONIC_TO_MSEC__(USEC) ((USEC) / 1000)
+#define ERTS_MONOTONIC_TO_USEC__(USEC) (USEC)
+#define ERTS_MONOTONIC_TO_NSEC__(USEC) ((USEC)*1000)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) (((ErtsMonotonicTime) (SEC))*(1000*1000))
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) (((ErtsMonotonicTime) (MSEC))*1000)
+#define ERTS_USEC_TO_MONOTONIC__(USEC) ((ErtsMonotonicTime) (USEC))
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) (((ErtsMonotonicTime) (NSEC))/1000)
+
+#elif ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000
+/* Milli-second time unit */
+
+#define ERTS_MONOTONIC_TO_SEC__(MSEC) ((MSEC)/(1000))
+#define ERTS_MONOTONIC_TO_MSEC__(MSEC) (MSEC)
+#define ERTS_MONOTONIC_TO_USEC__(MSEC) ((MSEC)*1000)
+#define ERTS_MONOTONIC_TO_NSEC__(MSEC) ((MSEC)*(1000*1000))
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) (((ErtsMonotonicTime) (SEC))*1000)
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) ((ErtsMonotonicTime) (MSEC))
+#define ERTS_USEC_TO_MONOTONIC__(USEC) (((ErtsMonotonicTime) (USEC))/1000)
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) (((ErtsMonotonicTime) (NSEC))/(1000*1000))
+
+#else
+#error Missing implementation for monotonic time unit
+#endif
+
+#define ERTS_MONOTONIC_TO_CLKTCKS__(MON) \
+ ((MON) / (ERTS_MONOTONIC_TIME_UNIT/ERTS_CLKTCK_RESOLUTION))
+#define ERTS_CLKTCKS_TO_MONOTONIC__(TCKS) \
+ ((TCKS) * (ERTS_MONOTONIC_TIME_UNIT/ERTS_CLKTCK_RESOLUTION))
+
+#else /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+#define ERTS_MONOTONIC_TIME_UNIT (erts_time_sup__.r.o.monotonic_time_unit)
+
+#define ERTS_CONV_FROM_MON_UNIT___(M, TO) \
+ ((ErtsMonotonicTime) \
+ erts_time_unit_conversion((Uint64) (M), \
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT, \
+ (Uint32) (TO)))
+
+#define ERTS_CONV_TO_MON_UNIT___(M, FROM) \
+ ((ErtsMonotonicTime) \
+ erts_time_unit_conversion((Uint64) (M), \
+ (Uint32) (FROM), \
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT)) \
+
+#define ERTS_MONOTONIC_TO_SEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1)
+#define ERTS_MONOTONIC_TO_MSEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000)
+#define ERTS_MONOTONIC_TO_USEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000*1000)
+#define ERTS_MONOTONIC_TO_NSEC__(M) \
+ ERTS_CONV_FROM_MON_UNIT___((M), 1000*1000*1000)
+
+#define ERTS_SEC_TO_MONOTONIC__(SEC) \
+ ERTS_CONV_TO_MON_UNIT___((SEC), 1)
+#define ERTS_MSEC_TO_MONOTONIC__(MSEC) \
+ ERTS_CONV_TO_MON_UNIT___((MSEC), 1000)
+#define ERTS_USEC_TO_MONOTONIC__(USEC) \
+ ERTS_CONV_TO_MON_UNIT___((USEC), 1000*1000)
+#define ERTS_NSEC_TO_MONOTONIC__(NSEC) \
+ ERTS_CONV_TO_MON_UNIT___((NSEC), 1000*1000*1000)
+
+#define ERTS_MONOTONIC_TO_CLKTCKS__(MON) \
+ ERTS_CONV_FROM_MON_UNIT___((MON), ERTS_CLKTCK_RESOLUTION)
+#define ERTS_CLKTCKS_TO_MONOTONIC__(TCKS) \
+ ERTS_CONV_TO_MON_UNIT___((TCKS), ERTS_CLKTCK_RESOLUTION)
+
+#endif /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+#define ERTS_MSEC_TO_CLKTCKS__(MON) \
+ ((MON) * (ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_CLKTCKS_TO_MSEC__(TCKS) \
+ ((TCKS) / (ERTS_CLKTCK_RESOLUTION/1000))
+
+#define ERTS_MONOTONIC_TO_SEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_SEC__((X)))
+#define ERTS_MONOTONIC_TO_MSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_MSEC__((X)))
+#define ERTS_MONOTONIC_TO_USEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_USEC__((X)))
+#define ERTS_MONOTONIC_TO_NSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_NSEC__((X)))
+#define ERTS_SEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_SEC_TO_MONOTONIC__((X)))
+#define ERTS_MSEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MSEC_TO_MONOTONIC__((X)))
+#define ERTS_USEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_USEC_TO_MONOTONIC__((X)))
+#define ERTS_NSEC_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_NSEC_TO_MONOTONIC__((X)))
+
+#define ERTS_MONOTONIC_TO_CLKTCKS(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MONOTONIC_TO_CLKTCKS__((X)))
+#define ERTS_CLKTCKS_TO_MONOTONIC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_CLKTCKS_TO_MONOTONIC__((X)))
+
+#define ERTS_MSEC_TO_CLKTCKS(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_MSEC_TO_CLKTCKS__((X)))
+#define ERTS_CLKTCKS_TO_MSEC(X) \
+ (ERTS_TIME_ASSERT((X) >= 0), \
+ ERTS_CLKTCKS_TO_MSEC__((X)))
+
#endif /* ERL_TIME_H__ */
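
erts_time_unit_conversion() above computes value * to_unit / from_unit on 64-bit values without overflowing: when the multiplication would not fit, it splits value into 32-bit halves, scales each half, and recombines quotient and remainder so the final division still happens in 64 bits. A standalone sketch of the same split-and-scale arithmetic, checked against 128-bit reference math (unsigned __int128 is a GCC/Clang extension used only for the check):

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* value * to_unit / from_unit without 64-bit overflow (truncating). */
    static uint64_t unit_conv(uint64_t value, uint32_t from_unit, uint32_t to_unit)
    {
        uint64_t high, low, result;

        if (value <= UINT64_MAX / to_unit)        /* fast path: no overflow */
            return value * to_unit / from_unit;

        low  = value & 0xffffffffu;               /* split into 32-bit halves */
        high = (value >> 32) & 0xffffffffu;

        low  *= to_unit;                          /* scale each half */
        high *= to_unit;

        high += (low >> 32) & 0xffffffffu;        /* carry into the high half */
        low  &= 0xffffffffu;

        result = high % from_unit;                /* remainder of the high part */
        high  /= from_unit;
        high <<= 32;

        result <<= 32;                            /* fold remainder into low part */
        result  += low;
        result  /= from_unit;
        result  += high;

        return result;
    }

    int main(void)
    {
        /* Check against 128-bit reference arithmetic (GCC/Clang extension). */
        uint64_t v = UINT64_MAX / 3;              /* large enough to overflow */
        uint32_t from = 1000000000u, to = 1000u;  /* e.g. a ns unit to ms ticks */
        unsigned __int128 ref = (unsigned __int128)v * to / from;
        assert(unit_conv(v, from, to) == (uint64_t)ref);
        printf("%llu\n", (unsigned long long)unit_conv(v, from, to));
        return 0;
    }
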
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 3272a5326d..b809fa8316 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -18,60 +18,8 @@
*/
/*
-** Support routines for the timer wheel
-**
-** This code contains two strategies for dealing with
-** date/time changes in the system.
-** If the system has some kind of high resolution timer (HAVE_GETHRTIME),
-** the high resolution timer is used to correct the time-of-day and the
-** timeouts, the base source is the hrtimer, but at certain intervals the
-** OS time-of-day is checked and if it is not within certain bounds, the
-** delivered time gets slowly adjusted for each call until
-** it corresponds to the system time (built-in adjtime...).
-** The call gethrtime() is detected by autoconf on Unix, but other
-** platforms may define it in erl_*_sys.h and implement
-** their own high resolution timer. The high resolution timer
-** strategy is (probably) best on all systems where the timer have
-** a resolution higher or equal to gettimeofday (or what's implemented
-** is sys_gettimeofday()). The actual resolution is the interesting thing,
-** not the unit's thats used (i.e. on VxWorks, nanoseconds can be
-** retrieved in terms of units, but the actual resolution is the same as
-** for the clock ticks).
-** If the systems best timer routine is kernel ticks returned from
-** sys_times(), and the actual resolution of sys_gettimeofday() is
-** better (like most unixes that does not have any realtime extensions),
-** another strategy is used. The tolerant gettimeofday() corrects
-** the value with respect to uptime (sys_times() return value) and checks
-** for correction both when delivering timeticks and delivering nowtime.
-** this strategy is slower, but accurate on systems without better timer
-** routines. The kernel tick resolution is not enough to implement
-** a gethrtime routine. On Linux and other non solaris unix-boxes the second
-** strategy is used, on all other platforms we use the first.
-**
-** The following is expected (from sys.[ch] and erl_*_sys.h):
-**
-** 64 bit integers. So it is, and so it will be.
-**
-** sys_init_time(), will return the clock resolution in MS and
-** that's about it. More could be added of course
-** If the clock-rate is constant (i.e. 1 ms) one can define
-** SYS_CLOCK_RESOLUTION (to 1),
-** which makes erts_deliver_time/erts_time_remaining a bit faster.
-**
-** if HAVE_GETHRTIME is defined:
-** sys_gethrtime() will return a SysHrTime (long long) representing
-** nanoseconds, sys_init_hrtime() will do any initialization.
-** else
-** a long (64bit) integer type called Sint64 should be defined.
-**
-** sys_times() will return clock_ticks since start and
-** fill in a SysTimes structure (struct tms). Instead of CLK_TCK,
-** SYS_CLK_TCK is used to determine the resolution of kernel ticks.
-**
-** sys_gettimeofday() will take a SysTimeval (a struct timeval) as parameter
-** and fill it in as gettimeofday(X,NULL).
-**
-*/
+ * Support routines for the time
+ */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -80,384 +28,1028 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
-
+
static erts_smp_mtx_t erts_timeofday_mtx;
-
-static SysTimeval inittv; /* Used everywhere, the initial time-of-day */
+static erts_smp_mtx_t erts_get_time_mtx;
static SysTimes t_start; /* Used in elapsed_time_both */
-static SysTimeval gtv; /* Used in wall_clock_elapsed_time_both */
-static SysTimeval then; /* Used in get_now */
-static SysTimeval last_emu_time; /* Used in erts_get_emu_time() */
-SysTimeval erts_first_emu_time; /* Used in erts_get_emu_time() */
+static ErtsMonotonicTime prev_wall_clock_elapsed; /* Used in wall_clock_elapsed_time_both */
+static ErtsMonotonicTime previous_now; /* Used in get_now */
+
+static ErtsMonitor *time_offset_monitors = NULL;
+static Uint no_time_offset_monitors = 0;
-union {
- erts_smp_atomic_t time;
- char align[ERTS_CACHE_LINE_SIZE];
-} approx erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+#ifdef DEBUG
+static int time_sup_initialized = 0;
+#endif
+
+#define ERTS_MONOTONIC_TIME_KILO \
+ ((ErtsMonotonicTime) 1000)
+#define ERTS_MONOTONIC_TIME_MEGA \
+ (ERTS_MONOTONIC_TIME_KILO*ERTS_MONOTONIC_TIME_KILO)
+#define ERTS_MONOTONIC_TIME_GIGA \
+ (ERTS_MONOTONIC_TIME_MEGA*ERTS_MONOTONIC_TIME_KILO)
+#define ERTS_MONOTONIC_TIME_TERA \
+ (ERTS_MONOTONIC_TIME_GIGA*ERTS_MONOTONIC_TIME_KILO)
static void
-init_approx_time(void)
+schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset);
+
+/*
+ * NOTE! ERTS_MONOTONIC_TIME_START *needs* to be a multiple
+ * of ERTS_MONOTONIC_TIME_UNIT.
+ */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+
+#ifdef ARCH_32
+/*
+ * Want to use a big-num of arity 2 as long as possible (584 years
+ * in the nano-second time unit case).
+ */
+#define ERTS_MONOTONIC_TIME_START \
+ (((((((ErtsMonotonicTime) 1) << 32)-1) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT) \
+ + ERTS_MONOTONIC_TIME_UNIT)
+
+#else /* ARCH_64 */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT <= 1000*1000
+
+/*
+ * Using micro second time unit or lower. Start at zero since
+ * time will remain an immediate for a very long time anyway
+ * (18279 years in the micro second case)...
+ */
+#define ERTS_MONOTONIC_TIME_START ((ErtsMonotonicTime) 0)
+
+#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 1000*1000 */
+
+/*
+ * Want to use an immediate as long as possible (36 years in the
+ * nano-second time unit case).
+ */
+#define ERTS_MONOTONIC_TIME_START \
+ ((((ErtsMonotonicTime) MIN_SMALL) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT)
+
+#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 1000*1000 */
+
+#endif /* ARCH_64 */
+
+#define ERTS_MONOTONIC_OFFSET_NATIVE ERTS_MONOTONIC_TIME_START
+#define ERTS_MONOTONIC_OFFSET_NSEC ERTS_MONOTONIC_TO_NSEC__(ERTS_MONOTONIC_TIME_START)
+#define ERTS_MONOTONIC_OFFSET_USEC ERTS_MONOTONIC_TO_USEC__(ERTS_MONOTONIC_TIME_START)
+#define ERTS_MONOTONIC_OFFSET_MSEC ERTS_MONOTONIC_TO_MSEC__(ERTS_MONOTONIC_TIME_START)
+#define ERTS_MONOTONIC_OFFSET_SEC ERTS_MONOTONIC_TO_SEC__(ERTS_MONOTONIC_TIME_START)
+
+#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+/*
+ * Initialized in erts_init_time_sup()...
+ */
+
+#define ERTS_MONOTONIC_TIME_START (time_sup.r.o.start)
+#define ERTS_MONOTONIC_OFFSET_NATIVE (time_sup.r.o.start_offset.native)
+#define ERTS_MONOTONIC_OFFSET_NSEC (time_sup.r.o.start_offset.nsec)
+#define ERTS_MONOTONIC_OFFSET_USEC (time_sup.r.o.start_offset.usec)
+#define ERTS_MONOTONIC_OFFSET_MSEC (time_sup.r.o.start_offset.msec)
+#define ERTS_MONOTONIC_OFFSET_SEC (time_sup.r.o.start_offset.sec)
+
+#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+
+#define ERTS_MONOTONIC_TO_SYS_TIME_VAL(TVP, MT) \
+ do { \
+ ErtsMonotonicTime sec__, usec__; \
+ sec__ = ERTS_MONOTONIC_TO_SEC((MT)); \
+ usec__ = ERTS_MONOTONIC_TO_USEC((MT)) - sec__*1000000; \
+ ASSERT(usec__ < 1000000); \
+ (TVP)->tv_sec = sec__; \
+ (TVP)->tv_usec = usec__; \
+ } while (0)
+
+#define ERTS_MAX_SYSTEM_TIME_DIFF ERTS_MSEC_TO_MONOTONIC(10)
+#define ERTS_SYSTEM_TIME_DIFF_EXCEED_LIMIT(ESYSTIME, OSSYSTIME) \
+ (((Uint64) (ESYSTIME)) - (((Uint64) (OSSYSTIME)) \
+ - ERTS_MAX_SYSTEM_TIME_DIFF) \
+ > 2*ERTS_MAX_SYSTEM_TIME_DIFF)
+
+#define ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF (ERTS_MAX_SYSTEM_TIME_DIFF/2)
+#define ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(500)
+
+struct time_sup_read_only__ {
+ ErtsMonotonicTime (*get_time)(void);
+ int correction;
+ ErtsTimeWarpMode warp_mode;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ ErtsMonotonicTime moffset;
+ int os_monotonic_disable;
+ char *os_monotonic_func;
+ char *os_monotonic_clock_id;
+ int os_monotonic_locked;
+ Uint64 os_monotonic_resolution;
+#endif
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ ErtsMonotonicTime start;
+ struct {
+ ErtsMonotonicTime native;
+ ErtsMonotonicTime nsec;
+ ErtsMonotonicTime usec;
+ ErtsMonotonicTime msec;
+ ErtsMonotonicTime sec;
+ } start_offset;
+#endif
+};
+
+typedef struct {
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ ErtsMonotonicTime drift; /* Correction for os monotonic drift */
+#endif
+ ErtsMonotonicTime error; /* Correction for error between system times */
+} ErtsMonotonicCorrection;
+
+typedef struct {
+ ErtsMonotonicTime erl_mtime;
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrection correction;
+} ErtsMonotonicCorrectionInstance;
+
+#define ERTS_DRIFT_INTERVALS 5
+typedef struct {
+ struct {
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } diff;
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } time;
+ } intervals[ERTS_DRIFT_INTERVALS];
+ struct {
+ ErtsMonotonicTime sys;
+ ErtsMonotonicTime mon;
+ } acc;
+ int ix;
+ int dirty_counter;
+} ErtsMonotonicDriftData;
+
+typedef struct {
+ ErtsMonotonicCorrectionInstance prev;
+ ErtsMonotonicCorrectionInstance curr;
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ ErtsMonotonicDriftData drift;
+#endif
+ ErtsMonotonicTime last_check;
+ int short_check_interval;
+} ErtsMonotonicCorrectionData;
+
+struct time_sup_infrequently_changed__ {
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ struct {
+ erts_smp_rwmtx_t rwmtx;
+ ErlTimer timer;
+ ErtsMonotonicCorrectionData cdata;
+ } parmon;
+ ErtsMonotonicTime minit;
+#endif
+ int finalized_offset;
+ SysTimeval inittv; /* Used everywhere, the initial time-of-day */
+ ErtsMonotonicTime not_corrected_moffset;
+ erts_atomic64_t offset;
+};
+
+struct time_sup_frequently_changed__ {
+ ErtsMonotonicTime last_not_corrected_time;
+};
+
+static struct {
+ union {
+ struct time_sup_read_only__ o;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_read_only__))];
+ } r;
+ union {
+ struct time_sup_infrequently_changed__ c;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_infrequently_changed__))];
+ } inf;
+ union {
+ struct time_sup_frequently_changed__ c;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(struct time_sup_frequently_changed__))];
+ } f;
+} time_sup erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+ErtsTimeSupData erts_time_sup__ erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+/*
+ * erts_get_approx_time() returns an *approximate* time
+ * in seconds. NOTE that this time may jump backwards!!!
+ */
+erts_approx_time_t
+erts_get_approx_time(void)
{
- erts_smp_atomic_init_nob(&approx.time, 0);
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+
+ return (erts_approx_time_t) tv.tv_sec;
}
-static ERTS_INLINE erts_approx_time_t
-get_approx_time(void)
+static ERTS_INLINE void
+init_time_offset(ErtsMonotonicTime offset)
{
- return (erts_approx_time_t) erts_smp_atomic_read_nob(&approx.time);
+ erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
static ERTS_INLINE void
-update_approx_time(SysTimeval *tv)
+set_time_offset(ErtsMonotonicTime offset)
{
- erts_approx_time_t new_secs = (erts_approx_time_t) tv->tv_sec;
- erts_approx_time_t old_secs = get_approx_time();
- if (old_secs != new_secs)
- erts_smp_atomic_set_nob(&approx.time, new_secs);
+ erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
-/*
- * erts_get_approx_time() returns an *approximate* time
- * in seconds. NOTE that this time may jump backwards!!!
- */
-erts_approx_time_t
-erts_get_approx_time(void)
+static ERTS_INLINE ErtsMonotonicTime
+get_time_offset(void)
{
- return get_approx_time();
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset);
}
-#ifdef HAVE_GETHRTIME
-int erts_disable_tolerant_timeofday;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+
+/*
+ * Time correction adjustments made due to
+ * error between Erlang system time and OS
+ * system time:
+ * - Large adjustment ~1%
+ * - Small adjustment ~0.05%
+ */
+#define ERTS_TCORR_ERR_UNIT 2048
+#define ERTS_TCORR_ERR_LARGE_ADJ 20
+#define ERTS_TCORR_ERR_SMALL_ADJ 1
-static SysHrTime hr_init_time, hr_last_correction_check,
- hr_correction, hr_last_time;
+#define ERTS_INIT_SHORT_INTERVAL_COUNTER 10
+#define ERTS_LONG_TIME_CORRECTION_CHECK ERTS_SEC_TO_MONOTONIC(60)
+#define ERTS_SHORT_TIME_CORRECTION_CHECK ERTS_SEC_TO_MONOTONIC(15)
-static void init_tolerant_timeofday(void)
+#define ERTS_TIME_DRIFT_MAX_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(100)
+#define ERTS_TIME_DRIFT_MIN_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(5)
+
+static ERTS_INLINE ErtsMonotonicTime
+calc_corrected_erl_mtime(ErtsMonotonicTime os_mtime,
+ ErtsMonotonicCorrectionInstance *cip,
+ ErtsMonotonicTime *os_mdiff_p)
{
- /* Should be in sys.c */
-#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)
- if (sysconf(_SC_NPROCESSORS_CONF) > 1) {
- char b[1024];
- int maj,min,build;
- os_flavor(b,1024);
- os_version(&maj,&min,&build);
- if (!strcmp(b,"sunos") && maj <= 5 && min <= 7) {
- erts_disable_tolerant_timeofday = 1;
- }
- }
+ ErtsMonotonicTime erl_mtime, diff = os_mtime - cip->os_mtime;
+ ERTS_TIME_ASSERT(diff >= 0);
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ diff += (cip->correction.drift*diff)/ERTS_MONOTONIC_TIME_UNIT;
#endif
- hr_init_time = sys_gethrtime();
- hr_last_correction_check = hr_last_time = hr_init_time;
- hr_correction = 0;
+ erl_mtime = cip->erl_mtime;
+ erl_mtime += diff;
+ erl_mtime += cip->correction.error*(diff/ERTS_TCORR_ERR_UNIT);
+ if (os_mdiff_p)
+ *os_mdiff_p = diff;
+ return erl_mtime;
}
-static void get_tolerant_timeofday(SysTimeval *tv)
+static ErtsMonotonicTime get_corrected_time(void)
{
- SysHrTime diff_time, curr;
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrectionData cdata;
+ ErtsMonotonicCorrectionInstance *cip;
+
+ erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+
+ os_mtime = erts_os_monotonic_time();
+
+ cdata = time_sup.inf.c.parmon.cdata;
- if (erts_disable_tolerant_timeofday) {
- sys_gettimeofday(tv);
- return;
+ erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ if (os_mtime >= cdata.curr.os_mtime)
+ cip = &cdata.curr;
+ else {
+ if (os_mtime < cdata.prev.os_mtime)
+ erl_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+ cip = &cdata.prev;
}
- *tv = inittv;
- diff_time = ((curr = sys_gethrtime()) + hr_correction - hr_init_time) / 1000;
- if (curr < hr_init_time) {
- erl_exit(1,"Unexpected behaviour from operating system high "
- "resolution timer");
+ return calc_corrected_erl_mtime(os_mtime, cip, NULL);
+}
+
+static void
+check_time_correction(void *unused)
+{
+ ErtsMonotonicCorrectionData cdata;
+ ErtsMonotonicCorrection new_correction;
+ ErtsMonotonicCorrectionInstance *cip;
+ ErtsMonotonicTime mdiff, sdiff, os_mtime, erl_mtime, os_stime, erl_stime, time_offset;
+ Uint timeout;
+ SysTimeval tod;
+ int set_new_correction, begin_short_intervals = 0;
+
+ erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+
+ ASSERT(time_sup.inf.c.finalized_offset);
+
+ os_mtime = erts_os_monotonic_time();
+ sys_gettimeofday(&tod);
+
+ cdata = time_sup.inf.c.parmon.cdata;
+
+ erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ if (os_mtime < cdata.curr.os_mtime)
+ erl_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+ cip = &cdata.curr;
+
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, &mdiff);
+ time_offset = get_time_offset();
+ erl_stime = erl_mtime + time_offset;
+
+ os_stime = ERTS_SEC_TO_MONOTONIC(tod.tv_sec);
+ os_stime += ERTS_USEC_TO_MONOTONIC(tod.tv_usec);
+
+ sdiff = erl_stime - os_stime;
+
+ new_correction = cip->correction;
+
+ if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE
+ && (sdiff < -2*ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF
+ || 2*ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF < sdiff)) {
+ /* System time diff exceeded limits; change time offset... */
+ time_offset -= sdiff;
+ sdiff = 0;
+ set_time_offset(time_offset);
+ schedule_send_time_offset_changed_notifications(time_offset);
+ begin_short_intervals = 1;
+ if (cdata.curr.correction.error == 0)
+ set_new_correction = 0;
+ else {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else if (cdata.curr.correction.error == 0) {
+ if (sdiff < -ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF) {
+ set_new_correction = 1;
+ if (sdiff < -ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF)
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else if (sdiff > ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF) {
+ set_new_correction = 1;
+ if (sdiff > ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF)
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = -ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else {
+ set_new_correction = 0;
+ }
+ }
+ else if (cdata.curr.correction.error > 0) {
+ if (sdiff < 0) {
+ if (cdata.curr.correction.error == ERTS_TCORR_ERR_LARGE_ADJ
+ || -ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF <= sdiff)
+ set_new_correction = 0;
+ else {
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ set_new_correction = 1;
+ }
+ }
+ else if (sdiff > ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF) {
+ set_new_correction = 1;
+ if (sdiff > ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF)
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = -ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else /* if (cdata.curr.correction.error < 0) */ {
+ if (0 < sdiff) {
+ if (cdata.curr.correction.error == -ERTS_TCORR_ERR_LARGE_ADJ
+ || sdiff <= ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF)
+ set_new_correction = 0;
+ else {
+ new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
+ set_new_correction = 1;
+ }
+ }
+ else if (sdiff < -ERTS_TIME_CORRECTION_SMALL_ADJ_DIFF) {
+ set_new_correction = 1;
+ if (sdiff < -ERTS_TIME_CORRECTION_LARGE_ADJ_DIFF)
+ new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
+ else
+ new_correction.error = ERTS_TCORR_ERR_SMALL_ADJ;
+ }
+ else {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
}
- if ((curr - hr_last_correction_check) / 1000 > 1000000) {
- /* Check the correction need */
- SysHrTime tv_diff, diffdiff;
- SysTimeval tmp;
- int done = 0;
-
- sys_gettimeofday(&tmp);
- tv_diff = ((SysHrTime) tmp.tv_sec) * 1000000 + tmp.tv_usec;
- tv_diff -= ((SysHrTime) inittv.tv_sec) * 1000000 + inittv.tv_usec;
- diffdiff = diff_time - tv_diff;
- if (diffdiff > 10000) {
- SysHrTime corr = (curr - hr_last_time) / 100;
- if (corr / 1000 >= diffdiff) {
- ++done;
- hr_correction -= ((SysHrTime)diffdiff) * 1000;
- } else {
- hr_correction -= corr;
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ {
+ ErtsMonotonicDriftData *ddp = &time_sup.inf.c.parmon.cdata.drift;
+ int ix = ddp->ix;
+ ErtsMonotonicTime mtime_diff, old_os_mtime;
+
+ old_os_mtime = ddp->intervals[ix].time.mon;
+ mtime_diff = os_mtime - old_os_mtime;
+
+ if (mtime_diff >= ERTS_SEC_TO_MONOTONIC(10)) {
+ ErtsMonotonicTime drift_adj, drift_adj_diff, old_os_stime,
+ stime_diff, mtime_acc, stime_acc, avg_drift_adj;
+
+ old_os_stime = ddp->intervals[ix].time.sys;
+
+ mtime_acc = ddp->acc.mon;
+ stime_acc = ddp->acc.sys;
+
+ avg_drift_adj = ((stime_acc - mtime_acc)*ERTS_MONOTONIC_TIME_UNIT) / mtime_acc;
+
+ mtime_diff = os_mtime - old_os_mtime;
+ stime_diff = os_stime - old_os_stime;
+ drift_adj = ((stime_diff - mtime_diff)*ERTS_MONOTONIC_TIME_UNIT) / mtime_diff;
+
+ ix++;
+ if (ix >= ERTS_DRIFT_INTERVALS)
+ ix = 0;
+ mtime_acc -= ddp->intervals[ix].diff.mon;
+ mtime_acc += mtime_diff;
+ stime_acc -= ddp->intervals[ix].diff.sys;
+ stime_acc += stime_diff;
+
+ ddp->intervals[ix].diff.mon = mtime_diff;
+ ddp->intervals[ix].diff.sys = stime_diff;
+ ddp->intervals[ix].time.mon = os_mtime;
+ ddp->intervals[ix].time.sys = os_stime;
+
+ ddp->ix = ix;
+ ddp->acc.mon = mtime_acc;
+ ddp->acc.sys = stime_acc;
+
+ /*
+ * If the calculated drift adjustment is off by more than 20% from the
+ * average drift, we interpret this as a discontinuous leap in system
+ * time and ignore it. If it actually is a change in drift, we will
+ * detect this later when the average drift changes.
+ */
+ drift_adj_diff = avg_drift_adj - drift_adj;
+ if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF
+ || ERTS_TIME_DRIFT_MAX_ADJ_DIFF < drift_adj_diff) {
+ ddp->dirty_counter = ERTS_DRIFT_INTERVALS;
+ begin_short_intervals = 1;
}
- diff_time = (curr + hr_correction - hr_init_time) / 1000;
- } else if (diffdiff < -10000) {
- SysHrTime corr = (curr - hr_last_time) / 100;
- if (corr / 1000 >= -diffdiff) {
- ++done;
- hr_correction -= ((SysHrTime)diffdiff) * 1000;
- } else {
- hr_correction += corr;
+ else {
+ if (ddp->dirty_counter <= 0) {
+ drift_adj = ((stime_acc - mtime_acc)*ERTS_MONOTONIC_TIME_UNIT) / mtime_acc;
+ }
+ if (ddp->dirty_counter >= 0) {
+ if (ddp->dirty_counter == 0) {
+ /* Force set new drift correction... */
+ set_new_correction = 1;
+ }
+ ddp->dirty_counter--;
+ }
+ drift_adj_diff = drift_adj - new_correction.drift;
+ if (drift_adj_diff) {
+ if (drift_adj_diff > ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
+ drift_adj_diff = ERTS_TIME_DRIFT_MAX_ADJ_DIFF;
+ else if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
+ drift_adj_diff = -ERTS_TIME_DRIFT_MAX_ADJ_DIFF;
+ new_correction.drift += drift_adj_diff;
+
+ if (drift_adj_diff < -ERTS_TIME_DRIFT_MIN_ADJ_DIFF
+ || ERTS_TIME_DRIFT_MIN_ADJ_DIFF < drift_adj_diff) {
+ set_new_correction = 1;
+ }
+ }
}
- diff_time = (curr + hr_correction - hr_init_time) / 1000;
- } else {
- ++done;
}
- if (done) {
- hr_last_correction_check = curr;
+ }
+#endif
+
+ begin_short_intervals |= set_new_correction;
+
+ if (begin_short_intervals) {
+ time_sup.inf.c.parmon.cdata.short_check_interval
+ = ERTS_INIT_SHORT_INTERVAL_COUNTER;
+ }
+ else if ((os_mtime - time_sup.inf.c.parmon.cdata.last_check
+ >= ERTS_SHORT_TIME_CORRECTION_CHECK - ERTS_MONOTONIC_TIME_UNIT)
+ && time_sup.inf.c.parmon.cdata.short_check_interval > 0) {
+ time_sup.inf.c.parmon.cdata.short_check_interval--;
+ }
+ time_sup.inf.c.parmon.cdata.last_check = os_mtime;
+
+ if (new_correction.error == 0)
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ else {
+ ErtsMonotonicTime ecorr = new_correction.error;
+ if (sdiff < 0)
+ sdiff = -1*sdiff;
+ if (ecorr < 0)
+ ecorr = -1*ecorr;
+ if (sdiff > ecorr*(ERTS_LONG_TIME_CORRECTION_CHECK/ERTS_TCORR_ERR_UNIT))
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ else {
+ timeout = ERTS_MONOTONIC_TO_MSEC((ERTS_TCORR_ERR_UNIT*sdiff)/ecorr);
+ if (timeout < 10)
+ timeout = 10;
}
}
- tv->tv_sec += (int) (diff_time / ((SysHrTime) 1000000));
- tv->tv_usec += (int) (diff_time % ((SysHrTime) 1000000));
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- tv->tv_sec += 1;
+
+ if (timeout > ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK)
+ && time_sup.inf.c.parmon.cdata.short_check_interval) {
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
}
- hr_last_time = curr;
-}
+
+ if (set_new_correction) {
+ erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx);
-#define correction (hr_correction/1000000)
+ os_mtime = erts_os_monotonic_time();
-#else /* !HAVE_GETHRTIME */
-#if !defined(CORRECT_USING_TIMES)
-#define init_tolerant_timeofday()
-#define get_tolerant_timeofday(tvp) sys_gettimeofday(tvp)
-#else
+ /* Save previous correction instance */
+ time_sup.inf.c.parmon.cdata.prev = *cip;
-typedef Sint64 Milli;
-
-static clock_t init_ct;
-static Sint64 ct_wrap;
-static Milli init_tv_m;
-static Milli correction_supress;
-static Milli last_ct_diff;
-static Milli last_cc;
-static clock_t last_ct;
-
-/* sys_times() might need to be wrapped and the values shifted (right)
- a bit to cope with newer linux (2.5.*) kernels, this has to be taken care
- of dynamically to start with, a special version that uses
- the times() return value as a high resolution timer can be made
- to fully utilize the faster ticks, like on windows, but for now, we'll
- settle with this silly workaround */
-#ifdef ERTS_WRAP_SYS_TIMES
-#define KERNEL_TICKS() (sys_times_wrap() & \
- ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
-#else
-SysTimes dummy_tms;
+ /*
+ * The current correction instance begins when
+ * OS monotonic time has increased by one unit.
+ */
+ os_mtime++;
-#define KERNEL_TICKS() (sys_times(&dummy_tms) & \
- ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+ /*
+ * Erlang monotonic time corresponding to the
+ * next OS monotonic time, using the previous
+ * correction.
+ */
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, NULL);
-#endif
+ /*
+ * Save new current correction instance.
+ */
+ time_sup.inf.c.parmon.cdata.curr.erl_mtime = erl_mtime;
+ time_sup.inf.c.parmon.cdata.curr.os_mtime = os_mtime;
+ time_sup.inf.c.parmon.cdata.curr.correction = new_correction;
-static void init_tolerant_timeofday(void)
-{
- last_ct = init_ct = KERNEL_TICKS();
- last_cc = 0;
- init_tv_m = (((Milli) inittv.tv_sec) * 1000) +
- (inittv.tv_usec / 1000);
- ct_wrap = 0;
- correction_supress = 0;
+ erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx);
+ }
+
+ erts_set_timer(&time_sup.inf.c.parmon.timer,
+ check_time_correction,
+ NULL,
+ NULL,
+ timeout);
}
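The rescheduling logic at the end of check_time_correction() can be summarized as follows. This is a hedged sketch with hypothetical names, assuming the values are expressed in milliseconds, and it leaves out the short-check-interval clamp applied by the real code right after this calculation.

#include <stdint.h>

#define TCORR_ERR_UNIT 2048   /* same error unit as in the patch */

/* With an error adjustment of 'err' units, a system-time difference of
   'sdiff' is consumed after roughly TCORR_ERR_UNIT*sdiff/err time, so the
   next check is scheduled no later than that, clamped to [10, long_check]. */
static int64_t next_check_timeout_ms(int64_t sdiff, int64_t err,
                                     int64_t long_check)
{
    if (err == 0)
        return long_check;               /* nothing to correct */
    if (sdiff < 0) sdiff = -sdiff;
    if (err < 0)   err = -err;
    int64_t t = (TCORR_ERR_UNIT * sdiff) / err;
    if (t > long_check) t = long_check;
    if (t < 10)         t = 10;          /* lower bound, as in the patch */
    return t;
}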
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
-static void get_tolerant_timeofday(SysTimeval *tvp)
+static void
+init_check_time_correction(void *unused)
{
- clock_t current_ct;
- SysTimeval current_tv;
- Milli ct_diff;
- Milli tv_diff;
- Milli current_correction;
- Milli act_correction; /* long shown to be too small */
- Milli max_adjust;
-
- if (erts_disable_tolerant_timeofday) {
- sys_gettimeofday(tvp);
- return;
+ ErtsMonotonicDriftData *ddp;
+ ErtsMonotonicTime old_mtime, old_stime, mtime, stime, mtime_diff, stime_diff;
+ int ix;
+ SysTimeval tod;
+
+ ddp = &time_sup.inf.c.parmon.cdata.drift;
+ ix = ddp->ix;
+ old_mtime = ddp->intervals[0].time.mon;
+ old_stime = ddp->intervals[0].time.sys;
+
+ mtime = erts_os_monotonic_time();
+ sys_gettimeofday(&tod);
+
+ stime = ERTS_SEC_TO_MONOTONIC(tod.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(tod.tv_usec);
+
+ mtime_diff = mtime - old_mtime;
+ stime_diff = stime - old_stime;
+ if (100*stime_diff < 80*mtime_diff || 120*mtime_diff < 100*stime_diff ) {
+ /* Had a system time leap... pretend no drift... */
+ stime_diff = mtime_diff;
+ }
+
+ /*
+ * We use old time values in order to trigger
+ * a drift adjustment, and repeat this interval
+ * in all slots...
+ */
+ for (ix = 0; ix < ERTS_DRIFT_INTERVALS; ix++) {
+ ddp->intervals[ix].diff.mon = mtime_diff;
+ ddp->intervals[ix].diff.sys = stime_diff;
+ ddp->intervals[ix].time.mon = old_mtime;
+ ddp->intervals[ix].time.sys = old_stime;
}
-#ifdef ERTS_WRAP_SYS_TIMES
-#define TICK_MS (1000 / SYS_CLK_TCK_WRAP)
+ ddp->acc.sys = stime_diff*ERTS_DRIFT_INTERVALS;
+ ddp->acc.mon = mtime_diff*ERTS_DRIFT_INTERVALS;
+ ddp->ix = 0;
+ ddp->dirty_counter = ERTS_DRIFT_INTERVALS;
+
+ check_time_correction(NULL);
+}
+
+#endif
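The drift estimation above keeps a ring of recent OS-monotonic/OS-system time deltas. A minimal model of that bookkeeping, with assumed type names and interval count, might look like this:

#include <stdint.h>

#define DRIFT_INTERVALS 5   /* assumed ring size, not the ERTS constant */

typedef struct {
    int64_t mon[DRIFT_INTERVALS];  /* OS monotonic delta per interval */
    int64_t sys[DRIFT_INTERVALS];  /* OS system time delta per interval */
    int64_t mon_acc, sys_acc;      /* running sums over the ring */
    int     ix;
} drift_ring;

/* Record one interval and return the average drift adjustment expressed
   in parts per time_unit, as in the patch. Assumes mon_acc > 0. */
static int64_t add_interval(drift_ring *r, int64_t mon_d, int64_t sys_d,
                            int64_t time_unit)
{
    r->ix = (r->ix + 1) % DRIFT_INTERVALS;
    r->mon_acc += mon_d - r->mon[r->ix];   /* replace the oldest slot */
    r->sys_acc += sys_d - r->sys[r->ix];
    r->mon[r->ix] = mon_d;
    r->sys[r->ix] = sys_d;
    return ((r->sys_acc - r->mon_acc) * time_unit) / r->mon_acc;
}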
+
+static ErtsMonotonicTime
+finalize_corrected_time_offset(SysTimeval *todp)
+{
+ ErtsMonotonicTime os_mtime;
+ ErtsMonotonicCorrectionData cdata;
+ ErtsMonotonicCorrectionInstance *cip;
+
+ erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+
+ os_mtime = erts_os_monotonic_time();
+ sys_gettimeofday(todp);
+
+ cdata = time_sup.inf.c.parmon.cdata;
+
+ erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+
+ if (os_mtime < cdata.curr.os_mtime)
+ erl_exit(ERTS_ABORT_EXIT,
+ "OS monotonic time stepped backwards\n");
+ cip = &cdata.curr;
+
+ return calc_corrected_erl_mtime(os_mtime, cip, NULL);
+}
+
+static void
+late_init_time_correction(void)
+{
+ if (time_sup.inf.c.finalized_offset) {
+ erts_init_timer(&time_sup.inf.c.parmon.timer);
+ erts_set_timer(&time_sup.inf.c.parmon.timer,
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ init_check_time_correction,
#else
-#define TICK_MS (1000 / SYS_CLK_TCK)
+ check_time_correction,
#endif
- current_ct = KERNEL_TICKS();
- sys_gettimeofday(&current_tv);
-
- /* I dont know if uptime can move some units backwards
- on some systems, but I allow for small backward
- jumps to avoid such problems if they exist...*/
- if (last_ct > 100 && current_ct < (last_ct - 100)) {
- ct_wrap += ((Sint64) 1) << ((sizeof(clock_t) * 8) - 1);
+ NULL,
+ NULL,
+ ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK));
}
- last_ct = current_ct;
- ct_diff = ((ct_wrap + current_ct) - init_ct) * TICK_MS;
+}
- /*
- * We will adjust the time in milliseconds and we allow for 1%
- * adjustments, but if this function is called more often then every 100
- * millisecond (which is obviously possible), we will never adjust, so
- * we accumulate small times by setting last_ct_diff iff max_adjust > 0
- */
- if ((max_adjust = (ct_diff - last_ct_diff)/100) > 0)
- last_ct_diff = ct_diff;
+#endif /* ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT */
- tv_diff = ((((Milli) current_tv.tv_sec) * 1000) +
- (current_tv.tv_usec / 1000)) - init_tv_m;
+static ErtsMonotonicTime get_not_corrected_time(void)
+{
+ SysTimeval tmp_tv;
+ ErtsMonotonicTime stime, mtime;
- current_correction = ((ct_diff - tv_diff) / TICK_MS) * TICK_MS; /* trunc */
+ erts_smp_mtx_lock(&erts_get_time_mtx);
- /*
- * We allow the current_correction value to wobble a little, as it
- * suffers from the low resolution of the kernel ticks.
- * if it hasn't changed more than one tick in either direction,
- * we will keep the old value.
- */
- if ((last_cc > current_correction + TICK_MS) ||
- (last_cc < current_correction - TICK_MS)) {
- last_cc = current_correction;
- } else {
- current_correction = last_cc;
- }
-
- /*
- * As time goes, we try to get the actual correction to 0,
- * that is, make erlangs time correspond to the systems dito.
- * The act correction is what we seem to need (current_correction)
- * minus the correction suppression. The correction supression
- * will change slowly (max 1% of elapsed time) but in millisecond steps.
- */
- act_correction = current_correction - correction_supress;
- if (max_adjust > 0) {
- /*
- * Here we slowly adjust erlangs time to correspond with the
- * system time by changing the correction_supress variable.
- * It can change max_adjust milliseconds which is 1% of elapsed time
- */
- if (act_correction > 0) {
- if (current_correction - correction_supress > max_adjust) {
- correction_supress += max_adjust;
- } else {
- correction_supress = current_correction;
- }
- act_correction = current_correction - correction_supress;
- } else if (act_correction < 0) {
- if (correction_supress - current_correction > max_adjust) {
- correction_supress -= max_adjust;
- } else {
- correction_supress = current_correction;
+ sys_gettimeofday(&tmp_tv);
+
+ stime = ERTS_SEC_TO_MONOTONIC(tmp_tv.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(tmp_tv.tv_usec);
+
+ mtime = stime - time_sup.inf.c.not_corrected_moffset;
+
+ if (mtime >= time_sup.f.c.last_not_corrected_time)
+ time_sup.f.c.last_not_corrected_time = mtime;
+ else {
+ mtime = time_sup.f.c.last_not_corrected_time;
+
+ if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE) {
+ ErtsMonotonicTime new_offset = stime - mtime;
+ new_offset = ERTS_MONOTONIC_TO_USEC(new_offset);
+ new_offset = ERTS_USEC_TO_MONOTONIC(new_offset);
+ if (time_sup.inf.c.not_corrected_moffset != new_offset) {
+ time_sup.inf.c.not_corrected_moffset = new_offset;
+ set_time_offset(new_offset);
+ schedule_send_time_offset_changed_notifications(new_offset);
}
- act_correction = current_correction - correction_supress;
}
+
}
- /*
- * The actual correction will correct the timeval so that system
- * time warps gets smothed down.
- */
- current_tv.tv_sec += act_correction / 1000;
- current_tv.tv_usec += (act_correction % 1000) * 1000;
-
- if (current_tv.tv_usec >= 1000000) {
- ++current_tv.tv_sec ;
- current_tv.tv_usec -= 1000000;
- } else if (current_tv.tv_usec < 0) {
- --current_tv.tv_sec;
- current_tv.tv_usec += 1000000;
- }
- *tvp = current_tv;
-#undef TICK_MS
+
+ ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset);
+
+ erts_smp_mtx_unlock(&erts_get_time_mtx);
+
+ return mtime;
}
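A compact sketch of the fallback above for builds without a usable OS monotonic clock (hypothetical names, locking omitted): Erlang monotonic time is derived from wall-clock time minus a stored offset and clamped so it never runs backwards. In multi time warp mode the real code instead moves the offset, as shown in the branch above.

#include <stdint.h>

static int64_t last_mtime;  /* protected by erts_get_time_mtx in the patch */
static int64_t moffset;     /* wall clock minus monotonic, in usec */

static int64_t not_corrected_mtime(int64_t wall_usec)
{
    int64_t mtime = wall_usec - moffset;
    if (mtime < last_mtime)
        mtime = last_mtime;   /* wall clock stepped back: freeze */
    else
        last_mtime = mtime;
    return mtime;
}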
-#endif /* CORRECT_USING_TIMES */
-#endif /* !HAVE_GETHRTIME */
+int erts_check_time_adj_support(int time_correction,
+ ErtsTimeWarpMode time_warp_mode)
+{
+ if (!time_correction)
+ return 1;
-/*
-** Why this? Well, most platforms have a constant clock resolution of 1,
-** we dont want the deliver_time/time_remaining routines to waste
-** time dividing and multiplying by/with a variable that's always one.
-** so the return value of sys_init_time is ignored on those platforms.
-*/
-
-#ifndef SYS_CLOCK_RESOLUTION
-static int clock_resolution;
-#define CLOCK_RESOLUTION clock_resolution
+ /* User wants time correction */
+
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return !time_sup.r.o.os_monotonic_disable;
#else
-#define CLOCK_RESOLUTION SYS_CLOCK_RESOLUTION
+ return 0;
#endif
+}
-/*
-** The clock resolution should really be the resolution of the
-** time function in use, which on most platforms
-** is 1. On VxWorks the resolution should be
-** the number of ticks per second (or 1, which would work nicely to).
-**
-** Setting lower resolutions is mostly interesting when timers are used
-** instead of something like select.
-*/
-
-static SysTimeval last_delivered;
-
-static void init_erts_deliver_time(const SysTimeval *inittv)
+int
+erts_has_time_correction(void)
{
- /* We set the initial values for deliver_time here */
- last_delivered = *inittv;
- last_delivered.tv_usec = 1000 * (last_delivered.tv_usec / 1000);
- /* ms resolution */
+ return time_sup.r.o.correction;
}
-static void do_erts_deliver_time(const SysTimeval *current)
+void erts_init_sys_time_sup(void)
{
- SysTimeval cur_time;
- erts_time_t elapsed;
-
- /* calculate and deliver appropriate number of ticks */
- cur_time = *current;
- cur_time.tv_usec = 1000 * (cur_time.tv_usec / 1000); /* ms resolution */
- elapsed = (1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
- (cur_time.tv_usec - last_delivered.tv_usec) / 1000) /
- CLOCK_RESOLUTION;
+ ErtsSysInitTimeResult sys_init_time_res
+ = ERTS_SYS_INIT_TIME_RESULT_INITER;
- /* Sometimes the time jump backwards,
- resulting in a negative elapsed time. We compensate for
- this by simply pretend as if the time stood still. :) */
+ sys_init_time(&sys_init_time_res);
- if (elapsed > 0) {
+ erts_time_sup__.r.o.monotonic_time_unit
+ = sys_init_time_res.os_monotonic_time_unit;
- ASSERT(elapsed < ((erts_time_t) ERTS_SHORT_TIME_T_MAX));
+#ifndef SYS_CLOCK_RESOLUTION
+ erts_time_sup__.r.o.clktck_resolution
+ = sys_init_time_res.sys_clock_resolution;
+ erts_time_sup__.r.o.clktck_resolution *= 1000;
+#endif
- erts_do_time_add((erts_short_time_t) elapsed);
- last_delivered = cur_time;
- }
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ time_sup.r.o.os_monotonic_disable
+ = !sys_init_time_res.have_os_monotonic;
+ time_sup.r.o.os_monotonic_func
+ = sys_init_time_res.os_monotonic_info.func;
+ time_sup.r.o.os_monotonic_clock_id
+ = sys_init_time_res.os_monotonic_info.clock_id;
+ time_sup.r.o.os_monotonic_locked
+ = sys_init_time_res.os_monotonic_info.locked_use;
+ time_sup.r.o.os_monotonic_resolution
+ = sys_init_time_res.os_monotonic_info.resolution;
+#endif
}
int
-erts_init_time_sup(void)
+erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
{
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ ErtsMonotonicTime abs_start;
+#endif
+
+ ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
+
erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
+ erts_smp_mtx_init(&erts_get_time_mtx, "get_time");
- init_approx_time();
+ time_sup.r.o.correction = time_correction;
+ time_sup.r.o.warp_mode = time_warp_mode;
+
+ if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE)
+ time_sup.inf.c.finalized_offset = 0;
+ else
+ time_sup.inf.c.finalized_offset = ~0;
+
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+
+#ifdef ARCH_32
+ time_sup.r.o.start = ((((ErtsMonotonicTime) 1) << 32)-1);
+ time_sup.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ time_sup.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ time_sup.r.o.start += ERTS_MONOTONIC_TIME_UNIT;
+ abs_start = time_sup.r.o.start;
+#else /* ARCH_64 */
+ if (ERTS_MONOTONIC_TIME_UNIT <= 1000*1000)
+ abs_start = time_sup.r.o.start = 0;
+ else {
+ time_sup.r.o.start = ((ErtsMonotonicTime) MIN_SMALL);
+ time_sup.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ time_sup.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ abs_start = -1*time_sup.r.o.start;
+ }
+#endif
- last_emu_time.tv_sec = 0;
- last_emu_time.tv_usec = 0;
+ time_sup.r.o.start_offset.native = time_sup.r.o.start;
+ time_sup.r.o.start_offset.nsec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_start,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000*1000*1000);
+ time_sup.r.o.start_offset.usec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_start,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000*1000);
+ time_sup.r.o.start_offset.msec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_start,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1000);
+ time_sup.r.o.start_offset.sec = (ErtsMonotonicTime)
+ erts_time_unit_conversion((Uint64) abs_start,
+ (Uint32) ERTS_MONOTONIC_TIME_UNIT,
+ (Uint32) 1);
+ if (time_sup.r.o.start < 0) {
+ time_sup.r.o.start_offset.nsec *= -1;
+ time_sup.r.o.start_offset.usec *= -1;
+ time_sup.r.o.start_offset.msec *= -1;
+ time_sup.r.o.start_offset.sec *= -1;
+ }
-#ifndef SYS_CLOCK_RESOLUTION
- clock_resolution = sys_init_time();
-#else
- (void) sys_init_time();
#endif
- sys_gettimeofday(&inittv);
+
+ if (ERTS_MONOTONIC_TIME_UNIT < ERTS_CLKTCK_RESOLUTION)
+ ERTS_INTERNAL_ERROR("Too small monotonic time unit");
+
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ time_sup.r.o.correction = 0;
+#else
+ if (time_sup.r.o.os_monotonic_disable)
+ time_sup.r.o.correction = 0;
+
+ if (time_sup.r.o.correction) {
+ ErtsMonotonicCorrectionData *cdatap;
+ erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ ErtsMonotonicTime offset;
+ time_sup.inf.c.minit = erts_os_monotonic_time();
+ sys_gettimeofday(&time_sup.inf.c.inittv);
+ time_sup.r.o.moffset = -1*time_sup.inf.c.minit;
+ offset = ERTS_SEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_sec);
+ offset += ERTS_USEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_usec);
+ init_time_offset(offset);
+
+ rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx,
+ &rwmtx_opts, "get_corrected_time");
+
+ cdatap = &time_sup.inf.c.parmon.cdata;
-#ifdef HAVE_GETHRTIME
- sys_init_hrtime();
+#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+ cdatap->drift.intervals[0].time.sys
+ = ERTS_SEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_sec);
+ cdatap->drift.intervals[0].time.sys
+ += ERTS_USEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_usec);
+ cdatap->drift.intervals[0].time.mon = time_sup.inf.c.minit;
+ cdatap->curr.correction.drift = 0;
+#endif
+ cdatap->curr.correction.error = 0;
+ cdatap->curr.erl_mtime = 0;
+ cdatap->curr.os_mtime = time_sup.inf.c.minit;
+ cdatap->last_check = time_sup.inf.c.minit;
+ cdatap->short_check_interval = ERTS_INIT_SHORT_INTERVAL_COUNTER;
+ cdatap->prev = cdatap->curr;
+
+ time_sup.r.o.get_time = get_corrected_time;
+ }
+ else
#endif
- init_tolerant_timeofday();
+ {
+ ErtsMonotonicTime stime, offset;
+ time_sup.r.o.get_time = get_not_corrected_time;
+ stime = ERTS_SEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(time_sup.inf.c.inittv.tv_usec);
+ offset = stime;
+ time_sup.inf.c.not_corrected_moffset = offset;
+ init_time_offset(offset);
+ time_sup.f.c.last_not_corrected_time = 0;
+ }
+
+ prev_wall_clock_elapsed = 0;
- init_erts_deliver_time(&inittv);
- gtv = inittv;
- then.tv_sec = then.tv_usec = 0;
+ previous_now = ERTS_MONOTONIC_TO_USEC(get_time_offset());
- erts_deliver_time();
+#ifdef DEBUG
+ time_sup_initialized = 1;
+#endif
- return CLOCK_RESOLUTION;
+ return ERTS_CLKTCK_RESOLUTION/1000;
}
+
+void
+erts_late_init_time_sup(void)
+{
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ /* Timer wheel must be initialized */
+ if (time_sup.r.o.get_time == get_corrected_time)
+ late_init_time_correction();
+#endif
+}
+
+ErtsTimeWarpMode erts_time_warp_mode(void)
+{
+ return time_sup.r.o.warp_mode;
+}
+
+ErtsTimeOffsetState erts_time_offset_state(void)
+{
+ switch (time_sup.r.o.warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_FINAL;
+ case ERTS_SINGLE_TIME_WARP_MODE:
+ if (time_sup.inf.c.finalized_offset)
+ return ERTS_TIME_OFFSET_FINAL;
+ return ERTS_TIME_OFFSET_PRELIMINARY;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_VOLATILE;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ return ERTS_TIME_OFFSET_VOLATILE;
+ }
+}
+
+/*
+ * erts_finalize_time_offset() will only change the time offset
+ * the first time it is called when the emulator has been
+ * started in "single time warp" mode. Returns the previous
+ * state:
+ * * ERTS_TIME_OFFSET_PRELIMINARY - Finalization performed
+ * * ERTS_TIME_OFFSET_FINAL - Already finalized; nothing changed
+ * * ERTS_TIME_OFFSET_VOLATILE - Not supported, either in
+ *   no correction mode or in multi time warp mode (not yet implemented).
+ */
+
+ErtsTimeOffsetState
+erts_finalize_time_offset(void)
+{
+ switch (time_sup.r.o.warp_mode) {
+ case ERTS_NO_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_FINAL;
+ case ERTS_MULTI_TIME_WARP_MODE:
+ return ERTS_TIME_OFFSET_VOLATILE;
+ case ERTS_SINGLE_TIME_WARP_MODE: {
+ ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL;
+
+ erts_smp_mtx_lock(&erts_get_time_mtx);
+
+ if (!time_sup.inf.c.finalized_offset) {
+ ErtsMonotonicTime mtime, new_offset;
+ SysTimeval tv;
+
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (!time_sup.r.o.correction)
+#endif
+ {
+ ErtsMonotonicTime stime;
+ sys_gettimeofday(&tv);
+
+ stime = ERTS_SEC_TO_MONOTONIC(tv.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(tv.tv_usec);
+
+ mtime = stime - time_sup.inf.c.not_corrected_moffset;
+
+ if (mtime >= time_sup.f.c.last_not_corrected_time) {
+ time_sup.f.c.last_not_corrected_time = mtime;
+ new_offset = time_sup.inf.c.not_corrected_moffset;
+ }
+ else {
+ mtime = time_sup.f.c.last_not_corrected_time;
+
+ ASSERT(time_sup.inf.c.not_corrected_moffset != stime - mtime);
+ new_offset = stime - mtime;
+ time_sup.inf.c.not_corrected_moffset = new_offset;
+ }
+
+ }
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ else {
+ mtime = finalize_corrected_time_offset(&tv);
+ new_offset = ERTS_SEC_TO_MONOTONIC(tv.tv_sec);
+ new_offset += ERTS_USEC_TO_MONOTONIC(tv.tv_usec);
+ new_offset -= mtime;
+
+ }
+#endif
+ new_offset = ERTS_MONOTONIC_TO_USEC(new_offset);
+ new_offset = ERTS_USEC_TO_MONOTONIC(new_offset);
+
+ set_time_offset(new_offset);
+ schedule_send_time_offset_changed_notifications(new_offset);
+
+ time_sup.inf.c.finalized_offset = ~0;
+ res = ERTS_TIME_OFFSET_PRELIMINARY;
+ }
+
+ erts_smp_mtx_unlock(&erts_get_time_mtx);
+
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (res == ERTS_TIME_OFFSET_PRELIMINARY
+ && time_sup.r.o.get_time == get_corrected_time) {
+ late_init_time_correction();
+ }
+#endif
+
+ return res;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid time warp mode");
+ return ERTS_TIME_OFFSET_VOLATILE;
+ }
+}
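Note that the finalized offset above is rounded down to whole microseconds before it is published; a standalone sketch of that rounding (names assumed, valid when the native unit is finer than a microsecond):

#include <stdint.h>

static int64_t truncate_to_usec(int64_t native_offset, int64_t ticks_per_usec)
{
    /* equivalent to USEC_TO_MONOTONIC(MONOTONIC_TO_USEC(native_offset)) */
    return (native_offset / ticks_per_usec) * ticks_per_usec;
}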
+
/* info functions */
void
@@ -498,23 +1090,16 @@ elapsed_time_both(UWord *ms_user, UWord *ms_sys,
void
wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
{
- UWord prev_total;
- SysTimeval tv;
+ ErtsMonotonicTime now, elapsed;
erts_smp_mtx_lock(&erts_timeofday_mtx);
- get_tolerant_timeofday(&tv);
-
- *ms_total = 1000 * (tv.tv_sec - inittv.tv_sec) +
- (tv.tv_usec - inittv.tv_usec) / 1000;
+ now = time_sup.r.o.get_time();
- prev_total = 1000 * (gtv.tv_sec - inittv.tv_sec) +
- (gtv.tv_usec - inittv.tv_usec) / 1000;
- *ms_diff = *ms_total - prev_total;
- gtv = tv;
-
- /* must sync the machine's idea of time here */
- do_erts_deliver_time(&tv);
+ elapsed = ERTS_MONOTONIC_TO_MSEC(now);
+ *ms_total = (UWord) elapsed;
+ *ms_diff = (UWord) (elapsed - prev_wall_clock_elapsed);
+ prev_wall_clock_elapsed = elapsed;
erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
@@ -890,38 +1475,41 @@ univ_to_local(Sint *year, Sint *month, Sint *day,
return 0;
}
-
/* get a timestamp */
void
get_now(Uint* megasec, Uint* sec, Uint* microsec)
{
- SysTimeval now;
+ ErtsMonotonicTime now_megasec, now_sec, now, mtime, time_offset;
+ mtime = time_sup.r.o.get_time();
+ time_offset = get_time_offset();
+ now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset);
+
erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&now);
- do_erts_deliver_time(&now);
-
- /* Make sure time is later than last */
- if (then.tv_sec > now.tv_sec ||
- (then.tv_sec == now.tv_sec && then.tv_usec >= now.tv_usec)) {
- now = then;
- now.tv_usec++;
- }
- /* Check for carry from above + general reasonability */
- if (now.tv_usec >= 1000000) {
- now.tv_usec = 0;
- now.tv_sec++;
- }
- then = now;
+
+ /* Make sure now time is later than last time */
+ if (now <= previous_now)
+ now = previous_now + 1;
+
+ previous_now = now;
erts_smp_mtx_unlock(&erts_timeofday_mtx);
-
- *megasec = (Uint) (now.tv_sec / 1000000);
- *sec = (Uint) (now.tv_sec % 1000000);
- *microsec = (Uint) (now.tv_usec);
- update_approx_time(&now);
+ now_megasec = now / ERTS_MONOTONIC_TIME_TERA;
+ now_sec = now / ERTS_MONOTONIC_TIME_MEGA;
+ *megasec = (Uint) now_megasec;
+ *sec = (Uint) (now_sec - now_megasec*ERTS_MONOTONIC_TIME_MEGA);
+ *microsec = (Uint) (now - now_sec*ERTS_MONOTONIC_TIME_MEGA);
+
+ ASSERT(((ErtsMonotonicTime) *megasec)*ERTS_MONOTONIC_TIME_TERA
+ + ((ErtsMonotonicTime) *sec)*ERTS_MONOTONIC_TIME_MEGA
+ + ((ErtsMonotonicTime) *microsec) == now);
+}
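The split into {MegaSecs, Secs, MicroSecs} performed above can be checked with this small standalone sketch, using plain stdint types instead of the ERTS ones:

#include <stdint.h>

/* Break a microsecond timestamp into mega/sec/micro such that
   mega*10^12 + sec*10^6 + micro == usec. */
static void split_now(int64_t usec, uint32_t *mega, uint32_t *sec,
                      uint32_t *micro)
{
    int64_t mega_s = usec / 1000000000000LL;  /* 10^12 usec per megasecond */
    int64_t all_s  = usec / 1000000LL;        /* 10^6 usec per second      */
    *mega  = (uint32_t) mega_s;
    *sec   = (uint32_t) (all_s - mega_s * 1000000LL);
    *micro = (uint32_t) (usec  - all_s  * 1000000LL);
}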
+
+ErtsMonotonicTime
+erts_get_monotonic_time(void)
+{
+ return time_sup.r.o.get_time();
}
void
@@ -934,102 +1522,466 @@ get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
*megasec = (Uint) (now.tv_sec / 1000000);
*sec = (Uint) (now.tv_sec % 1000000);
*microsec = (Uint) (now.tv_usec);
-
- update_approx_time(&now);
}
+#ifdef HAVE_ERTS_NOW_CPU
+void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
+ SysCpuTime t;
+ SysTimespec tp;
-/* deliver elapsed *ticks* to the machine - takes a pointer
- to a struct timeval representing current time (to save
- a gettimeofday() where possible) or NULL */
+ sys_get_proc_cputime(t, tp);
+ *microsec = (Uint)(tp.tv_nsec / 1000);
+ t = (tp.tv_sec / 1000000);
+ *megasec = (Uint)(t % 1000000);
+ *sec = (Uint)(tp.tv_sec % 1000000);
+}
+#endif
-void erts_deliver_time(void) {
- SysTimeval now;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&now);
- do_erts_deliver_time(&now);
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+#include "big.h"
- update_approx_time(&now);
+void
+erts_monitor_time_offset(Eterm id, Eterm ref)
+{
+ erts_smp_mtx_lock(&erts_get_time_mtx);
+ erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL);
+ no_time_offset_monitors++;
+ erts_smp_mtx_unlock(&erts_get_time_mtx);
}
-/* get *real* time (not ticks) remaining until next timeout - if there
- isn't one, give a "long" time, that is guaranteed
- to not cause overflow when we report elapsed time later on */
+int
+erts_demonitor_time_offset(Eterm ref)
+{
+ int res;
+ ErtsMonitor *mon;
+ ASSERT(is_internal_ref(ref));
+ erts_smp_mtx_lock(&erts_get_time_mtx);
+ mon = erts_remove_monitor(&time_offset_monitors, ref);
+ if (!mon)
+ res = 0;
+ else {
+ ASSERT(no_time_offset_monitors > 0);
+ no_time_offset_monitors--;
+ res = 1;
+ }
+ erts_smp_mtx_unlock(&erts_get_time_mtx);
+ if (res)
+ erts_destroy_monitor(mon);
+ return res;
+}
-void erts_time_remaining(SysTimeval *rem_time)
+typedef struct {
+ Eterm pid;
+ Eterm ref;
+ Eterm heap[REF_THING_SIZE];
+} ErtsTimeOffsetMonitorInfo;
+
+typedef struct {
+ Uint ix;
+ ErtsTimeOffsetMonitorInfo *to_mon_info;
+} ErtsTimeOffsetMonitorContext;
+
+static void
+save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
{
- erts_time_t ticks;
- SysTimeval cur_time;
- erts_time_t elapsed;
-
- /* erts_next_time() returns no of ticks to next timeout or -1 if none */
-
- ticks = (erts_time_t) erts_next_time();
- if (ticks == (erts_time_t) -1) {
- /* timer queue empty */
- /* this will cause at most 100000000 ticks */
- rem_time->tv_sec = 100000;
- rem_time->tv_usec = 0;
- } else {
- /* next timeout after ticks ticks */
- ticks *= CLOCK_RESOLUTION;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&cur_time);
- cur_time.tv_usec = 1000 *
- (cur_time.tv_usec / 1000);/* ms resolution*/
- elapsed = 1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
- (cur_time.tv_usec - last_delivered.tv_usec) / 1000;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ ErtsTimeOffsetMonitorContext *cntxt;
+ Eterm *from_hp, *to_hp;
+ Uint mix;
+ int hix;
+
+ cntxt = (ErtsTimeOffsetMonitorContext *) vcntxt;
+ mix = (cntxt->ix)++;
+ cntxt->to_mon_info[mix].pid = mon->pid;
+ to_hp = &cntxt->to_mon_info[mix].heap[0];
+
+ ASSERT(is_internal_ref(mon->ref));
+ from_hp = internal_ref_val(mon->ref);
+ ASSERT(thing_arityval(*from_hp) + 1 == REF_THING_SIZE);
+
+ for (hix = 0; hix < REF_THING_SIZE; hix++)
+ to_hp[hix] = from_hp[hix];
+
+ cntxt->to_mon_info[mix].ref
+ = make_internal_ref(&cntxt->to_mon_info[mix].heap[0]);
+
+}
+
+static void
+send_time_offset_changed_notifications(void *new_offsetp)
+{
+ ErtsMonotonicTime new_offset;
+ ErtsTimeOffsetMonitorInfo *to_mon_info;
+ Uint no_monitors;
+ char *tmp = NULL;
+
+#ifdef ARCH_64
+ new_offset = (ErtsMonotonicTime) new_offsetp;
+#else
+ new_offset = *((ErtsMonotonicTime *) new_offsetp);
+ erts_free(ERTS_ALC_T_NEW_TIME_OFFSET, new_offsetp);
+#endif
+ new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE;
+
+ erts_smp_mtx_lock(&erts_get_time_mtx);
+
+ no_monitors = no_time_offset_monitors;
+ if (no_monitors) {
+ ErtsTimeOffsetMonitorContext cntxt;
+ Uint alloc_sz;
- if (ticks <= elapsed) { /* Ooops, better hurry */
- rem_time->tv_sec = rem_time->tv_usec = 0;
- return;
+ /* Monitor info array size */
+ alloc_sz = no_monitors*sizeof(ErtsTimeOffsetMonitorInfo);
+ /* + template max size */
+ alloc_sz += 6*sizeof(Eterm); /* 5-tuple */
+ alloc_sz += ERTS_MAX_SINT64_HEAP_SIZE*sizeof(Eterm); /* max offset size */
+ tmp = erts_alloc(ERTS_ALC_T_TMP, alloc_sz);
+
+ to_mon_info = (ErtsTimeOffsetMonitorInfo *) tmp;
+ cntxt.ix = 0;
+ cntxt.to_mon_info = to_mon_info;
+
+ erts_doforall_monitors(time_offset_monitors,
+ save_time_offset_monitor,
+ &cntxt);
+
+ ASSERT(cntxt.ix == no_monitors);
+ }
+
+ erts_smp_mtx_unlock(&erts_get_time_mtx);
+
+ if (no_monitors) {
+ Eterm *hp, *patch_refp, new_offset_term, message_template;
+ Uint mix, hsz;
+
+ /* Make message template */
+
+ hp = (Eterm *) (tmp + no_monitors*sizeof(ErtsTimeOffsetMonitorInfo));
+
+ hsz = 6; /* 5-tuple */
+ hsz += REF_THING_SIZE;
+ hsz += ERTS_SINT64_HEAP_SIZE(new_offset);
+
+ if (IS_SSMALL(new_offset))
+ new_offset_term = make_small(new_offset);
+ else
+ new_offset_term = erts_sint64_to_big(new_offset, &hp);
+ message_template = TUPLE5(hp,
+ am_CHANGE,
+ THE_NON_VALUE, /* Patch point for ref */
+ am_time_offset,
+ am_clock_service,
+ new_offset_term);
+ patch_refp = &hp[2];
+
+ ASSERT(*patch_refp == THE_NON_VALUE);
+
+ for (mix = 0; mix < no_monitors; mix++) {
+ Process *rp = erts_proc_lookup(to_mon_info[mix].pid);
+ if (rp) {
+ Eterm ref = to_mon_info[mix].ref;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) {
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Eterm message;
+
+ hp = erts_alloc_message_heap(hsz, &bp, &ohp, rp, &rp_locks);
+ *patch_refp = ref;
+ ASSERT(hsz == size_object(message_template));
+ message = copy_struct(message_template, hsz, &hp, ohp);
+ erts_queue_message(rp, &rp_locks, bp, message, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+ }
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
}
- rem_time->tv_sec = (ticks - elapsed) / 1000;
- rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
+
+ erts_free(ERTS_ALC_T_TMP, tmp);
}
}
-void erts_get_timeval(SysTimeval *tv)
+static void
+schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset)
{
- erts_smp_mtx_lock(&erts_timeofday_mtx);
- get_tolerant_timeofday(tv);
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
- update_approx_time(tv);
+#ifdef ARCH_64
+ void *new_offsetp = (void *) new_offset;
+ ASSERT(sizeof(void *) == sizeof(ErtsMonotonicTime));
+#else
+ void *new_offsetp = erts_alloc(ERTS_ALC_T_NEW_TIME_OFFSET,
+ sizeof(ErtsMonotonicTime));
+ *((ErtsMonotonicTime *) new_offsetp) = new_offset;
+#endif
+ erts_schedule_misc_aux_work(1,
+ send_time_offset_changed_notifications,
+ new_offsetp);
}
-erts_time_t
-erts_get_time(void)
+static ERTS_INLINE Eterm
+make_time_val(Process *c_p, ErtsMonotonicTime time_val)
{
- SysTimeval sys_tv;
-
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(&sys_tv);
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ Sint64 val = (Sint64) time_val;
+ Eterm *hp;
+ Uint sz;
- update_approx_time(&sys_tv);
+ if (IS_SSMALL(val))
+ return make_small(val);
- return sys_tv.tv_sec;
+ sz = ERTS_SINT64_HEAP_SIZE(val);
+ hp = HAlloc(c_p, sz);
+ return erts_sint64_to_big(val, &hp);
}
-#ifdef HAVE_ERTS_NOW_CPU
-void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
- SysCpuTime t;
- SysTimespec tp;
+Eterm
+erts_get_monotonic_start_time(struct process *c_p)
+{
+ return make_time_val(c_p, ERTS_MONOTONIC_OFFSET_NATIVE);
+}
- sys_get_proc_cputime(t, tp);
- *microsec = (Uint)(tp.tv_nsec / 1000);
- t = (tp.tv_sec / 1000000);
- *megasec = (Uint)(t % 1000000);
- *sec = (Uint)(tp.tv_sec % 1000000);
+static Eterm
+bld_monotonic_time_source(Uint **hpp, Uint *szp, Sint64 os_mtime)
+{
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return NIL;
+#else
+ int i = 0;
+ Eterm k[5];
+ Eterm v[5];
+
+ if (time_sup.r.o.os_monotonic_disable)
+ return NIL;
+
+ k[i] = erts_bld_atom(hpp, szp, "function");
+ v[i++] = erts_bld_atom(hpp, szp, time_sup.r.o.os_monotonic_func);
+
+ if (time_sup.r.o.os_monotonic_clock_id) {
+ k[i] = erts_bld_atom(hpp, szp, "clock_id");
+ v[i++] = erts_bld_atom(hpp, szp, time_sup.r.o.os_monotonic_clock_id);
+ }
+
+ if (time_sup.r.o.os_monotonic_resolution) {
+ k[i] = erts_bld_atom(hpp, szp, "resolution");
+ v[i++] = erts_bld_uint64(hpp, szp, time_sup.r.o.os_monotonic_resolution);
+ }
+
+ k[i] = erts_bld_atom(hpp, szp, "parallel");
+ v[i++] = time_sup.r.o.os_monotonic_locked ? am_no : am_yes;
+
+ k[i] = erts_bld_atom(hpp, szp, "time");
+ v[i++] = erts_bld_sint64(hpp, szp, os_mtime);
+
+ return erts_bld_2tup_list(hpp, szp, (Sint) i, k, v);
+#endif
}
+
+Eterm
+erts_monotonic_time_source(struct process *c_p)
+{
+ Uint hsz = 0;
+ Eterm *hp = NULL;
+ Sint64 os_mtime = 0;
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (!time_sup.r.o.os_monotonic_disable)
+ os_mtime = (Sint64) erts_os_monotonic_time();
+#endif
+
+ bld_monotonic_time_source(NULL, &hsz, os_mtime);
+ if (hsz)
+ hp = HAlloc(c_p, hsz);
+ return bld_monotonic_time_source(&hp, NULL, os_mtime);
+}
+
+
+#include "bif.h"
+
+static ERTS_INLINE Eterm
+time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonotonicTime muloff)
+{
+ ErtsMonotonicTime result;
+ BIF_RETTYPE ret;
+
+ if (val < 0)
+ goto trap_to_erlang_code;
+
+ /* Convert to common user specified time units */
+ switch (term) {
+ case am_seconds:
+ case make_small(1):
+ result = ERTS_MONOTONIC_TO_SEC(val) + muloff*ERTS_MONOTONIC_OFFSET_SEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ case am_milli_seconds:
+ case make_small(1000):
+ result = ERTS_MONOTONIC_TO_MSEC(val) + muloff*ERTS_MONOTONIC_OFFSET_MSEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ case am_micro_seconds:
+ case make_small(1000*1000):
+ result = ERTS_MONOTONIC_TO_USEC(val) + muloff*ERTS_MONOTONIC_OFFSET_USEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+#ifdef ARCH_64
+ case am_nano_seconds:
+ case make_small(1000*1000*1000):
+ result = ERTS_MONOTONIC_TO_NSEC(val) + muloff*ERTS_MONOTONIC_OFFSET_NSEC;
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
#endif
+ default: {
+ Eterm value, native_res;
+#ifndef ARCH_64
+ Sint user_res;
+ if (term == am_nano_seconds)
+ goto to_nano_seconds;
+ if (term_to_Sint(term, &user_res)) {
+ if (user_res == 1000*1000*1000) {
+ to_nano_seconds:
+ result = (ERTS_MONOTONIC_TO_NSEC(val)
+ + muloff*ERTS_MONOTONIC_OFFSET_NSEC);
+ ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
+ break;
+ }
+ if (user_res <= 0)
+ goto badarg;
+ }
+#else
+ if (is_small(term)) {
+ if (signed_val(term) <= 0)
+ goto badarg;
+ }
+#endif
+ else if (is_big(term)) {
+ if (big_sign(term))
+ goto badarg;
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+
+ trap_to_erlang_code:
+ /* Do it in erlang code instead; pass along values to use... */
+ value = make_time_val(c_p, val + muloff*ERTS_MONOTONIC_OFFSET_NATIVE);
+ native_res = make_time_val(c_p, ERTS_MONOTONIC_TIME_UNIT);
+
+ ERTS_BIF_PREP_TRAP3(ret, erts_convert_time_unit_trap, c_p,
+ value, native_res, term);
+
+ break;
+ }
+ }
+
+ return ret;
+}
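The common cases above all follow the same pattern, sketched below with assumed names: divide the native value down to the requested resolution and add muloff times the start offset expressed in that resolution (+1 for monotonic_time, 0 for system_time, -1 for time_offset).

#include <stdint.h>

static int64_t convert_time_unit(int64_t native,       /* value in native units       */
                                 int64_t native_per,   /* native units per target unit */
                                 int64_t start_offset, /* start offset in target units */
                                 int     muloff)       /* +1, 0 or -1 as in the patch  */
{
    return native / native_per + (int64_t) muloff * start_offset;
}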
+
+/* Built in functions */
+
+BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
+ BIF_RET(make_time_val(BIF_P, mtime));
+}
+
+BIF_RETTYPE monotonic_time_1(BIF_ALIST_1)
+{
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, time_sup.r.o.get_time(), 1));
+}
+
+BIF_RETTYPE system_time_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime mtime, offset;
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ BIF_RET(make_time_val(BIF_P, mtime + offset));
+}
+
+BIF_RETTYPE system_time_1(BIF_ALIST_1)
+{
+ ErtsMonotonicTime mtime, offset;
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0));
+}
+
+BIF_RETTYPE erts_internal_time_unit_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, ERTS_MONOTONIC_TIME_UNIT));
+}
+
+BIF_RETTYPE time_offset_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime time_offset = get_time_offset();
+ time_offset -= ERTS_MONOTONIC_OFFSET_NATIVE;
+ BIF_RET(make_time_val(BIF_P, time_offset));
+}
+
+BIF_RETTYPE time_offset_1(BIF_ALIST_1)
+{
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, get_time_offset(), -1));
+}
+
+
+BIF_RETTYPE timestamp_0(BIF_ALIST_0)
+{
+ Eterm *hp, res;
+ ErtsMonotonicTime stime, mtime, all_sec, offset;
+ Uint mega_sec, sec, micro_sec;
+
+ mtime = time_sup.r.o.get_time();
+ offset = get_time_offset();
+ stime = ERTS_MONOTONIC_TO_USEC(mtime + offset);
+ all_sec = stime / ERTS_MONOTONIC_TIME_MEGA;
+ mega_sec = (Uint) (stime / ERTS_MONOTONIC_TIME_TERA);
+ sec = (Uint) (all_sec - (((ErtsMonotonicTime) mega_sec)
+ * ERTS_MONOTONIC_TIME_MEGA));
+ micro_sec = (Uint) (stime - all_sec*ERTS_MONOTONIC_TIME_MEGA);
+
+ ASSERT(((ErtsMonotonicTime) mega_sec)*ERTS_MONOTONIC_TIME_TERA
+ + ((ErtsMonotonicTime) sec)*ERTS_MONOTONIC_TIME_MEGA
+ + micro_sec == stime);
+
+ /*
+ * Mega seconds is the only value that potentially
+ * ever could be a bignum. However, that won't happen
+ * during at least the next 4 million years...
+ *
+ * (System time will also have wrapped in the
+ * 64-bit integer before we get there...)
+ */
+
+ ASSERT(IS_USMALL(0, mega_sec));
+ ASSERT(IS_USMALL(0, sec));
+ ASSERT(IS_USMALL(0, micro_sec));
+
+ hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp,
+ make_small(mega_sec),
+ make_small(sec),
+ make_small(micro_sec));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE os_system_time_0(BIF_ALIST_0)
+{
+ ErtsMonotonicTime stime;
+ SysTimeval tod;
+ sys_gettimeofday(&tod);
+ stime = ERTS_SEC_TO_MONOTONIC(tod.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(tod.tv_usec);
+ BIF_RET(make_time_val(BIF_P, stime));
+}
+
+BIF_RETTYPE os_system_time_1(BIF_ALIST_1)
+{
+ ErtsMonotonicTime stime;
+ SysTimeval tod;
+ sys_gettimeofday(&tod);
+ stime = ERTS_SEC_TO_MONOTONIC(tod.tv_sec);
+ stime += ERTS_USEC_TO_MONOTONIC(tod.tv_usec);
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, stime, 0));
+}
+
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ea5c850a30..2f9969b0e7 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2225,7 +2225,7 @@ trace_gc(Process *p, Eterm what)
Eterm* limit;
#endif
- ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
UseTmpHeap(LOCAL_HEAP_SIZE,p);
@@ -3492,16 +3492,13 @@ init_sys_msg_dispatcher(void)
thr_opts.coreNo = 0;
#endif
thr_opts.detached = 1;
+ thr_opts.name = "sys_msg_dispatcher";
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
-#ifdef ETHR_HAVE_THREAD_NAMES
- thr_opts.name = "sys_msg_dispatcher";
-#endif
-
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index c32f8fd61c..7cb8972e29 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -113,12 +113,14 @@ void erts_silence_warn_unused_result(long unused);
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
+int erts_fit_in_bits_uint(Uint);
int erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
+Uint32 make_internal_hash(Eterm);
void erts_save_emu_args(int argc, char **argv);
Eterm erts_get_emu_args(struct process *c_p);
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index b7de8208ad..3a9fb1e07b 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -20,8 +20,6 @@
#ifndef __ERL_VM_H__
#define __ERL_VM_H__
-/* #define ERTS_OPCODE_COUNTER_SUPPORT */
-
/* FORCE_HEAP_FRAGS:
* Debug provocation to make HAlloc always create heap fragments (if allowed)
* even if there is room on heap.
@@ -119,9 +117,9 @@
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
#if HALFWORD_HEAP
-# define ERTS_HOLE_MARKER (0xaf5e78ccU)
+# define ERTS_HOLE_MARKER (0xdeadbeef)
#else
-# define ERTS_HOLE_MARKER (((0xaf5e78ccUL << 24) << 8) | 0xaf5e78ccUL)
+# define ERTS_HOLE_MARKER (((0xdeadbeef << 24) << 8) | 0xdeadbeef)
#endif
#endif
@@ -174,6 +172,7 @@ extern int H_MIN_SIZE; /* minimum (heap + stack) */
extern int BIN_VH_MIN_SIZE; /* minimum virtual (bin) heap */
extern int erts_atom_table_size;/* Atom table size */
+extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#define ORIG_CREATION 0
#define INTERNAL_CREATION 255
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 45d1f7514e..82c60840e5 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -36,7 +36,9 @@
#include "erl_process.h"
#include "error.h"
#include "external.h"
+#define ERL_WANT_HIPE_BIF_WRAPPER__
#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
#include "big.h"
#include "dist.h"
#include "erl_binary.h"
@@ -498,15 +500,37 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint
return ep;
}
-Uint erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp)
+int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp,
+ Uint* szp)
{
- Uint sz = 0;
+ Uint sz;
+ if (encode_size_struct_int(NULL, acmp, term, flags, NULL, &sz)) {
+ return -1;
+ } else {
#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
#endif
- sz++ /* VERSION_MAGIC */;
- sz += encode_size_struct2(acmp, term, flags);
- return sz;
+ sz++ /* VERSION_MAGIC */;
+
+ *szp += sz;
+ return 0;
+ }
+}
+
+int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp)
+{
+ Uint sz;
+ if (encode_size_struct_int(&ctx->u.sc, ctx->acmp, term, ctx->flags, &ctx->reds, &sz)) {
+ return -1;
+ } else {
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE))
+#endif
+ sz++ /* VERSION_MAGIC */;
+
+ *szp += sz;
+ return 0;
+ }
}
Uint erts_encode_ext_size(Eterm term)
@@ -527,19 +551,16 @@ Uint erts_encode_ext_size_ets(Eterm term)
}
-void erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap *acmp)
+int erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap *acmp,
+ TTBEncodeContext* ctx, Sint* reds)
{
- byte *ep = *ext;
-#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
-#endif
- *ep++ = VERSION_MAGIC;
- ep = enc_term(acmp, term, ep, flags, NULL);
- if (!ep)
- erl_exit(ERTS_ABORT_EXIT,
- "%s:%d:erts_encode_dist_ext(): Internal data structure error\n",
- __FILE__, __LINE__);
- *ext = ep;
+ if (!ctx || !ctx->wstack.wstart) {
+ #ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ #endif
+ *(*ext)++ = VERSION_MAGIC;
+ }
+ return enc_term_int(ctx, acmp, term, *ext, flags, NULL, reds, ext);
}
void erts_encode_ext(Eterm term, byte **ext)
@@ -1161,7 +1182,8 @@ typedef struct {
Eterm* hp_end;
int remaining_n;
char* remaining_bytes;
- Eterm* maps_head;
+ Eterm* maps_list;
+ struct dec_term_hamt_placeholder* hamt_list;
} B2TDecodeContext;
typedef struct {
@@ -1487,7 +1509,8 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size);
ctx->u.dc.hp = ctx->u.dc.hp_start;
ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size;
- ctx->u.dc.maps_head = NULL;
+ ctx->u.dc.maps_list = NULL;
+ ctx->u.dc.hamt_list = NULL;
ctx->state = B2TDecode;
/*fall through*/
case B2TDecode:
@@ -1740,54 +1763,14 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
return erts_term_to_binary_simple(p, Term, size, level, flags);
}
-/* Define for testing */
-/* #define EXTREME_TTB_TRAPPING 1 */
+/* Define EXTREME_TTB_TRAPPING for testing in dist.h */
#ifndef EXTREME_TTB_TRAPPING
-#define TERM_TO_BINARY_LOOP_FACTOR 32
#define TERM_TO_BINARY_COMPRESS_CHUNK (1 << 18)
#else
-#define TERM_TO_BINARY_LOOP_FACTOR 1
#define TERM_TO_BINARY_COMPRESS_CHUNK 10
#endif
-
-
-typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
-typedef struct TTBSizeContext_ {
- Uint flags;
- int level;
- Uint result;
- Eterm obj;
- ErtsEStack estack;
-} TTBSizeContext;
-
-typedef struct TTBEncodeContext_ {
- Uint flags;
- int level;
- byte* ep;
- Eterm obj;
- ErtsWStack wstack;
- Binary *result_bin;
-} TTBEncodeContext;
-
-typedef struct {
- Uint real_size;
- Uint dest_len;
- byte *dbytes;
- Binary *result_bin;
- Binary *destination_bin;
- z_stream stream;
-} TTBCompressContext;
-
-typedef struct {
- int alive;
- TTBState state;
- union {
- TTBSizeContext sc;
- TTBEncodeContext ec;
- TTBCompressContext cc;
- } s;
-} TTBContext;
+#define TERM_TO_BINARY_MEMCPY_FACTOR 8
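TERM_TO_BINARY_MEMCPY_FACTOR bounds how many bytes the encoder copies per reduction; the ENC_BIN_COPY case further down uses it to yield in the middle of large binaries. A rough standalone sketch of that idea (a hypothetical helper, not the patch's code):

#include <stddef.h>
#include <string.h>

/* Copy at most reds*8 bytes (factor of 8 assumed, as in the define above),
   charge the reduction budget, and report how far we got so the caller can
   push the remainder back on its work stack and yield. Assumes *reds > 0. */
static size_t copy_bounded(unsigned char *dst, const unsigned char *src,
                           size_t nbytes, long *reds)
{
    size_t chunk = (size_t) (*reds) * 8;
    if (chunk > nbytes)
        chunk = nbytes;
    memcpy(dst, src, chunk);
    *reds -= (long) (chunk / 8);
    return chunk;   /* bytes actually copied this slice */
}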
static void ttb_context_destructor(Binary *context_bin)
{
@@ -1899,8 +1882,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
result_bin = erts_bin_nrml_alloc(size);
- result_bin->flags = 0;
- result_bin->orig_size = size;
erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
/* Next state immediately, no need to export context */
@@ -1925,7 +1906,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
real_size = endp - bytes;
result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size);
- result_bin->orig_size = real_size;
level = context->s.ec.level;
BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
if (level == 0 || real_size < 6) { /* We are done */
@@ -1962,8 +1942,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->s.cc.result_bin = result_bin;
result_bin = erts_bin_nrml_alloc(real_size);
- result_bin->flags = 0;
- result_bin->orig_size = real_size;
erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
@@ -2005,7 +1983,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
erl_zlib_deflate_finish(&(context->s.cc.stream));
result_bin = erts_bin_realloc(context->s.cc.destination_bin,
context->s.cc.dest_len+6);
- result_bin->orig_size = context->s.cc.dest_len+6;
context->s.cc.destination_bin = NULL;
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
@@ -2327,8 +2304,10 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
#define ENC_TERM ((Eterm) 0)
#define ENC_ONE_CONS ((Eterm) 1)
#define ENC_PATCH_FUN_SIZE ((Eterm) 2)
-#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3)
-
+#define ENC_BIN_COPY ((Eterm) 3)
+#define ENC_MAP_PAIR ((Eterm) 4)
+#define ENC_HASHMAP_NODE ((Eterm) 5)
+#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 6)
static byte*
enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
@@ -2364,6 +2343,9 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
WSTACK_RESTORE(s, &ctx->wstack);
ep = ctx->ep;
obj = ctx->obj;
+ if (is_non_value(obj)) {
+ goto outer_loop;
+ }
}
}
@@ -2387,8 +2369,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
obj = CAR(cons);
tl = CDR(cons);
- WSTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
- WSTACK_PUSH(s, tl);
+ WSTACK_PUSH2(s, (is_list(tl) ? ENC_ONE_CONS : ENC_TERM),
+ tl);
}
break;
case ENC_PATCH_FUN_SIZE:
@@ -2401,6 +2383,46 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
put_int32(ep - size_p, size_p);
}
goto outer_loop;
+ case ENC_BIN_COPY: {
+ Uint bits = (Uint)obj;
+ Uint bitoffs = WSTACK_POP(s);
+ byte* bytes = (byte*) WSTACK_POP(s);
+ byte* dst = (byte*) WSTACK_POP(s);
+ if (bits > r * (TERM_TO_BINARY_MEMCPY_FACTOR * 8)) {
+ Uint n = r * TERM_TO_BINARY_MEMCPY_FACTOR;
+ WSTACK_PUSH5(s, (UWord)(dst + n), (UWord)(bytes + n), bitoffs,
+ ENC_BIN_COPY, bits - 8*n);
+ bits = 8*n;
+ copy_binary_to_buffer(dst, 0, bytes, bitoffs, bits);
+ obj = THE_NON_VALUE;
+ r = 0; /* yield */
+ break;
+ } else {
+ copy_binary_to_buffer(dst, 0, bytes, bitoffs, bits);
+ r -= bits / (TERM_TO_BINARY_MEMCPY_FACTOR * 8);
+ goto outer_loop;
+ }
+ }
+ case ENC_MAP_PAIR: {
+ Uint pairs_left = obj;
+ Eterm *vptr = (Eterm*) WSTACK_POP(s);
+ Eterm *kptr = (Eterm*) WSTACK_POP(s);
+
+ obj = *kptr;
+ if (--pairs_left > 0) {
+ WSTACK_PUSH4(s, (UWord)(kptr+1), (UWord)(vptr+1),
+ ENC_MAP_PAIR, pairs_left);
+ }
+ WSTACK_PUSH2(s, ENC_TERM, *vptr);
+ break;
+ }
+ case ENC_HASHMAP_NODE:
+ if (is_list(obj)) { /* leaf node [K|V] */
+ ptr = list_val(obj);
+ WSTACK_PUSH2(s, ENC_TERM, CDR(ptr));
+ obj = CAR(ptr);
+ }
+ break;
case ENC_LAST_ARRAY_ELEMENT:
/* obj is the tuple */
{
@@ -2419,17 +2441,16 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
#else
Eterm* ptr = (Eterm *) obj;
#endif
- WSTACK_PUSH(s, val-1);
obj = *ptr++;
- WSTACK_PUSH(s, (UWord)ptr);
+ WSTACK_PUSH2(s, val-1, (UWord)ptr);
}
break;
}
L_jump_start:
- if (ctx && --r == 0) {
- *reds = r;
+ if (ctx && --r <= 0) {
+ *reds = 0;
ctx->obj = obj;
ctx->ep = ep;
WSTACK_SAVE(s, &ctx->wstack);
@@ -2578,35 +2599,62 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 4;
}
if (i > 0) {
- WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
- WSTACK_PUSH(s, (UWord)ptr);
+ WSTACK_PUSH2(s, ENC_LAST_ARRAY_ELEMENT+i-1, (UWord)ptr);
}
break;
case MAP_DEF:
{
- map_t *mp = (map_t*)map_val(obj);
- Uint size = map_get_size(mp);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
+ Uint size = flatmap_get_size(mp);
*ep++ = MAP_EXT;
put_int32(size, ep); ep += 4;
if (size > 0) {
- Eterm *kptr = map_get_keys(mp);
- Eterm *vptr = map_get_values(mp);
-
- for (i = size-1; i >= 1; i--) {
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) vptr[i]);
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) kptr[i]);
- }
+ Eterm *kptr = flatmap_get_keys(mp);
+ Eterm *vptr = flatmap_get_values(mp);
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) vptr[0]);
+ WSTACK_PUSH4(s, (UWord)kptr, (UWord)vptr,
+ ENC_MAP_PAIR, size);
+ }
+ }
+ break;
- obj = kptr[0];
- goto L_jump_start;
+ case HASHMAP_DEF:
+ {
+ Eterm hdr;
+ Uint node_sz;
+ ptr = boxed_val(obj);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ *ep++ = MAP_EXT;
+ ptr++;
+ put_int32(*ptr, ep); ep += 4;
+ /*fall through*/
+ case HAMT_SUBTAG_NODE_ARRAY:
+ node_sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ *ep++ = MAP_EXT;
+ ptr++;
+ put_int32(*ptr, ep); ep += 4;
+ /*fall through*/
+ case HAMT_SUBTAG_NODE_BITMAP:
+ node_sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(node_sz < 17);
+ break;
+ default:
+ erl_exit(1, "bad header\r\n");
+ }
+
+ ptr++;
+ WSTACK_RESERVE(s, node_sz*2);
+ while(node_sz--) {
+ WSTACK_FAST_PUSH(s, ENC_HASHMAP_NODE);
+ WSTACK_FAST_PUSH(s, *ptr++);
}
}
break;
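The HASHMAP_DEF case added above derives the number of children in a HAMT node from its header: array nodes always carry 16 slots, while bitmap nodes carry one slot per set bit (hashmap_bitcount). A standalone sketch of that bitmap convention, with invented names and a plain 16-bit bitmap instead of the real header word:

    #include <stdint.h>
    #include <stdio.h>

    /* Count set bits in a 16-bit bitmap: each set bit marks one child slot. */
    static int bitmap_child_count(uint16_t bitmap)
    {
        int n = 0;
        while (bitmap) {
            bitmap &= bitmap - 1;   /* clear lowest set bit */
            n++;
        }
        return n;
    }

    /* Slot of the child for hash nibble 'ix' (0..15): set bits below it. */
    static int bitmap_child_index(uint16_t bitmap, int ix)
    {
        return bitmap_child_count(bitmap & ((1u << ix) - 1));
    }

    int main(void)
    {
        uint16_t bm = 0x0905;   /* children at nibbles 0, 2, 8 and 11 */
        printf("children: %d\n", bitmap_child_count(bm));             /* 4 */
        printf("slot for nibble 8: %d\n", bitmap_child_index(bm, 8)); /* 2 */
        return 0;
    }

The same popcount-below-a-bit trick is what lets a sparse node store only its populated children while still being addressable by hash nibble.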
@@ -2644,6 +2692,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Uint bitoffs;
Uint bitsize;
byte* bytes;
+ byte* data_dst;
ERTS_GET_BINARY_BYTES(obj, bytes, bitoffs, bitsize);
if (dflags & DFLAG_INTERNAL_TAGS) {
@@ -2689,7 +2738,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
j = binary_size(obj);
put_int32(j, ep);
ep += 4;
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j);
+ data_dst = ep;
ep += j;
} else if (dflags & DFLAG_BIT_BINARIES) {
/* Bit-level binary. */
@@ -2699,7 +2748,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 4;
*ep++ = bitsize;
ep[j] = 0; /* Zero unused bits at end of binary */
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ data_dst = ep;
ep += j + 1;
} else {
/*
@@ -2713,11 +2762,18 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
put_int32((j+1), ep);
ep += 4;
ep[j] = 0; /* Zero unused bits at end of binary */
- copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ data_dst = ep;
ep += j+1;
*ep++ = SMALL_INTEGER_EXT;
*ep++ = bitsize;
}
+ if (ctx && j > r * TERM_TO_BINARY_MEMCPY_FACTOR) {
+ WSTACK_PUSH5(s, (UWord)data_dst, (UWord)bytes, bitoffs,
+ ENC_BIN_COPY, 8*j + bitsize);
+ } else {
+ copy_binary_to_buffer(data_dst, 0, bytes, bitoffs,
+ 8 * j + bitsize);
+ }
}
break;
case EXPORT_DEF:
@@ -2746,13 +2802,12 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
case FUN_DEF:
{
ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
+ int ei;
if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
- int ei;
-
*ep++ = NEW_FUN_EXT;
- WSTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
- WSTACK_PUSH(s, (UWord) ep); /* Position for patching in size */
+ WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE,
+ (UWord) ep); /* Position for patching in size */
ep += 4;
*ep = funp->arity;
ep += 1;
@@ -2766,16 +2821,6 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap);
ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap);
ep = enc_pid(acmp, funp->creator, ep, dflags);
-
- fun_env:
- for (ei = funp->num_free-1; ei > 0; ei--) {
- WSTACK_PUSH(s, ENC_TERM);
- WSTACK_PUSH(s, (UWord) funp->env[ei]);
- }
- if (funp->num_free != 0) {
- obj = funp->env[0];
- goto L_jump_start;
- }
} else {
/*
* Communicating with an obsolete erl_interface or
@@ -2807,7 +2852,13 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
*ep++ = SMALL_TUPLE_EXT;
put_int8(funp->num_free, ep);
ep += 1;
- goto fun_env;
+ }
+ for (ei = funp->num_free-1; ei > 0; ei--) {
+ WSTACK_PUSH2(s, ENC_TERM, (UWord) funp->env[ei]);
+ }
+ if (funp->num_free != 0) {
+ obj = funp->env[0];
+ goto L_jump_start;
}
}
break;
@@ -2889,9 +2940,19 @@ undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
#endif /* DEBUG */
}
+struct dec_term_hamt_placeholder
+{
+ struct dec_term_hamt_placeholder* next;
+ Eterm* objp; /* write result here */
+ Uint size; /* nr of leafs */
+ Eterm leafs[1];
+};
+
+#define DEC_TERM_HAMT_PLACEHOLDER_SIZE \
+ (offsetof(struct dec_term_hamt_placeholder, leafs) / sizeof(Eterm))
/* Decode term from external format into *objp.
-** On failure return NULL and (R13B04) *hpp will be unchanged.
+** On failure return NULL and *hpp will be unchanged.
*/
static byte*
dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
@@ -2901,7 +2962,8 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
int n;
ErtsAtomEncoding char_enc;
register Eterm* hp; /* Please don't take the address of hp */
- Eterm *maps_head; /* for validation of maps */
+ Eterm *maps_list; /* for preprocessing of small maps */
+ struct dec_term_hamt_placeholder* hamt_list; /* for preprocessing of big maps */
Eterm* next;
SWord reds;
@@ -2911,7 +2973,8 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
next = ctx->u.dc.next;
ep = ctx->u.dc.ep;
hpp = &ctx->u.dc.hp;
- maps_head = ctx->u.dc.maps_head;
+ maps_list = ctx->u.dc.maps_list;
+ hamt_list = ctx->u.dc.hamt_list;
if (ctx->state != B2TDecode) {
int n_limit = reds;
@@ -2992,7 +3055,8 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
reds = ERTS_SWORD_MAX;
next = objp;
*next = (Eterm) (UWord) NULL;
- maps_head = NULL;
+ maps_list = NULL;
+ hamt_list = NULL;
}
hp = *hpp;
@@ -3389,8 +3453,6 @@ dec_term_atom_common:
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- dbin->flags = 0;
- dbin->orig_size = n;
erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
hp += PROC_BIN_SIZE;
@@ -3398,6 +3460,7 @@ dec_term_atom_common:
pb->size = n;
pb->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -3443,14 +3506,13 @@ dec_term_atom_common:
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- dbin->flags = 0;
- dbin->orig_size = n;
erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
pb->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -3532,46 +3594,67 @@ dec_term_atom_common:
break;
case MAP_EXT:
{
- map_t *mp;
Uint32 size,n;
Eterm *kptr,*vptr;
Eterm keys;
size = get_int32(ep); ep += 4;
- keys = make_tuple(hp);
- *hp++ = make_arityval(size);
- hp += size;
- kptr = hp - 1;
-
- mp = (map_t*)hp;
- hp += MAP_HEADER_SIZE;
- hp += size;
- vptr = hp - 1;
-
- /* kptr, last word for keys
- * vptr, last word for values
- */
-
- /*
- * Use thing_word to link through decoded maps.
- * The list of maps is for later validation.
- */
-
- mp->thing_word = (Eterm) COMPRESS_POINTER(maps_head);
- maps_head = (Eterm *) mp;
-
- mp->size = size;
- mp->keys = keys;
- *objp = make_map(mp);
-
- for (n = size; n; n--) {
- *vptr = (Eterm) COMPRESS_POINTER(next);
- *kptr = (Eterm) COMPRESS_POINTER(vptr);
- next = kptr;
- vptr--;
- kptr--;
- }
+ if (size <= MAP_SMALL_MAP_LIMIT) {
+ flatmap_t *mp;
+
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ hp += size;
+ kptr = hp - 1;
+
+ mp = (flatmap_t*)hp;
+ hp += MAP_HEADER_SIZE;
+ hp += size;
+ vptr = hp - 1;
+
+ /* kptr, last word for keys
+ * vptr, last word for values
+ */
+
+ /*
+ * Use thing_word to link through decoded maps.
+ * The list of maps is for later validation.
+ */
+
+ mp->thing_word = (Eterm) COMPRESS_POINTER(maps_list);
+ maps_list = (Eterm *) mp;
+
+ mp->size = size;
+ mp->keys = keys;
+ *objp = make_flatmap(mp);
+
+ for (n = size; n; n--) {
+ *vptr = (Eterm) COMPRESS_POINTER(next);
+ *kptr = (Eterm) COMPRESS_POINTER(vptr);
+ next = kptr;
+ vptr--;
+ kptr--;
+ }
+ }
+ else { /* Make hamt */
+ struct dec_term_hamt_placeholder* holder =
+ (struct dec_term_hamt_placeholder*) hp;
+
+ holder->next = hamt_list;
+ hamt_list = holder;
+ holder->objp = objp;
+ holder->size = size;
+
+ hp += DEC_TERM_HAMT_PLACEHOLDER_SIZE;
+
+ for (n = size; n; n--) {
+ CDR(hp) = (Eterm) COMPRESS_POINTER(next);
+ CAR(hp) = (Eterm) COMPRESS_POINTER(&CDR(hp));
+ next = &CAR(hp);
+ hp += 2;
+ }
+ }
}
break;
case NEW_FUN_EXT:
@@ -3749,6 +3832,7 @@ dec_term_atom_common:
hp += PROC_BIN_SIZE;
pb->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
pb->flags = 0;
*objp = make_binary(pb);
break;
@@ -3766,6 +3850,7 @@ dec_term_atom_common:
hp += PROC_BIN_SIZE;
pb->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
pb->flags = 0;
sub = (ErlSubBin*)hp;
@@ -3792,7 +3877,7 @@ dec_term_atom_common:
ctx->u.dc.ep = ep;
ctx->u.dc.next = next;
ctx->u.dc.hp = hp;
- ctx->u.dc.maps_head = maps_head;
+ ctx->u.dc.maps_list = maps_list;
ctx->reds = 0;
return NULL;
}
@@ -3807,12 +3892,40 @@ dec_term_atom_common:
* - done here for when we know it is complete.
*/
- while (maps_head) {
- next = (Eterm *)(EXPAND_POINTER(*maps_head));
- *maps_head = MAP_HEADER;
- if (!erts_validate_and_sort_map((map_t*)maps_head))
+ while (maps_list) {
+ next = (Eterm *)(EXPAND_POINTER(*maps_list));
+ *maps_list = MAP_HEADER;
+ if (!erts_validate_and_sort_flatmap((flatmap_t*)maps_list))
goto error;
- maps_head = next;
+ maps_list = next;
+ }
+
+ /* Iterate through all the hamts and build tree nodes.
+ */
+ if (hamt_list) {
+ ErtsHeapFactory factory;
+
+ factory.p = NULL;
+ factory.hp = hp;
+ /* We assume heap will suffice (see hashmap_over_estimated_heap_size) */
+
+ do {
+ struct dec_term_hamt_placeholder* hamt = hamt_list;
+ *hamt->objp = erts_hashmap_from_array(&factory,
+ hamt->leafs,
+ hamt->size,
+ 1);
+ if (is_non_value(*hamt->objp))
+ goto error;
+
+ hamt_list = hamt->next;
+
+ /* Yes, we waste a couple of heap words per hamt
+ for the temporary placeholder */
+ *(Eterm*)hamt = make_pos_bignum_header(DEC_TERM_HAMT_PLACEHOLDER_SIZE-1);
+ } while (hamt_list);
+
+ hp = factory.hp;
}
if (ctx) {
@@ -4010,15 +4123,15 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
break;
case MAP_DEF:
{
- map_t *mp = (map_t*)map_val(obj);
- Uint size = map_get_size(mp);
+ flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
+ Uint size = flatmap_get_size(mp);
Uint i;
Eterm *ptr;
result += 1 + 4; /* tag + 4 bytes size */
/* push values first */
- ptr = map_get_values(mp);
+ ptr = flatmap_get_values(mp);
i = size;
while(i--) {
if (is_list(*ptr)) {
@@ -4032,7 +4145,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
++ptr;
}
- ptr = map_get_keys(mp);
+ ptr = flatmap_get_keys(mp);
i = size;
while(i--) {
if (is_list(*ptr)) {
@@ -4048,6 +4161,38 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
goto outer_loop;
}
break;
+
+ case HASHMAP_DEF:
+ {
+ Eterm *ptr;
+ Eterm hdr;
+ Uint node_sz;
+ ptr = boxed_val(obj);
+ hdr = *ptr;
+ ASSERT(is_header(hdr));
+ switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: ptr++;
+ case HAMT_SUBTAG_NODE_ARRAY:
+ node_sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP: ptr++;
+ case HAMT_SUBTAG_NODE_BITMAP:
+ node_sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(node_sz < 17);
+ break;
+ default:
+ erl_exit(1, "bad header\r\n");
+ }
+
+ ptr++;
+ ESTACK_RESERVE(s, node_sz);
+ while(node_sz--) {
+ ESTACK_FAST_PUSH(s, *ptr++);
+ }
+ result += 1 + 4; /* tag + 4 bytes size */
+ }
+
+ break;
case FLOAT_DEF:
if (dflags & DFLAG_NEW_FLOATS) {
result += 9;
@@ -4150,6 +4295,8 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
return 0;
}
+
+
static Sint
decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx)
{
@@ -4343,7 +4490,11 @@ init_done:
n = get_int32(ep);
ep += 4;
ADDTERMS(2*n);
- heap_size += 3 + n + 1 + n;
+ if (n <= MAP_SMALL_MAP_LIMIT) {
+ heap_size += 3 + n + 1 + n;
+ } else {
+ heap_size += hashmap_over_estimated_heap_size(n);
+ }
break;
case STRING_EXT:
CHKSIZE(2);
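Several of the enc_term changes above exist to make encoding interruptible: a large binary is no longer copied with a single copy_binary_to_buffer call; when the copy would exceed the remaining reduction budget, the leftover work is pushed back on the WSTACK under ENC_BIN_COPY and finished on a later call. A condensed model of that yield pattern, with invented names, a plain byte buffer, and the continuation kept in a struct instead of on a work stack:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical resumable copy: copies at most 'budget' chunks per call
     * and records how far it got so the caller can call again later. */
    struct copy_job {
        const unsigned char *src;
        unsigned char *dst;
        size_t left;              /* bytes still to copy */
    };

    #define CHUNK 1024            /* bytes copied per "reduction" */

    /* Returns 1 when the job is finished, 0 when it yielded. */
    static int copy_resumable(struct copy_job *job, unsigned budget)
    {
        while (job->left > 0) {
            size_t n = job->left < CHUNK ? job->left : CHUNK;
            if (budget == 0)
                return 0;         /* out of budget: yield */
            memcpy(job->dst, job->src, n);
            job->dst  += n;
            job->src  += n;
            job->left -= n;
            budget--;
        }
        return 1;
    }

    int main(void)
    {
        unsigned char src[10000] = {0}, dst[10000];
        struct copy_job job = { src, dst, sizeof(src) };
        int yields = 0;
        while (!copy_resumable(&job, 3))  /* 3 chunks (3 KiB) per slice */
            yields++;
        printf("yielded %d times\n", yields);  /* 3 */
        return 0;
    }

In the real code the saved state is the destination pointer, source pointer, bit offset and remaining bit count, pushed with WSTACK_PUSH5 under the ENC_BIN_COPY marker.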
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index 10565f67e5..50fcfa04d6 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -150,6 +150,7 @@ typedef struct {
Uint extsize;
} ErtsBinary2TermState;
+
/* -------------------------------------------------------------------------- */
void erts_init_atom_cache_map(ErtsAtomCacheMap *);
@@ -160,8 +161,12 @@ void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32);
Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_finalize(byte *, ErtsAtomCache *, Uint32);
-Uint erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap *);
-void erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *);
+struct erts_dsig_send_context;
+int erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp);
+int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp);
+struct TTBEncodeContext_;
+int erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *,
+ struct TTBEncodeContext_ *, Sint* reds);
Uint erts_encode_ext_size(Eterm);
Uint erts_encode_ext_size_2(Eterm, unsigned);
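The new declarations above change erts_encode_dist_ext_size and erts_encode_dist_ext from one-shot calls into calls that may return before they are done: a context argument carries the continuation and the reds/szp out-parameters report progress. A toy model of driving such a resumable call in bounded slices; all names here are invented and the "work" is a stand-in:

    #include <stdio.h>

    struct job { long remaining; };

    /* Performs up to *reds units of work and decrements *reds by the work
     * actually done. Returns 1 when finished, 0 when the budget ran out. */
    static int job_step(struct job *j, long *reds)
    {
        long n = j->remaining < *reds ? j->remaining : *reds;
        j->remaining -= n;
        *reds -= n;
        return j->remaining == 0;
    }

    int main(void)
    {
        struct job j = { 2500 };
        int slices = 0;
        for (;;) {
            long reds = 1000;          /* budget per scheduling slice */
            slices++;
            if (job_step(&j, &reds))
                break;                 /* a real caller would reschedule here */
        }
        printf("finished after %d slices\n", slices);  /* 3 */
        return 0;
    }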
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 32a2dc43e8..634fe533d0 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -41,6 +41,7 @@
#include "error.h"
#include "erl_utils.h"
#include "erl_port.h"
+#include "erl_gc.h"
struct enif_environment_t /* ErlNifEnv */
{
@@ -347,8 +348,6 @@ extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
extern erts_smp_atomic32_t erts_max_gen_gcs;
-extern int erts_disable_tolerant_timeofday;
-
extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */
extern int stackdump_on_exit;
@@ -371,16 +370,17 @@ extern int stackdump_on_exit;
* DESTROY_ESTACK(Stack)
*/
-typedef struct {
+typedef struct ErtsEStack_ {
Eterm* start;
Eterm* sp;
Eterm* end;
+ Eterm* edefault;
ErtsAlcType_t alloc_type;
}ErtsEStack;
#define DEF_ESTACK_SIZE (16)
-void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
+void erl_grow_estack(ErtsEStack*, Uint need);
#define ESTK_CONCAT(a,b) a##b
#define ESTK_DEF_STACK(s) ESTK_CONCAT(s,_default_estack)
@@ -390,22 +390,23 @@ void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
ESTK_DEF_STACK(s), /* start */ \
ESTK_DEF_STACK(s), /* sp */ \
ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \
+ ESTK_DEF_STACK(s), /* default */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
#define ESTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (s.start != ESTK_DEF_STACK(s)) { \
+ if ((s).start != ESTK_DEF_STACK(s)) { \
erl_exit(1, "Internal error - trying to change allocator " \
"type of active estack\n"); \
} \
- s.alloc_type = (t); \
+ (s).alloc_type = (t); \
} while (0)
#define DESTROY_ESTACK(s) \
do { \
- if (s.start != ESTK_DEF_STACK(s)) { \
- erts_free(s.alloc_type, s.start); \
+ if ((s).start != ESTK_DEF_STACK(s)) { \
+ erts_free((s).alloc_type, (s).start); \
} \
} while(0)
@@ -416,16 +417,17 @@ do { \
*/
#define ESTACK_SAVE(s,dst)\
do {\
- if (s.start == ESTK_DEF_STACK(s)) {\
+ if ((s).start == ESTK_DEF_STACK(s)) {\
UWord _wsz = ESTACK_COUNT(s);\
- (dst)->start = erts_alloc(s.alloc_type,\
+ (dst)->start = erts_alloc((s).alloc_type,\
DEF_ESTACK_SIZE * sizeof(Eterm));\
- memcpy((dst)->start, s.start,_wsz*sizeof(Eterm));\
+ memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\
(dst)->sp = (dst)->start + _wsz;\
(dst)->end = (dst)->start + DEF_ESTACK_SIZE;\
- (dst)->alloc_type = s.alloc_type;\
+ (dst)->edefault = NULL;\
+ (dst)->alloc_type = (s).alloc_type;\
} else\
- *(dst) = s;\
+ *(dst) = (s);\
} while (0)
#define DESTROY_SAVED_ESTACK(estack)\
@@ -444,72 +446,114 @@ do {\
*/
#define ESTACK_RESTORE(s, src) \
do { \
- ASSERT(s.start == ESTK_DEF_STACK(s)); \
- s = *(src); /* struct copy */ \
+ ASSERT((s).start == ESTK_DEF_STACK(s)); \
+ (s) = *(src); /* struct copy */ \
(src)->start = NULL; \
- ASSERT(s.sp >= s.start); \
- ASSERT(s.sp <= s.end); \
+ ASSERT((s).sp >= (s).start); \
+ ASSERT((s).sp <= (s).end); \
} while (0)
-#define ESTACK_IS_STATIC(s) (s.start == ESTK_DEF_STACK(s)))
+#define ESTACK_IS_STATIC(s) ((s).start == ESTK_DEF_STACK(s))
-#define ESTACK_PUSH(s, x) \
-do { \
- if (s.sp == s.end) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
- } \
- *s.sp++ = (x); \
+#define ESTACK_PUSH(s, x) \
+do { \
+ if ((s).sp == (s).end) { \
+ erl_grow_estack(&(s), 1); \
+ } \
+ *(s).sp++ = (x); \
} while(0)
#define ESTACK_PUSH2(s, x, y) \
do { \
- if (s.sp > s.end - 2) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ if ((s).sp > (s).end - 2) { \
+ erl_grow_estack(&(s), 2); \
} \
- *s.sp++ = (x); \
- *s.sp++ = (y); \
+ *(s).sp++ = (x); \
+ *(s).sp++ = (y); \
} while(0)
#define ESTACK_PUSH3(s, x, y, z) \
do { \
- if (s.sp > s.end - 3) { \
- erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ if ((s).sp > (s).end - 3) { \
+ erl_grow_estack(&s, 3); \
} \
- *s.sp++ = (x); \
- *s.sp++ = (y); \
- *s.sp++ = (z); \
+ *(s).sp++ = (x); \
+ *(s).sp++ = (y); \
+ *(s).sp++ = (z); \
} while(0)
-#define ESTACK_COUNT(s) (s.sp - s.start)
-#define ESTACK_ISEMPTY(s) (s.sp == s.start)
-#define ESTACK_POP(s) (*(--s.sp))
+#define ESTACK_PUSH4(s, E1, E2, E3, E4) \
+do { \
+ if ((s).sp > (s).end - 4) { \
+ erl_grow_estack(&s, 4); \
+ } \
+ *(s).sp++ = (E1); \
+ *(s).sp++ = (E2); \
+ *(s).sp++ = (E3); \
+ *(s).sp++ = (E4); \
+} while(0)
+
+#define ESTACK_RESERVE(s, push_cnt) \
+do { \
+ if ((s).sp > (s).end - (push_cnt)) { \
+ erl_grow_estack(&(s), (push_cnt)); \
+ } \
+} while(0)
+
+/* Must be preceded by ESTACK_RESERVE */
+#define ESTACK_FAST_PUSH(s, x) \
+do { \
+ ASSERT((s).sp < (s).end); \
+ *s.sp++ = (x); \
+} while(0)
+
+#define ESTACK_COUNT(s) ((s).sp - (s).start)
+#define ESTACK_ISEMPTY(s) ((s).sp == (s).start)
+#define ESTACK_POP(s) (*(--(s).sp))
/*
* WSTACK: same as ESTACK but with UWord instead of Eterm
*/
-typedef struct {
+typedef struct ErtsWStack_ {
UWord* wstart;
UWord* wsp;
UWord* wend;
+ UWord* wdefault;
ErtsAlcType_t alloc_type;
}ErtsWStack;
#define DEF_WSTACK_SIZE (16)
-void erl_grow_wstack(ErtsWStack*, UWord* def_stack);
+void erl_grow_wstack(ErtsWStack*, Uint need);
#define WSTK_CONCAT(a,b) a##b
#define WSTK_DEF_STACK(s) WSTK_CONCAT(s,_default_wstack)
-#define DECLARE_WSTACK(s) \
+#define WSTACK_DECLARE(s) \
UWord WSTK_DEF_STACK(s)[DEF_WSTACK_SIZE]; \
ErtsWStack s = { \
WSTK_DEF_STACK(s), /* wstart */ \
WSTK_DEF_STACK(s), /* wsp */ \
WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \
+ WSTK_DEF_STACK(s), /* wdflt */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
+#define DECLARE_WSTACK WSTACK_DECLARE
+
+typedef struct ErtsDynamicWStack_ {
+ UWord default_stack[DEF_WSTACK_SIZE];
+ ErtsWStack ws;
+}ErtsDynamicWStack;
+
+#define WSTACK_INIT(dwsp, ALC_TYPE) \
+do { \
+ (dwsp)->ws.wstart = (dwsp)->default_stack; \
+ (dwsp)->ws.wsp = (dwsp)->default_stack; \
+ (dwsp)->ws.wend = (dwsp)->default_stack + DEF_WSTACK_SIZE;\
+ (dwsp)->ws.wdefault = (dwsp)->default_stack; \
+ (dwsp)->ws.alloc_type = ALC_TYPE; \
+} while (0)
#define WSTACK_CHANGE_ALLOCATOR(s,t) \
do { \
@@ -520,13 +564,20 @@ do { \
s.alloc_type = (t); \
} while (0)
-#define DESTROY_WSTACK(s) \
+#define WSTACK_DESTROY(s) \
do { \
- if (s.wstart != WSTK_DEF_STACK(s)) { \
+ if (s.wstart != s.wdefault) { \
erts_free(s.alloc_type, s.wstart); \
} \
} while(0)
+#define DESTROY_WSTACK WSTACK_DESTROY
+#define WSTACK_DEBUG(s) \
+ do { \
+ fprintf(stderr, "wstack size = %ld\r\n", s.wsp - s.wstart); \
+ fprintf(stderr, "wstack wstart = %p\r\n", s.wstart); \
+ fprintf(stderr, "wstack wsp = %p\r\n", s.wsp); \
+ } while(0)
/*
* Do not free the stack after this, it may have pointers into what
@@ -541,6 +592,7 @@ do {\
memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
(dst)->wsp = (dst)->wstart + _wsz;\
(dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\
+ (dst)->wdefault = NULL;\
(dst)->alloc_type = s.alloc_type;\
} else\
*(dst) = s;\
@@ -569,12 +621,12 @@ do { \
ASSERT(s.wsp <= s.wend); \
} while (0)
-#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s)))
+#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s))
#define WSTACK_PUSH(s, x) \
do { \
if (s.wsp == s.wend) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ erl_grow_wstack(&s, 1); \
} \
*s.wsp++ = (x); \
} while(0)
@@ -582,7 +634,7 @@ do { \
#define WSTACK_PUSH2(s, x, y) \
do { \
if (s.wsp > s.wend - 2) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ erl_grow_wstack(&s, 2); \
} \
*s.wsp++ = (x); \
*s.wsp++ = (y); \
@@ -590,17 +642,129 @@ do { \
#define WSTACK_PUSH3(s, x, y, z) \
do { \
- if (s.wsp > s.wend - 3) { \
- erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ if (s.wsp > s.wend - 3) { \
+ erl_grow_wstack(&s, 3); \
} \
*s.wsp++ = (x); \
*s.wsp++ = (y); \
*s.wsp++ = (z); \
} while(0)
+#define WSTACK_PUSH4(s, A1, A2, A3, A4) \
+do { \
+ if (s.wsp > s.wend - 4) { \
+ erl_grow_wstack(&s, 4); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+} while(0)
+
+#define WSTACK_PUSH5(s, A1, A2, A3, A4, A5) \
+do { \
+ if (s.wsp > s.wend - 5) { \
+ erl_grow_wstack(&s, 5); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+ *s.wsp++ = (A5); \
+} while(0)
+
+#define WSTACK_PUSH6(s, A1, A2, A3, A4, A5, A6) \
+do { \
+ if (s.wsp > s.wend - 6) { \
+ erl_grow_wstack(&s, 6); \
+ } \
+ *s.wsp++ = (A1); \
+ *s.wsp++ = (A2); \
+ *s.wsp++ = (A3); \
+ *s.wsp++ = (A4); \
+ *s.wsp++ = (A5); \
+ *s.wsp++ = (A6); \
+} while(0)
+
+#define WSTACK_RESERVE(s, push_cnt) \
+do { \
+ if (s.wsp > s.wend - (push_cnt)) { \
+ erl_grow_wstack(&s, (push_cnt)); \
+ } \
+} while(0)
+
+/* Must be preceded by WSTACK_RESERVE */
+#define WSTACK_FAST_PUSH(s, x) \
+do { \
+ ASSERT(s.wsp < s.wend); \
+ *s.wsp++ = (x); \
+} while(0)
+
#define WSTACK_COUNT(s) (s.wsp - s.wstart)
#define WSTACK_ISEMPTY(s) (s.wsp == s.wstart)
-#define WSTACK_POP(s) (*(--s.wsp))
+#define WSTACK_POP(s) ((ASSERT(s.wsp > s.wstart)),*(--s.wsp))
+
+#define WSTACK_ROLLBACK(s, count) (ASSERT(WSTACK_COUNT(s) >= (count)), \
+ s.wsp = s.wstart + (count))
+
+/* PSTACK - Stack of any type.
+ * Usage:
+ * {
+ * #define PSTACK_TYPE MyType
+ * PSTACK_DECLARE(s,16);
+ * MyType *sp = PSTACK_PUSH(s);
+ *
+ * sp->x = ....
+ * sp->y = ....
+ * sp = PSTACK_PUSH(s);
+ * ...
+ * sp = PSTACK_POP(s);
+ * if (PSTACK_IS_EMPTY(s)) {
+ * // sp is invalid when stack is empty after pop
+ * }
+ *
+ * PSTACK_DESTROY(s);
+ * }
+ */
+
+
+typedef struct ErtsPStack_ {
+ byte* pstart;
+ byte* psp;
+ byte* pend;
+ ErtsAlcType_t alloc_type;
+}ErtsPStack;
+
+void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes);
+#define PSTK_CONCAT(a,b) a##b
+#define PSTK_DEF_STACK(s) PSTK_CONCAT(s,_default_pstack)
+
+#define PSTACK_DECLARE(s, DEF_PSTACK_SIZE) \
+PSTACK_TYPE PSTK_DEF_STACK(s)[DEF_PSTACK_SIZE]; \
+ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
+ (byte*)(PSTK_DEF_STACK(s) - 1), /* psp */ \
+ (byte*)(PSTK_DEF_STACK(s) + (DEF_PSTACK_SIZE)), /* pend */\
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+}
+
+#define PSTACK_DESTROY(s) \
+do { \
+ if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.pstart); \
+ } \
+} while(0)
+
+#define PSTACK_IS_EMPTY(s) (s.psp < s.pstart)
+
+#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp))
+
+#define PSTACK_PUSH(s) \
+ (s.psp += sizeof(PSTACK_TYPE), \
+ ((s.psp == s.pend) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
+ sizeof(PSTACK_TYPE)) : (void)0), \
+ ((PSTACK_TYPE*) s.psp))
+
+#define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE)))
/* binary.c */
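The ESTACK/WSTACK rework above replaces grow-by-doubling-on-overflow with growth by the number of elements the caller is about to push (erl_grow_estack and erl_grow_wstack now take a need argument), which is what makes the new RESERVE plus FAST_PUSH pairs safe: reserve once, then push with only an assertion. A self-contained sketch of that pattern on a plain word stack; the types, the doubling loop and the missing error handling are simplifications, not the ERTS implementation:

    #include <assert.h>
    #include <stdlib.h>

    typedef unsigned long uword;

    struct wstack { uword *start, *sp, *end; };

    static void wstack_init(struct wstack *s, size_t cap)  /* cap > 0 */
    {
        s->start = malloc(cap * sizeof(uword));            /* error check omitted */
        s->sp    = s->start;
        s->end   = s->start + cap;
    }

    static void wstack_grow(struct wstack *s, size_t need)
    {
        size_t used = (size_t)(s->sp - s->start);
        size_t cap  = (size_t)(s->end - s->start);
        while (cap - used < need)
            cap *= 2;          /* sizing policy simplified; see the utils.c change */
        s->start = realloc(s->start, cap * sizeof(uword)); /* error check omitted */
        s->sp    = s->start + used;
        s->end   = s->start + cap;
    }

    /* Reserve room for 'n' pushes once... */
    static void wstack_reserve(struct wstack *s, size_t n)
    {
        if ((size_t)(s->end - s->sp) < n)
            wstack_grow(s, n);
    }

    /* ...then push with only an assertion, as WSTACK_FAST_PUSH does. */
    static void wstack_fast_push(struct wstack *s, uword x)
    {
        assert(s->sp < s->end);
        *s->sp++ = x;
    }

    int main(void)
    {
        struct wstack s;
        wstack_init(&s, 16);
        wstack_reserve(&s, 32);        /* e.g. 2*node_sz for a full HAMT node */
        for (uword i = 0; i < 32; i++)
            wstack_fast_push(&s, i);
        free(s.start);
        return 0;
    }

A caller that is about to push 2*node_sz words, as the HASHMAP_DEF encoder does, reserves once before its loop and skips the capacity check on every push.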
@@ -623,9 +787,6 @@ erts_bld_port_info(Eterm **hpp,
void erts_bif_info_init(void);
/* bif.c */
-Eterm erts_make_ref(Process *);
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
-void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
ERTS_GLB_INLINE Eterm
erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
@@ -810,23 +971,6 @@ void MD5Init(MD5_CTX *);
void MD5Update(MD5_CTX *, unsigned char *, unsigned int);
void MD5Final(unsigned char [16], MD5_CTX *);
-/* ggc.c */
-
-void erts_gc_info(ErtsGCInfo *gcip);
-void erts_init_gc(void);
-int erts_garbage_collect(Process*, int, Eterm*, int);
-void erts_garbage_collect_hibernate(Process* p);
-Eterm erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(Process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh);
-Uint erts_next_heap_size(Uint, Uint);
-Eterm erts_heap_sizes(Process* p);
-
-void erts_offset_off_heap(ErlOffHeap *, Sint, Eterm*, Eterm*);
-void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
-void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
-void erts_free_heap_frags(Process* p);
/* io.c */
@@ -1161,7 +1305,9 @@ erts_alloc_message_heap_state(Uint size,
state = erts_smp_atomic32_read_acqb(&receiver->state);
if (statep)
*statep = state;
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ if (state & (ERTS_PSFLG_OFF_HEAP_MSGS
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))
goto allocate_in_mbuf;
#endif
@@ -1181,7 +1327,9 @@ erts_alloc_message_heap_state(Uint size,
state = erts_smp_atomic32_read_nob(&receiver->state);
if (statep)
*statep = state;
- if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ if ((state & (ERTS_PSFLG_OFF_HEAP_MSGS
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))
|| (receiver->flags & F_DISABLE_GC)
|| HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
/*
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 7c6696405b..46f0eba5e0 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -47,6 +47,7 @@
#include "external.h"
#include "dtrace-wrapper.h"
#include "erl_map.h"
+#include "erl_bif_unique.h"
extern ErlDrvEntry fd_driver_entry;
#ifndef __OSE__
@@ -391,7 +392,7 @@ static Port *create_port(char *name,
/* Set default tracing */
erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
- ASSERT(((char *) prt) == ((char *) &prt->common));
+ ERTS_CT_ASSERT(offsetof(Port,common) == 0);
#if !ERTS_PORT_INIT_INSTR_NEED_ID
/*
@@ -3153,8 +3154,6 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
Binary* bptr;
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, buf, len);
@@ -5352,7 +5351,11 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_MAP: { /* int */
ERTS_DDT_CHK_ENOUGH_ARGS(1);
if ((int) ptr[0] < 0) ERTS_DDT_FAIL;
- need += MAP_HEADER_SIZE + 1 + 2*ptr[0];
+ if (ptr[0] > MAP_SMALL_MAP_LIMIT) {
+ need += hashmap_over_estimated_heap_size(ptr[0]);
+ } else {
+ need += MAP_HEADER_SIZE + 1 + 2*ptr[0];
+ }
depth -= 2*ptr[0];
if (depth < 0) ERTS_DDT_FAIL;
ptr++;
@@ -5507,8 +5510,6 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
ProcBin* pbp;
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
- bp->flags = 0;
- bp->orig_size = (SWord) size;
erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
pbp = (ProcBin *) hp;
@@ -5598,31 +5599,52 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_MAP: { /* int */
int size = (int)ptr[0];
- Eterm* tp = hp;
- Eterm* vp;
- map_t *mp;
-
- *tp = make_arityval(size);
-
- hp += 1 + size;
- mp = (map_t*)hp;
- mp->thing_word = MAP_HEADER;
- mp->size = size;
- mp->keys = make_tuple(tp);
- mess = make_map(mp);
-
- hp += MAP_HEADER_SIZE + size; /* advance "heap" pointer */
-
- tp += size; /* point at last key */
- vp = hp - 1; /* point at last value */
-
- while(size--) {
- *vp-- = ESTACK_POP(stack);
- *tp-- = ESTACK_POP(stack);
- }
- if (!erts_validate_and_sort_map(mp))
- ERTS_DDT_FAIL;
- ptr++;
+ if (size > MAP_SMALL_MAP_LIMIT) {
+ int ix = 2*size;
+ ErtsHeapFactory factory;
+ Eterm* leafs = hp;
+
+ hp += 2*size;
+ while(ix--) { *--hp = ESTACK_POP(stack); }
+
+ hp += 2*size;
+ factory.p = NULL;
+ factory.hp = hp;
+ /* We assume heap will suffice (see hashmap_over_estimated_heap_size) */
+
+ mess = erts_hashmap_from_array(&factory, leafs, size, 1);
+
+ if (is_non_value(mess))
+ ERTS_DDT_FAIL;
+
+ hp = factory.hp;
+ } else {
+ Eterm* tp = hp;
+ Eterm* vp;
+ flatmap_t *mp;
+
+ *tp = make_arityval(size);
+
+ hp += 1 + size;
+ mp = (flatmap_t*)hp;
+ mp->thing_word = MAP_HEADER;
+ mp->size = size;
+ mp->keys = make_tuple(tp);
+ mess = make_flatmap(mp);
+
+ hp += MAP_HEADER_SIZE + size; /* advance "heap" pointer */
+
+ tp += size; /* point at last key */
+ vp = hp - 1; /* point at last value */
+
+ while(size--) {
+ *vp-- = ESTACK_POP(stack);
+ *tp-- = ESTACK_POP(stack);
+ }
+ if (!erts_validate_and_sort_flatmap(mp))
+ ERTS_DDT_FAIL;
+ }
+ ptr++;
break;
}
@@ -6000,9 +6022,7 @@ driver_alloc_binary(ErlDrvSizeT size)
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
- bin->flags = BIN_FLAG_DRV;
erts_refc_init(&bin->refc, 1);
- bin->orig_size = (SWord) size;
return Binary2ErlDrvBinary(bin);
}
@@ -6032,7 +6052,6 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
if (!newbin)
return NULL;
- newbin->orig_size = size;
return Binary2ErlDrvBinary(newbin);
}
@@ -6706,7 +6725,7 @@ static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
RefThing *refp;
ASSERT(is_internal_ref(ref));
- ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
+ ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
refp = ref_thing_ptr(ref);
memset(mon,0,sizeof(ErlDrvMonitor));
memcpy(mon,refp,sizeof(RefThing));
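Both the decoder (MAP_EXT) and driver_deliver_term above now pick a map representation by pair count: up to MAP_SMALL_MAP_LIMIT pairs yields a flatmap (sorted key tuple plus value array), anything larger is built with erts_hashmap_from_array. A small sketch of that dispatch and of the flat heap estimate used in decoded_size; the limit value 32 is illustrative, not taken from the source:

    #include <stddef.h>
    #include <stdio.h>

    #define SMALL_MAP_LIMIT 32   /* illustrative threshold, not the real constant */

    enum map_repr { FLATMAP, HASHMAP };

    /* Decide the representation from the number of key/value pairs, the same
     * way the decode and driver paths branch on MAP_SMALL_MAP_LIMIT. */
    static enum map_repr choose_map_repr(size_t pairs)
    {
        return pairs <= SMALL_MAP_LIMIT ? FLATMAP : HASHMAP;
    }

    /* Heap words for the flat representation, mirroring the "3 + n + 1 + n"
     * estimate in decoded_size above: 1 + n for the key tuple (arity word +
     * keys) and 3 + n for the flatmap header words plus the values. */
    static size_t flatmap_words(size_t pairs)
    {
        return (1 + pairs) + (3 + pairs);
    }

    int main(void)
    {
        printf("%s, %zu words if flat\n",
               choose_map_repr(20) == FLATMAP ? "flatmap" : "hashmap",
               flatmap_words(20));   /* flatmap, 44 words */
        return 0;
    }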
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 68fcc177ae..d3649080dc 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -166,22 +166,26 @@ is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \
select_tuple_arity S=d Fail=f Size=u Rest=* => \
gen_select_tuple_arity(S, Fail, Size, Rest)
-i_select_val r f I
-i_select_val x f I
-i_select_val y f I
+i_select_val_bins r f I
+i_select_val_bins x f I
+i_select_val_bins y f I
-i_select_val2 r f c f c f
-i_select_val2 x f c f c f
-i_select_val2 y f c f c f
+i_select_val_lins r f I
+i_select_val_lins x f I
+i_select_val_lins y f I
-i_select_tuple_arity2 r f A f A f
-i_select_tuple_arity2 x f A f A f
-i_select_tuple_arity2 y f A f A f
+i_select_val2 r f c c f f
+i_select_val2 x f c c f f
+i_select_val2 y f c c f f
i_select_tuple_arity r f I
i_select_tuple_arity x f I
i_select_tuple_arity y f I
+i_select_tuple_arity2 r f A A f f
+i_select_tuple_arity2 x f A A f f
+i_select_tuple_arity2 y f A A f f
+
i_jump_on_val_zero r f I
i_jump_on_val_zero x f I
i_jump_on_val_zero y f I
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index c29d4b3777..a0f35fef1b 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -21,6 +21,25 @@
#define __SYS_H__
+#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+# undef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 0
+# undef ERTS_INLINE
+# define ERTS_INLINE
+#endif
+
+#if ERTS_CAN_INLINE
+#define ERTS_GLB_INLINE static ERTS_INLINE
+#else
+#define ERTS_GLB_INLINE
+#endif
+
+#if ERTS_CAN_INLINE || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 1
+#else
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 0
+#endif
+
#if defined(VALGRIND) && !defined(NO_FPE_SIGNALS)
# define NO_FPE_SIGNALS
#endif
@@ -132,24 +151,8 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# endif
#endif
-#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
-# undef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 0
-# undef ERTS_INLINE
-# define ERTS_INLINE
-#endif
-
-#if ERTS_CAN_INLINE
-#define ERTS_GLB_INLINE static ERTS_INLINE
-#else
-#define ERTS_GLB_INLINE
-#endif
-
-#if ERTS_CAN_INLINE || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
-# define ERTS_GLB_INLINE_INCL_FUNC_DEF 1
-#else
-# define ERTS_GLB_INLINE_INCL_FUNC_DEF 0
-#endif
+#define ERTS_MK_VSN_INT(Major, Minor, Build) \
+ ((((Major) & 0x3ff) << 20) | (((Minor) & 0x3ff) << 10) | ((Build) & 0x3ff))
#ifndef ERTS_EXIT_AFTER_DUMP
# define ERTS_EXIT_AFTER_DUMP exit
@@ -188,6 +191,32 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
# define ASSERT(e) ((void) 1)
#endif
+/* ERTS_UNDEF can be used to silence false warnings about
+ * "variable may be used uninitialized" while keeping the variable
+ * marked as undefined by valgrind.
+ */
+#ifdef VALGRIND
+# define ERTS_UNDEF(V,I)
+#else
+# define ERTS_UNDEF(V,I) V = I
+#endif
+
+/*
+ * Compile time assert
+ * (the actual compiler error msg can be a bit confusing)
+ */
+#if ERTS_AT_LEAST_GCC_VSN__(3,1,1)
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = __builtin_choose_expr((e),0,(void)0) }; \
+ } while(0)
+#else
+# define ERTS_CT_ASSERT(e) \
+ do { \
+ enum { compile_time_assert__ = 1/(e) }; \
+ } while (0)
+#endif
+
/*
* Microsoft C/C++: We certainly want to use stdarg.h and prototypes.
* But MSC doesn't define __STDC__, unless we compile with the -Za
@@ -359,17 +388,45 @@ typedef Sint SWord;
typedef UWord BeamInstr;
#ifndef HAVE_INT64
-#if SIZEOF_LONG == 8
-#define HAVE_INT64 1
+# if SIZEOF_LONG == 8
+# define HAVE_INT64 1
typedef unsigned long Uint64;
typedef long Sint64;
-#elif SIZEOF_LONG_LONG == 8
-#define HAVE_INT64 1
+# ifdef ULONG_MAX
+# define ERTS_UINT64_MAX ULONG_MAX
+# endif
+# ifdef LONG_MAX
+# define ERTS_SINT64_MAX LONG_MAX
+# endif
+# ifdef LONG_MIN
+# define ERTS_SINT64_MIN LONG_MIN
+# endif
+# elif SIZEOF_LONG_LONG == 8
+# define HAVE_INT64 1
typedef unsigned long long Uint64;
typedef long long Sint64;
-#else
-#define HAVE_INT64 0
+# ifdef ULLONG_MAX
+# define ERTS_UINT64_MAX ULLONG_MAX
+# endif
+# ifdef LLONG_MAX
+# define ERTS_SINT64_MAX LLONG_MAX
+# endif
+# ifdef LLONG_MIN
+# define ERTS_SINT64_MIN LLONG_MIN
+# endif
+# else
+# error "No 64-bit integer type found"
+# endif
#endif
+
+#ifndef ERTS_UINT64_MAX
+# define ERTS_UINT64_MAX (~((Uint64) 0))
+#endif
+#ifndef ERTS_SINT64_MAX
+# define ERTS_SINT64_MAX ((Sint64) ((((Uint64) 1) << 63)-1))
+#endif
+#ifndef ERTS_SINT64_MIN
+# define ERTS_SINT64_MIN (-1*(((Sint64) 1) << 63))
#endif
#if SIZEOF_LONG == 4
@@ -646,10 +703,32 @@ extern char *erts_default_arg0;
extern char os_type[];
-extern int sys_init_time(void);
+typedef enum {
+ ERTS_NO_TIME_WARP_MODE,
+ ERTS_SINGLE_TIME_WARP_MODE,
+ ERTS_MULTI_TIME_WARP_MODE
+} ErtsTimeWarpMode;
+
+typedef struct {
+ int have_os_monotonic;
+ ErtsMonotonicTime os_monotonic_time_unit;
+ ErtsMonotonicTime sys_clock_resolution;
+ struct {
+ Uint64 resolution;
+ char *func;
+ char *clock_id;
+ int locked_use;
+ } os_monotonic_info;
+} ErtsSysInitTimeResult;
+
+#define ERTS_SYS_INIT_TIME_RESULT_INITER \
+ {0, (ErtsMonotonicTime) -1, (ErtsMonotonicTime) 1}
+
+extern void erts_init_sys_time_sup(void);
+extern void sys_init_time(ErtsSysInitTimeResult *);
extern void erts_deliver_time(void);
extern void erts_time_remaining(SysTimeval *);
-extern int erts_init_time_sup(void);
+extern int erts_init_time_sup(int, ErtsTimeWarpMode);
extern void erts_sys_init_float(void);
extern void erts_thread_init_float(void);
extern void erts_thread_disable_fpe(void);
@@ -700,7 +779,7 @@ extern char *erts_sys_ddll_error(int code);
void erts_sys_schedule_interrupt(int set);
#ifdef ERTS_SMP
-void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
+void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime);
void erts_sys_main_thread(void);
#endif
@@ -739,6 +818,7 @@ int univ_to_local(
int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
+ErtsMonotonicTime erts_get_monotonic_time(void);
void get_sys_now(Uint*, Uint*, Uint*);
void set_break_quit(void (*)(void), void (*)(void));
@@ -756,6 +836,8 @@ typedef struct {
} ErtsCheckIoDebugInfo;
int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
+int erts_sys_is_area_readable(char *start, char *stop);
+
/* xxxP */
#define SYS_DEFAULT_FLOAT_DECIMALS 20
void init_sys_float(void);
@@ -784,6 +866,11 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
+extern void sys_thr_resume(erts_tid_t tid);
+extern void sys_thr_suspend(erts_tid_t tid);
+#endif
+
/* utils.c */
/* Options to sys_alloc_opt */
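ERTS_CT_ASSERT added above makes a condition fail at compile time: with GCC 3.1.1 or later it uses __builtin_choose_expr, otherwise it forces a division by zero inside an enum initializer whenever the condition is false. The fallback works in plain C; a small demonstration with invented names and types:

    /* Compile-time assert via a constant expression: 1/(e) is a valid enum
     * initializer only when e is non-zero, so a false condition fails to compile. */
    #define MY_CT_ASSERT(e)                               \
        do {                                              \
            enum { my_compile_time_assert__ = 1/(e) };    \
        } while (0)

    struct ref  { unsigned words[3]; };
    struct slot { unsigned char bytes[16]; };

    static void check_sizes(void)
    {
        /* Same shape as the ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor))
         * call in io.c above, but with made-up types. */
        MY_CT_ASSERT(sizeof(struct ref) <= sizeof(struct slot));
    }

    int main(void)
    {
        check_sizes();
        return 0;
    }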
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 2fd8e0cf00..9f997e1d0b 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -83,7 +83,8 @@
#define ASSERT_NO_LOCKED_LOCKS
#endif
-static erts_smp_mtx_t tiw_lock;
+#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
+#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
/* BEGIN tiw_lock protected variables
@@ -91,18 +92,12 @@ static erts_smp_mtx_t tiw_lock;
** The individual timer cells in tiw are also protected by the same mutex.
*/
+/* timing wheel size NEEDS to be a power of 2 */
#ifdef SMALL_MEMORY
-#define TIW_SIZE 8192
+#define TIW_SIZE (1 << 13)
#else
-#define TIW_SIZE 65536 /* timing wheel size (should be a power of 2) */
+#define TIW_SIZE (1 << 20)
#endif
-static ErlTimer** tiw; /* the timing wheel, allocated in init_time() */
-static Uint tiw_pos; /* current position in wheel */
-static Uint tiw_nto; /* number of timeouts in wheel */
-static Uint tiw_min;
-static ErlTimer *tiw_min_ptr;
-
-/* END tiw_lock protected variables */
/* Actual interval time chosen by sys_init_time() */
@@ -114,83 +109,135 @@ static int tiw_itime; /* Constant after init */
# define TIW_ITIME tiw_itime
#endif
-erts_smp_atomic32_t do_time; /* set at clock interrupt */
-static ERTS_INLINE erts_short_time_t do_time_read(void)
+struct ErtsTimerWheel_ {
+ ErlTimer *w[TIW_SIZE];
+ ErtsMonotonicTime pos;
+ Uint nto;
+ struct {
+ ErlTimer *head;
+ ErlTimer **tail;
+ Uint nto;
+ } at_once;
+ int true_next_timeout_time;
+ ErtsMonotonicTime next_timeout_time;
+ erts_atomic64_t next_timeout;
+ erts_smp_atomic32_t is_bumping;
+ erts_smp_mtx_t lock;
+};
+
+ErtsTimerWheel *erts_default_timer_wheel; /* managed by aux thread */
+
+static ERTS_INLINE ErtsTimerWheel *
+get_timer_wheel(ErlTimer *p)
+{
+ return (ErtsTimerWheel *) erts_smp_atomic_read_acqb(&p->wheel);
+}
+
+static ERTS_INLINE void
+set_timer_wheel(ErlTimer *p, ErtsTimerWheel *tiw)
{
- return erts_smp_atomic32_read_acqb(&do_time);
+ erts_smp_atomic_set_relb(&p->wheel, (erts_aint_t) tiw);
}
-static ERTS_INLINE erts_short_time_t do_time_update(void)
+static ERTS_INLINE void
+init_next_timeout(ErtsTimerWheel *tiw,
+ ErtsMonotonicTime time)
{
- return do_time_read();
+ erts_atomic64_init_nob(&tiw->next_timeout,
+ (erts_aint64_t) time);
}
-static ERTS_INLINE void do_time_init(void)
+static ERTS_INLINE void
+set_next_timeout(ErtsTimerWheel *tiw,
+ ErtsMonotonicTime time,
+ int true_timeout)
{
- erts_smp_atomic32_init_nob(&do_time, 0);
+ tiw->true_next_timeout_time = true_timeout;
+ tiw->next_timeout_time = time;
+ erts_atomic64_set_relb(&tiw->next_timeout,
+ (erts_aint64_t) time);
}
/* get the time (in units of TIW_ITIME) to the next timeout,
or -1 if there are no timeouts */
-static erts_short_time_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
+static ERTS_INLINE ErtsMonotonicTime
+find_next_timeout(ErtsTimerWheel *tiw,
+ ErtsMonotonicTime curr_time,
+ ErtsMonotonicTime max_search_time)
{
- int i, tm, nto;
- Uint32 min;
- ErlTimer* p;
- erts_short_time_t dt;
-
- if (tiw_nto == 0)
- return -1; /* no timeouts in wheel */
+ int start_ix, tiw_pos_ix;
+ ErlTimer *p;
+ int true_min_timeout;
+ ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos, timeout_limit;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&tiw->lock));
+
+ if (tiw->true_next_timeout_time)
+ return tiw->next_timeout_time;
- if (tiw_min_ptr) {
- min = tiw_min;
- dt = do_time_read();
- return ((min >= dt) ? (min - dt) : 0);
+ /* We never set next timeout beyond timeout_limit */
+ timeout_limit = curr_time + ERTS_MONOTONIC_DAY;
+
+ if (tiw->nto == 0) { /* no timeouts in wheel */
+ true_min_timeout = tiw->true_next_timeout_time = 0;
+ min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(timeout_limit);
+ goto found_next;
}
-
- /* start going through wheel to find next timeout */
- tm = nto = 0;
- min = (Uint32) -1; /* max Uint32 */
- i = tiw_pos;
+
+ /*
+ * Don't want others entering trying to bump
+ * timers while we are checking...
+ */
+ set_next_timeout(tiw, timeout_limit, 0);
+
+ true_min_timeout = 1;
+ slot_timeout_pos = tiw->pos;
+ min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
+
+ start_ix = tiw_pos_ix = (int) (tiw->pos & (TIW_SIZE-1));
+
do {
- p = tiw[i];
- while (p != NULL) {
- nto++;
- if (p->count == 0) {
- /* found next timeout */
- dt = do_time_read();
- /* p->count is zero */
- tiw_min_ptr = p;
- tiw_min = tm;
- return ((tm >= dt) ? (tm - dt) : 0);
- } else {
- /* keep shortest time in 'min' */
- if (tm + p->count*TIW_SIZE < min) {
- min = tm + p->count*TIW_SIZE;
- tiw_min_ptr = p;
- tiw_min = min;
- }
+ slot_timeout_pos++;
+ if (slot_timeout_pos >= min_timeout_pos) {
+ true_min_timeout = 0;
+ break;
+ }
+
+ p = tiw->w[tiw_pos_ix];
+
+ while (p) {
+ ErtsMonotonicTime timeout_pos;
+ ASSERT(p != p->next);
+ timeout_pos = p->timeout_pos;
+ if (min_timeout_pos > timeout_pos) {
+ min_timeout_pos = timeout_pos;
+ if (min_timeout_pos <= slot_timeout_pos)
+ goto found_next;
}
p = p->next;
}
- /* when we have found all timeouts the shortest time will be in min */
- if (nto == tiw_nto) break;
- tm++;
- i = (i + 1) % TIW_SIZE;
- } while (i != tiw_pos);
- dt = do_time_read();
- if (min <= (Uint32) dt)
- return 0;
- if ((min - (Uint32) dt) > (Uint32) ERTS_SHORT_TIME_T_MAX)
- return ERTS_SHORT_TIME_T_MAX;
- return (erts_short_time_t) (min - (Uint32) dt);
+
+ tiw_pos_ix++;
+ if (tiw_pos_ix == TIW_SIZE)
+ tiw_pos_ix = 0;
+ } while (start_ix != tiw_pos_ix);
+
+found_next:
+
+ min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
+ if (min_timeout != tiw->next_timeout_time)
+ set_next_timeout(tiw, min_timeout, true_min_timeout);
+
+ return min_timeout;
}
-static void remove_timer(ErlTimer *p) {
+static void
+remove_timer(ErtsTimerWheel *tiw, ErlTimer *p)
+{
/* first */
if (!p->prev) {
- tiw[p->slot] = p->next;
+ tiw->w[p->slot] = p->next;
if(p->next)
p->next->prev = NULL;
} else {
@@ -207,79 +254,164 @@ static void remove_timer(ErlTimer *p) {
p->next = NULL;
p->prev = NULL;
- /* Make sure cancel callback isn't called */
- p->active = 0;
- tiw_nto--;
+
+ set_timer_wheel(p, NULL);
+ tiw->nto--;
+}
+
+ErtsMonotonicTime
+erts_check_next_timeout_time(ErtsTimerWheel *tiw,
+ ErtsMonotonicTime max_search_time)
+{
+ ErtsMonotonicTime next, curr;
+
+ curr = erts_get_monotonic_time();
+
+ erts_smp_mtx_lock(&tiw->lock);
+
+ next = find_next_timeout(tiw, curr, max_search_time);
+
+ erts_smp_mtx_unlock(&tiw->lock);
+
+ return next;
}
-/* Private export to erl_time_sup.c */
-erts_short_time_t erts_next_time(void)
+#ifndef DEBUG
+#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) ((void) 0)
+#else
+#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) debug_check_safe_to_skip_to((TIW), (TO))
+static void
+debug_check_safe_to_skip_to(ErtsTimerWheel *tiw, ErtsMonotonicTime skip_to_pos)
{
- erts_short_time_t ret;
+ int slots, ix;
+ ErlTimer *tmr;
+ ErtsMonotonicTime tmp;
+
+ ix = (int) (tiw->pos & (TIW_SIZE-1));
+ tmp = skip_to_pos - tiw->pos;
+ ASSERT(tmp >= 0);
+ if (tmp < (ErtsMonotonicTime) TIW_SIZE)
+ slots = (int) tmp;
+ else
+ slots = TIW_SIZE;
- erts_smp_mtx_lock(&tiw_lock);
- (void)do_time_update();
- ret = next_time_internal();
- erts_smp_mtx_unlock(&tiw_lock);
- return ret;
+ while (slots > 0) {
+ tmr = tiw->w[ix];
+ while (tmr) {
+ ASSERT(tmr->timeout_pos > skip_to_pos);
+ tmr = tmr->next;
+ }
+ ix++;
+ if (ix == TIW_SIZE)
+ ix = 0;
+ slots--;
+ }
}
+#endif
-static ERTS_INLINE void bump_timer_internal(erts_short_time_t dt) /* PRE: tiw_lock is write-locked */
+void
+erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- Uint keep_pos;
- Uint count;
- ErlTimer *p, **prev, *timeout_head, **timeout_tail;
- Uint dtime = (Uint) dt;
-
- /* no need to bump the position if there aren't any timeouts */
- if (tiw_nto == 0) {
- erts_smp_mtx_unlock(&tiw_lock);
- return;
+ int tiw_pos_ix, slots;
+ ErlTimer *p, *timeout_head, **timeout_tail;
+ ErtsMonotonicTime bump_to, tmp_slots;
+
+ if (erts_smp_atomic32_cmpxchg_nob(&tiw->is_bumping, 1, 0) != 0)
+ return; /* Another thread is currently bumping... */
+
+ bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+
+ erts_smp_mtx_lock(&tiw->lock);
+
+ if (tiw->pos >= bump_to) {
+ timeout_head = NULL;
+ goto done;
}
- /* if do_time > TIW_SIZE we want to go around just once */
- count = (Uint)(dtime / TIW_SIZE) + 1;
- keep_pos = (tiw_pos + dtime) % TIW_SIZE;
- if (dtime > TIW_SIZE) dtime = TIW_SIZE;
-
- timeout_head = NULL;
- timeout_tail = &timeout_head;
- while (dtime > 0) {
- /* this is to decrease the counters with the right amount */
- /* when dtime >= TIW_SIZE */
- if (tiw_pos == keep_pos) count--;
- prev = &tiw[tiw_pos];
- while ((p = *prev) != NULL) {
- ASSERT( p != p->next);
- if (p->count < count) { /* we have a timeout */
- /* remove min time */
- if (tiw_min_ptr == p) {
- tiw_min_ptr = NULL;
- tiw_min = 0;
- }
+ /* Don't want others here while we are bumping... */
+ set_next_timeout(tiw, curr_time + ERTS_MONOTONIC_DAY, 0);
+ if (!tiw->at_once.head) {
+ timeout_head = NULL;
+ timeout_tail = &timeout_head;
+ }
+ else {
+ ASSERT(tiw->nto >= tiw->at_once.nto);
+ timeout_head = tiw->at_once.head;
+ timeout_tail = tiw->at_once.tail;
+ tiw->nto -= tiw->at_once.nto;
+ tiw->at_once.head = NULL;
+ tiw->at_once.tail = &tiw->at_once.head;
+ tiw->at_once.nto = 0;
+ }
+
+ if (tiw->nto == 0) {
+ ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
+ tiw->pos = bump_to;
+ goto done;
+ }
+
+ if (tiw->true_next_timeout_time) {
+ ErtsMonotonicTime skip_until_pos;
+ /*
+	 * No need to inspect slots where we know that no
+	 * timeouts due to trigger reside.
+ */
+
+ skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+ if (skip_until_pos > bump_to)
+ skip_until_pos = bump_to;
+
+ ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
+ ASSERT(skip_until_pos > tiw->pos);
+
+ tiw->pos = skip_until_pos - 1;
+ }
+
+ tiw_pos_ix = (int) ((tiw->pos+1) & (TIW_SIZE-1));
+ tmp_slots = (bump_to - tiw->pos);
+ if (tmp_slots < (ErtsMonotonicTime) TIW_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = TIW_SIZE;
+
+ while (slots > 0) {
+ p = tiw->w[tiw_pos_ix];
+ while (p) {
+ ErlTimer *next = p->next;
+ ASSERT(p != next);
+ if (p->timeout_pos <= bump_to) { /* we have a timeout */
/* Remove from list */
- remove_timer(p);
+ remove_timer(tiw, p);
*timeout_tail = p; /* Insert in timeout queue */
timeout_tail = &p->next;
}
- else {
- /* no timeout, just decrease counter */
- p->count -= count;
- prev = &p->next;
- }
+ p = next;
}
- tiw_pos = (tiw_pos + 1) % TIW_SIZE;
- dtime--;
+ tiw_pos_ix++;
+ if (tiw_pos_ix == TIW_SIZE)
+ tiw_pos_ix = 0;
+ slots--;
}
- tiw_pos = keep_pos;
- if (tiw_min_ptr)
- tiw_min -= dt;
-
- erts_smp_mtx_unlock(&tiw_lock);
+
+ ASSERT(tmp_slots >= (ErtsMonotonicTime) TIW_SIZE
+ || tiw_pos_ix == (int) ((bump_to+1) & (TIW_SIZE-1)));
+
+ tiw->pos = bump_to;
+
+ /* Search at most two seconds ahead... */
+ (void) find_next_timeout(tiw, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+
+done:
+
+ erts_smp_mtx_unlock(&tiw->lock);
+ erts_smp_atomic32_set_nob(&tiw->is_bumping, 0);
+
/* Call timedout timers callbacks */
while (timeout_head) {
+ ErlTimeoutProc timeout;
+ void *arg;
p = timeout_head;
timeout_head = p->next;
/* Here comes hairy use of the timer fields!
@@ -288,35 +420,69 @@ static ERTS_INLINE void bump_timer_internal(erts_short_time_t dt) /* PRE: tiw_lo
* accesses any field until the ->timeout
* callback is called.
*/
+ ASSERT(p->timeout_pos <= bump_to);
p->next = NULL;
p->prev = NULL;
p->slot = 0;
- (*p->timeout)(p->arg);
+ timeout = p->timeout;
+ arg = p->arg;
+ (*timeout)(arg);
}
}
-void erts_bump_timer(erts_short_time_t dt) /* dt is value from do_time */
+Uint
+erts_timer_wheel_memory_size(void)
+{
+#ifdef ERTS_SMP
+ return sizeof(ErtsTimerWheel)*(1 + erts_no_schedulers);
+#else
+ return sizeof(ErtsTimerWheel);
+#endif
+}
+
+ErtsTimerWheel *
+erts_create_timer_wheel(int no)
{
- erts_smp_mtx_lock(&tiw_lock);
- bump_timer_internal(dt);
+ ErtsMonotonicTime mtime;
+ int i;
+ ErtsTimerWheel *tiw;
+ tiw = (ErtsTimerWheel *) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
+ sizeof(ErtsTimerWheel));
+ for(i = 0; i < TIW_SIZE; i++)
+ tiw->w[i] = NULL;
+
+ erts_smp_atomic32_init_nob(&tiw->is_bumping, 0);
+ erts_smp_mtx_init_x(&tiw->lock, "timer_wheel", make_small(no));
+
+ mtime = erts_get_monotonic_time();
+ tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
+ tiw->nto = 0;
+ tiw->at_once.head = NULL;
+ tiw->at_once.tail = &tiw->at_once.head;
+ tiw->at_once.nto = 0;
+ tiw->true_next_timeout_time = 0;
+ tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
+ init_next_timeout(tiw, mtime + ERTS_MONOTONIC_DAY);
+ return tiw;
}
-Uint
-erts_timer_wheel_memory_size(void)
+ErtsNextTimeoutRef
+erts_get_next_timeout_reference(ErtsTimerWheel *tiw)
{
- return (Uint) TIW_SIZE * sizeof(ErlTimer*);
+ return (ErtsNextTimeoutRef) &tiw->next_timeout;
}
+
/* this routine links the time cells into a free list at the start
and sets the time queue as empty */
void
-erts_init_time(void)
+erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
{
- int i, itime;
+ int itime;
/* system dependent init; must be done before do_time_init()
if timer thread is enabled */
- itime = erts_init_time_sup();
+ itime = erts_init_time_sup(time_correction, time_warp_mode);
#ifdef TIW_ITIME_IS_CONSTANT
if (itime != TIW_ITIME) {
erl_exit(ERTS_ABORT_EXIT, "timer resolution mismatch %d != %d", itime, TIW_ITIME);
@@ -325,117 +491,110 @@ erts_init_time(void)
tiw_itime = itime;
#endif
- erts_smp_mtx_init(&tiw_lock, "timer_wheel");
-
- tiw = (ErlTimer**) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
- TIW_SIZE * sizeof(ErlTimer*));
- for(i = 0; i < TIW_SIZE; i++)
- tiw[i] = NULL;
- do_time_init();
- tiw_pos = tiw_nto = 0;
- tiw_min_ptr = NULL;
- tiw_min = 0;
+ erts_default_timer_wheel = erts_create_timer_wheel(0);
}
+void
+erts_set_timer(ErlTimer *p, ErlTimeoutProc timeout,
+ ErlCancelProc cancel, void *arg, Uint to)
+{
+ ErtsMonotonicTime timeout_time, timeout_pos;
+ ErtsMonotonicTime curr_time;
+ ErtsTimerWheel *tiw;
+ ErtsSchedulerData *esdp;
+
+ curr_time = erts_get_monotonic_time();
+ esdp = erts_get_scheduler_data();
+ if (esdp)
+ tiw = esdp->timer_wheel;
+ else
+ tiw = erts_default_timer_wheel;
+ erts_smp_mtx_lock(&tiw->lock);
+ if (get_timer_wheel(p))
+ ERTS_INTERNAL_ERROR("Double set timer");
-/*
-** Insert a process into the time queue, with a timeout 't'
-*/
-static void
-insert_timer(ErlTimer* p, Uint t)
-{
- Uint tm;
- Uint64 ticks;
+ p->timeout = timeout;
+ p->cancel = cancel;
+ p->arg = arg;
- /* The current slot (tiw_pos) in timing wheel is the next slot to be
- * be processed. Hence no extra time tick is needed.
- *
- * (x + y - 1)/y is precisely the "number of bins" formula.
- */
- ticks = (t + (TIW_ITIME - 1)) / TIW_ITIME;
+ if (to == 0) {
+ timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->nto++;
+ tiw->at_once.nto++;
+ *tiw->at_once.tail = p;
+ p->next = NULL;
+ p->timeout_pos = timeout_pos;
+ timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ }
+ else {
+ int tm;
+ ErtsMonotonicTime ticks;
- /*
- * Ticks must be a Uint64, or the addition may overflow here,
- * resulting in an incorrect value for p->count below.
- */
- ticks += do_time_update(); /* Add backlog of unprocessed time */
-
- /* calculate slot */
- tm = (ticks + tiw_pos) % TIW_SIZE;
- p->slot = (Uint) tm;
- p->count = (Uint) (ticks / TIW_SIZE);
+ ticks = ERTS_MSEC_TO_CLKTCKS(to);
+ timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time - 1) + 1 + ticks;
+
+ /* calculate slot */
+ tm = (int) (timeout_pos & (TIW_SIZE-1));
+ p->slot = (Uint) tm;
- /* insert at head of list at slot */
- p->next = tiw[tm];
- p->prev = NULL;
- if (p->next != NULL)
- p->next->prev = p;
- tiw[tm] = p;
+ /* insert at head of list at slot */
+ p->next = tiw->w[tm];
+ p->prev = NULL;
+ if (p->next != NULL)
+ p->next->prev = p;
+ tiw->w[tm] = p;
+ tiw->nto++;
- /* insert min time */
- if ((tiw_nto == 0) || ((tiw_min_ptr != NULL) && (ticks < tiw_min))) {
- tiw_min = ticks;
- tiw_min_ptr = p;
- }
- if ((tiw_min_ptr == p) && (ticks > tiw_min)) {
- /* some other timer might be 'min' now */
- tiw_min = 0;
- tiw_min_ptr = NULL;
+ timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ p->timeout_pos = timeout_pos;
+
+ ASSERT(ERTS_MSEC_TO_MONOTONIC(to) <= timeout_time - curr_time);
+ ASSERT(timeout_time - curr_time
+ < ERTS_MSEC_TO_MONOTONIC(to) + ERTS_CLKTCKS_TO_MONOTONIC(1));
}
- tiw_nto++;
-}
+ if (timeout_time < tiw->next_timeout_time)
+ set_next_timeout(tiw, timeout_time, 1);
-void
-erts_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
- void* arg, Uint t)
-{
+ set_timer_wheel(p, tiw);
+
+ erts_smp_mtx_unlock(&tiw->lock);
- erts_deliver_time();
- erts_smp_mtx_lock(&tiw_lock);
- if (p->active) { /* XXX assert ? */
- erts_smp_mtx_unlock(&tiw_lock);
- return;
- }
- p->timeout = timeout;
- p->cancel = cancel;
- p->arg = arg;
- p->active = 1;
- insert_timer(p, t);
- erts_smp_mtx_unlock(&tiw_lock);
#if defined(ERTS_SMP)
- if (t <= (Uint) ERTS_SHORT_TIME_T_MAX)
- erts_sys_schedule_interrupt_timed(1, (erts_short_time_t) t);
+ if (tiw == erts_default_timer_wheel)
+ erts_interupt_aux_thread_timed(timeout_time);
#endif
+
}
void
-erts_cancel_timer(ErlTimer* p)
+erts_cancel_timer(ErlTimer *p)
{
- erts_smp_mtx_lock(&tiw_lock);
- if (!p->active) { /* allow repeated cancel (drivers) */
- erts_smp_mtx_unlock(&tiw_lock);
+ ErtsTimerWheel *tiw;
+ ErlCancelProc cancel;
+ void *arg;
+
+ tiw = get_timer_wheel(p);
+ if (!tiw)
return;
- }
+
+ erts_smp_mtx_lock(&tiw->lock);
+ if (tiw != get_timer_wheel(p))
+ cancel = NULL;
+ else {
+ remove_timer(tiw, p);
+ p->slot = 0;
- /* is it the 'min' timer, remove min */
- if (p == tiw_min_ptr) {
- tiw_min_ptr = NULL;
- tiw_min = 0;
+ cancel = p->cancel;
+ arg = p->arg;
}
+ erts_smp_mtx_unlock(&tiw->lock);
- remove_timer(p);
- p->slot = p->count = 0;
-
- if (p->cancel != NULL) {
- erts_smp_mtx_unlock(&tiw_lock);
- (*p->cancel)(p->arg);
- return;
- }
- erts_smp_mtx_unlock(&tiw_lock);
+ if (cancel)
+ (*cancel)(arg);
}
/*
@@ -447,59 +606,58 @@ erts_cancel_timer(ErlTimer* p)
Uint
erts_time_left(ErlTimer *p)
{
- Uint left;
- erts_short_time_t dt;
+ ErtsTimerWheel *tiw;
+ ErtsMonotonicTime current_time, timeout_time;
- erts_smp_mtx_lock(&tiw_lock);
-
- if (!p->active) {
- erts_smp_mtx_unlock(&tiw_lock);
+ tiw = get_timer_wheel(p);
+ if (!tiw)
return 0;
- }
- if (p->slot < tiw_pos)
- left = (p->count + 1) * TIW_SIZE + p->slot - tiw_pos;
+ erts_smp_mtx_lock(&tiw->lock);
+ if (tiw != get_timer_wheel(p))
+ timeout_time = ERTS_MONOTONIC_TIME_MIN;
else
- left = p->count * TIW_SIZE + p->slot - tiw_pos;
- dt = do_time_read();
- if (left < dt)
- left = 0;
- else
- left -= dt;
-
- erts_smp_mtx_unlock(&tiw_lock);
+ timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos);
+ erts_smp_mtx_unlock(&tiw->lock);
- return (Uint) left * TIW_ITIME;
+ current_time = erts_get_monotonic_time();
+ if (timeout_time <= current_time)
+ return 0;
+ return (Uint) ERTS_MONOTONIC_TO_MSEC(timeout_time - current_time);
}
#ifdef DEBUG
void erts_p_slpq(void)
{
+ ErtsTimerWheel *tiw = erts_default_timer_wheel;
+ ErtsMonotonicTime current_time = erts_get_monotonic_time();
int i;
ErlTimer* p;
- erts_smp_mtx_lock(&tiw_lock);
+ erts_smp_mtx_lock(&tiw->lock);
/* print the whole wheel, starting at the current position */
- erts_printf("\ntiw_pos = %d tiw_nto %d\n", tiw_pos, tiw_nto);
- i = tiw_pos;
- if (tiw[i] != NULL) {
+ erts_printf("\ncurrent time = %bps tiw_pos = %d tiw_nto %d\n",
+ current_time, tiw->pos, tiw->nto);
+ i = tiw->pos;
+ if (tiw->w[i] != NULL) {
erts_printf("%d:\n", i);
- for(p = tiw[i]; p != NULL; p = p->next) {
- erts_printf(" (count %d, slot %d)\n",
- p->count, p->slot);
+ for(p = tiw->w[i]; p != NULL; p = p->next) {
+ erts_printf(" (timeout time %bps, slot %d)\n",
+ ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos),
+ p->slot);
}
}
- for(i = (i+1)%TIW_SIZE; i != tiw_pos; i = (i+1)%TIW_SIZE) {
- if (tiw[i] != NULL) {
+ for(i = ((i+1) & (TIW_SIZE-1)); i != (tiw->pos & (TIW_SIZE-1)); i = ((i+1) & (TIW_SIZE-1))) {
+ if (tiw->w[i] != NULL) {
erts_printf("%d:\n", i);
- for(p = tiw[i]; p != NULL; p = p->next) {
- erts_printf(" (count %d, slot %d)\n",
- p->count, p->slot);
+ for(p = tiw->w[i]; p != NULL; p = p->next) {
+ erts_printf(" (timeout time %bps, slot %d)\n",
+ ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos), p->slot);
}
}
}
- erts_smp_mtx_unlock(&tiw_lock);
+ erts_smp_mtx_unlock(&tiw->lock);
}
#endif /* DEBUG */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index f810fca9a4..127f1e4a6a 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -49,6 +49,10 @@
#include "beam_bp.h"
#include "erl_ptab.h"
#include "erl_check_io.h"
+#include "erl_bif_unique.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -187,12 +191,18 @@ erts_set_hole_marker(Eterm* ptr, Uint sz)
* Helper function for the ESTACK macros defined in global.h.
*/
void
-erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
+erl_grow_estack(ErtsEStack* s, Uint need)
{
Uint old_size = (s->end - s->start);
- Uint new_size = old_size * 2;
+ Uint new_size;
Uint sp_offs = s->sp - s->start;
- if (s->start != default_estack) {
+
+ if (need < old_size)
+ new_size = 2*old_size;
+ else
+ new_size = ((need / old_size) + 2) * old_size;
+
+ if (s->start != s->edefault) {
s->start = erts_realloc(s->alloc_type, s->start,
new_size*sizeof(Eterm));
} else {
@@ -207,12 +217,18 @@ erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
* Helper function for the WSTACK macros defined in global.h.
*/
void
-erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
+erl_grow_wstack(ErtsWStack* s, Uint need)
{
Uint old_size = (s->wend - s->wstart);
- Uint new_size = old_size * 2;
+ Uint new_size;
Uint sp_offs = s->wsp - s->wstart;
- if (s->wstart != default_wstack) {
+
+ if (need < old_size)
+ new_size = 2 * old_size;
+ else
+ new_size = ((need / old_size) + 2) * old_size;
+
+ if (s->wstart != s->wdefault) {
s->wstart = erts_realloc(s->alloc_type, s->wstart,
new_size*sizeof(UWord));
} else {
@@ -224,6 +240,32 @@ erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
s->wsp = s->wstart + sp_offs;
}
+/*
+ * Helper function for the PSTACK macros defined in global.h.
+ */
+void
+erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
+{
+ Uint old_size = s->pend - s->pstart;
+ Uint new_size;
+ Uint sp_offs = s->psp - s->pstart;
+
+ if (need_bytes < old_size)
+ new_size = 2 * old_size;
+ else
+ new_size = ((need_bytes / old_size) + 2) * old_size;
+
+ if (s->pstart != default_pstack) {
+ s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size);
+ } else {
+ byte* new_ptr = erts_alloc(s->alloc_type, new_size);
+ sys_memcpy(new_ptr, s->pstart, old_size);
+ s->pstart = new_ptr;
+ }
+ s->pend = s->pstart + new_size;
+ s->psp = s->pstart + sp_offs;
+}
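The three grow helpers above (erl_grow_estack, erl_grow_wstack and erl_grow_pstack) now share one sizing rule: double the buffer while the requested amount still fits within a doubling, otherwise jump straight to a multiple of the old size that covers the request. A minimal stand-alone sketch of just that rule (the names here are illustrative, not ERTS code):

    #include <assert.h>
    #include <stddef.h>

    /* Sizing rule shared by the ESTACK/WSTACK/PSTACK grow helpers:
     * grow geometrically, but never return less than 'need' units. */
    static size_t grow_size(size_t old_size, size_t need)
    {
        if (need < old_size)
            return 2 * old_size;                    /* plain doubling        */
        return ((need / old_size) + 2) * old_size;  /* cover large requests  */
    }

    int main(void)
    {
        assert(grow_size(16, 10) == 32);   /* need fits: double             */
        assert(grow_size(16, 40) == 64);   /* (40/16 + 2) * 16 = 64 >= 40   */
        return 0;
    }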
+
/* CTYPE macros */
#define LATIN1
@@ -311,6 +353,17 @@ int erts_fit_in_bits_int32(Sint32 value)
return fit_in_bits((Sint64) (Uint32) value, 4);
}
+int erts_fit_in_bits_uint(Uint value)
+{
+#if ERTS_SIZEOF_ETERM == 4
+ return fit_in_bits((Sint64) (Uint32) value, 4);
+#elif ERTS_SIZEOF_ETERM == 8
+ return fit_in_bits(value, 5);
+#else
+# error "No way, Jose"
+#endif
+}
+
int
erts_print(int to, void *arg, char *format, ...)
{
@@ -789,10 +842,10 @@ Uint32 make_hash(Eterm term_arg)
unsigned op;
/* Must not collide with the real tag_val_def's: */
-#define MAKE_HASH_TUPLE_OP 0x11
-#define MAKE_HASH_TERM_ARRAY_OP 0x12
-#define MAKE_HASH_CDR_PRE_OP 0x13
-#define MAKE_HASH_CDR_POST_OP 0x14
+#define MAKE_HASH_TUPLE_OP (FIRST_VACANT_TAG_DEF)
+#define MAKE_HASH_TERM_ARRAY_OP (FIRST_VACANT_TAG_DEF+1)
+#define MAKE_HASH_CDR_PRE_OP (FIRST_VACANT_TAG_DEF+2)
+#define MAKE_HASH_CDR_POST_OP (FIRST_VACANT_TAG_DEF+3)
/*
** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
@@ -972,21 +1025,9 @@ tail_recur:
break;
}
case MAP_DEF:
+ case HASHMAP_DEF:
{
- map_t *mp = (map_t *)map_val(term);
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
-
- /* Use a prime with size to remedy some of
- * the {} and <<>> hash problems */
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
- if (size == 0)
- break;
-
- /* push values first */
- WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
break;
}
case TUPLE_DEF:
@@ -1092,10 +1133,11 @@ Uint32
make_hash2(Eterm term)
{
Uint32 hash;
- Uint32 hash_xor_keys = 0;
- Uint32 hash_xor_values = 0;
+ Uint32 hash_xor_pairs;
DeclareTmpHeapNoproc(tmp_big,2);
+ ERTS_UNDEF(hash_xor_pairs, 0);
+
/* (HCONST * {2, ..., 16}) mod 2^32 */
#define HCONST_2 0x3c6ef372UL
#define HCONST_3 0xdaa66d2bUL
@@ -1112,10 +1154,15 @@ make_hash2(Eterm term)
#define HCONST_14 0xa708a81eUL
#define HCONST_15 0x454021d7UL
#define HCONST_16 0xe3779b90UL
+#define HCONST_17 0x81af1549UL
+#define HCONST_18 0x1fe68f02UL
+#define HCONST_19 0xbe1e08bbUL
+#define HCONST_20 0x5c558274UL
+#define HCONST_21 0xfa8cfc2dUL
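The five added constants continue the pattern documented further up in this function, (HCONST * n) mod 2^32. Assuming HCONST is the usual 32-bit golden-ratio constant 0x9e3779b9 (an assumption, but it reproduces every value listed here), they can be checked like this:

    #include <assert.h>
    #include <stdint.h>

    /* HCONST is assumed to be the 32-bit golden-ratio constant; the
     * products below then match HCONST_17 .. HCONST_21 in the patch. */
    #define HCONST 0x9e3779b9u

    static uint32_t hconst_n(uint32_t n) { return (uint32_t)(n * HCONST); }

    int main(void)
    {
        assert(hconst_n(17) == 0x81af1549u);
        assert(hconst_n(18) == 0x1fe68f02u);
        assert(hconst_n(19) == 0xbe1e08bbu);
        assert(hconst_n(20) == 0x5c558274u);
        assert(hconst_n(21) == 0xfa8cfc2du);
        return 0;
    }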
#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
-#define HASH_MAP_KEY (_make_header(2,_TAG_HEADER_REF))
-#define HASH_MAP_VAL (_make_header(3,_TAG_HEADER_REF))
+#define HASH_MAP_PAIR (_make_header(2,_TAG_HEADER_REF))
+#define HASH_CDR (_make_header(3,_TAG_HEADER_REF))
#define UINT32_HASH_2(Expr1, Expr2, AConst) \
do { \
@@ -1138,6 +1185,13 @@ make_hash2(Eterm term)
} while(0)
#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+
+#ifdef ARCH_64
+# define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst)
+#else
+# define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst)
+#endif
+
/* Optimization. Simple cases before declaration of estack. */
if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
switch (term & _TAG_IMMED1_MASK) {
@@ -1192,9 +1246,9 @@ make_hash2(Eterm term)
if (c > 0)
UINT32_HASH(sh, HCONST_4);
if (is_list(term)) {
- term = *ptr;
- tmp = *++ptr;
- ESTACK_PUSH(s, tmp);
+ tmp = CDR(ptr);
+ ESTACK_PUSH(s, tmp);
+ term = CAR(ptr);
}
}
break;
@@ -1211,51 +1265,92 @@ make_hash2(Eterm term)
UINT32_HASH(arity, HCONST_9);
if (arity == 0) /* Empty tuple */
goto hash2_common;
- for (i = arity; i >= 1; i--) {
- tmp = elem[i];
- ESTACK_PUSH(s, tmp);
+ for (i = arity; ; i--) {
+ term = elem[i];
+ if (i == 1)
+ break;
+ ESTACK_PUSH(s, term);
}
- goto hash2_common;
}
break;
case MAP_SUBTAG:
{
- map_t *mp = (map_t *)map_val(term);
+ flatmap_t *mp = (flatmap_t *)flatmap_val(term);
int i;
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
+ int size = flatmap_get_size(mp);
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
UINT32_HASH(size, HCONST_16);
if (size == 0) {
goto hash2_common;
}
- ESTACK_PUSH(s, hash_xor_values);
- ESTACK_PUSH(s, hash_xor_keys);
+ /* We want a portable hash function that is *independent* of
+ * the order in which keys and values are encountered.
+ * We therefore calculate context independent hashes for all
+ * key-value pairs and then xor them together.
+ */
+ ESTACK_PUSH(s, hash_xor_pairs);
ESTACK_PUSH(s, hash);
ESTACK_PUSH(s, HASH_MAP_TAIL);
hash = 0;
- hash_xor_keys = 0;
- hash_xor_values = 0;
- for (i = size - 1; i >= 0; i--) {
- tmp = vs[i];
- ESTACK_PUSH(s, HASH_MAP_VAL);
- ESTACK_PUSH(s, tmp);
- }
- /* We do not want to expose the tuple representation.
- * Do not push the keys as a tuple.
- */
+ hash_xor_pairs = 0;
for (i = size - 1; i >= 0; i--) {
- tmp = ks[i];
- ESTACK_PUSH(s, HASH_MAP_KEY);
- ESTACK_PUSH(s, tmp);
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, vs[i]);
+ ESTACK_PUSH(s, ks[i]);
}
goto hash2_common;
}
break;
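As the comment above notes, each key-value pair is hashed on its own and the per-pair hashes are xor:ed together, which makes the map hash independent of traversal order. A toy illustration of that property (hash_pair and its constants are made up for the example, not taken from ERTS):

    #include <assert.h>
    #include <stdint.h>

    /* Toy model: hash_pair() stands in for hashing one key-value pair in
     * isolation ("context independent"); xor makes the combination
     * commutative, so any traversal order gives the same map hash. */
    static uint32_t hash_pair(uint32_t key, uint32_t val)
    {
        uint32_t h = key * 0x9e3779b9u;
        return (h ^ val) * 0x85ebca6bu;
    }

    int main(void)
    {
        uint32_t fwd = 0, rev = 0;
        uint32_t keys[] = {1, 2, 3}, vals[] = {10, 20, 30};
        int i;

        for (i = 0; i < 3; i++)            /* pairs in one order ...       */
            fwd ^= hash_pair(keys[i], vals[i]);
        for (i = 2; i >= 0; i--)           /* ... and in the reverse order */
            rev ^= hash_pair(keys[i], vals[i]);
        assert(fwd == rev);                /* same hash either way         */
        return 0;
    }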
+ case HASHMAP_SUBTAG:
+ {
+ Eterm* ptr = boxed_val(term) + 1;
+ Uint size;
+ int i;
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ size = *ptr++;
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto hash2_common;
+ ESTACK_PUSH(s, hash_xor_pairs);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_pairs = 0;
+ }
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_NODE_ARRAY:
+ i = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erl_exit(1, "bad header");
+ }
+ while (i) {
+ if (is_list(*ptr)) {
+ Eterm* cons = list_val(*ptr);
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, CDR(cons));
+ ESTACK_PUSH(s, CAR(cons));
+ }
+ else {
+ ASSERT(is_boxed(*ptr));
+ ESTACK_PUSH(s, *ptr);
+ }
+ i--; ptr++;
+ }
+ goto hash2_common;
+ }
+ break;
case EXPORT_SUBTAG:
{
Export* ep = *((Export **) (export_val(term) + 1));
-
UINT32_HASH_2
(ep->code[2],
atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
@@ -1270,7 +1365,6 @@ make_hash2(Eterm term)
{
ErlFunThing* funp = (ErlFunThing *) fun_val(term);
Uint num_free = funp->num_free;
-
UINT32_HASH_2
(num_free,
atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
@@ -1351,7 +1445,8 @@ make_hash2(Eterm term)
do {
Uint t;
Uint32 x, y;
- t = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ ASSERT(i < n);
+ t = BIG_DIGIT(ptr, i++);
x = t & 0xffffffff;
y = t >> 32;
UINT32_HASH_2(x, y, con);
@@ -1459,20 +1554,397 @@ make_hash2(Eterm term)
switch (term) {
case HASH_MAP_TAIL: {
hash = (Uint32) ESTACK_POP(s);
- UINT32_HASH(hash_xor_keys, HCONST_16);
- UINT32_HASH(hash_xor_values, HCONST_16);
- hash_xor_keys = (Uint32) ESTACK_POP(s);
- hash_xor_values = (Uint32) ESTACK_POP(s);
+ UINT32_HASH(hash_xor_pairs, HCONST_19);
+ hash_xor_pairs = (Uint32) ESTACK_POP(s);
goto hash2_common;
}
- case HASH_MAP_KEY:
- hash_xor_keys ^= hash;
+ case HASH_MAP_PAIR:
+ hash_xor_pairs ^= hash;
hash = 0;
goto hash2_common;
- case HASH_MAP_VAL:
- hash_xor_values ^= hash;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* Term hash function for internal use.
+ *
+ * Limitation #1: Is not "portable" in any way between different VM instances.
+ *
+ * Limitation #2: The hash value is only valid as long as the term exists
+ * somewhere in the VM. Why? Because external pids, ports and refs are hashed
+ * by mixing the node *pointer* value. If a node disappears and later reappears
+ * with a new ErlNode struct, externals from that node will hash differently
+ * than before.
+ *
+ * One IMPORTANT property must hold (for the HAMT):
+ * EVERY BIT of the term that is significant for equality (see EQ)
+ * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
+ * chance of hashing differently when salted: hash([Salt|A]) vs hash([Salt|B]).
+ *
+ * This is why we cannot use cached hash values for atoms, for example.
+ *
+ */
+
+#define CONST_HASH(AConst) \
+do { /* Lightweight mixing of constant (type info) */ \
+ hash ^= AConst; \
+ hash = (hash << 17) ^ (hash >> (32-17)); \
+} while (0)
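Since hash is a Uint32, the two shifts in CONST_HASH touch disjoint bits, so the macro amounts to "xor in the type constant, then rotate left by 17". A small equivalence check (illustration only, not a proposed change):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned r)
    {
        return (x << r) | (x >> (32 - r));
    }

    /* For a 32-bit hash, (h << 17) and (h >> 15) occupy disjoint bit
     * ranges, so their xor equals a plain rotate left by 17. */
    static uint32_t const_hash_step(uint32_t hash, uint32_t a_const)
    {
        return rotl32(hash ^ a_const, 17);
    }

    int main(void)
    {
        uint32_t h = 0xdeadbeefu, c = 0x81af1549u;   /* HCONST_17 */
        uint32_t macro_style =
            (uint32_t)(((h ^ c) << 17) ^ ((h ^ c) >> (32 - 17)));
        assert(const_hash_step(h, c) == macro_style);
        return 0;
    }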
+
+Uint32
+make_internal_hash(Eterm term)
+{
+ Uint32 hash;
+ Uint32 hash_xor_pairs;
+
+ ERTS_UNDEF(hash_xor_pairs, 0);
+
+ /* Optimization. Simple cases before declaration of estack. */
+ if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
+ hash = 0;
+ #if ERTS_SIZEOF_ETERM == 8
+ UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
+ #elif ERTS_SIZEOF_ETERM == 4
+ UINT32_HASH(term, HCONST);
+ #else
+ # error "No you don't"
+ #endif
+ return hash;
+ }
+ {
+ Eterm tmp;
+ DECLARE_ESTACK(s);
+
+ UseTmpHeapNoproc(2);
+ hash = 0;
+ for (;;) {
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_LIST:
+ {
+ int c = 0;
+ Uint32 sh = 0;
+ Eterm* ptr = list_val(term);
+ while (is_byte(*ptr)) {
+ /* Optimization for strings. */
+ sh = (sh << 8) + unsigned_val(*ptr);
+ if (c == 3) {
+ UINT32_HASH(sh, HCONST_4);
+ c = sh = 0;
+ } else {
+ c++;
+ }
+ term = CDR(ptr);
+ if (is_not_list(term))
+ break;
+ ptr = list_val(term);
+ }
+ if (c > 0)
+ UINT32_HASH(sh, HCONST_4);
+ if (is_list(term)) {
+ tmp = CDR(ptr);
+ CONST_HASH(HCONST_17); /* Hash CAR in cons cell */
+ ESTACK_PUSH(s, tmp);
+ if (is_not_list(tmp)) {
+ ESTACK_PUSH(s, HASH_CDR);
+ }
+ term = CAR(ptr);
+ }
+ }
+ break;
+ case TAG_PRIMARY_BOXED:
+ {
+ Eterm hdr = *boxed_val(term);
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ {
+ int i;
+ int arity = header_arity(hdr);
+ Eterm* elem = tuple_val(term);
+ UINT32_HASH(arity, HCONST_9);
+ if (arity == 0) /* Empty tuple */
+ goto pop_next;
+ for (i = arity; ; i--) {
+ term = elem[i];
+ if (i == 1)
+ break;
+ ESTACK_PUSH(s, term);
+ }
+ }
+ break;
+ case MAP_SUBTAG:
+ {
+ flatmap_t *mp = (flatmap_t *)flatmap_val(term);
+ int i;
+ int size = flatmap_get_size(mp);
+ Eterm *ks = flatmap_get_keys(mp);
+ Eterm *vs = flatmap_get_values(mp);
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0) {
+ goto pop_next;
+ }
+ /* We want a hash function that is *independent* of
+ * the order in which keys and values are encountered.
+ * We therefore calculate context independent hashes for all
+ * key-value pairs and then xor them together.
+ */
+ ESTACK_PUSH(s, hash_xor_pairs);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_pairs = 0;
+ for (i = size - 1; i >= 0; i--) {
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, vs[i]);
+ ESTACK_PUSH(s, ks[i]);
+ }
+ goto pop_next;
+ }
+ break;
+ case HASHMAP_SUBTAG:
+ {
+ Eterm* ptr = boxed_val(term) + 1;
+ Uint size;
+ int i;
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ size = *ptr++;
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0)
+ goto pop_next;
+ ESTACK_PUSH(s, hash_xor_pairs);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_pairs = 0;
+ }
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ case HAMT_SUBTAG_NODE_ARRAY:
+ i = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ case HAMT_SUBTAG_NODE_BITMAP:
+ i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ default:
+ erl_exit(1, "bad header");
+ }
+ while (i) {
+ if (is_list(*ptr)) {
+ Eterm* cons = list_val(*ptr);
+ ESTACK_PUSH(s, HASH_MAP_PAIR);
+ ESTACK_PUSH(s, CDR(cons));
+ ESTACK_PUSH(s, CAR(cons));
+ }
+ else {
+ ASSERT(is_boxed(*ptr));
+ ESTACK_PUSH(s, *ptr);
+ }
+ i--; ptr++;
+ }
+ goto pop_next;
+ }
+ break;
+ case EXPORT_SUBTAG:
+ {
+ Export* ep = *((Export **) (export_val(term) + 1));
+ /* Assumes Export entries never move */
+ POINTER_HASH(ep, HCONST_14);
+ goto pop_next;
+ }
+
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+ UINT32_HASH_2(num_free, funp->fe->module, HCONST_20);
+ UINT32_HASH_2(funp->fe->old_index, funp->fe->old_uniq, HCONST_21);
+ if (num_free == 0) {
+ goto pop_next;
+ } else {
+ Eterm* bptr = funp->env + num_free - 1;
+ while (num_free-- > 1) {
+ term = *bptr--;
+ ESTACK_PUSH(s, term);
+ }
+ term = *bptr;
+ }
+ }
+ break;
+ case REFC_BINARY_SUBTAG:
+ case HEAP_BINARY_SUBTAG:
+ case SUB_BINARY_SUBTAG:
+ {
+ byte* bptr;
+ unsigned sz = binary_size(term);
+ Uint32 con = HCONST_13 + hash;
+ Uint bitoffs;
+ Uint bitsize;
+
+ ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
+ if (sz == 0 && bitsize == 0) {
+ hash = con;
+ } else {
+ if (bitoffs == 0) {
+ hash = block_hash(bptr, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ } else {
+ byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
+ sz + (bitsize != 0));
+ erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
+ hash = block_hash(buf, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ }
+ }
+ goto pop_next;
+ }
+ break;
+ case POS_BIG_SUBTAG:
+ case NEG_BIG_SUBTAG:
+ {
+ Eterm* ptr = big_val(term);
+ Uint i = 0;
+ Uint n = BIG_SIZE(ptr);
+ Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
+#if D_EXP == 16
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#elif D_EXP == 32
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#elif D_EXP == 64
+ do {
+ Uint t;
+ Uint32 x, y;
+ ASSERT(i < n);
+ t = BIG_DIGIT(ptr, i++);
+ x = t & 0xffffffff;
+ y = t >> 32;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#else
+#error "unsupported D_EXP size"
+#endif
+ goto pop_next;
+ }
+ break;
+ case REF_SUBTAG:
+ UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
+ ASSERT(internal_ref_no_of_numbers(term) == 3);
+ UINT32_HASH_2(internal_ref_numbers(term)[1],
+ internal_ref_numbers(term)[2], HCONST_8);
+ goto pop_next;
+
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing* thing = external_thing_ptr(term);
+
+ ASSERT(external_thing_ref_no_of_numbers(thing) == 3);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_7);
+ UINT32_HASH(external_thing_ref_numbers(thing)[0], HCONST_7);
+ #else
+ UINT32_HASH_2(thing->node,
+ external_thing_ref_numbers(thing)[0], HCONST_7);
+ #endif
+ UINT32_HASH_2(external_thing_ref_numbers(thing)[1],
+ external_thing_ref_numbers(thing)[2], HCONST_8);
+ goto pop_next;
+ }
+ case EXTERNAL_PID_SUBTAG: {
+ ExternalThing* thing = external_thing_ptr(term);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_5);
+ UINT32_HASH(thing->data.ui[0], HCONST_5);
+ #else
+ UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_5);
+ #endif
+ goto pop_next;
+ }
+ case EXTERNAL_PORT_SUBTAG: {
+ ExternalThing* thing = external_thing_ptr(term);
+ /* See limitation #2 */
+ #ifdef ARCH_64
+ POINTER_HASH(thing->node, HCONST_6);
+ UINT32_HASH(thing->data.ui[0], HCONST_6);
+ #else
+ UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_6);
+ #endif
+ goto pop_next;
+ }
+ case FLOAT_SUBTAG:
+ {
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+ UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
+ goto pop_next;
+ }
+
+ default:
+ erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ #if ERTS_SIZEOF_ETERM == 8
+ UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
+ #else
+ UINT32_HASH(term, HCONST);
+ #endif
+ goto pop_next;
+
+ default:
+ erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+
+ pop_next:
+ if (ESTACK_ISEMPTY(s)) {
+ DESTROY_ESTACK(s);
+ UnUseTmpHeapNoproc(2);
+ return hash;
+ }
+
+ term = ESTACK_POP(s);
+
+ switch (term) {
+ case HASH_MAP_TAIL: {
+ hash = (Uint32) ESTACK_POP(s);
+ UINT32_HASH(hash_xor_pairs, HCONST_19);
+ hash_xor_pairs = (Uint32) ESTACK_POP(s);
+ goto pop_next;
+ }
+ case HASH_MAP_PAIR:
+ hash_xor_pairs ^= hash;
hash = 0;
- goto hash2_common;
+ goto pop_next;
+
+ case HASH_CDR:
+ CONST_HASH(HCONST_18); /* Hash CDR in cons cell */
+ goto pop_next;
default:
break;
}
@@ -1480,9 +1952,10 @@ make_hash2(Eterm term)
}
}
+#undef CONST_HASH
#undef HASH_MAP_TAIL
-#undef HASH_MAP_KEY
-#undef HASH_MAP_VAL
+#undef HASH_MAP_PAIR
+#undef HASH_CDR
#undef UINT32_HASH_2
#undef UINT32_HASH
@@ -1699,21 +2172,9 @@ tail_recur:
break;
case MAP_DEF:
+ case HASHMAP_DEF:
{
- map_t *mp = (map_t *)map_val(term);
- int size = map_get_size(mp);
- Eterm *ks = map_get_keys(mp);
- Eterm *vs = map_get_values(mp);
-
- /* Use a prime with size to remedy some of
- * the {} and <<>> hash problems */
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
- if (size == 0)
- break;
-
- /* push values first */
- WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
- WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
break;
}
case TUPLE_DEF:
@@ -2122,13 +2583,13 @@ tailrecur_ne:
}
case MAP_SUBTAG:
{
- aa = map_val_rel(a, a_base);
+ aa = flatmap_val_rel(a, a_base);
if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa)
goto not_equal;
- bb = map_val_rel(b,b_base);
- sz = map_get_size((map_t*)aa);
+ bb = flatmap_val_rel(b,b_base);
+ sz = flatmap_get_size((flatmap_t*)aa);
- if (sz != map_get_size((map_t*)bb)) goto not_equal;
+ if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal;
if (sz == 0) goto pop_next;
aa += 2;
@@ -2327,6 +2788,32 @@ tailrecur_ne:
}
break; /* not equal */
}
+ case HASHMAP_SUBTAG:
+ {
+ if (!is_boxed(b) || *boxed_val_rel(b,b_base) != hdr)
+ goto not_equal;
+
+ aa = hashmap_val_rel(a, a_base) + 1;
+ bb = hashmap_val_rel(b, b_base) + 1;
+ switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY:
+ aa++; bb++;
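+ /* fall through */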
+ case HAMT_SUBTAG_NODE_ARRAY:
+ sz = 16;
+ break;
+ case HAMT_SUBTAG_HEAD_BITMAP:
+ aa++; bb++;
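+ /* fall through */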
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz > 0 && sz < 16);
+ break;
+ default:
+ erl_exit(1, "Unknown hashmap subsubtag\n");
+ }
+ goto term_array;
+ }
+ default:
+ ASSERT(!"Unknown boxed subtab in EQ");
}
break;
}
@@ -2450,7 +2937,18 @@ Sint erts_cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact)
Sint erts_cmp(Eterm a, Eterm b, int exact)
#endif
{
- DECLARE_WSTACK(stack);
+#define PSTACK_TYPE struct erts_cmp_hashmap_state
+ struct erts_cmp_hashmap_state {
+ Sint wstack_rollback;
+ int was_exact;
+ Eterm *ap;
+ Eterm *bp;
+ Eterm min_key;
+ Sint cmp_res; /* result so far -1,0,+1 */
+ };
+ PSTACK_DECLARE(hmap_stack, 1);
+ WSTACK_DECLARE(stack);
+ WSTACK_DECLARE(b_stack); /* only used by hashmaps */
Eterm* aa;
Eterm* bb;
int i;
@@ -2466,6 +2964,26 @@ Sint erts_cmp(Eterm a, Eterm b, int exact)
Uint32 *anum;
Uint32 *bnum;
+/* The WSTACK contains naked Eterms and Operations marked with header-tags */
+#define OP_BITS 4
+#define OP_MASK 0xF
+#define TERM_ARRAY_OP 0
+#define SWITCH_EXACT_OFF_OP 1
+#define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2
+#define HASHMAP_PHASE1_IS_MIN_KEY 3
+#define HASHMAP_PHASE1_CMP_VALUES 4
+#define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5
+#define HASHMAP_PHASE2_IS_MIN_KEY_A 6
+#define HASHMAP_PHASE2_IS_MIN_KEY_B 7
+
+
+#define OP_WORD(OP) (((OP) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
+#define TERM_ARRAY_OP_WORD(SZ) OP_WORD(((SZ) << OP_BITS) | TERM_ARRAY_OP)
+
+#define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK)
+#define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE)))
+
+
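To make the op encoding above concrete: assuming the conventional ERTS tag layout (_TAG_PRIMARY_SIZE == 2 and TAG_PRIMARY_HEADER == 0, which is an assumption here), a TERM_ARRAY op carrying the length 3 packs to 0xC0 and unpacks back as shown below (worked example, not VM code):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed ERTS tag parameters (see erl_term.h). */
    #define TAG_PRIMARY_HEADER 0u
    #define TAG_PRIMARY_SIZE   2u
    #define OP_BITS            4u
    #define OP_MASK            0xFu
    #define TERM_ARRAY_OP      0u

    int main(void)
    {
        uintptr_t sz   = 3;
        uintptr_t word = ((((sz << OP_BITS) | TERM_ARRAY_OP)
                           << TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER);

        assert(word == 0xC0);                                  /* 3 -> 0xC0   */
        assert(((word >> TAG_PRIMARY_SIZE) & OP_MASK) == TERM_ARRAY_OP);
        assert((word >> (OP_BITS + TAG_PRIMARY_SIZE)) == sz);  /* length back */
        return 0;
    }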
#define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
#define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
@@ -2481,6 +2999,8 @@ Sint erts_cmp(Eterm a, Eterm b, int exact)
} while (0)
+bodyrecur:
+ j = 0;
tailrecur:
if (is_same(a,a_base,b,b_base)) { /* Equal values or pointers. */
goto pop_next;
@@ -2608,24 +3128,83 @@ tailrecur_ne:
++bb;
goto term_array;
case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
- if (!is_map_rel(b,b_base)) {
+ if (!is_flatmap_rel(b,b_base)) {
a_tag = MAP_DEF;
goto mixed_types;
}
- aa = (Eterm *)map_val_rel(a,a_base);
- bb = (Eterm *)map_val_rel(b,b_base);
+ aa = (Eterm *)flatmap_val_rel(a,a_base);
+ bb = (Eterm *)flatmap_val_rel(b,b_base);
- i = map_get_size((map_t*)aa);
- if (i != map_get_size((map_t*)bb)) {
- RETURN_NEQ((int)(i - map_get_size((map_t*)bb)));
+ i = flatmap_get_size((flatmap_t*)aa);
+ if (i != flatmap_get_size((flatmap_t*)bb)) {
+ RETURN_NEQ((int)(i - flatmap_get_size((flatmap_t*)bb)));
}
if (i == 0) {
goto pop_next;
}
aa += 2;
bb += 2;
- i += 1; /* increment for tuple-keys */
- goto term_array;
+ if (exact) {
+ i += 1; /* increment for tuple-keys */
+ goto term_array;
+ }
+ else {
+ /* Value array */
+ WSTACK_PUSH3(stack, (UWord)(bb+1), (UWord)(aa+1), TERM_ARRAY_OP_WORD(i));
+ /* Switch back from 'exact' key compare */
+ WSTACK_PUSH(stack, OP_WORD(SWITCH_EXACT_OFF_OP));
+ /* Now do 'exact' compare of key tuples */
+ a = *aa;
+ b = *bb;
+ exact = 1;
+ goto bodyrecur;
+ }
+
+ case (_TAG_HEADER_HASHMAP >> _TAG_PRIMARY_SIZE) :
+ {
+ struct erts_cmp_hashmap_state* sp;
+ if (!is_hashmap_rel(b,b_base)) {
+ a_tag = HASHMAP_DEF;
+ goto mixed_types;
+ }
+ i = hashmap_size_rel(a,a_base) - hashmap_size_rel(b,b_base);
+ if (i) {
+ RETURN_NEQ(i);
+ }
+ if (hashmap_size_rel(a,a_base) == 0) {
+ goto pop_next;
+ }
+
+ /* Hashmap compare strategy:
+ Phase 1. While keys are identical
+ Do synchronous stepping through the leaves of both trees in
+ hash order. Maintain the value compare result of the minimal key.
+
+ Phase 2. If a key difference was found in phase 1
+ Ignore values from now on.
+ Continue iterating the trees by always advancing the one
+ lagging behind hash-wise. Identical keys are skipped.
+ A minimal key can only be a candidate tie-breaker if we
+ have passed that hash value in the other tree (which means
+ the key does not exist in the other tree).
+ (A simplified stand-alone sketch of this strategy follows
+ this case below.)
+ */
+
+ sp = PSTACK_PUSH(hmap_stack);
+ hashmap_iterator_init(&stack, a, 0);
+ hashmap_iterator_init(&b_stack, b, 0);
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ sp->cmp_res = 0;
+ ASSERT(sp->ap && sp->bp);
+
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ sp->was_exact = exact;
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
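The strategy described in the comment above is implemented below as a WSTACK-driven state machine, which is hard to read linearly. As a reading aid, here is a simplified stand-alone model of the same two phases over flat arrays of leaves sorted by key hash; every name and helper in it is an illustrative assumption (the real code walks the HAMT with hashmap_iterator_next and PSTACK/WSTACK ops, and uses the full term ordering rather than int compares):

    typedef struct { int key, val; } Leaf;                  /* toy leaf */

    static int icmp(int x, int y) { return (x > y) - (x < y); }
    static unsigned khash(int key) { return (unsigned)key * 2654435769u; }
    static int hcmp(int ka, int kb)
    {
        unsigned x = khash(ka), y = khash(kb);
        return (x > y) - (x < y);
    }

    /* Both maps have the same size n and their leaves are pre-sorted by
     * khash(key), mimicking hashmap_iterator_next(). Hash collisions are
     * glossed over here. Returns <0, 0 or >0 like erts_cmp(). */
    static int cmp_hashmaps(const Leaf *a, const Leaf *b, int n)
    {
        int i = 0, j = 0, cmp_res = 0, have_min = 0, min_key = 0;

        /* Phase 1: keys identical so far; remember the value compare
         * result at the smallest key whose values differ. */
        for (; i < n; i++, j++) {
            int v;
            if (icmp(a[i].key, b[j].key) != 0)
                goto phase2;
            v = icmp(a[i].val, b[j].val);
            if (v != 0 && (!have_min || icmp(a[i].key, min_key) < 0)) {
                have_min = 1; min_key = a[i].key; cmp_res = v;
            }
        }
        return cmp_res;               /* identical key sets: values decide */

    phase2:
        /* Phase 2: values no longer matter; track the smallest key that
         * exists in only one of the maps, always advancing the side that
         * lags behind in hash order (shared keys are skipped). */
        have_min = 0; cmp_res = 0;
        while (i < n || j < n) {
            if (i < n && j < n && icmp(a[i].key, b[j].key) == 0) {
                i++; j++;                               /* shared key, skip */
            } else if (j >= n || (i < n && hcmp(a[i].key, b[j].key) < 0)) {
                /* a[i].key's hash has been passed in b: key only in a */
                if (!have_min || icmp(a[i].key, min_key) < 0) {
                    have_min = 1; min_key = a[i].key; cmp_res = -1;
                }
                i++;
            } else {
                if (!have_min || icmp(b[j].key, min_key) < 0) {
                    have_min = 1; min_key = b[j].key; cmp_res = 1;
                }
                j++;
            }
        }
        return cmp_res;   /* owner of the smallest unmatched key is smaller */
    }

    int main(void)
    {   /* leaves listed in khash order: khash(2) < khash(1) */
        Leaf x[] = {{2,20},{1,10}}, y[] = {{2,21},{1,10}};
        return cmp_hashmaps(x, y, 2) < 0 ? 0 : 1;  /* x < y: 20 < 21 at key 2 */
    }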
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
if (!is_float_rel(b,b_base)) {
a_tag = FLOAT_DEF;
@@ -2985,8 +3564,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
goto not_equal;
}
} else {
- /* (ab)Use TAG_PRIMARY_HEADER to recognize a term_array */
- WSTACK_PUSH3(stack, i, (UWord)bb, (UWord)aa | TAG_PRIMARY_HEADER);
+ WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, TERM_ARRAY_OP_WORD(i));
goto tailrecur_ne;
}
}
@@ -2998,22 +3576,179 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
pop_next:
if (!WSTACK_ISEMPTY(stack)) {
UWord something = WSTACK_POP(stack);
- if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
- aa = (Eterm*) something;
- bb = (Eterm*) WSTACK_POP(stack);
- i = WSTACK_POP(stack);
- goto term_array;
+ struct erts_cmp_hashmap_state* sp;
+ if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */
+ switch (GET_OP(something)) {
+ case TERM_ARRAY_OP:
+ i = GET_OP_ARG(something);
+ aa = (Eterm*)WSTACK_POP(stack);
+ bb = (Eterm*) WSTACK_POP(stack);
+ goto term_array;
+
+ case SWITCH_EXACT_OFF_OP:
+ /* Done with exact compare of map keys, switch back */
+ ASSERT(exact);
+ exact = 0;
+ goto pop_next;
+
+ case HASHMAP_PHASE1_ARE_KEYS_EQUAL: {
+ sp = PSTACK_TOP(hmap_stack);
+ if (j) {
+ /* Key diff found, enter phase 2 */
+ if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
+ sp->min_key = CAR(sp->ap);
+ sp->cmp_res = -1;
+ sp->ap = hashmap_iterator_next(&stack);
+ }
+ else {
+ sp->min_key = CAR(sp->bp);
+ sp->cmp_res = 1;
+ sp->bp = hashmap_iterator_next(&b_stack);
+ }
+ exact = 1; /* only exact key compares in phase 2 */
+ goto case_HASHMAP_PHASE2_LOOP;
+ }
+
+ /* No key diff found so far, compare values if min key */
+
+ if (sp->cmp_res) {
+ a = CAR(sp->ap);
+ b = sp->min_key;
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ /* no min key-value found yet */
+ a = CDR(sp->ap);
+ b = CDR(sp->bp);
+ exact = sp->was_exact;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ case HASHMAP_PHASE1_IS_MIN_KEY:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ a = CDR(sp->ap);
+ b = CDR(sp->bp);
+ exact = sp->was_exact;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ goto case_HASHMAP_PHASE1_LOOP;
+
+ case HASHMAP_PHASE1_CMP_VALUES:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j) {
+ sp->cmp_res = j;
+ sp->min_key = CAR(sp->ap);
+ }
+ case_HASHMAP_PHASE1_LOOP:
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ if (!sp->ap) {
+ /* end of maps with identical keys */
+ ASSERT(!sp->bp);
+ j = sp->cmp_res;
+ exact = sp->was_exact;
+ (void) PSTACK_POP(hmap_stack);
+ ON_CMP_GOTO(j);
+ }
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ exact = 1;
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+
+ case_HASHMAP_PHASE2_LOOP:
+ if (sp->ap && sp->bp) {
+ a = CAR(sp->ap);
+ b = CAR(sp->bp);
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL));
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ goto case_HASHMAP_PHASE2_NEXT_STEP;
+
+ case HASHMAP_PHASE2_ARE_KEYS_EQUAL:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j == 0) {
+ /* keys are equal, skip them */
+ sp->ap = hashmap_iterator_next(&stack);
+ sp->bp = hashmap_iterator_next(&b_stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+ }
+ /* fall through */
+ case_HASHMAP_PHASE2_NEXT_STEP:
+ if (sp->ap || sp->bp) {
+ if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
+ ASSERT(sp->ap);
+ a = CAR(sp->ap);
+ b = sp->min_key;
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A));
+ }
+ else { /* hash_cmp > 0 */
+ ASSERT(sp->bp);
+ a = CAR(sp->bp);
+ b = sp->min_key;
+ ASSERT(exact);
+ WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B));
+ }
+ sp->wstack_rollback = WSTACK_COUNT(stack);
+ goto bodyrecur;
+ }
+ /* End of both maps */
+ j = sp->cmp_res;
+ exact = sp->was_exact;
+ (void) PSTACK_POP(hmap_stack);
+ ON_CMP_GOTO(j);
+
+ case HASHMAP_PHASE2_IS_MIN_KEY_A:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ sp->min_key = CAR(sp->ap);
+ sp->cmp_res = -1;
+ }
+ sp->ap = hashmap_iterator_next(&stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+
+ case HASHMAP_PHASE2_IS_MIN_KEY_B:
+ sp = PSTACK_TOP(hmap_stack);
+ if (j < 0) {
+ sp->min_key = CAR(sp->bp);
+ sp->cmp_res = 1;
+ }
+ sp->bp = hashmap_iterator_next(&b_stack);
+ goto case_HASHMAP_PHASE2_LOOP;
+
+ default:
+ ASSERT(!"Invalid cmp op");
+ } /* switch */
}
a = (Eterm) something;
b = (Eterm) WSTACK_POP(stack);
goto tailrecur;
}
- DESTROY_WSTACK(stack);
+ ASSERT(PSTACK_IS_EMPTY(hmap_stack));
+ PSTACK_DESTROY(hmap_stack);
+ WSTACK_DESTROY(stack);
+ WSTACK_DESTROY(b_stack);
return 0;
not_equal:
- DESTROY_WSTACK(stack);
+ if (!PSTACK_IS_EMPTY(hmap_stack)) {
+ WSTACK_ROLLBACK(stack, PSTACK_TOP(hmap_stack)->wstack_rollback);
+ goto pop_next;
+ }
+ PSTACK_DESTROY(hmap_stack);
+ WSTACK_DESTROY(stack);
+ WSTACK_DESTROY(b_stack);
return j;
#undef CMP_NODES
@@ -3776,7 +4511,7 @@ erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
res->timer.timeout_func = timeout_func;
res->timer.timer_ref = timer_ref;
res->timer.id = id;
- res->timer.tm.active = 0; /* MUST be initalized */
+ erts_init_timer(&res->timer.tm);
ASSERT(!*timer_ref);
@@ -4353,8 +5088,8 @@ erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
*/
Uint64 erts_timestamp_millis(void)
{
-#ifdef HAVE_GETHRTIME
- return (Uint64) (sys_gethrtime() / 1000000);
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
#else
Uint64 res;
SysTimeval tv;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 3c6922eb8e..5196eb51c6 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -3957,9 +3957,9 @@ static int inet_init()
if (0 != erl_drv_tsd_key_create("inet_buffer_stack_key", &buffer_stack_key))
goto error;
- ASSERT(sizeof(struct in_addr) == 4);
+ ERTS_CT_ASSERT(sizeof(struct in_addr) == 4);
# if defined(HAVE_IN6) && defined(AF_INET6)
- ASSERT(sizeof(struct in6_addr) == 16);
+ ERTS_CT_ASSERT(sizeof(struct in6_addr) == 16);
# endif
INIT_ATOM(ok);
@@ -4005,7 +4005,7 @@ static int inet_init()
#ifdef HAVE_SCTP
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
- ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
+ ERTS_CT_ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
# if defined(HAVE_SCTP_BINDX)
p_sctp_bindx = sctp_bindx;
# if defined(HAVE_SCTP_PEELOFF)
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index 3143e4511d..f7b2d91d23 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -62,8 +62,17 @@
#define CRC32_COMBINE 23
#define ADLER32_COMBINE 24
+#define INFLATE_CHUNK 25
+
+
#define DEFAULT_BUFSZ 4000
+/* This flag is used in the same places where zlib return codes
+ * (Z_OK, Z_STREAM_END, Z_NEED_DICT) are, so we set it to a
+ * relatively large value to avoid possible value clashes in the
+ * future.
+ */
+#define INFLATE_HAS_MORE 100
+
static int zlib_init(void);
static ErlDrvData zlib_start(ErlDrvPort port, char* buf);
static void zlib_stop(ErlDrvData e);
@@ -295,6 +304,58 @@ static int zlib_inflate(ZLibData* d, int flush)
return res;
}
+static int zlib_inflate_chunk(ZLibData* d)
+{
+ int res = Z_OK;
+
+ if ((d->bin == NULL) && (zlib_output_init(d) < 0)) {
+ errno = ENOMEM;
+ return Z_ERRNO;
+ }
+
+ while ((driver_sizeq(d->port) > 0) && (d->s.avail_out > 0) &&
+ (res != Z_STREAM_END)) {
+ int vlen;
+ SysIOVec* iov = driver_peekq(d->port, &vlen);
+ int len;
+
+ d->s.next_in = iov[0].iov_base;
+ d->s.avail_in = iov[0].iov_len;
+ while((d->s.avail_in > 0) && (d->s.avail_out > 0) && (res != Z_STREAM_END)) {
+ res = inflate(&d->s, Z_NO_FLUSH);
+ if (res == Z_NEED_DICT) {
+ /* Essential to eat the header bytes that zlib has looked at */
+ len = iov[0].iov_len - d->s.avail_in;
+ driver_deq(d->port, len);
+ return res;
+ }
+ if (res == Z_BUF_ERROR) {
+ /* More output looked possible, but there actually was none */
+ res = Z_OK;
+ }
+ else if (res < 0) {
+ return res;
+ }
+ }
+ len = iov[0].iov_len - d->s.avail_in;
+ driver_deq(d->port, len);
+ }
+
+ /* We get here because all input was consumed, end of stream was
+ * reached, or the output buffer is full */
+ if (d->want_crc) {
+ d->crc = crc32(d->crc, (unsigned char*) d->bin->orig_bytes,
+ d->binsz - d->s.avail_out);
+ }
+ zlib_output(d);
+ if ((res == Z_OK) && (d->s.avail_in > 0))
+ res = INFLATE_HAS_MORE;
+ else if (res == Z_STREAM_END) {
+ d->inflate_eos_seen = 1;
+ }
+ return res;
+}
+
static int zlib_deflate(ZLibData* d, int flush)
{
int res = Z_OK;
@@ -568,6 +629,18 @@ static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *bu
return zlib_return(res, rbuf, rlen);
}
+ case INFLATE_CHUNK:
+ if (d->state != ST_INFLATE) goto badarg;
+ if (len != 0) goto badarg;
+ res = zlib_inflate_chunk(d);
+ if (res == INFLATE_HAS_MORE) {
+ return zlib_value2(4, 0, rbuf, rlen);
+ } else if (res == Z_NEED_DICT) {
+ return zlib_value2(3, d->s.adler, rbuf, rlen);
+ } else {
+ return zlib_return(res, rbuf, rlen);
+ }
+
case GET_QSIZE:
return zlib_value(driver_sizeq(d->port), rbuf, rlen);
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index 491e0a090e..a5960716f2 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -32,6 +32,10 @@ static ErlDrvData ttysl_start(ErlDrvPort, char*);
#ifdef HAVE_TERMCAP /* else make an empty driver that can not be opened */
+#ifndef WANT_NONBLOCKING
+#define WANT_NONBLOCKING
+#endif
+
#include "sys.h"
#include <ctype.h>
#include <stdlib.h>
@@ -39,6 +43,7 @@ static ErlDrvData ttysl_start(ErlDrvPort, char*);
#include <string.h>
#include <signal.h>
#include <fcntl.h>
+#include <limits.h>
#include <locale.h>
#include <unistd.h>
#include <termios.h>
@@ -57,6 +62,14 @@ static ErlDrvData ttysl_start(ErlDrvPort, char*);
#include <langinfo.h>
#endif
+#if defined IOV_MAX
+#define MAXIOV IOV_MAX
+#elif defined UIO_MAXIOV
+#define MAXIOV UIO_MAXIOV
+#else
+#define MAXIOV 16
+#endif
+
#define TRUE 1
#define FALSE 0
@@ -80,12 +93,15 @@ static volatile int cols_needs_update = FALSE;
#define OP_INSC 2
#define OP_DELC 3
#define OP_BEEP 4
+#define OP_PUTC_SYNC 5
/* Control op */
#define CTRL_OP_GET_WINSIZE 100
#define CTRL_OP_GET_UNICODE_STATE 101
#define CTRL_OP_SET_UNICODE_STATE 102
-
+/* We use 1024 as the buf size as that was the default buf size of FILE streams
+ on all platforms that I checked. */
+#define TTY_BUFFSIZE 1024
static int lbuf_size = BUFSIZ;
static Uint32 *lbuf; /* The current line buffer */
@@ -113,13 +129,19 @@ static int lpos; /* The current "cursor position" in the line buf
/* Main interface functions. */
static void ttysl_stop(ErlDrvData);
static void ttysl_from_erlang(ErlDrvData, char*, ErlDrvSizeT);
+static void ttysl_to_tty(ErlDrvData, ErlDrvEvent);
+static void ttysl_flush_tty(ErlDrvData);
static void ttysl_from_tty(ErlDrvData, ErlDrvEvent);
static void ttysl_stop_select(ErlDrvEvent, void*);
static Sint16 get_sint16(char*);
static ErlDrvPort ttysl_port;
static int ttysl_fd;
-static FILE *ttysl_out;
+static int ttysl_terminate = 0;
+static int ttysl_send_ok = 0;
+static ErlDrvBinary *putcbuf;
+static int putcpos;
+static int putclen;
/* Functions that work on the line buffer. */
static int start_lbuf(void);
@@ -201,22 +223,22 @@ struct erl_drv_entry ttsl_driver_entry = {
IF_IMPL(ttysl_stop),
IF_IMPL(ttysl_from_erlang),
IF_IMPL(ttysl_from_tty),
- NULL,
- "tty_sl",
- NULL,
- NULL,
+ IF_IMPL(ttysl_to_tty),
+ "tty_sl", /* driver_name */
+ NULL, /* finish */
+ NULL, /* handle */
IF_IMPL(ttysl_control),
NULL, /* timeout */
NULL, /* outputv */
NULL, /* ready_async */
- NULL, /* flush */
+ IF_IMPL(ttysl_flush_tty),
NULL, /* call */
NULL, /* event */
ERL_DRV_EXTENDED_MARKER,
ERL_DRV_EXTENDED_MAJOR_VERSION,
ERL_DRV_EXTENDED_MINOR_VERSION,
0, /* ERL_DRV_FLAGs */
- NULL,
+ NULL, /* handle2 */
NULL, /* process_exit */
IF_IMPL(ttysl_stop_select)
};
@@ -296,8 +318,7 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
return ERL_DRV_ERROR_GENERAL;
}
- /* Open the terminal and set the terminal */
- ttysl_out = fdopen(ttysl_fd, "w");
+ SET_NONBLOCKING(ttysl_fd);
#ifdef PRIMITIVE_UTF8_CHECK
setlocale(LC_CTYPE, ""); /* Set international environment,
@@ -317,8 +338,8 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
}
#endif
DEBUGLOG(("utf8_mode is %s\n",(utf8_mode) ? "on" : "off"));
- sys_sigset(SIGCONT, cont);
- sys_sigset(SIGWINCH, winch);
+ sys_signal(SIGCONT, cont);
+ sys_signal(SIGWINCH, winch);
driver_select(port, (ErlDrvEvent)(UWord)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 1);
ttysl_port = port;
@@ -400,12 +421,14 @@ static void ttysl_stop(ErlDrvData ttysl_data)
stop_lbuf();
stop_termcap();
tty_reset(ttysl_fd);
- driver_select(ttysl_port, (ErlDrvEvent)(UWord)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 0);
- sys_sigset(SIGCONT, SIG_DFL);
- sys_sigset(SIGWINCH, SIG_DFL);
+ driver_select(ttysl_port, (ErlDrvEvent)(UWord)ttysl_fd,
+ ERL_DRV_WRITE|ERL_DRV_READ|ERL_DRV_USE, 0);
+ sys_signal(SIGCONT, SIG_DFL);
+ sys_signal(SIGWINCH, SIG_DFL);
}
ttysl_port = (ErlDrvPort)-1;
ttysl_fd = -1;
+ ttysl_terminate = 0;
/* return TRUE; */
}
@@ -650,10 +673,26 @@ static int check_buf_size(byte *s, int n)
static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT count)
{
+ ErlDrvSizeT sz;
+
+ sz = driver_sizeq(ttysl_port);
+
+ putclen = count > TTY_BUFFSIZE ? TTY_BUFFSIZE : count;
+ putcbuf = driver_alloc_binary(putclen);
+ putcpos = 0;
+
if (lpos > MAXSIZE)
put_chars((byte*)"\n", 1);
switch (buf[0]) {
+ case OP_PUTC_SYNC:
+ /* Using sync means that we have to send an ok to the
+ controlling process for each command call. We delay
+ sending ok if the driver queue exceeds a certain size.
+ We do not set ourselves as a busy port, as this
+ could be very bad for user_drv if it gets blocked on
+ the port_command. */
+ /* fall through */
case OP_PUTC:
DEBUGLOG(("OP: Putc(%lu)",(unsigned long) count-1));
if (check_buf_size((byte*)buf+1, count-1) == 0)
@@ -678,10 +717,104 @@ static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT coun
/* Unknown op, just ignore. */
break;
}
- fflush(ttysl_out);
+
+ driver_enq_bin(ttysl_port,putcbuf,0,putcpos);
+
+ if (sz == 0) {
+ for (;;) {
+ int written, qlen;
+ SysIOVec *iov;
+
+ iov = driver_peekq(ttysl_port,&qlen);
+ if (iov)
+ written = writev(ttysl_fd, iov, qlen > MAXIOV ? MAXIOV : qlen);
+ else
+ written = 0;
+ if (written < 0) {
+ if (errno == EAGAIN) {
+ driver_select(ttysl_port,(ErlDrvEvent)(long)ttysl_fd,
+ ERL_DRV_USE|ERL_DRV_WRITE,1);
+ break;
+ } else {
+ /* we ignore all other errors */
+ break;
+ }
+ } else {
+ if (driver_deq(ttysl_port, written) == 0)
+ break;
+ }
+ }
+ }
+
+ if (buf[0] == OP_PUTC_SYNC) {
+ if (driver_sizeq(ttysl_port) > TTY_BUFFSIZE && !ttysl_terminate) {
+ /* We delay sending the ack until the buffer has been consumed */
+ ttysl_send_ok = 1;
+ } else {
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, driver_mk_port(ttysl_port),
+ ERL_DRV_ATOM, driver_mk_atom("ok"),
+ ERL_DRV_TUPLE, 2
+ };
+ ASSERT(ttysl_send_ok == 0);
+ erl_drv_output_term(driver_mk_port(ttysl_port), spec,
+ sizeof(spec) / sizeof(spec[0]));
+ }
+ }
+
return; /* TRUE; */
}
+static void ttysl_to_tty(ErlDrvData ttysl_data, ErlDrvEvent fd) {
+ for (;;) {
+ int written, qlen;
+ SysIOVec *iov;
+ ErlDrvSizeT sz;
+
+ iov = driver_peekq(ttysl_port,&qlen);
+ if (iov)
+ written = writev(ttysl_fd, iov, qlen > MAXIOV ? MAXIOV : qlen);
+ else
+ written = 0;
+ if (written < 0) {
+ if (errno == EAGAIN) {
+ break;
+ } else {
+ /* we ignore all other errors */
+ }
+ } else {
+ sz = driver_deq(ttysl_port, written);
+ if (sz < TTY_BUFFSIZE && ttysl_send_ok) {
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, driver_mk_port(ttysl_port),
+ ERL_DRV_ATOM, driver_mk_atom("ok"),
+ ERL_DRV_TUPLE, 2
+ };
+ ttysl_send_ok = 0;
+ erl_drv_output_term(driver_mk_port(ttysl_port), spec,
+ sizeof(spec) / sizeof(spec[0]));
+ }
+ if (sz == 0) {
+ driver_select(ttysl_port,(ErlDrvEvent)(long)ttysl_fd,
+ ERL_DRV_WRITE,0);
+ if (ttysl_terminate)
+ /* flush has been called, which means we should terminate
+ when queue is empty. This will not send any exit
+ message */
+ driver_failure_atom(ttysl_port, "normal");
+ break;
+ }
+ }
+ }
+
+ return;
+}
+
+static void ttysl_flush_tty(ErlDrvData ttysl_data) {
+ ttysl_terminate = 1;
+ return;
+}
+
static void ttysl_from_tty(ErlDrvData ttysl_data, ErlDrvEvent fd)
{
byte b[1024];
@@ -1070,7 +1203,14 @@ static int write_buf(Uint32 *s, int n)
/* The basic procedure for outputting one character. */
static int outc(int c)
{
- return (int)putc(c, ttysl_out);
+ putcbuf->orig_bytes[putcpos++] = c;
+ if (putcpos == putclen) {
+ driver_enq_bin(ttysl_port,putcbuf,0,putclen);
+ putcpos = 0;
+ putclen = TTY_BUFFSIZE;
+ putcbuf = driver_alloc_binary(BUFSIZ);
+ }
+ return 1;
}
static int move_cursor(int from, int to)
@@ -1318,11 +1458,11 @@ static RETSIGTYPE suspend(int sig)
exit(1);
}
- sys_sigset(sig, SIG_DFL); /* Set signal handler to default */
+ sys_signal(sig, SIG_DFL); /* Set signal handler to default */
sys_sigrelease(sig); /* Allow 'sig' to come through */
kill(getpid(), sig); /* Send ourselves the signal */
sys_sigblock(sig); /* Reset to old mask */
- sys_sigset(sig, suspend); /* Reset signal handler */
+ sys_signal(sig, suspend); /* Reset signal handler */
if (tty_set(ttysl_fd) < 0) {
fprintf(stderr,"Can't set tty raw \n");
diff --git a/erts/emulator/drivers/win32/ttsl_drv.c b/erts/emulator/drivers/win32/ttsl_drv.c
index 502cb58dfa..851c336a11 100644
--- a/erts/emulator/drivers/win32/ttsl_drv.c
+++ b/erts/emulator/drivers/win32/ttsl_drv.c
@@ -46,6 +46,7 @@ static int rows; /* Number of rows available. */
#define OP_INSC 2
#define OP_DELC 3
#define OP_BEEP 4
+#define OP_PUTC_SYNC 5
/* Control op */
#define CTRL_OP_GET_WINSIZE 100
@@ -458,6 +459,7 @@ static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT coun
switch (buf[0]) {
case OP_PUTC:
+ case OP_PUTC_SYNC:
DEBUGLOG(("OP: Putc(%I64u)",(unsigned long long)count-1));
if (check_buf_size((byte*)buf+1, count-1) == 0)
return;
@@ -481,6 +483,20 @@ static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT coun
/* Unknown op, just ignore. */
break;
}
+
+ if (buf[0] == OP_PUTC_SYNC) {
+ /* On windows we do a blocking write to the tty so we just
+ send the ack immediately. If at some point in the future
+ someone has a problem with tty output being blocking,
+ this has to be changed. */
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, driver_mk_port(ttysl_port),
+ ERL_DRV_ATOM, driver_mk_atom("ok"),
+ ERL_DRV_TUPLE, 2
+ };
+ erl_drv_output_term(driver_mk_port(ttysl_port), spec,
+ sizeof(spec) / sizeof(spec[0]));
+ }
return;
}
diff --git a/erts/emulator/hipe/hipe_amd64.c b/erts/emulator/hipe/hipe_amd64.c
index b5dff06987..63646825b2 100644
--- a/erts/emulator/hipe/hipe_amd64.c
+++ b/erts/emulator/hipe/hipe_amd64.c
@@ -125,7 +125,7 @@ static void atexit_alloc_code_stats(void)
#define MAP_ANONYMOUS MAP_ANON
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -174,10 +174,9 @@ static void morecore(unsigned int alloc_bytes)
abort();
}
#endif
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -197,6 +196,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -206,8 +207,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -224,18 +225,18 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
-/* called from hipe_bif0.c:hipe_bifs_make_native_stub_2()
- and hipe_bif0.c:hipe_make_stub() */
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+/* Make a stub for native code calling an exported beam function.
+ */
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
/*
* This creates a native code stub with the following contents:
*
- * movq $Address, P_BEAM_IP(%ebp) %% Actually two movl
+ * movq $Address, P_CALLEE_EXP(%ebp) %% Actually two movl
* movb $Arity, P_ARITY(%ebp)
* jmp callemu
*
- * The stub has variable size, depending on whether the P_BEAM_IP
+ * The stub has variable size, depending on whether the P_CALLEE_EXP
* and P_ARITY offsets fit in 8-bit signed displacements or not.
* The rel32 offset in the final jmp depends on its actual location,
* which also depends on the size of the previous instructions.
@@ -248,49 +249,51 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
codeSize = /* 23, 26, 29, or 32 bytes */
23 + /* 23 when all offsets are 8-bit */
- (P_BEAM_IP >= 128 ? 3 : 0) +
- ((P_BEAM_IP + 4) >= 128 ? 3 : 0) +
+ (P_CALLEE_EXP >= 128 ? 3 : 0) +
+ ((P_CALLEE_EXP + 4) >= 128 ? 3 : 0) +
(P_ARITY >= 128 ? 3 : 0);
codep = code = alloc_code(codeSize);
+ if (!code)
+ return NULL;
- /* movl $beamAddress, P_BEAM_IP(%ebp); 3 or 6 bytes, plus 4 */
+ /* movl $callee_exp, P_CALLEE_EXP(%ebp); 3 or 6 bytes, plus 4 */
codep[0] = 0xc7;
-#if P_BEAM_IP >= 128
+#if P_CALLEE_EXP >= 128
codep[1] = 0x85; /* disp32[EBP] */
- codep[2] = P_BEAM_IP & 0xFF;
- codep[3] = (P_BEAM_IP >> 8) & 0xFF;
- codep[4] = (P_BEAM_IP >> 16) & 0xFF;
- codep[5] = (P_BEAM_IP >> 24) & 0xFF;
+ codep[2] = P_CALLEE_EXP & 0xFF;
+ codep[3] = (P_CALLEE_EXP >> 8) & 0xFF;
+ codep[4] = (P_CALLEE_EXP >> 16) & 0xFF;
+ codep[5] = (P_CALLEE_EXP >> 24) & 0xFF;
codep += 6;
#else
codep[1] = 0x45; /* disp8[EBP] */
- codep[2] = P_BEAM_IP;
+ codep[2] = P_CALLEE_EXP;
codep += 3;
#endif
- codep[0] = ((unsigned long)beamAddress ) & 0xFF;
- codep[1] = ((unsigned long)beamAddress >> 8) & 0xFF;
- codep[2] = ((unsigned long)beamAddress >> 16) & 0xFF;
- codep[3] = ((unsigned long)beamAddress >> 24) & 0xFF;
+ codep[0] = ((unsigned long)callee_exp ) & 0xFF;
+ codep[1] = ((unsigned long)callee_exp >> 8) & 0xFF;
+ codep[2] = ((unsigned long)callee_exp >> 16) & 0xFF;
+ codep[3] = ((unsigned long)callee_exp >> 24) & 0xFF;
codep += 4;
- /* movl (shl 32 $beamAddress), P_BEAM_IP+4(%ebp); 3 or 6 bytes, plus 4 */
+ /* movl (shl 32 $callee_exp), P_CALLEE_EXP+4(%ebp); 3 or 6 bytes, plus 4 */
codep[0] = 0xc7;
-#if P_BEAM_IP+4 >= 128
+#if P_CALLEE_EXP+4 >= 128
codep[1] = 0x85; /* disp32[EBP] */
- codep[2] = (P_BEAM_IP+4) & 0xFF;
- codep[3] = ((P_BEAM_IP+4) >> 8) & 0xFF;
- codep[4] = ((P_BEAM_IP+4) >> 16) & 0xFF;
- codep[5] = ((P_BEAM_IP+4) >> 24) & 0xFF;
+ codep[2] = (P_CALLEE_EXP+4) & 0xFF;
+ codep[3] = ((P_CALLEE_EXP+4) >> 8) & 0xFF;
+ codep[4] = ((P_CALLEE_EXP+4) >> 16) & 0xFF;
+ codep[5] = ((P_CALLEE_EXP+4) >> 24) & 0xFF;
codep += 6;
#else
codep[1] = 0x45; /* disp8[EBP] */
- codep[2] = (P_BEAM_IP+4);
+ codep[2] = (P_CALLEE_EXP+4);
codep += 3;
#endif
- codep[0] = ((unsigned long)beamAddress >> 32) & 0xFF;
- codep[1] = ((unsigned long)beamAddress >> 40) & 0xFF;
- codep[2] = ((unsigned long)beamAddress >> 48) & 0xFF;
- codep[3] = ((unsigned long)beamAddress >> 56) & 0xFF;
+ codep[0] = ((unsigned long)callee_exp >> 32) & 0xFF;
+ codep[1] = ((unsigned long)callee_exp >> 40) & 0xFF;
+ codep[2] = ((unsigned long)callee_exp >> 48) & 0xFF;
+ codep[3] = ((unsigned long)callee_exp >> 56) & 0xFF;
codep += 4;
/* movb $beamArity, P_ARITY(%ebp); 3 or 6 bytes */
diff --git a/erts/emulator/hipe/hipe_amd64_abi.txt b/erts/emulator/hipe/hipe_amd64_abi.txt
index 8a34bfa67f..72aed13995 100644
--- a/erts/emulator/hipe/hipe_amd64_abi.txt
+++ b/erts/emulator/hipe/hipe_amd64_abi.txt
@@ -45,7 +45,7 @@ The first return value from a function is placed in %rax, the second
(if any) is placed in %rdx.
Notes:
-- Currently, NR_ARG_REGS==0.
+- Currently, NR_ARG_REGS == 4.
- C BIFs expect P in C parameter register 1: %rdi. By making Erlang
parameter registers 1-5 coincide with C parameter registers 2-6,
our BIF wrappers can simply move P to %rdi without having to shift
diff --git a/erts/emulator/hipe/hipe_amd64_asm.m4 b/erts/emulator/hipe/hipe_amd64_asm.m4
index 7c81040b8b..ca55d5bf3b 100644
--- a/erts/emulator/hipe/hipe_amd64_asm.m4
+++ b/erts/emulator/hipe/hipe_amd64_asm.m4
@@ -33,7 +33,35 @@ define(HEAP_LIMIT_IN_REGISTER,0)dnl global for HL
define(SIMULATE_NSP,0)dnl change to 1 to simulate call/ret insns
`#define AMD64_LEAF_WORDS 'LEAF_WORDS
-`#define LEAF_WORDS 'LEAF_WORDS
+`#define LEAF_WORDS 'LEAF_WORDS
+`#define AMD64_NR_ARG_REGS 'NR_ARG_REGS
+`#define NR_ARG_REGS 'NR_ARG_REGS
+
+`#define AMD64_HP_IN_REGISTER 'HP_IN_REGISTER
+`#if AMD64_HP_IN_REGISTER'
+`#define AMD64_HEAP_POINTER 15'
+define(HP,%r15)dnl Only change this together with above
+`#endif'
+
+`#define AMD64_FCALLS_IN_REGISTER 'FCALLS_IN_REGISTER
+`#if AMD64_FCALLS_IN_REGISTER'
+`#define AMD64_FCALLS_REGISTER 11'
+define(FCALLS,%r11)dnl This goes together with line above
+`#endif'
+
+`#define AMD64_HEAP_LIMIT_IN_REGISTER 'HEAP_LIMIT_IN_REGISTER
+`#if AMD64_HEAP_LIMIT_IN_REGISTER'
+`#define AMD64_HEAP_LIMIT_REGISTER 12'
+define(HEAP_LIMIT,%r12)dnl Change this together with line above
+`#endif'
+
+`#define AMD64_SIMULATE_NSP 'SIMULATE_NSP
+
+
+`#ifdef ASM'
+/*
+ * Only assembler stuff from here on (when included from *.S)
+ */
/*
* Workarounds for Darwin.
@@ -63,33 +91,24 @@ ifelse(OPSYS,darwin,``
*/
`#define P %rbp'
-`#define AMD64_HP_IN_REGISTER 'HP_IN_REGISTER
`#if AMD64_HP_IN_REGISTER
-#define AMD64_HEAP_POINTER 15'
-define(HP,%r15)dnl Only change this together with above
-`#define SAVE_HP movq 'HP`, P_HP(P)
+#define SAVE_HP movq 'HP`, P_HP(P)
#define RESTORE_HP movq P_HP(P), 'HP`
#else
#define SAVE_HP /*empty*/
#define RESTORE_HP /*empty*/
#endif'
-`#define AMD64_FCALLS_IN_REGISTER 'FCALLS_IN_REGISTER
`#if AMD64_FCALLS_IN_REGISTER
-#define AMD64_FCALLS_REGISTER 11'
-define(FCALLS,%r11)dnl This goes together with line above
-`#define SAVE_FCALLS movq 'FCALLS`, P_FCALLS(P)
+#define SAVE_FCALLS movq 'FCALLS`, P_FCALLS(P)
#define RESTORE_FCALLS movq P_FCALLS(P), 'FCALLS`
#else
#define SAVE_FCALLS /*empty*/
#define RESTORE_FCALLS /*empty*/
#endif'
-`#define AMD64_HEAP_LIMIT_IN_REGISTER 'HEAP_LIMIT_IN_REGISTER
`#if AMD64_HEAP_LIMIT_IN_REGISTER
-#define AMD64_HEAP_LIMIT_REGISTER 12'
-define(HEAP_LIMIT,%r12)dnl Change this together with line above
-`#define RESTORE_HEAP_LIMIT movq P_HP_LIMIT(P), 'HEAP_LIMIT`
+#define RESTORE_HEAP_LIMIT movq P_HP_LIMIT(P), 'HEAP_LIMIT`
#else
#define RESTORE_HEAP_LIMIT /*empty*/
#endif'
@@ -99,7 +118,6 @@ define(NSP,%rsp)dnl
`#define SAVE_CSP movq %rsp, P_CSP(P)
#define RESTORE_CSP movq P_CSP(P), %rsp'
-`#define AMD64_SIMULATE_NSP 'SIMULATE_NSP
/*
* Context switching macros.
@@ -132,8 +150,6 @@ define(NSP,%rsp)dnl
/*
* Argument (parameter) registers.
*/
-`#define AMD64_NR_ARG_REGS 'NR_ARG_REGS
-`#define NR_ARG_REGS 'NR_ARG_REGS
define(defarg,`define(ARG$1,`$2')dnl
#`define ARG'$1 $2'
@@ -237,6 +253,10 @@ define(NBIF_ARG,`ifelse(eval($3 >= NR_ARG_REGS),0,`NBIF_REG_ARG($1,$3)',`NBIF_ST
`/* #define NBIF_ARG_3_0 'NBIF_ARG(%rsi,3,0)` */'
`/* #define NBIF_ARG_3_1 'NBIF_ARG(%rdx,3,1)` */'
`/* #define NBIF_ARG_3_2 'NBIF_ARG(%rcx,3,2)` */'
+`/* #define NBIF_ARG_4_0 'NBIF_ARG(%rsi,4,0)` */'
+`/* #define NBIF_ARG_4_1 'NBIF_ARG(%rdx,4,1)` */'
+`/* #define NBIF_ARG_4_2 'NBIF_ARG(%rcx,4,2)` */'
+`/* #define NBIF_ARG_4_3 'NBIF_ARG(%r8,4,3)` */'
`/* #define NBIF_ARG_5_0 'NBIF_ARG(%rsi,5,0)` */'
`/* #define NBIF_ARG_5_1 'NBIF_ARG(%rdx,5,1)` */'
`/* #define NBIF_ARG_5_2 'NBIF_ARG(%rcx,5,2)` */'
@@ -261,6 +281,9 @@ define(NBIF_RET,`NBIF_RET_N(eval(RET_POP($1)))')dnl
`/* #define NBIF_RET_1 'NBIF_RET(1)` */'
`/* #define NBIF_RET_2 'NBIF_RET(2)` */'
`/* #define NBIF_RET_3 'NBIF_RET(3)` */'
+`/* #define NBIF_RET_4 'NBIF_RET(4)` */'
`/* #define NBIF_RET_5 'NBIF_RET(5)` */'
+`#endif /* ASM */'
+
`#endif /* HIPE_AMD64_ASM_H */'
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index a3219c7586..7d94aa05b3 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -18,7 +18,7 @@ changecom(`/*', `*/')dnl
* %CopyrightEnd%
*/
-
+#`define ASM'
include(`hipe/hipe_amd64_asm.m4')
#`include' "config.h"
#`include' "hipe_literals.h"
@@ -51,9 +51,10 @@ define(HANDLE_GOT_MBUF,`
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_4(nbif_name, cbif_name)
* standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 0-3 parameters and
+ * Generate native interface for a BIF with 0-4 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -154,6 +155,43 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
+define(standard_bif_interface_4,
+`
+#ifndef HAVE_$1
+#`define' HAVE_$1
+ TEXT
+ .align 4
+ GLOBAL(ASYM($1))
+ASYM($1):
+ /* set up the parameters */
+ movq P, %rdi
+ NBIF_ARG(%rsi,4,0)
+ NBIF_ARG(%rdx,4,1)
+ NBIF_ARG(%rcx,4,2)
+ NBIF_ARG(%r8,4,3)
+
+ /* make the call on the C stack */
+ SWITCH_ERLANG_TO_C
+ pushq %r8
+ pushq %rcx
+ pushq %rdx
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ sub $(8), %rsp /* stack frame 16-byte alignment */
+ CALL_BIF($2)
+ add $(4*8 + 8), %rsp
+ TEST_GOT_MBUF
+ SWITCH_C_TO_ERLANG
+
+ /* throw exception if failure, otherwise return */
+ TEST_GOT_EXN
+ jz nbif_4_simple_exception
+ NBIF_RET(4)
+ HANDLE_GOT_MBUF(4)
+ SET_SIZE(ASYM($1))
+ TYPE_FUNCTION(ASYM($1))
+#endif')
+
define(standard_bif_interface_0,
`
#ifndef HAVE_$1
diff --git a/erts/emulator/hipe/hipe_amd64_glue.S b/erts/emulator/hipe/hipe_amd64_glue.S
index 8816906870..3cb0a2875b 100644
--- a/erts/emulator/hipe/hipe_amd64_glue.S
+++ b/erts/emulator/hipe/hipe_amd64_glue.S
@@ -17,10 +17,9 @@
* %CopyrightEnd%
*/
-
+#define ASM
#include "hipe_amd64_asm.h"
#include "hipe_literals.h"
-#define ASM
#include "hipe_mode_switch.h"
/*
@@ -109,7 +108,7 @@ ASYM(nbif_return):
* stub (hipe_x86_loader.erl) which should look as follows:
*
* stub for f/N:
- * movq $<f's BEAM code address>, P_BEAM_IP(P)
+ * movq $<f's export entry address>, P_CALLEE_EXP(P)
* movb $<N>, P_ARITY(P)
* jmp nbif_callemu
*
@@ -119,7 +118,7 @@ ASYM(nbif_return):
GLOBAL(ASYM(nbif_callemu))
ASYM(nbif_callemu):
STORE_ARG_REGS
- movl $HIPE_MODE_SWITCH_RES_CALL, %eax
+ movl $HIPE_MODE_SWITCH_RES_CALL_EXPORTED, %eax
jmp .suspend_exit
/*
@@ -322,6 +321,7 @@ ASYM(nbif_fail):
GLOBAL(nbif_1_gc_after_bif)
GLOBAL(nbif_2_gc_after_bif)
GLOBAL(nbif_3_gc_after_bif)
+ GLOBAL(nbif_4_gc_after_bif)
.align 4
nbif_0_gc_after_bif:
xorl %edx, %edx
@@ -337,6 +337,10 @@ nbif_2_gc_after_bif:
.align 4
nbif_3_gc_after_bif:
movl $3, %edx
+ jmp .gc_after_bif
+ .align 4
+nbif_4_gc_after_bif:
+ movl $4, %edx
/*FALLTHROUGH*/
.align 4
.gc_after_bif:
@@ -360,6 +364,7 @@ nbif_3_gc_after_bif:
GLOBAL(nbif_1_simple_exception)
GLOBAL(nbif_2_simple_exception)
GLOBAL(nbif_3_simple_exception)
+ GLOBAL(nbif_4_simple_exception)
.align 4
nbif_0_simple_exception:
xorl %eax, %eax
@@ -375,6 +380,10 @@ nbif_2_simple_exception:
.align 4
nbif_3_simple_exception:
movl $3, %eax
+ jmp .nbif_simple_exception
+ .align 4
+nbif_4_simple_exception:
+ movl $4, %eax
/*FALLTHROUGH*/
.align 4
.nbif_simple_exception:
diff --git a/erts/emulator/hipe/hipe_arch.h b/erts/emulator/hipe/hipe_arch.h
index 04ed980126..b45209b3f7 100644
--- a/erts/emulator/hipe/hipe_arch.h
+++ b/erts/emulator/hipe/hipe_arch.h
@@ -29,6 +29,7 @@ extern void hipe_patch_load_fe(Uint *address, Uint value);
extern int hipe_patch_insn(void *address, Uint value, Eterm type);
extern int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline);
+extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
extern void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity);
#if defined(__sparc__)
diff --git a/erts/emulator/hipe/hipe_arm.c b/erts/emulator/hipe/hipe_arm.c
index 3db3ffe9b1..c0c6305c68 100644
--- a/erts/emulator/hipe/hipe_arm.c
+++ b/erts/emulator/hipe/hipe_arm.c
@@ -260,9 +260,9 @@ int hipe_patch_insn(void *address, Uint32 value, Eterm type)
return 0;
}
-/* called from hipe_bif0.c:hipe_bifs_make_native_stub_2()
- and hipe_bif0.c:hipe_make_stub() */
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+/* Make a stub for native code calling an exported BEAM function.
+ */
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
unsigned int *tramp_callemu;
@@ -272,9 +272,9 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
* Native code calls BEAM via a stub looking as follows:
*
* mov r0, #beamArity
- * ldr r8, [pc,#0] // beamAddress
+ * ldr r8, [pc,#0] // callee_exp
* b nbif_callemu
- * .long beamAddress
+ * .long callee_exp
*
* I'm using r0 and r8 since they aren't used for
* parameter passing in native code. The branch to
@@ -283,6 +283,8 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
*/
code = alloc_stub(4, &tramp_callemu);
+ if (!code)
+ return NULL;
callemu_offset = ((int)&nbif_callemu - ((int)&code[2] + 8)) >> 2;
if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF)) {
callemu_offset = ((int)tramp_callemu - ((int)&code[2] + 8)) >> 2;
@@ -292,12 +294,12 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
/* mov r0, #beamArity */
code[0] = 0xE3A00000 | (beamArity & 0xFF);
- /* ldr r8, [pc,#0] // beamAddress */
+ /* ldr r8, [pc,#0] // callee_exp */
code[1] = 0xE59F8000;
/* b nbif_callemu */
code[2] = 0xEA000000 | (callemu_offset & 0x00FFFFFF);
- /* .long beamAddress */
- code[3] = (unsigned int)beamAddress;
+ /* .long callee_exp */
+ code[3] = (unsigned int)callee_exp;
hipe_flush_icache_range(code, 4*sizeof(int));
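
For context on the range check in the ARM stub above: the unconditional branch to nbif_callemu encodes a signed 24-bit word offset measured from PC+8, so the code falls back to a nearby trampoline when the distance does not fit. A self-contained sketch of that fits-in-range test (addresses below are made up):

#include <stdio.h>
#include <stdint.h>

/* Does a branch at 'insn' reach 'target'? ARM 'b' offsets are counted in
 * words from PC+8 and must fit in a signed 24-bit field. */
static int fits_arm_branch(uint64_t insn, uint64_t target)
{
    int64_t offset = ((int64_t)target - ((int64_t)insn + 8)) / 4;
    return offset >= -0x00800000 && offset <= 0x007FFFFF;
}

int main(void)
{
    uint64_t stub = 0x00100008;                         /* &code[2], made up */
    printf("near target fits: %d\n", fits_arm_branch(stub, 0x00200000));
    printf("far target fits:  %d\n", fits_arm_branch(stub, 0x7f000000));
    return 0;
}
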
diff --git a/erts/emulator/hipe/hipe_arm.h b/erts/emulator/hipe/hipe_arm.h
index 19f2a986cf..b9cd1a750c 100644
--- a/erts/emulator/hipe/hipe_arm.h
+++ b/erts/emulator/hipe/hipe_arm.h
@@ -40,8 +40,4 @@ static __inline__ int hipe_word32_address_ok(void *address)
extern void hipe_arm_inc_stack(void);
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_ARM_H */
diff --git a/erts/emulator/hipe/hipe_arm_asm.m4 b/erts/emulator/hipe/hipe_arm_asm.m4
index 85dc84973d..b2e3f83d1e 100644
--- a/erts/emulator/hipe/hipe_arm_asm.m4
+++ b/erts/emulator/hipe/hipe_arm_asm.m4
@@ -29,6 +29,14 @@ define(LEAF_WORDS,16)dnl number of stack words for leaf functions
define(NR_ARG_REGS,3)dnl admissible values are 0 to 6, inclusive
`#define ARM_LEAF_WORDS 'LEAF_WORDS
+`#define ARM_NR_ARG_REGS 'NR_ARG_REGS
+`#define NR_ARG_REGS 'NR_ARG_REGS
+
+
+`#ifdef ASM'
+/*
+ * Only assembler stuff from here on (when included from *.S)
+ */
/*
* Reserved registers.
@@ -77,8 +85,6 @@ define(NR_ARG_REGS,3)dnl admissible values are 0 to 6, inclusive
/*
* Argument (parameter) registers.
*/
-`#define ARM_NR_ARG_REGS 'NR_ARG_REGS
-`#define NR_ARG_REGS 'NR_ARG_REGS
define(defarg,`define(ARG$1,`$2')dnl
#`define ARG'$1 $2'
@@ -195,4 +201,6 @@ define(QUICK_CALL_RET,`NBIF_POP_N(eval(RET_POP($2)))b $1')dnl
`/* #define QUICK_CALL_RET_F_3 'QUICK_CALL_RET(F,3)` */'
`/* #define QUICK_CALL_RET_F_5 'QUICK_CALL_RET(F,5)` */'
+`#endif /* ASM */'
+
`#endif /* HIPE_ARM_ASM_H */'
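
The hipe_arm_asm.m4 change above (mirrored for PPC and SPARC further down) hoists the NR_ARG_REGS tunables above a new #ifdef ASM guard, so C code can include the generated header just for the constants, while the .S files define ASM first to also get the assembler macros. A self-contained schematic of the pattern (names and values simplified; ARM uses 3 argument registers per the diff):

#include <stdio.h>

/* --- schematic of the generated header after the change --- */
#define ARM_NR_ARG_REGS 3          /* tunable: visible to C and assembler */
#ifdef ASM
#define ARG0 r1                    /* assembler-only: hidden from C builds */
#endif
/* ----------------------------------------------------------- */

int main(void)
{
#ifdef ASM
    puts("assembler-only macros visible");
#else
    puts("assembler-only macros hidden (C build)");  /* taken here */
#endif
    printf("ARM_NR_ARG_REGS = %d\n", ARM_NR_ARG_REGS);
    return 0;
}
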
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index 3ca9a1bcdb..884240be9c 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -19,6 +19,7 @@ changecom(`/*', `*/')dnl
*/
+#`define ASM'
include(`hipe/hipe_arm_asm.m4')
#`include' "config.h"
#`include' "hipe_literals.h"
diff --git a/erts/emulator/hipe/hipe_arm_glue.S b/erts/emulator/hipe/hipe_arm_glue.S
index 5723afac12..e7ff267606 100644
--- a/erts/emulator/hipe/hipe_arm_glue.S
+++ b/erts/emulator/hipe/hipe_arm_glue.S
@@ -17,10 +17,9 @@
* %CopyrightEnd%
*/
-
+#define ASM
#include "hipe_arm_asm.h"
#include "hipe_literals.h"
-#define ASM
#include "hipe_mode_switch.h"
.text
@@ -141,7 +140,7 @@ hipe_arm_throw_to_native:
* which should look as follows:
*
* stub for f/N:
- * <set r8 to f's BEAM code address>
+ * <set r8 to f's export entry address>
* <set r0 to N>
* b nbif_callemu
*
@@ -150,10 +149,10 @@ hipe_arm_throw_to_native:
.global nbif_callemu
.type nbif_callemu, %function
nbif_callemu:
- str r8, [P, #P_BEAM_IP]
+ str r8, [P, #P_CALLEE_EXP]
str r0, [P, #P_ARITY]
STORE_ARG_REGS
- mov r0, #HIPE_MODE_SWITCH_RES_CALL
+ mov r0, #HIPE_MODE_SWITCH_RES_CALL_EXPORTED
b .suspend_exit
/*
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 8af174170d..6c1de05a4c 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -89,25 +89,6 @@ static Eterm address_to_term(const void *address, Process *p)
/*
* BIFs for reading and writing memory. Used internally by HiPE.
*/
-#if 0 /* XXX: unused */
-BIF_RETTYPE hipe_bifs_read_u8_1(BIF_ALIST_1)
-{
- unsigned char *address = term_to_address(BIF_ARG_1);
- if (!address)
- BIF_ERROR(BIF_P, BADARG);
- BIF_RET(make_small(*address));
-}
-#endif
-
-#if 0 /* XXX: unused */
-BIF_RETTYPE hipe_bifs_read_u32_1(BIF_ALIST_1)
-{
- Uint32 *address = term_to_address(BIF_ARG_1);
- if (!address || !hipe_word32_address_ok(address))
- BIF_ERROR(BIF_P, BADARG);
- BIF_RET(Uint_to_term(*address, BIF_P));
-}
-#endif
BIF_RETTYPE hipe_bifs_write_u8_2(BIF_ALIST_2)
{
@@ -120,22 +101,6 @@ BIF_RETTYPE hipe_bifs_write_u8_2(BIF_ALIST_2)
BIF_RET(NIL);
}
-#if 0 /* XXX: unused */
-BIF_RETTYPE hipe_bifs_write_s32_2(BIF_ALIST_2)
-{
- Sint32 *address;
- Sint value;
-
- address = term_to_address(BIF_ARG_1);
- if (!address || !hipe_word32_address_ok(address))
- BIF_ERROR(BIF_P, BADARG);
- if (!term_to_Sint32(BIF_ARG_2, &value))
- BIF_ERROR(BIF_P, BADARG);
- *address = value;
- BIF_RET(NIL);
-}
-#endif
-
BIF_RETTYPE hipe_bifs_write_u32_2(BIF_ALIST_2)
{
Uint32 *address;
@@ -432,15 +397,17 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
ASSERT(bitoffs == 0);
ASSERT(bitsize == 0);
trampolines = NIL;
-#ifdef HIPE_ALLOC_CODE
- address = HIPE_ALLOC_CODE(nrbytes, BIF_ARG_2, &trampolines, BIF_P);
- if (!address)
- BIF_ERROR(BIF_P, BADARG);
-#else
- if (is_not_nil(BIF_ARG_2))
- BIF_ERROR(BIF_P, BADARG);
- address = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
-#endif
+ address = hipe_alloc_code(nrbytes, BIF_ARG_2, &trampolines, BIF_P);
+ if (!address) {
+ Uint nrcallees;
+
+ if (is_tuple(BIF_ARG_2))
+ nrcallees = arityval(tuple_val(BIF_ARG_2)[0]);
+ else
+ nrcallees = 0;
+ erl_exit(1, "%s: failed to allocate %lu bytes and %lu trampolines\r\n",
+ __func__, (unsigned long)nrbytes, (unsigned long)nrcallees);
+ }
memcpy(address, bytes, nrbytes);
hipe_flush_icache_range(address, nrbytes);
hp = HAlloc(BIF_P, 3);
@@ -639,33 +606,6 @@ BIF_RETTYPE hipe_bifs_fun_to_address_1(BIF_ALIST_1)
BIF_RET(address_to_term(pc, BIF_P));
}
-static void *hipe_get_emu_address(Eterm m, Eterm f, unsigned int arity, int is_remote)
-{
- void *address = NULL;
- if (!is_remote)
- address = hipe_find_emu_address(m, f, arity);
- if (!address) {
- /* if not found, stub it via the export entry */
- /* no lock needed around erts_export_get_or_make_stub() */
- Export *export_entry = erts_export_get_or_make_stub(m, f, arity);
- address = export_entry->addressv[erts_active_code_ix()];
- }
- return address;
-}
-
-#if 0 /* XXX: unused */
-BIF_RETTYPE hipe_bifs_get_emu_address_1(BIF_ALIST_1)
-{
- struct mfa mfa;
- void *address;
-
- if (!term_to_mfa(BIF_ARG_1, &mfa))
- BIF_ERROR(BIF_P, BADARG);
- address = hipe_get_emu_address(mfa.mod, mfa.fun, mfa.ari);
- BIF_RET(address_to_term(address, BIF_P));
-}
-#endif
-
BIF_RETTYPE hipe_bifs_set_native_address_3(BIF_ALIST_3)
{
Eterm *pc;
@@ -713,33 +653,6 @@ BIF_RETTYPE hipe_bifs_set_native_address_3(BIF_ALIST_3)
BIF_RET(am_false);
}
-#if 0 /* XXX: unused */
-/*
- * hipe_bifs_address_to_fun(Address)
- * - Address is the address of the start of a emu function's code
- * - returns {Module, Function, Arity}
- */
-BIF_RETTYPE hipe_bifs_address_to_fun_1(BIF_ALIST_1)
-{
- Eterm *pc;
- Eterm *funcinfo;
- Eterm *hp;
-
- pc = term_to_address(BIF_ARG_1);
- if (!pc)
- BIF_ERROR(BIF_P, BADARG);
- funcinfo = find_function_from_pc(pc);
- if (!funcinfo)
- BIF_RET(am_false);
- hp = HAlloc(BIF_P, 4);
- hp[0] = make_arityval(3);
- hp[1] = funcinfo[0];
- hp[2] = funcinfo[1];
- hp[3] = make_small(funcinfo[2]);
- BIF_RET(make_tuple(hp));
-}
-#endif
-
BIF_RETTYPE hipe_bifs_enter_sdesc_1(BIF_ALIST_1)
{
struct sdesc *sdesc;
@@ -948,37 +861,6 @@ BIF_RETTYPE hipe_bifs_primop_address_1(BIF_ALIST_1)
BIF_RET(address_to_term(primop->address, BIF_P));
}
-#if 0 /* XXX: unused */
-/*
- * hipe_bifs_gbif_address(F,A) -> address or false
- */
-#define GBIF_LIST(ATOM,ARY,CFUN) extern Eterm gbif_##CFUN(void);
-#include "hipe_gbif_list.h"
-#undef GBIF_LIST
-
-BIF_RETTYPE hipe_bifs_gbif_address_2(BIF_ALIST_2)
-{
- Uint arity;
- void *address;
-
- if (is_not_atom(BIF_ARG_1) || is_not_small(BIF_ARG_2))
- BIF_RET(am_false); /* error or false, does it matter? */
- arity = signed_val(BIF_ARG_2);
- /* XXX: replace with a hash table later */
- do { /* trick to let us use 'break' instead of 'goto' */
-#define GBIF_LIST(ATOM,ARY,CFUN) if (BIF_ARG_1 == ATOM && arity == ARY) { address = CFUN; break; }
-#include "hipe_gbif_list.h"
-#undef GBIF_LIST
- printf("\r\n%s: guard BIF ", __FUNCTION__);
- fflush(stdout);
- erts_printf("%T", BIF_ARG_1);
- printf("/%lu isn't listed in hipe_gbif_list.h\r\n", arity);
- BIF_RET(am_false);
- } while (0);
- BIF_RET(address_to_term(address, BIF_P));
-}
-#endif
-
BIF_RETTYPE hipe_bifs_atom_to_word_1(BIF_ALIST_1)
{
if (is_not_atom(BIF_ARG_1))
@@ -1028,77 +910,6 @@ void hipe_emulate_fpe(Process* p)
}
#endif
-#if 0 /* XXX: unused */
-/*
- * At least parts of this should be inlined in native code.
- * The rest could be made a primop used by both the emulator and
- * native code...
- */
-BIF_RETTYPE hipe_bifs_make_fun_3(BIF_ALIST_3)
-{
- Eterm free_vars;
- Eterm mod;
- Eterm *tp;
- Uint index;
- Uint uniq;
- Uint num_free;
- Eterm tmp_var;
- Uint *tmp_ptr;
- unsigned needed;
- ErlFunThing *funp;
- Eterm *hp;
- int i;
-
- if (is_not_list(BIF_ARG_1) && is_not_nil(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
- free_vars = BIF_ARG_1;
-
- if (is_not_atom(BIF_ARG_2))
- BIF_ERROR(BIF_P, BADARG);
- mod = BIF_ARG_2;
-
- if (is_not_tuple(BIF_ARG_3) ||
- (arityval(*tuple_val(BIF_ARG_3)) != 3))
- BIF_ERROR(BIF_P, BADARG);
- tp = tuple_val(BIF_ARG_3);
-
- if (term_to_Uint(tp[1], &index) == 0)
- BIF_ERROR(BIF_P, BADARG);
- if (term_to_Uint(tp[2], &uniq) == 0)
- BIF_ERROR(BIF_P, BADARG);
- if (term_to_Uint(tp[3], &num_free) == 0)
- BIF_ERROR(BIF_P, BADARG);
-
- needed = ERL_FUN_SIZE + num_free;
- funp = (ErlFunThing *) HAlloc(BIF_P, needed);
- hp = funp->env;
-
- funp->thing_word = HEADER_FUN;
-
- /* Need a ErlFunEntry *fe
- * fe->refc++;
- * funp->fe = fe;
- */
-
- funp->num_free = num_free;
- funp->creator = BIF_P->id;
- for (i = 0; i < num_free; i++) {
- if (is_nil(free_vars))
- BIF_ERROR(BIF_P, BADARG);
- tmp_ptr = list_val(free_vars);
- tmp_var = CAR(tmp_ptr);
- free_vars = CDR(tmp_ptr);
- *hp++ = tmp_var;
- }
- if (is_not_nil(free_vars))
- BIF_ERROR(BIF_P, BADARG);
-
- funp->next = MSO(BIF_P).funs;
- MSO(BIF_P).funs = funp;
-
- BIF_RET(make_fun(funp));
-}
-#endif
/*
* args: Module, {Uniq, Index, BeamAddress}
@@ -1163,22 +974,6 @@ BIF_RETTYPE hipe_bifs_set_native_address_in_fe_2(BIF_ALIST_2)
BIF_RET(am_true);
}
-#if 0 /* XXX: unused */
-BIF_RETTYPE hipe_bifs_make_native_stub_2(BIF_ALIST_2)
-{
- void *beamAddress;
- Uint beamArity;
- void *stubAddress;
-
- if ((beamAddress = term_to_address(BIF_ARG_1)) == 0 ||
- is_not_small(BIF_ARG_2) ||
- (beamArity = unsigned_val(BIF_ARG_2)) >= 256)
- BIF_ERROR(BIF_P, BADARG);
- stubAddress = hipe_make_native_stub(beamAddress, beamArity);
- BIF_RET(address_to_term(stubAddress, BIF_P));
-}
-#endif
-
/*
* MFA info hash table:
* - maps MFA to native code entry point
@@ -1500,18 +1295,15 @@ void hipe_mfa_save_orig_beam_op(Eterm mod, Eterm fun, unsigned int ari, Eterm *p
static void *hipe_make_stub(Eterm m, Eterm f, unsigned int arity, int is_remote)
{
- void *BEAMAddress;
+ Export *export_entry;
void *StubAddress;
-#if 0
- if (is_not_atom(m) || is_not_atom(f) || arity > 255)
- return NULL;
-#endif
- BEAMAddress = hipe_get_emu_address(m, f, arity, is_remote);
- StubAddress = hipe_make_native_stub(BEAMAddress, arity);
-#if 0
- hipe_mfa_set_na(m, f, arity, StubAddress);
-#endif
+ ASSERT(is_remote);
+
+ export_entry = erts_export_get_or_make_stub(m, f, arity);
+ StubAddress = hipe_make_native_stub(export_entry, arity);
+ if (!StubAddress)
+ erl_exit(1, "hipe_make_stub: code allocation failed\r\n");
return StubAddress;
}
diff --git a/erts/emulator/hipe/hipe_bif0.tab b/erts/emulator/hipe/hipe_bif0.tab
index 2514b1c3a5..d715a0914b 100644
--- a/erts/emulator/hipe/hipe_bif0.tab
+++ b/erts/emulator/hipe/hipe_bif0.tab
@@ -49,7 +49,6 @@ bif hipe_bifs:constants_size/0
bif hipe_bifs:merge_term/1
bif hipe_bifs:fun_to_address/1
-#bif hipe_bifs:get_emu_address/1
bif hipe_bifs:set_native_address/3
#bif hipe_bifs:address_to_fun/1
@@ -72,7 +71,6 @@ bif hipe_bifs:term_to_word/1
bif hipe_bifs:get_fe/2
bif hipe_bifs:set_native_address_in_fe/2
-#bif hipe_bifs:make_native_stub/2
bif hipe_bifs:find_na_or_make_stub/2
bif hipe_bifs:check_crc/1
diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c
index 56767ef04b..ecb34df412 100644
--- a/erts/emulator/hipe/hipe_bif1.c
+++ b/erts/emulator/hipe/hipe_bif1.c
@@ -574,22 +574,6 @@ BIF_RETTYPE hipe_bifs_pause_times_0(BIF_ALIST_0)
/* XXX: these macros have free variables */
#ifdef BM_TIMERS
-#if USE_PERFCTR
-#define MAKE_TIME(_timer_) { \
- BM_TIMER_T tmp = _timer_##_time; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
- min = (uint)tmp / 60; }
-
-#define MAKE_MICRO_TIME(_timer_) { \
- BM_TIMER_T tmp = _timer_##_time * 1000; \
- micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- tmp /= 1000; \
- milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
- sec = (uint)tmp / 1000; }
-
-#else
#define MAKE_TIME(_timer_) { \
BM_TIMER_T tmp = _timer_##_time / 1000000; \
milli = tmp % 1000; \
@@ -604,7 +588,6 @@ BIF_RETTYPE hipe_bifs_pause_times_0(BIF_ALIST_0)
milli = tmp % 1000; \
sec = tmp / 1000; }
-#endif
#else
#define MAKE_TIME(_timer_)
#define MAKE_MICRO_TIME(_timer_)
@@ -852,9 +835,6 @@ BIF_RETTYPE hipe_bifs_misc_timer_clear_0(BIF_ALIST_0)
/*
* HiPE hrvtime().
* These implementations are currently available:
- * + On Linux with the perfctr extension we can use the process'
- * virtualised time-stamp counter. To enable this mode you must
- * pass `--with-perfctr=/path/to/perfctr' when configuring.
* + The fallback, which is the same as {X,_} = runtime(statistics).
*/
@@ -866,37 +846,6 @@ static double fallback_get_hrvtime(void)
return (double)ms_user;
}
-#if USE_PERFCTR
-
-#include "hipe_perfctr.h"
-static int hrvtime_started; /* 0: closed, +1: perfctr, -1: fallback */
-#define hrvtime_is_started() (hrvtime_started != 0)
-
-static void start_hrvtime(void)
-{
- if (hipe_perfctr_hrvtime_open() >= 0)
- hrvtime_started = 1;
- else
- hrvtime_started = -1;
-}
-
-static void stop_hrvtime(void)
-{
- if (hrvtime_started > 0)
- hipe_perfctr_hrvtime_close();
- hrvtime_started = 0;
-}
-
-static double get_hrvtime(void)
-{
- if (hrvtime_started > 0)
- return hipe_perfctr_hrvtime_get();
- else
- return fallback_get_hrvtime();
-}
-
-#else /* !USE_PERFCTR */
-
/*
* Fallback, if nothing better exists.
* This is the same as {X,_} = statistics(runtime), which uses
@@ -908,8 +857,6 @@ static double get_hrvtime(void)
#define stop_hrvtime() do{}while(0)
#define get_hrvtime() fallback_get_hrvtime()
-#endif /* !USE_PERFCTR */
-
BIF_RETTYPE hipe_bifs_get_hrvtime_0(BIF_ALIST_0)
{
Eterm *hp;
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 5f92b6bac4..96a849621f 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -277,7 +277,10 @@ ifelse($1,list_to_binary_1,hipe_wrapper_list_to_binary_1,
ifelse($1,iolist_to_binary_1,hipe_wrapper_iolist_to_binary_1,
ifelse($1,binary_list_to_bin_1,hipe_wrapper_binary_list_to_bin_1,
ifelse($1,list_to_bitstring_1,hipe_wrapper_list_to_bitstring_1,
-$1)))))))))))')
+ifelse($1,send_2,hipe_wrapper_send_2,
+ifelse($1,send_3,hipe_wrapper_send_3,
+ifelse($1,ebif_bang_2,hipe_wrapper_ebif_bang_2,
+$1))))))))))))))')
define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, CFUN($4))')
include(TARGET/`erl_bif_list.h')
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index 32694a8f97..61406b92af 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -172,8 +172,10 @@ void hipe_print_pcb(Process *p)
printf("P: 0x%0*lx\r\n", 2*(int)sizeof(long), (unsigned long)p);
printf("-----------------------------------------------\r\n");
printf("Offset| Name | Value | *Value |\r\n");
+#undef U
#define U(n,x) \
printf(" % 4d | %s | 0x%0*lx | |\r\n", (int)offsetof(Process,x), n, 2*(int)sizeof(long), (unsigned long)p->x)
+#undef P
#define P(n,x) \
printf(" % 4d | %s | 0x%0*lx | 0x%0*lx |\r\n", (int)offsetof(Process,x), n, 2*(int)sizeof(long), (unsigned long)p->x, 2*(int)sizeof(long), p->x ? (unsigned long)*(p->x) : -1UL)
@@ -231,7 +233,7 @@ void hipe_print_pcb(Process *p)
U("nsp ", hipe.nsp);
U("nstack ", hipe.nstack);
U("nstend ", hipe.nstend);
- U("ncallee ", hipe.ncallee);
+ U("ncallee ", hipe.u.ncallee);
hipe_arch_print_pcb(&p->hipe);
#endif /* HIPE */
#undef U
diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c
index 86c4068072..b10263f6e2 100644
--- a/erts/emulator/hipe/hipe_gc.c
+++ b/erts/emulator/hipe/hipe_gc.c
@@ -22,6 +22,9 @@
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
+
+#define ERL_WANT_GC_INTERNALS__
+
#include "global.h"
#include "erl_gc.h"
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index 0e287908b1..ed355ce264 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -498,8 +498,8 @@ static const struct rts_param rts_params[] = {
{ 38, "P_ARG4", 1, offsetof(struct process, def_arg_reg[4]) },
{ 39, "P_ARG5", 1, offsetof(struct process, def_arg_reg[5]) },
{ 40, "P_NSP", 1, offsetof(struct process, hipe.nsp) },
- { 41, "P_NCALLEE", 1, offsetof(struct process, hipe.ncallee) },
- { 42, "P_CLOSURE", 1, offsetof(struct process, hipe.closure) },
+ { 41, "P_NCALLEE", 1, offsetof(struct process, hipe.u.ncallee) },
+ { 42, "P_CLOSURE", 1, offsetof(struct process, hipe.u.closure) },
{ 43, "P_NSP_LIMIT", 1, offsetof(struct process, hipe.nstack) },
{ 44, "P_CSP",
#if defined(__i386__) || defined(__x86_64__)
@@ -524,6 +524,7 @@ static const struct rts_param rts_params[] = {
},
{ 49, "P_MSG_FIRST", 1, offsetof(struct process, msg.first) },
{ 50, "P_MSG_SAVE", 1, offsetof(struct process, msg.save) },
+ { 51, "P_CALLEE_EXP", 1, offsetof(struct process, hipe.u.callee_exp) },
};
#define NR_PARAMS ARRAY_SIZE(rts_params)
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 1ae1d17b7f..8c73312d45 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -140,7 +140,6 @@ void hipe_check_pcb(Process *p, const char *file, unsigned line)
#endif /* HIPE_DEBUG > 0 */
/* ensure that at least nwords words are available on the native stack */
-static void hipe_check_nstack(Process *p, unsigned nwords);
#if defined(__sparc__)
#include "hipe_sparc_glue.h"
@@ -159,7 +158,7 @@ static void hipe_check_nstack(Process *p, unsigned nwords);
Uint hipe_beam_pc_return[1]; /* needed in hipe_debug.c */
Uint hipe_beam_pc_throw[1]; /* needed in hipe_debug.c */
Uint hipe_beam_pc_resume[1]; /* needed by hipe_set_timeout() */
-static Eterm hipe_beam_catch_throw;
+Eterm hipe_beam_catch_throw;
void hipe_mode_switch_init(void)
{
@@ -185,22 +184,6 @@ void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure)
bfun[-4] = (Uint)nfun;
}
-void hipe_reserve_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
-{
- if (!hipe_bifcall_from_native_is_recursive(p))
- return;
-
- /* ensure that at least 2 words are available on the BEAM stack */
- if ((p->stop - 2) < p->htop) {
- DPRINTF("calling gc to reserve BEAM stack size");
- p->fcalls -= erts_garbage_collect(p, 2, reg, arity);
- ASSERT(!((p->stop - 2) < p->htop));
- }
- p->stop -= 2;
- p->stop[0] = NIL;
- p->stop[1] = hipe_beam_catch_throw;
-}
-
static __inline__ void
hipe_push_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
{
@@ -223,15 +206,6 @@ hipe_push_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
p->cp = hipe_beam_pc_return;
}
-void hipe_unreserve_beam_trap_frame(Process *p)
-{
- if (!hipe_bifcall_from_native_is_recursive(p))
- return;
-
- ASSERT(p->stop[0] == NIL && p->stop[1] == hipe_beam_catch_throw);
- p->stop += 2;
-}
-
static __inline__ void hipe_pop_beam_trap_frame(Process *p)
{
ASSERT(p->stop[1] == hipe_beam_catch_throw);
@@ -265,14 +239,14 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
/* BEAM calls a native code function */
unsigned arity = cmd >> 8;
- /* p->hipe.ncallee set in beam_emu */
+ /* p->hipe.u.ncallee set in beam_emu */
if (p->cp == hipe_beam_pc_return) {
/* Native called BEAM, which now tailcalls native. */
hipe_pop_beam_trap_frame(p);
result = hipe_tailcall_to_native(p, arity, reg);
break;
}
- DPRINTF("calling %#lx/%u", (long)p->hipe.ncallee, arity);
+ DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
result = hipe_call_to_native(p, arity, reg);
break;
}
@@ -290,18 +264,18 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
arity -= funp->num_free; /* arity == #formals */
reg[arity] = fun;
++arity; /* correct for having added the closure */
- /* HIPE_ASSERT(p->hipe.ncallee == (void(*)(void))funp->native_address); */
+ /* HIPE_ASSERT(p->hipe.u.ncallee == (void(*)(void))funp->native_address); */
/* just like a normal call from now on */
- /* p->hipe.ncallee set in beam_emu */
+ /* p->hipe.u.ncallee set in beam_emu */
if (p->cp == hipe_beam_pc_return) {
/* Native called BEAM, which now tailcalls native. */
hipe_pop_beam_trap_frame(p);
result = hipe_tailcall_to_native(p, arity, reg);
break;
}
- DPRINTF("calling %#lx/%u", (long)p->hipe.ncallee, arity);
+ DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
result = hipe_call_to_native(p, arity, reg);
break;
}
@@ -404,13 +378,13 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
if (is_recursive)
hipe_push_beam_trap_frame(p, reg, p->arity);
- result = HIPE_MODE_SWITCH_RES_CALL;
+ result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
break;
}
- case HIPE_MODE_SWITCH_RES_CALL: {
+ case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: {
/* Native code calls or tailcalls BEAM.
*
- * p->i is the callee's BEAM code
+ * p->hipe.u.callee_exp is the callee's export entry
* p->arity is the callee's arity
* p->def_arg_reg[] contains the register parameters
* p->hipe.nsp[] contains the stacked parameters
@@ -430,15 +404,15 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
* F(A1, ..., AN, FV1, ..., FVM, Closure)
* (Where Ai is argument i and FVj is free variable j)
*
- * p->hipe.closure contains the closure
+ * p->hipe.u.closure contains the closure
* p->def_arg_reg[] contains the register parameters
* p->hipe.nsp[] contains the stacked parameters
*/
ErlFunThing *closure;
unsigned num_free, arity, i, is_recursive;
- HIPE_ASSERT(is_fun(p->hipe.closure));
- closure = (ErlFunThing*)fun_val(p->hipe.closure);
+ HIPE_ASSERT(is_fun(p->hipe.u.closure));
+ closure = (ErlFunThing*)fun_val(p->hipe.u.closure);
num_free = closure->num_free;
arity = closure->fe->arity;
@@ -468,10 +442,10 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
p->i = closure->fe->address;
/* Change result code to the faster plain CALL type. */
- result = HIPE_MODE_SWITCH_RES_CALL;
+ result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
}
/* Append the closure as the last parameter. Don't increment arity. */
- reg[arity] = p->hipe.closure;
+ reg[arity] = p->hipe.u.closure;
if (is_recursive) {
/* BEAM called native, which now calls BEAM.
@@ -549,7 +523,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
}
}
HIPE_CHECK_PCB(p);
- result = HIPE_MODE_SWITCH_RES_CALL;
+ result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
p->def_arg_reg[3] = result;
return p;
}
@@ -577,7 +551,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
address = hipe_get_remote_na(mfa[0], mfa[1], arity);
if (!address)
goto do_apply_fail;
- p->hipe.ncallee = (void(*)(void)) address;
+ p->hipe.u.ncallee = (void(*)(void)) address;
result = hipe_tailcall_to_native(p, arity, reg);
goto do_return_from_native;
do_apply_fail:
@@ -607,7 +581,6 @@ static unsigned hipe_next_nstack_size(unsigned size)
}
#if 0 && defined(HIPE_NSTACK_GROWS_UP)
-#define hipe_nstack_avail(p) ((p)->hipe.nstend - (p)->hipe.nsp)
void hipe_inc_nstack(Process *p)
{
Eterm *old_nstack = p->hipe.nstack;
@@ -631,7 +604,6 @@ void hipe_inc_nstack(Process *p)
#endif
#if defined(HIPE_NSTACK_GROWS_DOWN)
-#define hipe_nstack_avail(p) ((unsigned)((p)->hipe.nsp - (p)->hipe.nstack))
void hipe_inc_nstack(Process *p)
{
unsigned old_size = p->hipe.nstend - p->hipe.nstack;
@@ -663,12 +635,6 @@ void hipe_empty_nstack(Process *p)
p->hipe.nstend = NULL;
}
-static void hipe_check_nstack(Process *p, unsigned nwords)
-{
- while (hipe_nstack_avail(p) < nwords)
- hipe_inc_nstack(p);
-}
-
void hipe_set_closure_stub(ErlFunEntry *fe, unsigned num_free)
{
unsigned arity;
diff --git a/erts/emulator/hipe/hipe_mode_switch.h b/erts/emulator/hipe/hipe_mode_switch.h
index 06721e3c04..b8de12fcbb 100644
--- a/erts/emulator/hipe/hipe_mode_switch.h
+++ b/erts/emulator/hipe/hipe_mode_switch.h
@@ -31,7 +31,7 @@
/* result codes for beam_emu <- hipe_mode_switch() return */
#define HIPE_MODE_SWITCH_RES_RETURN 4
-#define HIPE_MODE_SWITCH_RES_CALL 5
+#define HIPE_MODE_SWITCH_RES_CALL_EXPORTED 5
#define HIPE_MODE_SWITCH_RES_THROW 6
/* additional result codes for hipe_mode_switch() <- native return */
@@ -45,6 +45,8 @@
#define HIPE_MODE_SWITCH_RES_APPLY 13 /* mode_switch <- native */
+#define HIPE_MODE_SWITCH_RES_CALL_BEAM 14
+
#ifndef ASM
#include "error.h"
@@ -59,13 +61,58 @@ void hipe_empty_nstack(Process *p);
void hipe_set_closure_stub(ErlFunEntry *fe, unsigned num_free);
Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s);
-void hipe_reserve_beam_trap_frame(Process*, Eterm reg[], unsigned arity);
-void hipe_unreserve_beam_trap_frame(Process*);
+ERTS_GLB_INLINE void hipe_reserve_beam_trap_frame(Process*, Eterm reg[], unsigned arity);
+ERTS_GLB_INLINE void hipe_unreserve_beam_trap_frame(Process*);
extern Uint hipe_beam_pc_return[];
extern Uint hipe_beam_pc_throw[];
extern Uint hipe_beam_pc_resume[];
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#include "erl_gc.h"
+#include "hipe_stack.h"
+
+#if defined(__sparc__)
+#include "hipe_sparc_glue.h"
+#elif defined(__i386__)
+#include "hipe_x86_glue.h"
+#elif defined(__x86_64__)
+#include "hipe_amd64_glue.h"
+#elif defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)
+#include "hipe_ppc_glue.h"
+#elif defined(__arm__)
+#include "hipe_arm_glue.h"
+#endif
+
+extern Eterm hipe_beam_catch_throw;
+
+ERTS_GLB_INLINE void hipe_reserve_beam_trap_frame(Process *p, Eterm reg[], unsigned arity)
+{
+ if (!hipe_bifcall_from_native_is_recursive(p))
+ return;
+
+ /* ensure that at least 2 words are available on the BEAM stack */
+ if ((p->stop - 2) < p->htop) {
+ p->fcalls -= erts_garbage_collect(p, 2, reg, arity);
+ ASSERT(!((p->stop - 2) < p->htop));
+ }
+ p->stop -= 2;
+ p->stop[0] = NIL;
+ p->stop[1] = hipe_beam_catch_throw;
+}
+
+ERTS_GLB_INLINE void hipe_unreserve_beam_trap_frame(Process *p)
+{
+ if (!hipe_bifcall_from_native_is_recursive(p))
+ return;
+
+ ASSERT(p->stop[0] == NIL && p->stop[1] == hipe_beam_catch_throw);
+ p->stop += 2;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif /* ASM */
#endif /* HIPE_MODE_SWITCH_H */
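
The header change above splits the old RES_CALL result into HIPE_MODE_SWITCH_RES_CALL_EXPORTED (native code calls BEAM through the export entry stored in p->hipe.u.callee_exp) and HIPE_MODE_SWITCH_RES_CALL_BEAM (the BEAM address is already in p->i). A self-contained dispatch sketch; the logic is illustrative only, not the actual beam_emu code:

#include <stdio.h>

#define RES_CALL_EXPORTED 5      /* values as defined in the header above */
#define RES_CALL_BEAM     14

static void dispatch(int result)
{
    switch (result) {
    case RES_CALL_EXPORTED:
        puts("resolve address via callee_exp's export entry, then call");
        break;
    case RES_CALL_BEAM:
        puts("jump to the BEAM address already stored in p->i");
        break;
    default:
        puts("other mode-switch result");
    }
}

int main(void)
{
    dispatch(RES_CALL_EXPORTED);
    dispatch(RES_CALL_BEAM);
    return 0;
}
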
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 7d343dd91e..7e8632b50d 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -330,8 +330,6 @@ char *hipe_bs_allocate(int len)
Binary *bptr;
bptr = erts_bin_nrml_alloc(len);
- bptr->flags = 0;
- bptr->orig_size = len;
erts_smp_atomic_init_nob(&bptr->refc, 1);
return bptr->orig_bytes;
}
@@ -341,7 +339,6 @@ Binary *hipe_bs_reallocate(Binary* oldbptr, int newsize)
Binary *bptr;
bptr = erts_bin_realloc(oldbptr, newsize);
- bptr->orig_size = newsize;
return bptr;
}
diff --git a/erts/emulator/hipe/hipe_perfctr.c b/erts/emulator/hipe/hipe_perfctr.c
deleted file mode 100644
index 371b3fb097..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-#include "sys.h"
-#include "error.h"
-#include "global.h"
-#include "bif.h"
-#include "big.h"
-#include "erl_binary.h"
-#include "hipe_perfctr.h"
-#include "libperfctr.h"
-
-static struct vperfctr *vperfctr;
-static unsigned int have_rdtsc;
-static double tsc_to_ms;
-static unsigned int tsc_on; /* control calls must set tsc_on if have_rdtsc is true */
-static unsigned int nractrs;
-static unsigned int users;
-#define USER_BIFS (1<<0)
-#define USER_HRVTIME (1<<1)
-
-static int hipe_perfctr_open(unsigned int user)
-{
- struct perfctr_info info;
-
- if (!vperfctr) {
- vperfctr = vperfctr_open();
- if (!vperfctr)
- return -1;
- if (vperfctr_info(vperfctr, &info) >= 0) {
- tsc_to_ms = (double)(info.tsc_to_cpu_mult ? : 1) / (double)info.cpu_khz;
- have_rdtsc = (info.cpu_features & PERFCTR_FEATURE_RDTSC) ? 1 : 0;
- }
- tsc_on = 0;
- nractrs = 0;
- }
- users |= user;
- return 0;
-}
-
-static void hipe_perfctr_reset(void)
-{
- struct vperfctr_control control;
-
- memset(&control, 0, sizeof control);
- if (have_rdtsc)
- control.cpu_control.tsc_on = 1;
- nractrs = 0;
- if (vperfctr_control(vperfctr, &control) >= 0)
- tsc_on = 1;
-}
-
-static void hipe_perfctr_close(unsigned int user)
-{
- if (!vperfctr)
- return;
- users &= ~user;
- switch (users) {
- case 0:
- vperfctr_unlink(vperfctr);
- vperfctr_close(vperfctr);
- vperfctr = NULL;
- tsc_on = 0;
- nractrs = 0;
- break;
- case USER_HRVTIME:
- hipe_perfctr_reset();
- }
-}
-
-/*
- * Interface for HiPE's hrvtime code.
- */
-
-int hipe_perfctr_hrvtime_open(void)
-{
- if (hipe_perfctr_open(USER_HRVTIME) < 0)
- return -1;
- if (have_rdtsc) {
- if (!tsc_on)
- hipe_perfctr_reset(); /* note: updates tsc_on */
- if (tsc_on)
- return 0;
- }
- hipe_perfctr_hrvtime_close();
- return -1;
-}
-
-void hipe_perfctr_hrvtime_close(void)
-{
- hipe_perfctr_close(USER_HRVTIME);
-}
-
-double hipe_perfctr_hrvtime_get(void)
-{
- return (double)vperfctr_read_tsc(vperfctr) * tsc_to_ms;
-}
-
-/*
- * BIF interface for user-programmable performance counters.
- */
-
-BIF_RETTYPE hipe_bifs_vperfctr_open_0(BIF_ALIST_0)
-{
- if (hipe_perfctr_open(USER_BIFS) < 0)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- BIF_RET(am_true);
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_close_0(BIF_ALIST_0)
-{
- hipe_perfctr_close(USER_BIFS);
- BIF_RET(NIL);
-}
-
-static Eterm ull_to_integer(unsigned long long x, Process *p)
-{
- unsigned long long tmpx;
- unsigned int ds, i;
- size_t sz;
- Eterm *hp;
- ErtsDigit *xp;
-
- if (x <= (unsigned long long)MAX_SMALL)
- return make_small(x);
-
- /* Calculate number of digits. */
- ds = 0;
- tmpx = x;
- do {
- ++ds;
- tmpx = (tmpx >> (D_EXP / 2)) >> (D_EXP / 2);
- } while (tmpx != 0);
-
- sz = BIG_NEED_SIZE(ds); /* number of words including arity */
- hp = HAlloc(p, sz);
- *hp = make_pos_bignum_header(sz-1);
-
- xp = (ErtsDigit*)(hp+1);
- i = 0;
- do {
- xp[i++] = (ErtsDigit)x;
- x = (x >> (D_EXP / 2)) >> (D_EXP / 2);
- } while (i < ds);
- while (i & (BIG_DIGITS_PER_WORD-1))
- xp[i++] = 0;
-
- return make_big(hp);
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_info_0(BIF_ALIST_0)
-{
- struct perfctr_info info;
-
- if (!vperfctr || vperfctr_info(vperfctr, &info) < 0)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- BIF_RET(new_binary(BIF_P, (void*)&info, sizeof info));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_read_tsc_0(BIF_ALIST_0)
-{
- unsigned long long val;
-
- if (!vperfctr || !tsc_on)
- BIF_RET(am_false); /* arity 0 BIFs can't fail :-( */
- val = vperfctr_read_tsc(vperfctr);
- BIF_RET(ull_to_integer(val, BIF_P));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_read_pmc_1(BIF_ALIST_1)
-{
- Uint pmc;
- unsigned long long val;
-
- if (!vperfctr ||
- is_not_small(BIF_ARG_1) ||
- (pmc = unsigned_val(BIF_ARG_1), pmc >= nractrs))
- BIF_RET(am_false); /* for consistency with the arity 0 BIFs */
- val = vperfctr_read_pmc(vperfctr, pmc);
- BIF_RET(ull_to_integer(val, BIF_P));
-}
-
-BIF_RETTYPE hipe_bifs_vperfctr_control_1(BIF_ALIST_1)
-{
- void *bytes;
- struct vperfctr_control control;
- Uint bitoffs;
- Uint bitsize;
-
- if (!vperfctr)
- BIF_ERROR(BIF_P, BADARG);
- if (is_not_binary(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
- if (binary_size(BIF_ARG_1) != sizeof control)
- BIF_ERROR(BIF_P, BADARG);
- ERTS_GET_BINARY_BYTES(BIF_ARG_1, bytes, bitoffs, bitsize);
- ASSERT(bitoffs == 0);
- ASSERT(bitsize == 0);
- memcpy(&control, bytes, sizeof control);
- if (have_rdtsc)
- control.cpu_control.tsc_on = 1;
- if (vperfctr_control(vperfctr, &control) < 0) {
- hipe_perfctr_reset();
- BIF_ERROR(BIF_P, BADARG);
- }
- tsc_on = control.cpu_control.tsc_on;
- nractrs = control.cpu_control.nractrs;
- BIF_RET(NIL);
-}
diff --git a/erts/emulator/hipe/hipe_perfctr.h b/erts/emulator/hipe/hipe_perfctr.h
deleted file mode 100644
index 8fbf9ecf35..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-extern int hipe_perfctr_hrvtime_open(void);
-extern void hipe_perfctr_hrvtime_close(void);
-extern double hipe_perfctr_hrvtime_get(void);
diff --git a/erts/emulator/hipe/hipe_perfctr.tab b/erts/emulator/hipe/hipe_perfctr.tab
deleted file mode 100644
index eaecea4651..0000000000
--- a/erts/emulator/hipe/hipe_perfctr.tab
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2004-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-
-bif hipe_bifs:vperfctr_open/0
-bif hipe_bifs:vperfctr_close/0
-bif hipe_bifs:vperfctr_info/0
-bif hipe_bifs:vperfctr_read_tsc/0
-bif hipe_bifs:vperfctr_read_pmc/1
-bif hipe_bifs:vperfctr_control/1
diff --git a/erts/emulator/hipe/hipe_ppc.c b/erts/emulator/hipe/hipe_ppc.c
index 2d8fd61e1e..1eaa9f6855 100644
--- a/erts/emulator/hipe/hipe_ppc.c
+++ b/erts/emulator/hipe/hipe_ppc.c
@@ -285,7 +285,7 @@ int hipe_patch_insn(void *address, Uint64 value, Eterm type)
}
}
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
@@ -293,17 +293,19 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
abort();
code = alloc_stub(7);
+ if (!code)
+ return NULL;
- /* addis r12,0,beamAddress@highest */
- code[0] = 0x3d800000 | (((unsigned long)beamAddress >> 48) & 0xffff);
- /* ori r12,r12,beamAddress@higher */
- code[1] = 0x618c0000 | (((unsigned long)beamAddress >> 32) & 0xffff);
+ /* addis r12,0,callee_exp@highest */
+ code[0] = 0x3d800000 | (((unsigned long)callee_exp >> 48) & 0xffff);
+ /* ori r12,r12,callee_exp@higher */
+ code[1] = 0x618c0000 | (((unsigned long)callee_exp >> 32) & 0xffff);
/* sldi r12,r12,32 (rldicr r12,r12,32,31) */
code[2] = 0x798c07c6;
- /* oris r12,r12,beamAddress@h */
- code[3] = 0x658c0000 | (((unsigned long)beamAddress >> 16) & 0xffff);
- /* ori r12,r12,beamAddress@l */
- code[4] = 0x618c0000 | ((unsigned long)beamAddress & 0xffff);
+ /* oris r12,r12,callee_exp@h */
+ code[3] = 0x658c0000 | (((unsigned long)callee_exp >> 16) & 0xffff);
+ /* ori r12,r12,callee_exp@l */
+ code[4] = 0x618c0000 | ((unsigned long)callee_exp & 0xffff);
/* addi r0,0,beamArity */
code[5] = 0x38000000 | (beamArity & 0x7FFF);
/* ba nbif_callemu */
@@ -355,18 +357,16 @@ int hipe_patch_insn(void *address, Uint32 value, Eterm type)
return 0;
}
-/* called from hipe_bif0.c:hipe_bifs_make_native_stub_2()
- and hipe_bif0.c:hipe_make_stub() */
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
/*
* Native code calls BEAM via a stub looking as follows:
*
- * addi r12,0,beamAddress@l
+ * addi r12,0,callee_exp@l
* addi r0,0,beamArity
- * addis r12,r12,beamAddress@ha
+ * addis r12,r12,callee_exp@ha
* ba nbif_callemu
*
* I'm using r0 and r12 since the standard SVR4 ABI allows
@@ -383,13 +383,15 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
abort();
code = alloc_stub(4);
+ if (!code)
+ return NULL;
- /* addi r12,0,beamAddress@l */
- code[0] = 0x39800000 | ((unsigned long)beamAddress & 0xFFFF);
+ /* addi r12,0,callee_exp@l */
+ code[0] = 0x39800000 | ((unsigned long)callee_exp & 0xFFFF);
/* addi r0,0,beamArity */
code[1] = 0x38000000 | (beamArity & 0x7FFF);
- /* addis r12,r12,beamAddress@ha */
- code[2] = 0x3D8C0000 | at_ha((unsigned long)beamAddress);
+ /* addis r12,r12,callee_exp@ha */
+ code[2] = 0x3D8C0000 | at_ha((unsigned long)callee_exp);
/* ba nbif_callemu */
code[3] = 0x48000002 | (unsigned long)&nbif_callemu;
diff --git a/erts/emulator/hipe/hipe_ppc.h b/erts/emulator/hipe/hipe_ppc.h
index 66000c1846..e9d3e6564b 100644
--- a/erts/emulator/hipe/hipe_ppc.h
+++ b/erts/emulator/hipe/hipe_ppc.h
@@ -64,10 +64,6 @@ AEXTERN(void,hipe_ppc_inc_stack,(void));
extern void hipe_ppc_inc_stack(void); /* we don't have the AEXTERN() fallback :-( */
#endif
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#if !defined(__powerpc64__)
extern const unsigned int fconv_constant[];
#endif
diff --git a/erts/emulator/hipe/hipe_ppc_asm.m4 b/erts/emulator/hipe/hipe_ppc_asm.m4
index 343402f9f0..e5a56de687 100644
--- a/erts/emulator/hipe/hipe_ppc_asm.m4
+++ b/erts/emulator/hipe/hipe_ppc_asm.m4
@@ -23,6 +23,22 @@ changecom(`/*', `*/')dnl
#define HIPE_PPC_ASM_H'
/*
+ * Tunables.
+ */
+define(LEAF_WORDS,16)dnl number of stack words for leaf functions
+define(NR_ARG_REGS,4)dnl admissible values are 0 to 6, inclusive
+
+`#define PPC_LEAF_WORDS 'LEAF_WORDS
+`#define PPC_NR_ARG_REGS 'NR_ARG_REGS
+`#define NR_ARG_REGS 'NR_ARG_REGS
+
+
+`#ifdef ASM'
+/*
+ * Only assembler stuff from here on (when included from *.S)
+ */
+
+/*
* Handle 32 vs 64-bit.
*/
ifelse(ARCH,ppc64,`
@@ -53,13 +69,6 @@ define(WSIZE,4)dnl
`#define STORE 'STORE
`#define CMPI 'CMPI
-/*
- * Tunables.
- */
-define(LEAF_WORDS,16)dnl number of stack words for leaf functions
-define(NR_ARG_REGS,4)dnl admissible values are 0 to 6, inclusive
-
-`#define PPC_LEAF_WORDS 'LEAF_WORDS
/*
* Workarounds for Darwin.
@@ -193,8 +202,6 @@ NAME: \
/*
* Argument (parameter) registers.
*/
-`#define PPC_NR_ARG_REGS 'NR_ARG_REGS
-`#define NR_ARG_REGS 'NR_ARG_REGS
define(defarg,`define(ARG$1,`$2')dnl
#`define ARG'$1 $2'
@@ -273,6 +280,10 @@ define(NBIF_ARG,`ifelse(eval($3 >= NR_ARG_REGS),0,`NBIF_REG_ARG($1,$3)',`NBIF_ST
`/* #define NBIF_ARG_3_0 'NBIF_ARG(r3,3,0)` */'
`/* #define NBIF_ARG_3_1 'NBIF_ARG(r3,3,1)` */'
`/* #define NBIF_ARG_3_2 'NBIF_ARG(r3,3,2)` */'
+`/* #define NBIF_ARG_4_0 'NBIF_ARG(r3,4,0)` */'
+`/* #define NBIF_ARG_4_1 'NBIF_ARG(r3,4,1)` */'
+`/* #define NBIF_ARG_4_2 'NBIF_ARG(r3,4,2)` */'
+`/* #define NBIF_ARG_4_3 'NBIF_ARG(r3,4,3)` */'
`/* #define NBIF_ARG_5_0 'NBIF_ARG(r3,5,0)` */'
`/* #define NBIF_ARG_5_1 'NBIF_ARG(r3,5,1)` */'
`/* #define NBIF_ARG_5_2 'NBIF_ARG(r3,5,2)` */'
@@ -294,6 +305,7 @@ define(NBIF_RET,`NBIF_RET_N(eval(RET_POP($1)))')dnl
`/* #define NBIF_RET_1 'NBIF_RET(1)` */'
`/* #define NBIF_RET_2 'NBIF_RET(2)` */'
`/* #define NBIF_RET_3 'NBIF_RET(3)` */'
+`/* #define NBIF_RET_4 'NBIF_RET(4)` */'
`/* #define NBIF_RET_5 'NBIF_RET(5)` */'
dnl
@@ -309,4 +321,6 @@ define(QUICK_CALL_RET,`NBIF_POP_N(eval(RET_POP($2)))b $1')dnl
`/* #define QUICK_CALL_RET_F_3 'QUICK_CALL_RET(F,3)` */'
`/* #define QUICK_CALL_RET_F_5 'QUICK_CALL_RET(F,5)` */'
+`#endif /* ASM */'
+
`#endif /* HIPE_PPC_ASM_H */'
diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4
index 7cc2b5c7b6..b173b896b8 100644
--- a/erts/emulator/hipe/hipe_ppc_bifs.m4
+++ b/erts/emulator/hipe/hipe_ppc_bifs.m4
@@ -19,6 +19,7 @@ changecom(`/*', `*/')dnl
*/
+#`define ASM'
include(`hipe/hipe_ppc_asm.m4')
#`include' "config.h"
#`include' "hipe_literals.h"
@@ -45,9 +46,10 @@ define(HANDLE_GOT_MBUF,`
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_4(nbif_name, cbif_name)
* standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 0-3 parameters and
+ * Generate native interface for a BIF with 0-4 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -143,6 +145,41 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
+define(standard_bif_interface_4,
+`
+#ifndef HAVE_$1
+#`define' HAVE_$1
+ GLOBAL(ASYM($1))
+ASYM($1):
+ /* Set up C argument registers. */
+ mr r3, P
+ NBIF_ARG(r4,4,0)
+ NBIF_ARG(r5,4,1)
+ NBIF_ARG(r6,4,2)
+ NBIF_ARG(r7,4,3)
+
+ /* Save caller-save registers and call the C function. */
+ SAVE_CONTEXT_BIF
+ STORE r4, P_ARG0(r3) /* Store BIF__ARGS in def_arg_reg[] */
+ STORE r5, P_ARG1(r3)
+ STORE r6, P_ARG2(r3)
+ STORE r7, P_ARG3(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
+ TEST_GOT_MBUF
+
+ /* Restore registers. Check for exception. */
+ CMPI r3, THE_NON_VALUE
+ RESTORE_CONTEXT_BIF
+ beq- 1f
+ NBIF_RET(4)
+1: /* workaround for bc's small offset operand */
+ b CSYM(nbif_4_simple_exception)
+ HANDLE_GOT_MBUF(4)
+ SET_SIZE(ASYM($1))
+ TYPE_FUNCTION(ASYM($1))
+#endif')
+
define(standard_bif_interface_0,
`
#ifndef HAVE_$1
diff --git a/erts/emulator/hipe/hipe_ppc_glue.S b/erts/emulator/hipe/hipe_ppc_glue.S
index 6f0217c738..b07f4bc9c8 100644
--- a/erts/emulator/hipe/hipe_ppc_glue.S
+++ b/erts/emulator/hipe/hipe_ppc_glue.S
@@ -17,10 +17,9 @@
* %CopyrightEnd%
*/
-
+#define ASM
#include "hipe_ppc_asm.h"
#include "hipe_literals.h"
-#define ASM
#include "hipe_mode_switch.h"
.text
@@ -296,7 +295,7 @@ CSYM(hipe_ppc_throw_to_native):
* which should look as follows:
*
* stub for f/N:
- * <set r12 to f's BEAM code address>
+ * <set r12 to f's export entry address>
* <set r0 to N>
* b nbif_callemu
*
@@ -312,10 +311,10 @@ CSYM(hipe_ppc_throw_to_native):
*/
GLOBAL(ASYM(nbif_callemu))
ASYM(nbif_callemu):
- STORE r12, P_BEAM_IP(P)
+ STORE r12, P_CALLEE_EXP(P)
STORE r0, P_ARITY(P)
STORE_ARG_REGS
- li r3, HIPE_MODE_SWITCH_RES_CALL
+ li r3, HIPE_MODE_SWITCH_RES_CALL_EXPORTED
b .suspend_exit
/*
@@ -463,10 +462,12 @@ ASYM(nbif_fail):
OPD(nbif_1_gc_after_bif)
OPD(nbif_2_gc_after_bif)
OPD(nbif_3_gc_after_bif)
+ OPD(nbif_4_gc_after_bif)
GLOBAL(CSYM(nbif_0_gc_after_bif))
GLOBAL(CSYM(nbif_1_gc_after_bif))
GLOBAL(CSYM(nbif_2_gc_after_bif))
GLOBAL(CSYM(nbif_3_gc_after_bif))
+ GLOBAL(CSYM(nbif_4_gc_after_bif))
CSYM(nbif_0_gc_after_bif):
li r4, 0
b .gc_after_bif
@@ -478,6 +479,9 @@ CSYM(nbif_2_gc_after_bif):
b .gc_after_bif
CSYM(nbif_3_gc_after_bif):
li r4, 3
+ b .gc_after_bif
+CSYM(nbif_4_gc_after_bif):
+ li r4, 4
/*FALLTHROUGH*/
.gc_after_bif:
stw r4, P_NARITY(P) /* Note: narity is a 32-bit field */
@@ -520,6 +524,11 @@ CSYM(nbif_2_simple_exception):
GLOBAL(CSYM(nbif_3_simple_exception))
CSYM(nbif_3_simple_exception):
li r4, 3
+ b .nbif_simple_exception
+ OPD(nbif_4_simple_exception)
+ GLOBAL(CSYM(nbif_4_simple_exception))
+CSYM(nbif_4_simple_exception):
+ li r4, 4
/*FALLTHROUGH*/
.nbif_simple_exception:
LOAD r3, P_FREASON(P)
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index 4ee99d78a2..86655ad42c 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -23,14 +23,17 @@
#define HIPE_PROCESS_H
#include "erl_alloc.h"
+#include "export.h"
struct hipe_process_state {
Eterm *nsp; /* Native stack pointer. */
Eterm *nstack; /* Native stack block start. */
Eterm *nstend; /* Native stack block end (start+size). */
- /* XXX: ncallee and closure could share space in a union */
- void (*ncallee)(void); /* Native code callee (label) to invoke. */
- Eterm closure; /* Used to pass a closure from native code. */
+ union {
+ void (*ncallee)(void); /* Native code callee (label) to invoke. */
+ Eterm closure; /* Used to pass a closure from native code. */
+ Export* callee_exp; /* Used to pass export entry from native code */
+ } u;
Eterm *nstgraylim; /* Gray/white stack boundary. */
Eterm *nstblacklim; /* Black/gray stack boundary. Must exist if
graylim exists. Ignored if no graylim. */
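
The hipe_process_state change above folds ncallee, closure and callee_exp into one union, since at most one of them is live for any given mode-switch command; the P_NCALLEE, P_CLOSURE and new P_CALLEE_EXP literals therefore all resolve to the same PCB offset. A self-contained illustration with simplified stand-in types:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long Eterm;                     /* simplified stand-in */
typedef struct export { int dummy; } Export;

struct hipe_state_sketch {
    union {
        void (*ncallee)(void);   /* native code label to invoke       */
        Eterm closure;           /* closure passed from native code   */
        Export *callee_exp;      /* export entry passed from native   */
    } u;
};

int main(void)
{
    printf("union offset: %zu, union size: %zu\n",
           offsetof(struct hipe_state_sketch, u),
           sizeof(((struct hipe_state_sketch *)0)->u));
    return 0;
}
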
diff --git a/erts/emulator/hipe/hipe_risc_stack.c b/erts/emulator/hipe/hipe_risc_stack.c
index 1183856c7e..bea3a0fecd 100644
--- a/erts/emulator/hipe/hipe_risc_stack.c
+++ b/erts/emulator/hipe/hipe_risc_stack.c
@@ -226,7 +226,7 @@ void (*hipe_handle_stack_trap(Process *p))(void)
* The native stack MUST contain a stack frame as it appears on
* entry to a function (actuals, caller's frame, caller's return address).
* p->hipe.narity MUST contain the arity (number of actuals).
- * On exit, p->hipe.ncallee is set to the handler's PC and p->hipe.nsp
+ * On exit, p->hipe.u.ncallee is set to the handler's PC and p->hipe.nsp
* is set to its SP (low address of its stack frame).
*/
void hipe_find_handler(Process *p)
@@ -254,7 +254,7 @@ void hipe_find_handler(Process *p)
if ((exnra = sdesc_exnra(sdesc)) != 0 &&
(p->catches >= 0 ||
exnra == (unsigned long)&nbif_fail)) {
- p->hipe.ncallee = (void(*)(void)) exnra;
+ p->hipe.u.ncallee = (void(*)(void)) exnra;
p->hipe.nsp = nsp;
p->hipe.narity = 0;
/* update the gray/white boundary if we threw past it */
diff --git a/erts/emulator/hipe/hipe_sparc.c b/erts/emulator/hipe/hipe_sparc.c
index 49d4da7bab..fea3b623a9 100644
--- a/erts/emulator/hipe/hipe_sparc.c
+++ b/erts/emulator/hipe/hipe_sparc.c
@@ -130,7 +130,7 @@ static void atexit_alloc_code_stats(void)
#define ALLOC_CODE_STATS(X) do{}while(0)
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -158,10 +158,9 @@ static void morecore(unsigned int alloc_bytes)
#endif
,
-1, 0);
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -177,6 +176,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -186,8 +187,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -204,22 +205,22 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
-/* called from hipe_bif0.c:hipe_bifs_make_native_stub_2()
- and hipe_bif0.c:hipe_make_stub() */
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
unsigned int *code;
unsigned int callEmuOffset;
int i;
code = alloc_code(5*sizeof(int));
+ if (!code)
+ return NULL;
/* sethi %hi(Address), %i4 */
- code[0] = 0x39000000 | (((unsigned int)beamAddress >> 10) & 0x3FFFFF);
+ code[0] = 0x39000000 | (((unsigned int)callee_exp >> 10) & 0x3FFFFF);
/* or %g0, %o7, %i3 ! mov %o7, %i3 */
code[1] = 0xB610000F;
/* or %i4, %lo(Address), %i4 */
- code[2] = 0xB8172000 | ((unsigned int)beamAddress & 0x3FF);
+ code[2] = 0xB8172000 | ((unsigned int)callee_exp & 0x3FF);
/* call callemu */
callEmuOffset = (char*)nbif_callemu - (char*)&code[3];
code[3] = (1 << 30) | ((callEmuOffset >> 2) & 0x3FFFFFFF);
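
The allocator changes above turn an mmap failure from an abort() into an error that propagates outward: morecore() returns -1, alloc_code() and hipe_make_native_stub() return NULL, and the callers in hipe_bif0.c decide how to fail. A simplified, self-contained analogue of that error path (malloc stands in for the real mmap-based code pool):

#include <stdio.h>
#include <stdlib.h>

static int morecore(size_t nbytes)      /* 0 on success, -1 on failure */
{
    void *p = malloc(nbytes);           /* stand-in for the mmap() call */
    if (p == NULL)
        return -1;
    free(p);                            /* sketch only: real code keeps the mapping */
    return 0;
}

static void *alloc_code(size_t nbytes)
{
    if (morecore(nbytes) != 0)
        return NULL;                    /* caller chooses how to fail */
    return malloc(nbytes);
}

int main(void)
{
    void *code = alloc_code(64);
    if (!code) {
        fprintf(stderr, "code allocation failed\n");
        return 1;
    }
    free(code);
    puts("ok");
    return 0;
}
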
diff --git a/erts/emulator/hipe/hipe_sparc.h b/erts/emulator/hipe/hipe_sparc.h
index 1134b86004..2d92ca3ca8 100644
--- a/erts/emulator/hipe/hipe_sparc.h
+++ b/erts/emulator/hipe/hipe_sparc.h
@@ -47,8 +47,4 @@ static __inline__ int hipe_word32_address_ok(void *address)
extern void hipe_sparc_inc_stack(void);
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_SPARC_H */
diff --git a/erts/emulator/hipe/hipe_sparc_asm.m4 b/erts/emulator/hipe/hipe_sparc_asm.m4
index 227d10ed80..8020104e40 100644
--- a/erts/emulator/hipe/hipe_sparc_asm.m4
+++ b/erts/emulator/hipe/hipe_sparc_asm.m4
@@ -29,6 +29,14 @@ define(LEAF_WORDS,16)dnl number of stack words for leaf functions
define(NR_ARG_REGS,4)dnl admissible values are 0 to 6, inclusive
`#define SPARC_LEAF_WORDS 'LEAF_WORDS
+`#define SPARC_NR_ARG_REGS 'NR_ARG_REGS
+`#define NR_ARG_REGS 'NR_ARG_REGS
+
+
+`#ifdef ASM'
+/*
+ * Only assembler stuff from here on (when included from *.S)
+ */
/*
* Reserved registers.
@@ -80,9 +88,6 @@ define(NR_ARG_REGS,4)dnl admissible values are 0 to 6, inclusive
/*
* Argument (parameter) registers.
*/
-`#define SPARC_NR_ARG_REGS 'NR_ARG_REGS
-`#define NR_ARG_REGS 'NR_ARG_REGS
-
define(defarg,`define(ARG$1,`$2')dnl
#`define ARG'$1 $2'
)dnl
@@ -171,6 +176,10 @@ define(NBIF_ARG,`ifelse(eval($3 >= NR_ARG_REGS),0,`NBIF_REG_ARG($1,$3)',`NBIF_ST
`/* #define NBIF_ARG_3_0 'NBIF_ARG(r1,3,0)` */'
`/* #define NBIF_ARG_3_1 'NBIF_ARG(r2,3,1)` */'
`/* #define NBIF_ARG_3_2 'NBIF_ARG(r3,3,2)` */'
+`/* #define NBIF_ARG_4_0 'NBIF_ARG(r1,4,0)` */'
+`/* #define NBIF_ARG_4_1 'NBIF_ARG(r2,4,1)` */'
+`/* #define NBIF_ARG_4_2 'NBIF_ARG(r3,4,2)` */'
+`/* #define NBIF_ARG_4_3 'NBIF_ARG(r3,4,3)` */'
`/* #define NBIF_ARG_5_0 'NBIF_ARG(r1,5,0)` */'
`/* #define NBIF_ARG_5_1 'NBIF_ARG(r2,5,1)` */'
`/* #define NBIF_ARG_5_2 'NBIF_ARG(r3,5,2)` */'
@@ -195,6 +204,7 @@ define(NBIF_RET,`NBIF_RET_N(eval(RET_POP($1)))')dnl
`/* #define NBIF_RET_1 'NBIF_RET(1)` */'
`/* #define NBIF_RET_2 'NBIF_RET(2)` */'
`/* #define NBIF_RET_3 'NBIF_RET(3)` */'
+`/* #define NBIF_RET_4 'NBIF_RET(4)` */'
`/* #define NBIF_RET_5 'NBIF_RET(5)` */'
dnl
@@ -210,4 +220,6 @@ define(QUICK_CALL_RET,`ba $1; NBIF_POP_N(eval(RET_POP($2)))')dnl
`/* #define QUICK_CALL_RET_F_3 'QUICK_CALL_RET(F,3)` */'
`/* #define QUICK_CALL_RET_F_5 'QUICK_CALL_RET(F,5)` */'
+`#endif /* ASM */'
+
`#endif /* HIPE_SPARC_ASM_H */'
diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4
index ca5af45d58..8dfb28c8e0 100644
--- a/erts/emulator/hipe/hipe_sparc_bifs.m4
+++ b/erts/emulator/hipe/hipe_sparc_bifs.m4
@@ -19,6 +19,7 @@ changecom(`/*', `*/')dnl
*/
+#`define ASM'
include(`hipe/hipe_sparc_asm.m4')
#`include' "config.h"
#`include' "hipe_literals.h"
@@ -53,9 +54,10 @@ define(HANDLE_GOT_MBUF,`
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_4(nbif_name, cbif_name)
* standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 0-3 parameters and
+ * Generate native interface for a BIF with 0-4 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -145,6 +147,39 @@ $1:
.type $1, #function
#endif')
+define(standard_bif_interface_4,
+`
+#ifndef HAVE_$1
+#`define' HAVE_$1
+ .global $1
+$1:
+ /* Set up C argument registers. */
+ mov P, %o0
+ NBIF_ARG(%o1,4,0)
+ NBIF_ARG(%o2,4,1)
+ NBIF_ARG(%o3,4,2)
+ NBIF_ARG(%o4,4,3)
+
+ /* Save caller-save registers and call the C function. */
+ SAVE_CONTEXT_BIF
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ st %o3, [%o0+P_ARG2]
+ st %o4, [%o0+P_ARG3]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
+ nop
+ TEST_GOT_MBUF
+
+ /* Restore registers. Check for exception. */
+ TEST_GOT_EXN(4)
+ RESTORE_CONTEXT_BIF
+ NBIF_RET(4)
+ HANDLE_GOT_MBUF(4)
+ .size $1, .-$1
+ .type $1, #function
+#endif')
+
define(standard_bif_interface_0,
`
#ifndef HAVE_$1
diff --git a/erts/emulator/hipe/hipe_sparc_glue.S b/erts/emulator/hipe/hipe_sparc_glue.S
index 44bdf1bc7e..094a87fd58 100644
--- a/erts/emulator/hipe/hipe_sparc_glue.S
+++ b/erts/emulator/hipe/hipe_sparc_glue.S
@@ -18,10 +18,9 @@
* %CopyrightEnd%
*/
-
+#define ASM
#include "hipe_sparc_asm.h"
#include "hipe_literals.h"
-#define ASM
#include "hipe_mode_switch.h"
.section ".text"
@@ -155,9 +154,9 @@ hipe_sparc_throw_to_native:
* which should look as follows:
*
* stub for f/N:
- * sethi %hi(f's BEAM code address), TEMP_ARG0
+ * sethi %hi(f's export entry address), TEMP_ARG0
* mov RA, TEMP_RA ! because the call below clobbers RA (%o7)
- * or TEMP_ARG0, %lo(f's BEAM code address), TEMP_ARG0
+ * or TEMP_ARG0, %lo(f's export entry address), TEMP_ARG0
* call nbif_callemu ! clobbers RA!
* mov N, TEMP_ARG1 ! delay slot: TEMP_ARG1 := ARITY
*
@@ -165,12 +164,12 @@ hipe_sparc_throw_to_native:
*/
.global nbif_callemu
nbif_callemu:
- st TEMP_ARG0, [P+P_BEAM_IP]
+ st TEMP_ARG0, [P+P_CALLEE_EXP]
st TEMP_ARG1, [P+P_ARITY]
st TEMP_RA, [P+P_NRA]
STORE_ARG_REGS
ba .flush_exit
- mov HIPE_MODE_SWITCH_RES_CALL, %o0
+ mov HIPE_MODE_SWITCH_RES_CALL_EXPORTED, %o0
/*
* nbif_apply
@@ -317,6 +316,7 @@ nbif_fail:
.global nbif_1_gc_after_bif
.global nbif_2_gc_after_bif
.global nbif_3_gc_after_bif
+ .global nbif_4_gc_after_bif
nbif_0_gc_after_bif:
ba .gc_after_bif
mov 0, %o1 /* delay slot */
@@ -327,7 +327,10 @@ nbif_2_gc_after_bif:
ba .gc_after_bif
mov 2, %o1 /* delay slot */
nbif_3_gc_after_bif:
- mov 3, %o1
+ ba .gc_after_bif
+ mov 3, %o1 /* delay slot */
+nbif_4_gc_after_bif:
+ mov 4, %o1
/*FALLTHROUGH*/
.gc_after_bif:
st %o1, [P+P_NARITY]
@@ -365,7 +368,11 @@ nbif_2_simple_exception:
mov 2, %o1 /* delay slot */
.global nbif_3_simple_exception
nbif_3_simple_exception:
- mov 3, %o1
+ ba .nbif_simple_exception
+ mov 3, %o1 /* delay slot */
+ .global nbif_4_simple_exception
+nbif_4_simple_exception:
+ mov 4, %o1
/*FALLTHROUGH*/
.nbif_simple_exception:
ld [P+P_FREASON], %o0
diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h
index 66f9f04c73..4cfdb54dd8 100644
--- a/erts/emulator/hipe/hipe_stack.h
+++ b/erts/emulator/hipe/hipe_stack.h
@@ -108,12 +108,23 @@ extern int hipe_fill_stacktrace(Process*, int, Eterm**);
#if 0 && defined(HIPE_NSTACK_GROWS_UP)
#define hipe_nstack_start(p) ((p)->hipe.nstack)
#define hipe_nstack_used(p) ((p)->hipe.nsp - (p)->hipe.nstack)
+#define hipe_nstack_avail(p) ((p)->hipe.nstend - (p)->hipe.nsp)
#endif
#if defined(HIPE_NSTACK_GROWS_DOWN)
#define hipe_nstack_start(p) ((p)->hipe.nsp)
#define hipe_nstack_used(p) ((p)->hipe.nstend - (p)->hipe.nsp)
+#define hipe_nstack_avail(p) ((unsigned)((p)->hipe.nsp - (p)->hipe.nstack))
#endif
+/* ensure that at least nwords words are available on the native stack */
+static __inline__ void hipe_check_nstack(Process *p, unsigned nwords)
+{
+ extern void hipe_inc_nstack(Process *p);
+
+ while (hipe_nstack_avail(p) < nwords)
+ hipe_inc_nstack(p);
+}
+
/*
* GC support procedures
*/
diff --git a/erts/emulator/hipe/hipe_x86.c b/erts/emulator/hipe/hipe_x86.c
index 327c74e9aa..998905ea63 100644
--- a/erts/emulator/hipe/hipe_x86.c
+++ b/erts/emulator/hipe/hipe_x86.c
@@ -108,7 +108,7 @@ static void atexit_alloc_code_stats(void)
#define MAP_ANONYMOUS MAP_ANON
#endif
-static void morecore(unsigned int alloc_bytes)
+static int morecore(unsigned int alloc_bytes)
{
unsigned int map_bytes;
char *map_hint, *map_start;
@@ -136,10 +136,9 @@ static void morecore(unsigned int alloc_bytes)
#endif
,
-1, 0);
- if (map_start == MAP_FAILED) {
- perror("mmap");
- abort();
- }
+ if (map_start == MAP_FAILED)
+ return -1;
+
ALLOC_CODE_STATS(total_mapped += map_bytes);
/* Merge adjacent mappings, so the trailing portion of the previous
@@ -155,6 +154,8 @@ static void morecore(unsigned int alloc_bytes)
}
ALLOC_CODE_STATS(atexit_alloc_code_stats());
+
+ return 0;
}
static void *alloc_code(unsigned int alloc_bytes)
@@ -164,8 +165,8 @@ static void *alloc_code(unsigned int alloc_bytes)
/* Align function entries. */
alloc_bytes = (alloc_bytes + 3) & ~3;
- if (code_bytes < alloc_bytes)
- morecore(alloc_bytes);
+ if (code_bytes < alloc_bytes && morecore(alloc_bytes) != 0)
+ return NULL;
ALLOC_CODE_STATS(++nr_allocs);
ALLOC_CODE_STATS(total_alloc += alloc_bytes);
res = code_next;
@@ -182,18 +183,16 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
return alloc_code(nrbytes);
}
-/* called from hipe_bif0.c:hipe_bifs_make_native_stub_2()
- and hipe_bif0.c:hipe_make_stub() */
-void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
+void *hipe_make_native_stub(void *callee_exp, unsigned int beamArity)
{
/*
* This creates a native code stub with the following contents:
*
- * movl $Address, P_BEAM_IP(%ebp)
+ * movl $Address, P_CALLEE_EXP(%ebp)
* movb $Arity, P_ARITY(%ebp)
* jmp callemu
*
- * The stub has variable size, depending on whether the P_BEAM_IP
+ * The stub has variable size, depending on whether the P_CALLEE_EXP
* and P_ARITY offsets fit in 8-bit signed displacements or not.
* The rel32 offset in the final jmp depends on its actual location,
* which also depends on the size of the previous instructions.
@@ -206,28 +205,30 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
codeSize = /* 16, 19, or 22 bytes */
16 + /* 16 when both offsets are 8-bit */
- (P_BEAM_IP >= 128 ? 3 : 0) +
+ (P_CALLEE_EXP >= 128 ? 3 : 0) +
(P_ARITY >= 128 ? 3 : 0);
codep = code = alloc_code(codeSize);
+ if (!code)
+ return NULL;
- /* movl $beamAddress, P_BEAM_IP(%ebp); 3 or 6 bytes, plus 4 */
+ /* movl $beamAddress, P_CALLEE_EXP(%ebp); 3 or 6 bytes, plus 4 */
codep[0] = 0xc7;
-#if P_BEAM_IP >= 128
+#if P_CALLEE_EXP >= 128
codep[1] = 0x85; /* disp32[EBP] */
- codep[2] = P_BEAM_IP & 0xFF;
- codep[3] = (P_BEAM_IP >> 8) & 0xFF;
- codep[4] = (P_BEAM_IP >> 16) & 0xFF;
- codep[5] = (P_BEAM_IP >> 24) & 0xFF;
+ codep[2] = P_CALLEE_EXP & 0xFF;
+ codep[3] = (P_CALLEE_EXP >> 8) & 0xFF;
+ codep[4] = (P_CALLEE_EXP >> 16) & 0xFF;
+ codep[5] = (P_CALLEE_EXP >> 24) & 0xFF;
codep += 6;
#else
codep[1] = 0x45; /* disp8[EBP] */
- codep[2] = P_BEAM_IP;
+ codep[2] = P_CALLEE_EXP;
codep += 3;
#endif
- codep[0] = ((unsigned int)beamAddress) & 0xFF;
- codep[1] = ((unsigned int)beamAddress >> 8) & 0xFF;
- codep[2] = ((unsigned int)beamAddress >> 16) & 0xFF;
- codep[3] = ((unsigned int)beamAddress >> 24) & 0xFF;
+ codep[0] = ((unsigned int)callee_exp) & 0xFF;
+ codep[1] = ((unsigned int)callee_exp >> 8) & 0xFF;
+ codep[2] = ((unsigned int)callee_exp >> 16) & 0xFF;
+ codep[3] = ((unsigned int)callee_exp >> 24) & 0xFF;
codep += 4;
/* movb $beamArity, P_ARITY(%ebp); 3 or 6 bytes */
diff --git a/erts/emulator/hipe/hipe_x86.h b/erts/emulator/hipe/hipe_x86.h
index 97f09e38cd..f29117d0c4 100644
--- a/erts/emulator/hipe/hipe_x86.h
+++ b/erts/emulator/hipe/hipe_x86.h
@@ -53,8 +53,4 @@ extern void nbif_inc_stack_0(void);
extern void nbif_handle_fp_exception(void);
#endif
-/* for hipe_bifs_enter_code_2 */
-extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
-#define HIPE_ALLOC_CODE(n,c,t,p) hipe_alloc_code((n),(c),(t),(p))
-
#endif /* HIPE_X86_H */
diff --git a/erts/emulator/hipe/hipe_x86_asm.m4 b/erts/emulator/hipe/hipe_x86_asm.m4
index 020ccf8d4b..436feca506 100644
--- a/erts/emulator/hipe/hipe_x86_asm.m4
+++ b/erts/emulator/hipe/hipe_x86_asm.m4
@@ -33,6 +33,18 @@ define(SIMULATE_NSP,0)dnl change to 1 to simulate call/ret insns
`#define X86_LEAF_WORDS 'LEAF_WORDS
`#define LEAF_WORDS 'LEAF_WORDS
+`#define X86_NR_ARG_REGS 'NR_ARG_REGS
+`#define NR_ARG_REGS 'NR_ARG_REGS
+
+`#define X86_HP_IN_ESI 'HP_IN_ESI
+`#define X86_SIMULATE_NSP 'SIMULATE_NSP
+
+
+`#ifdef ASM'
+/*
+ * Only assembler stuff from here on (when included from *.S)
+ */
+
/*
* Workarounds for Darwin.
*/
@@ -60,7 +72,6 @@ ifelse(OPSYS,darwin,``
*/
`#define P %ebp'
-`#define X86_HP_IN_ESI 'HP_IN_ESI
`#if X86_HP_IN_ESI
#define SAVE_HP movl %esi, P_HP(P)
#define RESTORE_HP movl P_HP(P), %esi
@@ -73,7 +84,6 @@ ifelse(OPSYS,darwin,``
#define SAVE_CSP movl %esp, P_CSP(P)
#define RESTORE_CSP movl P_CSP(P), %esp'
-`#define X86_SIMULATE_NSP 'SIMULATE_NSP
/*
* Context switching macros.
@@ -100,12 +110,10 @@ ifelse(OPSYS,darwin,``
SAVE_CACHED_STATE; \
SWITCH_ERLANG_TO_C_QUICK'
+
/*
* Argument (parameter) registers.
*/
-`#define X86_NR_ARG_REGS 'NR_ARG_REGS
-`#define NR_ARG_REGS 'NR_ARG_REGS
-
ifelse(eval(NR_ARG_REGS >= 1),0,,
``#define ARG0 %eax
'')dnl
@@ -204,6 +212,7 @@ define(NBIF_COPY_NSP,`ifelse(eval($1 > NR_ARG_REGS),0,,`movl %esp, TEMP_NSP')')d
`/* #define NBIF_COPY_NSP_1 'NBIF_COPY_NSP(1)` */'
`/* #define NBIF_COPY_NSP_2 'NBIF_COPY_NSP(2)` */'
`/* #define NBIF_COPY_NSP_3 'NBIF_COPY_NSP(3)` */'
+`/* #define NBIF_COPY_NSP_4 'NBIF_COPY_NSP(4)` */'
`/* #define NBIF_COPY_NSP_5 'NBIF_COPY_NSP(5)` */'
dnl
@@ -227,6 +236,10 @@ define(NBIF_ARG_OPND,`ifelse(eval($2 >= NR_ARG_REGS),0,`ARG'$2,BASE_OFFSET(eval(
`/* #define NBIF_ARG_OPND_3_0 'NBIF_ARG_OPND(3,0)` */'
`/* #define NBIF_ARG_OPND_3_1 'NBIF_ARG_OPND(3,1)` */'
`/* #define NBIF_ARG_OPND_3_2 'NBIF_ARG_OPND(3,2)` */'
+`/* #define NBIF_ARG_OPND_4_0 'NBIF_ARG_OPND(4,0)` */'
+`/* #define NBIF_ARG_OPND_4_1 'NBIF_ARG_OPND(4,1)` */'
+`/* #define NBIF_ARG_OPND_4_2 'NBIF_ARG_OPND(4,2)` */'
+`/* #define NBIF_ARG_OPND_4_3 'NBIF_ARG_OPND(4,3)` */'
`/* #define NBIF_ARG_OPND_5_0 'NBIF_ARG_OPND(5,0)` */'
`/* #define NBIF_ARG_OPND_5_1 'NBIF_ARG_OPND(5,1)` */'
`/* #define NBIF_ARG_OPND_5_2 'NBIF_ARG_OPND(5,2)` */'
@@ -266,6 +279,7 @@ define(NBIF_RET,`NBIF_RET_N(eval(RET_POP($1)))')dnl
`/* #define NBIF_RET_1 'NBIF_RET(1)` */'
`/* #define NBIF_RET_2 'NBIF_RET(2)` */'
`/* #define NBIF_RET_3 'NBIF_RET(3)` */'
+`/* #define NBIF_RET_4 'NBIF_RET(4)` */'
`/* #define NBIF_RET_5 'NBIF_RET(5)` */'
dnl
@@ -282,4 +296,6 @@ define(LOAD_CALLER_SAVE,`LAR_N(eval(NR_CALLER_SAVE-1))')dnl
`#define STORE_CALLER_SAVE 'STORE_CALLER_SAVE
`#define LOAD_CALLER_SAVE 'LOAD_CALLER_SAVE
+`#endif /* ASM */'
+
`#endif /* HIPE_X86_ASM_H */'
diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4
index dd6980f555..b0064ee628 100644
--- a/erts/emulator/hipe/hipe_x86_bifs.m4
+++ b/erts/emulator/hipe/hipe_x86_bifs.m4
@@ -19,6 +19,7 @@ changecom(`/*', `*/')dnl
*/
+#`define ASM'
include(`hipe/hipe_x86_asm.m4')
#`include' "config.h"
#`include' "hipe_literals.h"
@@ -47,6 +48,7 @@ define(HANDLE_GOT_MBUF,`
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_4(nbif_name, cbif_name)
* standard_bif_interface_0(nbif_name, cbif_name)
*
* Generate native interface for a BIF with 0-3 parameters and
@@ -157,6 +159,43 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
+define(standard_bif_interface_4,
+`
+#ifndef HAVE_$1
+#`define' HAVE_$1
+ TEXT
+ .align 4
+ GLOBAL(ASYM($1))
+ASYM($1):
+ /* copy native stack pointer */
+ NBIF_COPY_NSP(4)
+
+ /* switch to C stack */
+ SWITCH_ERLANG_TO_C
+
+ /* make the call on the C stack */
+ NBIF_ARG_REG(0,P)
+ NBIF_ARG(2,4,0)
+ NBIF_ARG(3,4,1)
+ NBIF_ARG(4,4,2)
+ NBIF_ARG(5,4,3)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) /* BIF__ARGS */
+ CALL_BIF($2)
+ TEST_GOT_MBUF
+
+ /* switch to native stack */
+ SWITCH_C_TO_ERLANG
+
+ /* throw exception if failure, otherwise return */
+ TEST_GOT_EXN
+ jz nbif_4_simple_exception
+ NBIF_RET(4)
+ HANDLE_GOT_MBUF(4)
+ SET_SIZE(ASYM($1))
+ TYPE_FUNCTION(ASYM($1))
+#endif')
+
define(standard_bif_interface_0,
`
#ifndef HAVE_$1
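The standard_bif_interface_4 wrappers above hand the C BIF exactly two C arguments: the process pointer and a pointer to a four-slot BIF__ARGS array filled from the native argument registers/stack. As a rough sketch only (not emulator code; Process and Eterm are stand-ins, and the real BIF macros live in bif.h), the C side such a stub targets boils down to:

typedef unsigned long Eterm;        /* stand-in for the real Eterm */
typedef struct Process_ Process;    /* opaque stand-in for the real Process */

/* A 4-ary BIF reached through this interface: the stub loads the four
   arguments (NBIF_ARG ...,4,0 .. ...,4,3) into an array and passes its
   address as the second C argument. */
static Eterm example_bif_4(Process *p, Eterm *bif_args)
{
    Eterm a = bif_args[0], b = bif_args[1], c = bif_args[2], d = bif_args[3];
    (void) p;
    return a ^ b ^ c ^ d;           /* placeholder body */
}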
diff --git a/erts/emulator/hipe/hipe_x86_glue.S b/erts/emulator/hipe/hipe_x86_glue.S
index 88b86f4de7..f124e36a26 100644
--- a/erts/emulator/hipe/hipe_x86_glue.S
+++ b/erts/emulator/hipe/hipe_x86_glue.S
@@ -18,10 +18,9 @@
* %CopyrightEnd%
*/
-
+#define ASM
#include "hipe_x86_asm.h"
#include "hipe_literals.h"
-#define ASM
#include "hipe_mode_switch.h"
/*
@@ -104,7 +103,7 @@ ASYM(nbif_return):
* stub (hipe_x86_loader.erl) which should look as follows:
*
* stub for f/N:
- * movl $<f's BEAM code address>, P_BEAM_IP(P)
+ * movl $<f's export entry address>, P_CALLEE_EXP(P)
* movb $<N>, P_ARITY(P)
* jmp nbif_callemu
*
@@ -114,7 +113,7 @@ ASYM(nbif_return):
GLOBAL(ASYM(nbif_callemu))
ASYM(nbif_callemu):
STORE_ARG_REGS
- movl $HIPE_MODE_SWITCH_RES_CALL, %eax
+ movl $HIPE_MODE_SWITCH_RES_CALL_EXPORTED, %eax
jmp .suspend_exit
/*
@@ -300,6 +299,7 @@ ASYM(nbif_fail):
GLOBAL(nbif_1_gc_after_bif)
GLOBAL(nbif_2_gc_after_bif)
GLOBAL(nbif_3_gc_after_bif)
+ GLOBAL(nbif_4_gc_after_bif)
.align 4
nbif_0_gc_after_bif:
xorl %edx, %edx
@@ -315,6 +315,10 @@ nbif_2_gc_after_bif:
.align 4
nbif_3_gc_after_bif:
movl $3, %edx
+ jmp .gc_after_bif
+ .align 4
+nbif_4_gc_after_bif:
+ movl $4, %edx
/*FALLTHROUGH*/
.align 4
.gc_after_bif:
@@ -338,6 +342,7 @@ nbif_3_gc_after_bif:
GLOBAL(nbif_1_simple_exception)
GLOBAL(nbif_2_simple_exception)
GLOBAL(nbif_3_simple_exception)
+ GLOBAL(nbif_4_simple_exception)
.align 4
nbif_0_simple_exception:
xorl %eax, %eax
@@ -353,6 +358,10 @@ nbif_2_simple_exception:
.align 4
nbif_3_simple_exception:
movl $3, %eax
+ jmp .nbif_simple_exception
+ .align 4
+nbif_4_simple_exception:
+ movl $4, %eax
/*FALLTHROUGH*/
.align 4
.nbif_simple_exception:
diff --git a/erts/emulator/hipe/hipe_x86_stack.c b/erts/emulator/hipe/hipe_x86_stack.c
index 9ad3fa9d31..7f1c2f7d41 100644
--- a/erts/emulator/hipe/hipe_x86_stack.c
+++ b/erts/emulator/hipe/hipe_x86_stack.c
@@ -209,7 +209,7 @@ void (*hipe_handle_stack_trap(Process *p))(void)
* The native stack MUST contain a stack frame as it appears on
* entry to a function (return address, actuals, caller's frame).
* p->hipe.narity MUST contain the arity (number of actuals).
- * On exit, p->hipe.ncallee is set to the handler's PC and p->hipe.nsp
+ * On exit, p->hipe.u.ncallee is set to the handler's PC and p->hipe.nsp
* is set to its SP (low address of its stack frame).
*/
void hipe_find_handler(Process *p)
@@ -240,7 +240,7 @@ void hipe_find_handler(Process *p)
if ((exnra = sdesc_exnra(sdesc)) != 0 &&
(p->catches >= 0 ||
exnra == (unsigned long)nbif_fail)) {
- p->hipe.ncallee = (void(*)(void)) exnra;
+ p->hipe.u.ncallee = (void(*)(void)) exnra;
p->hipe.nsp = nsp;
p->hipe.narity = 0;
/* update the gray/white boundary if we threw past it */
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 0051b45b31..7be17d20bb 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1590,9 +1590,9 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
void
ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
- erts_short_time_t msec)
+ ErtsMonotonicTime timeout_time)
{
- ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec);
+ ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, timeout_time);
}
void
@@ -1600,9 +1600,12 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
{
ErtsPollResFd *pollres;
int pollres_len;
- SysTimeval wait_time;
+ ErtsMonotonicTime timeout_time;
int poll_ret, i;
erts_aint_t current_cio_time;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ ASSERT(esdp);
restart:
@@ -1612,12 +1615,10 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
/* Figure out timeout value */
- if (do_wait) {
- erts_time_remaining(&wait_time);
- } else { /* poll only */
- wait_time.tv_sec = 0;
- wait_time.tv_usec = 0;
- }
+ timeout_time = (do_wait
+ ? erts_check_next_timeout_time(esdp->timer_wheel,
+ ERTS_SEC_TO_MONOTONIC(10*60))
+ : ERTS_POLL_NO_TIMEOUT /* poll only */);
/*
* No need for an atomic inc op when incrementing
@@ -1640,14 +1641,12 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
- poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time);
+ poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, timeout_time);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_deliver_time(); /* sync the machine's idea of time */
-
#ifdef ERTS_BREAK_REQUESTED
if (ERTS_BREAK_REQUESTED)
erts_do_break_handling();
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index d01297d55c..71355965aa 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -47,8 +47,8 @@ void erts_check_io_async_sig_interrupt_nkp(void);
#endif
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
-void erts_check_io_interrupt_timed_kp(int, erts_short_time_t);
-void erts_check_io_interrupt_timed_nkp(int, erts_short_time_t);
+void erts_check_io_interrupt_timed_kp(int, ErtsMonotonicTime);
+void erts_check_io_interrupt_timed_nkp(int, ErtsMonotonicTime);
void erts_check_io_kp(int);
void erts_check_io_nkp(int);
void erts_init_check_io_kp(void);
@@ -65,7 +65,7 @@ int erts_check_io_max_files(void);
void erts_check_io_async_sig_interrupt(void);
#endif
void erts_check_io_interrupt(int);
-void erts_check_io_interrupt_timed(int, erts_short_time_t);
+void erts_check_io_interrupt_timed(int, ErtsMonotonicTime);
void erts_check_io(int);
void erts_init_check_io(void);
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index aa412a20c8..f4d4a85ca4 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -320,7 +320,7 @@ struct ErtsPollSet_ {
#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_t wakeup_state;
#endif
- erts_smp_atomic32_t timeout;
+ erts_atomic64_t timeout_time;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_t no_avoided_wakeups;
erts_smp_atomic_t no_avoided_interrupts;
@@ -384,6 +384,26 @@ static void check_poll_status(ErtsPollSet ps);
static void print_misc_debug_info(void);
#endif
+static ERTS_INLINE void
+init_timeout_time(ErtsPollSet ps)
+{
+ erts_atomic64_init_nob(&ps->timeout_time,
+ (erts_aint64_t) ERTS_MONOTONIC_TIME_MAX);
+}
+
+static ERTS_INLINE void
+set_timeout_time(ErtsPollSet ps, ErtsMonotonicTime time)
+{
+ erts_atomic64_set_relb(&ps->timeout_time,
+ (erts_aint64_t) time);
+}
+
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_time(ErtsPollSet ps)
+{
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&ps->timeout_time);
+}
+
#define ERTS_POLL_NOT_WOKEN 0
#define ERTS_POLL_WOKEN -1
#define ERTS_POLL_WOKEN_INTR 1
@@ -1993,44 +2013,153 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
}
}
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout(ErtsPollSet ps,
+ int resolution,
+ ErtsMonotonicTime timeout_time)
+{
+ ErtsMonotonicTime timeout, save_timeout_time;
+
+ if (timeout_time == ERTS_POLL_NO_TIMEOUT) {
+ save_timeout_time = ERTS_MONOTONIC_TIME_MIN;
+ timeout = 0;
+ }
+ else {
+ ErtsMonotonicTime diff_time, current_time;
+ current_time = erts_get_monotonic_time();
+ diff_time = timeout_time - current_time;
+ if (diff_time <= 0) {
+ save_timeout_time = ERTS_MONOTONIC_TIME_MIN;
+ timeout = 0;
+ }
+ else {
+ save_timeout_time = current_time;
+ switch (resolution) {
+ case 1000:
+ /* Round up to nearest even milli second */
+ timeout = ERTS_MONOTONIC_TO_MSEC(diff_time - 1) + 1;
+ if (timeout > (ErtsMonotonicTime) INT_MAX)
+ timeout = (ErtsMonotonicTime) INT_MAX;
+ save_timeout_time += ERTS_MSEC_TO_MONOTONIC(timeout);
+ break;
+ case 1000000:
+ /* Round up to nearest even micro second */
+ timeout = ERTS_MONOTONIC_TO_USEC(diff_time - 1) + 1;
+ save_timeout_time += ERTS_USEC_TO_MONOTONIC(timeout);
+ break;
+ case 1000000000:
+ /* Round up to nearest even nano second */
+ timeout = ERTS_MONOTONIC_TO_NSEC(diff_time - 1) + 1;
+ save_timeout_time += ERTS_NSEC_TO_MONOTONIC(timeout);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid resolution");
+ timeout = 0;
+ save_timeout_time = 0;
+ break;
+ }
+ }
+ }
+ set_timeout_time(ps, save_timeout_time);
+ return timeout;
+}
+
+#if ERTS_POLL_USE_SELECT
+
static ERTS_INLINE int
-check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
+get_timeout_timeval(ErtsPollSet ps,
+ SysTimeval *tvp,
+ ErtsMonotonicTime timeout_time)
+{
+ ErtsMonotonicTime timeout = get_timeout(ps,
+ 1000*1000,
+ timeout_time);
+
+ if (!timeout) {
+ tvp->tv_sec = 0;
+ tvp->tv_usec = 0;
+
+ return 0;
+ }
+ else {
+ ErtsMonotonicTime sec = timeout/(1000*1000);
+ tvp->tv_sec = sec;
+ tvp->tv_usec = timeout - sec*(1000*1000);
+
+ ASSERT(tvp->tv_sec >= 0);
+ ASSERT(tvp->tv_usec >= 0);
+ ASSERT(tvp->tv_usec < 1000*1000);
+
+ return !0;
+ }
+
+}
+
+#endif
+
+#if ERTS_POLL_USE_KQUEUE
+
+static ERTS_INLINE int
+get_timeout_timespec(ErtsPollSet ps,
+ struct timespec *tsp,
+ ErtsMonotonicTime timeout_time)
+{
+ ErtsMonotonicTime timeout = get_timeout(ps,
+ 1000*1000*1000,
+ timeout_time);
+
+ if (!timeout) {
+ tsp->tv_sec = 0;
+ tsp->tv_nsec = 0;
+ return 0;
+ }
+ else {
+ ErtsMonotonicTime sec = timeout/(1000*1000*1000);
+ tsp->tv_sec = sec;
+ tsp->tv_nsec = timeout - sec*(1000*1000*1000);
+
+ ASSERT(tsp->tv_sec >= 0);
+ ASSERT(tsp->tv_nsec >= 0);
+ ASSERT(tsp->tv_nsec < 1000*1000*1000);
+
+ return !0;
+ }
+}
+
+#endif
+
+static ERTS_INLINE int
+check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
{
int res;
if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
- && tv->tv_usec == 0 && tv->tv_sec == 0) {
+ && timeout_time == ERTS_POLL_NO_TIMEOUT) {
/* Nothing to poll and zero timeout; done... */
return 0;
}
else {
- long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
- if (timeout > ERTS_AINT32_T_MAX)
- timeout = ERTS_AINT32_T_MAX;
- ASSERT(timeout >= 0);
- erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
+ int timeout;
#if ERTS_POLL_USE_FALLBACK
if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) {
#if ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
- if (timeout > INT_MAX)
- timeout = INT_MAX;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
+ timeout = (int) get_timeout(ps, 1000, timeout_time);
#ifdef ERTS_SMP
if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
- res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
+ res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout);
#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
struct timespec ts;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
+ timeout = get_timeout_timespec(ps, &ts, timeout_time);
#ifdef ERTS_SMP
if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
- ts.tv_sec = tv->tv_sec;
- ts.tv_nsec = tv->tv_usec*1000;
res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
#endif /* ----------------------------------------- */
}
@@ -2049,8 +2178,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
#if ERTS_POLL_USE_WAKEUP_PIPE
nfds++; /* Wakeup pipe */
#endif
- if (timeout > INT_MAX)
- timeout = INT_MAX;
+ timeout = (int) get_timeout(ps, 1000, timeout_time);
poll_res.dp_nfds = nfds < max_res ? nfds : max_res;
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
@@ -2059,33 +2187,33 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
- poll_res.dp_timeout = (int) timeout;
+ poll_res.dp_timeout = timeout;
res = ioctl(ps->kp_fd, DP_POLL, &poll_res);
#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
- if (timeout > INT_MAX)
- timeout = INT_MAX;
+ timeout = (int) get_timeout(ps, 1000, timeout_time);
#ifdef ERTS_SMP
if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
- res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
+ res = poll(ps->poll_fds, ps->no_poll_fds, timeout);
#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
- SysTimeval to = *tv;
+ SysTimeval to;
+ timeout = get_timeout_timeval(ps, &to, timeout_time);
ERTS_FD_COPY(&ps->input_fds, &ps->res_input_fds);
ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds);
#ifdef ERTS_SMP
- if (to.tv_sec || to.tv_usec)
+ if (timeout)
erts_thr_progress_prepare_wait(NULL);
#endif
res = ERTS_SELECT(ps->max_fd + 1,
- &ps->res_input_fds,
- &ps->res_output_fds,
- NULL,
- &to);
+ &ps->res_input_fds,
+ &ps->res_output_fds,
+ NULL,
+ &to);
#ifdef ERTS_SMP
- if (to.tv_sec || to.tv_usec)
+ if (timeout)
erts_thr_progress_finalize_wait(NULL);
if (res < 0
&& errno == EBADF
@@ -2108,10 +2236,10 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
handle_update_requests(ps);
ERTS_POLLSET_UNLOCK(ps);
res = ERTS_SELECT(ps->max_fd + 1,
- &ps->res_input_fds,
- &ps->res_output_fds,
- NULL,
- &to);
+ &ps->res_input_fds,
+ &ps->res_output_fds,
+ NULL,
+ &to);
if (res == 0) {
errno = EAGAIN;
res = -1;
@@ -2133,15 +2261,14 @@ int
ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
ErtsPollResFd pr[],
int *len,
- SysTimeval *utvp)
+ ErtsMonotonicTime timeout_time)
{
+ ErtsMonotonicTime to;
int res, no_fds;
int ebadf = 0;
#ifdef ERTS_SMP
int ps_locked = 0;
#endif
- SysTimeval *tvp;
- SysTimeval itv;
no_fds = *len;
#ifdef ERTS_POLL_MAX_RES
@@ -2151,13 +2278,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
*len = 0;
- ASSERT(utvp);
-
- tvp = utvp;
-
#ifdef ERTS_POLL_DEBUG_PRINT
- erts_printf("Entering erts_poll_wait(), timeout=%d\n",
- (int) tvp->tv_sec*1000 + tvp->tv_usec/1000);
+ erts_printf("Entering erts_poll_wait(), timeout_time=%bps\n",
+ timeout_time);
#endif
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
@@ -2166,12 +2289,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
goto done;
}
- if (is_woken(ps)) {
- /* Use zero timeout */
- itv.tv_sec = 0;
- itv.tv_usec = 0;
- tvp = &itv;
- }
+ to = (is_woken(ps)
+ ? ERTS_POLL_NO_TIMEOUT /* Use zero timeout */
+ : timeout_time);
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
if (ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
@@ -2181,7 +2301,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
}
#endif
- res = check_fd_events(ps, tvp, no_fds);
+ res = check_fd_events(ps, to, no_fds);
woke_up(ps);
@@ -2224,7 +2344,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
done:
- erts_smp_atomic32_set_relb(&ps->timeout, ERTS_AINT32_T_MAX);
+ set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX);
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Leaving %s = erts_poll_wait()\n",
res == 0 ? "0" : erl_errno_id(res));
@@ -2268,13 +2388,14 @@ ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps)
void
ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
- erts_short_time_t msec)
+ ErtsMonotonicTime timeout_time)
{
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
if (!set)
reset_wakeup_state(ps);
else {
- if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ ErtsMonotonicTime max_wait_time = get_timeout_time(ps);
+ if (max_wait_time > timeout_time)
wake_poller(ps, 1, 0);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
@@ -2431,7 +2552,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = kp_fd + 1;
ps->kp_fd = kp_fd;
#endif
- erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+ init_timeout_time(ps);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0);
erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0);
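The get_timeout() helper added above converts an absolute monotonic deadline into a relative timeout in the resolution of the chosen poll backend, rounding up so the poll call never returns before the deadline. A standalone sketch of that same round-up, assuming a nanosecond monotonic clock and a millisecond backend (the real conversions go through the ERTS_MONOTONIC_TO_*/ERTS_*_TO_MONOTONIC macros, and the constants here are stand-ins):

#include <limits.h>

typedef long long MonotonicTime;                 /* stand-in for ErtsMonotonicTime */
#define SKETCH_NO_TIMEOUT (-(1LL << 62))         /* stand-in for ERTS_POLL_NO_TIMEOUT */

/* Remaining time until deadline_ns, rounded up to whole milliseconds and
   clamped to INT_MAX, mirroring the "(diff_time - 1) ... + 1" round-up above. */
static int sketch_timeout_ms(MonotonicTime now_ns, MonotonicTime deadline_ns)
{
    MonotonicTime diff, ms;
    if (deadline_ns == SKETCH_NO_TIMEOUT)
        return 0;                                /* poll only, do not block */
    diff = deadline_ns - now_ns;
    if (diff <= 0)
        return 0;                                /* deadline already passed */
    ms = (diff - 1) / 1000000 + 1;               /* round up, never wake early */
    return ms > INT_MAX ? INT_MAX : (int) ms;
}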
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 2f1c05f401..d02ed2396b 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -29,6 +29,8 @@
#include "sys.h"
+#define ERTS_POLL_NO_TIMEOUT ERTS_MONOTONIC_TIME_MIN
+
#if 0
#define ERTS_POLL_COUNT_AVOIDED_WAKEUPS
#endif
@@ -241,7 +243,7 @@ void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
int);
void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
int,
- erts_short_time_t);
+ ErtsMonotonicTime);
ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet,
ErtsSysFdType,
ErtsPollEvents,
@@ -254,7 +256,7 @@ void ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet,
int ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet,
ErtsPollResFd [],
int *,
- SysTimeval *);
+ ErtsMonotonicTime);
int ERTS_POLL_EXPORT(erts_poll_max_fds)(void);
void ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet,
ErtsPollInfo *);
diff --git a/erts/emulator/sys/ose/erl_poll.c b/erts/emulator/sys/ose/erl_poll.c
index 7d2a3d1e0b..36ee2557e8 100644
--- a/erts/emulator/sys/ose/erl_poll.c
+++ b/erts/emulator/sys/ose/erl_poll.c
@@ -114,7 +114,7 @@ struct ErtsPollSet_ {
Uint item_count;
PROCESS interrupt;
erts_atomic32_t wakeup_state;
- erts_smp_atomic32_t timeout;
+ erts_atomic64_t timeout_time;
#ifdef ERTS_SMP
erts_smp_mtx_t mtx;
#endif
@@ -122,6 +122,26 @@ struct ErtsPollSet_ {
static int max_fds = -1;
+static ERTS_INLINE void
+init_timeout_time(ErtsPollSet ps)
+{
+ erts_atomic64_init_nob(&ps->timeout_time,
+ (erts_aint64_t) ERTS_MONOTONIC_TIME_MAX);
+}
+
+static ERTS_INLINE void
+set_timeout_time(ErtsPollSet ps, ErtsMonotonicTime time)
+{
+ erts_atomic64_set_relb(&ps->timeout_time,
+ (erts_aint64_t) time);
+}
+
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_time(ErtsPollSet ps)
+{
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&ps->timeout_time);
+}
+
#define ERTS_POLL_NOT_WOKEN ((erts_aint32_t) (1 << 0))
#define ERTS_POLL_WOKEN_INTR ((erts_aint32_t) (1 << 1))
#define ERTS_POLL_WOKEN_TIMEDOUT ((erts_aint32_t) (1 << 2))
@@ -386,12 +406,14 @@ void erts_poll_interrupt(ErtsPollSet ps,int set) {
}
-void erts_poll_interrupt_timed(ErtsPollSet ps,int set,erts_short_time_t msec) {
+void erts_poll_interrupt_timed(ErtsPollSet ps,
+ int set,
+ ErtsMonotonicTime timeout_time) {
HARDTRACEF("erts_poll_interrupt_timed called!\n");
if (!set)
reset_interrupt(ps);
- else if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ else if (get_timeout_time(ps) > timeout_time)
set_interrupt(ps);
}
@@ -453,12 +475,14 @@ done:
}
int erts_poll_wait(ErtsPollSet ps,
- ErtsPollResFd pr[],
- int *len,
- SysTimeval *utvp) {
+ ErtsPollResFd pr[],
+ int *len,
+ ErtsMonotonicTime timeout_time)
+{
int res = ETIMEDOUT, no_fds, currid = 0;
OSTIME timeout;
union SIGNAL *sig;
+ ErtsMonotonicTime save_timeout_time;
// HARDTRACEF("%ux: In erts_poll_wait",ps);
if (ps->interrupt == (PROCESS)0)
ps->interrupt = current_process();
@@ -472,16 +496,29 @@ int erts_poll_wait(ErtsPollSet ps,
*len = 0;
- ASSERT(utvp);
+ /* erts_printf("Entering erts_poll_wait(), timeout_time=%bps\n",
+ timeout_time); */
- /* erts_printf("Entering erts_poll_wait(), timeout=%d\n",
- (int) utvp->tv_sec*1000 + utvp->tv_usec/1000); */
-
- timeout = utvp->tv_sec*1000 + utvp->tv_usec/1000;
+ if (timeout_time == ERTS_POLL_NO_TIMEOUT) {
+ no_timeout:
+ timeout = (OSTIME) 0;
+ save_timeout_time = ERTS_MONOTONIC_TIME_MIN;
+ }
+ else {
+ ErtsMonotonicTime current_time, diff_time;
+ current_time = erts_get_monotonic_time();
+ diff_time = timeout_time - current_time;
+ if (diff_time <= 0)
+ goto no_timeout;
+ diff_time = (ERTS_MONOTONIC_TO_MSEC(diff_time - 1) + 1);
+ if (diff_time > INT_MAX)
+ diff_time = INT_MAX;
+ timeout = (OSTIME) diff_time;
+ save_timeout_time = current_time;
+ save_timeout_time += ERTS_MSEC_TO_MONOTONIC(diff_time);
+ }
- if (timeout > ((time_t) ERTS_AINT32_T_MAX))
- timeout = ERTS_AINT32_T_MAX;
- erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
+ set_timeout_time(ps, save_timeout_time);
while (currid < no_fds) {
if (timeout > 0) {
@@ -627,7 +664,7 @@ int erts_poll_wait(ErtsPollSet ps,
}
erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
- erts_smp_atomic32_set_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+ set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX);
*len = currid;
@@ -690,7 +727,7 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->info = NULL;
ps->interrupt = (PROCESS)0;
erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
- erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+ init_timeout_time(ps);
#ifdef ERTS_SMP
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
diff --git a/erts/emulator/sys/ose/sys.c b/erts/emulator/sys/ose/sys.c
index 5b950a7dae..13a5b71496 100644
--- a/erts/emulator/sys/ose/sys.c
+++ b/erts/emulator/sys/ose/sys.c
@@ -298,9 +298,9 @@ erts_sys_schedule_interrupt(int set)
#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
+erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time)
{
- ERTS_CHK_IO_INTR_TMD(set, msec);
+ ERTS_CHK_IO_INTR_TMD(set, timeout_time);
}
#endif
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 26ed2fb558..46d0b8dc9b 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -45,7 +45,7 @@
#include <fcntl.h>
#include "erl_errno.h"
#include <signal.h>
-
+#include <setjmp.h>
#if HAVE_SYS_SOCKETIO_H
# include <sys/socketio.h>
@@ -114,11 +114,6 @@
/*
* Make sure that MAXPATHLEN is defined.
*/
-#ifdef GETHRTIME_WITH_CLOCK_GETTIME
-#undef HAVE_GETHRTIME
-#define HAVE_GETHRTIME 1
-#endif
-
#ifndef MAXPATHLEN
# ifdef PATH_MAX
# define MAXPATHLEN PATH_MAX
@@ -160,35 +155,114 @@ typedef struct timeval SysTimeval;
typedef struct tms SysTimes;
-extern int erts_ticks_per_sec;
-
-#define SYS_CLK_TCK (erts_ticks_per_sec)
+#define SYS_CLK_TCK (erts_sys_time_data__.r.o.ticks_per_sec)
#define sys_times(Arg) times(Arg)
-#define ERTS_WRAP_SYS_TIMES 1
-extern int erts_ticks_per_sec_wrap;
-#define SYS_CLK_TCK_WRAP (erts_ticks_per_sec_wrap)
-extern clock_t sys_times_wrap(void);
+#if SIZEOF_LONG == 8
+typedef long ErtsMonotonicTime;
+#elif SIZEOF_LONG_LONG == 8
+typedef long long ErtsMonotonicTime;
+#else
+#error No signed 64-bit type found...
+#endif
+
+#define ERTS_MONOTONIC_TIME_MIN (((ErtsMonotonicTime) 1) << 63)
+#define ERTS_MONOTONIC_TIME_MAX (~ERTS_MONOTONIC_TIME_MIN)
+
+/*
+ * OS monotonic time
+ */
+
+/*
+ * Most common with os monotonic time using nano second
+ * time unit. These defines are modified below if this
+ * isn't the case...
+ */
+#define ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT 1
+#define ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT (1000*1000*1000)
+
+#undef ERTS_OS_MONOTONIC_INLINE_FUNC_PTR_CALL__
+#undef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+
+#if defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+
+#if defined(__linux__)
+
+#define ERTS_HAVE_CORRECTED_OS_MONOTONIC 1
+#define ERTS_OS_MONOTONIC_INLINE_FUNC_PTR_CALL__ 1
+
+#else /* !defined(__linux__) */
+
+ErtsMonotonicTime erts_os_monotonic_time(void);
+
+#endif /* !defined(__linux__) */
+
+#elif defined(OS_MONOTONIC_TIME_USING_GETHRTIME)
+
+#define erts_os_monotonic_time() ((ErtsMonotonicTime) gethrtime())
+
+#elif defined(OS_MONOTONIC_TIME_USING_MACH_CLOCK_GET_TIME) \
+ || defined(OS_MONOTONIC_TIME_USING_TIMES)
-#ifdef HAVE_GETHRTIME
-#ifdef GETHRTIME_WITH_CLOCK_GETTIME
-typedef long long SysHrTime;
+#if defined(OS_MONOTONIC_TIME_USING_TIMES)
+# undef ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+# define ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT (1000*1000)
+# define ERTS_HAVE_ERTS_OS_TIME_OFFSET_FINALIZE 1
+void erts_os_time_offset_finalize(void);
+# define ERTS_HAVE_ERTS_OS_MONOTONIC_TIME_INIT
+void erts_os_monotonic_time_init(void);
+#endif
+
+ErtsMonotonicTime erts_os_monotonic_time(void);
+
+#else /* No OS monotonic available... */
+
+#undef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+#undef ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+#define ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT (1000*1000)
+
+#endif
-extern SysHrTime sys_gethrtime(void);
-#define sys_init_hrtime() /* Nothing */
+struct erts_sys_time_read_only_data__ {
+#ifdef ERTS_OS_MONOTONIC_INLINE_FUNC_PTR_CALL__
+ ErtsMonotonicTime (*os_monotonic_time)(void);
+#endif
+ int ticks_per_sec;
+};
+
+typedef struct {
+ union {
+ struct erts_sys_time_read_only_data__ o;
+ char align__[(((sizeof(struct erts_sys_time_read_only_data__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+} ErtsSysTimeData__;
+
+extern ErtsSysTimeData__ erts_sys_time_data__;
-#else /* Real gethrtime (Solaris) */
+#ifdef ERTS_OS_MONOTONIC_INLINE_FUNC_PTR_CALL__
-typedef hrtime_t SysHrTime;
+ERTS_GLB_INLINE ErtsMonotonicTime erts_os_monotonic_time(void);
-#define sys_gethrtime() gethrtime()
-#define sys_init_hrtime() /* Nothing */
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_os_monotonic_time(void)
+{
+ return (*erts_sys_time_data__.r.o.os_monotonic_time)();
+}
-#endif /* GETHRTIME_WITH_CLOCK_GETTIME */
-#endif /* HAVE_GETHRTIME */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+#endif /* ERTS_OS_MONOTONIC_INLINE_FUNC_PTR_CALL__ */
+
+/*
+ *
+ */
+
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
typedef long long SysCpuTime;
typedef struct timespec SysTimespec;
@@ -200,7 +274,7 @@ typedef struct timespec SysTimespec;
int sys_start_hrvtime(void);
int sys_stop_hrvtime(void);
-#elif defined(HAVE_CLOCK_GETTIME)
+#elif defined(HAVE_CLOCK_GETTIME_CPU_TIME)
#define sys_clock_gettime(cid,tp) clock_gettime((cid),&(tp))
#define sys_get_proc_cputime(t,tp) sys_clock_gettime(CLOCK_PROCESS_CPUTIME_ID,(tp))
@@ -211,13 +285,8 @@ int sys_stop_hrvtime(void);
#define SYS_CLOCK_RESOLUTION 1
/* These are defined in sys.c */
-#if defined(SIG_SIGSET) /* Old SysV */
-RETSIGTYPE (*sys_sigset())();
-#elif defined(SIG_SIGNAL) /* Old BSD */
-RETSIGTYPE (*sys_sigset())();
-#else
-RETSIGTYPE (*sys_sigset(int, RETSIGTYPE (*func)(int)))(int);
-#endif
+typedef void (*SIGFUNC)(int);
+extern SIGFUNC sys_signal(int, SIGFUNC);
extern void sys_sigrelease(int);
extern void sys_sigblock(int);
extern void sys_stop_cat(void);
@@ -354,4 +423,28 @@ extern int exit_async(void);
#define ERTS_EXIT_AFTER_DUMP _exit
+#if !defined(__APPLE__) && !defined(__MACH__)
+/* Some OS X versions do not allow (ab)using signal handlers like this */
+#define ERTS_HAVE_TRY_CATCH 1
+
+/* We try to simulate a try catch in C with the help of signal handlers.
+ * Only use this as a very last resort, as it is not very portable and
+ * quite unstable. It is also not thread safe, so make sure that only
+ * one thread can call this at a time!
+ */
+extern void erts_sys_sigsegv_handler(int);
+extern jmp_buf erts_sys_sigsegv_jmp;
+#define ERTS_SYS_TRY_CATCH(EXPR,CATCH) \
+ do { \
+ SIGFUNC prev_handler = sys_signal(SIGSEGV, \
+ erts_sys_sigsegv_handler); \
+ if (!setjmp(erts_sys_sigsegv_jmp)) { \
+ EXPR; \
+ } else { \
+ CATCH; \
+ } \
+ sys_signal(SIGSEGV,prev_handler); \
+ } while(0)
+#endif
+
#endif /* #ifndef _ERL_UNIX_SYS_H */
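ERTS_SYS_TRY_CATCH above briefly installs erts_sys_sigsegv_handler and longjmps back to the setjmp point if SIGSEGV is raised while EXPR runs. A hedged usage sketch, assuming the declarations from this header and respecting its single-thread, last-resort caveat (try_read_byte is a made-up caller, not ERTS code):

#ifdef ERTS_HAVE_TRY_CATCH
/* Probe one byte at a possibly-unmapped address without crashing. */
static int try_read_byte(const volatile char *maybe_bad, char *out)
{
    volatile int ok = 0;                  /* volatile: keeps its value across the longjmp */
    ERTS_SYS_TRY_CATCH(
        { *out = *maybe_bad; ok = 1; },   /* EXPR: may fault */
        { ok = 0; }                       /* CATCH: reached via longjmp from the handler */
    );
    return ok;
}
#endif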
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index cd87b320e2..f1c785890c 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -34,6 +34,7 @@
#include <termios.h>
#include <ctype.h>
#include <sys/utsname.h>
+#include <sys/select.h>
#ifdef ISC32
#include <sys/bsdtypes.h>
@@ -85,14 +86,24 @@ static erts_smp_rwmtx_t environ_rwmtx;
#define DISABLE_VFORK 0
#endif
+#if defined IOV_MAX
+#define MAXIOV IOV_MAX
+#elif defined UIO_MAXIOV
+#define MAXIOV UIO_MAXIOV
+#else
+#define MAXIOV 16
+#endif
+
#ifdef USE_THREADS
# ifdef ENABLE_CHILD_WAITER_THREAD
# define CHLDWTHR ENABLE_CHILD_WAITER_THREAD
# else
# define CHLDWTHR 0
# endif
+# define FDBLOCK 1
#else
# define CHLDWTHR 0
+# define FDBLOCK 0
#endif
/*
* [OTP-3906]
@@ -121,6 +132,15 @@ struct ErtsSysReportExit_ {
#endif
};
+/* Used by the fd driver iff the fd could not be set to non-blocking */
+typedef struct ErtsSysBlocking_ {
+ ErlDrvPDL pdl;
+ int res;
+ int err;
+ unsigned int pkey;
+} ErtsSysBlocking;
+
+
/* This data is shared by these drivers - initialized by spawn_init() */
static struct driver_data {
ErlDrvPort port_num;
@@ -129,6 +149,8 @@ static struct driver_data {
int pid;
int alive;
int status;
+ int terminating;
+ ErtsSysBlocking *blocking;
} *driver_data; /* indexed by fd */
static ErtsSysReportExit *report_exit_list;
@@ -202,8 +224,14 @@ static erts_smp_atomic_t sys_misc_mem_sz;
#if defined(ERTS_SMP)
static void smp_sig_notify(char c);
static int sig_notify_fds[2] = {-1, -1};
+
+static int sig_suspend_fds[2] = {-1, -1};
+#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
+
#endif
+jmp_buf erts_sys_sigsegv_jmp;
+
#if CHLDWTHR || defined(ERTS_SMP)
erts_mtx_t chld_stat_mtx;
#endif
@@ -280,7 +308,7 @@ struct {
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
void (*check_io_as_interrupt)(void);
void (*check_io_interrupt)(int);
- void (*check_io_interrupt_tmd)(int, erts_short_time_t);
+ void (*check_io_interrupt_tmd)(int, ErtsMonotonicTime);
void (*check_io)(int);
Uint (*size)(void);
Eterm (*info)(void *);
@@ -386,9 +414,9 @@ erts_sys_schedule_interrupt(int set)
#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
+erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time)
{
- ERTS_CHK_IO_INTR_TMD(set, msec);
+ ERTS_CHK_IO_INTR_TMD(set, timeout_time);
}
#endif
@@ -504,11 +532,14 @@ thr_create_prepare_child(void *vtcdp)
void
erts_sys_pre_init(void)
{
+#ifdef USE_THREADS
+ erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+#endif
+
erts_printf_add_cr_to_stdout = 1;
erts_printf_add_cr_to_stderr = 1;
+
#ifdef USE_THREADS
- {
- erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -525,6 +556,12 @@ erts_sys_pre_init(void)
erts_thr_init(&eid);
+#endif /* USE_THREADS */
+
+ erts_init_sys_time_sup();
+
+#ifdef USE_THREADS
+
report_exit_list = NULL;
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -541,7 +578,7 @@ erts_sys_pre_init(void)
erts_cnd_init(&chld_stat_cnd);
children_alive = 0;
#endif
- }
+
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_break_requested, 0);
erts_smp_atomic32_init_nob(&erts_got_sigusr1, 0);
@@ -554,7 +591,9 @@ erts_sys_pre_init(void)
#if !CHLDWTHR && !defined(ERTS_SMP)
children_died = 0;
#endif
+
#endif /* USE_THREADS */
+
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
{
@@ -640,39 +679,7 @@ erl_sys_init(void)
/* signal handling */
-#ifdef SIG_SIGSET /* Old SysV */
-RETSIGTYPE (*sys_sigset(sig, func))()
-int sig;
-RETSIGTYPE (*func)();
-{
- return(sigset(sig, func));
-}
-void sys_sigblock(int sig)
-{
- sighold(sig);
-}
-void sys_sigrelease(int sig)
-{
- sigrelse(sig);
-}
-#else /* !SIG_SIGSET */
-#ifdef SIG_SIGNAL /* Old BSD */
-RETSIGTYPE (*sys_sigset(sig, func))(int, int)
-int sig;
-RETSIGTYPE (*func)();
-{
- return(signal(sig, func));
-}
-sys_sigblock(int sig)
-{
- sigblock(sig);
-}
-sys_sigrelease(int sig)
-{
- sigsetmask(sigblock(0) & ~sigmask(sig));
-}
-#else /* !SIG_SIGNAL */ /* The True Way - POSIX!:-) */
-RETSIGTYPE (*sys_sigset(int sig, RETSIGTYPE (*func)(int)))(int)
+SIGFUNC sys_signal(int sig, SIGFUNC func)
{
struct sigaction act, oact;
@@ -705,23 +712,35 @@ void sys_sigrelease(int sig)
sigaddset(&mask, sig);
sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
}
-#endif /* !SIG_SIGNAL */
-#endif /* !SIG_SIGSET */
-#if (0) /* not used? -- gordon */
-static void (*break_func)();
-static RETSIGTYPE break_handler(int sig)
-{
-#ifdef QNX
- /* Turn off SIGCHLD during break processing */
- sys_sigblock(SIGCHLD);
-#endif
- (*break_func)();
-#ifdef QNX
- sys_sigrelease(SIGCHLD);
-#endif
+void erts_sys_sigsegv_handler(int signo) {
+ if (signo == SIGSEGV) {
+ longjmp(erts_sys_sigsegv_jmp, 1);
+ }
+}
+
+/*
+ * Function returns 1 if we can read from all values in between
+ * start and stop.
+ */
+int
+erts_sys_is_area_readable(char *start, char *stop) {
+ int fds[2];
+ if (!pipe(fds)) {
+ /* We let write try to figure out if the pointers are readable */
+ int res = write(fds[1], start, (char*)stop - (char*)start);
+ if (res == -1) {
+ close(fds[0]);
+ close(fds[1]);
+ return 0;
+ }
+ close(fds[0]);
+ close(fds[1]);
+ return 1;
+ }
+ return 0;
+
}
-#endif /* 0 */
static ERTS_INLINE int
prepare_crash_dump(int secs)
@@ -849,9 +868,23 @@ sigusr1_exit(void)
#ifdef ETHR_UNUSABLE_SIGUSRX
#warning "Unusable SIGUSR1 & SIGUSR2. Disabling use of these signals"
-#endif
-#ifndef ETHR_UNUSABLE_SIGUSRX
+#else
+
+#ifdef ERTS_SMP
+void
+sys_thr_suspend(erts_tid_t tid) {
+ erts_thr_kill(tid, ERTS_SYS_SUSPEND_SIGNAL);
+}
+
+void
+sys_thr_resume(erts_tid_t tid) {
+ int i = 0, res;
+ do {
+ res = write(sig_suspend_fds[1],&i,sizeof(i));
+ } while (res < 0 && errno == EAGAIN);
+}
+#endif
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE user_signal1(void)
@@ -866,20 +899,20 @@ static RETSIGTYPE user_signal1(int signum)
#endif
}
-#ifdef QUANTIFY
+#ifdef ERTS_SMP
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
-static RETSIGTYPE user_signal2(void)
+static RETSIGTYPE suspend_signal(void)
#else
-static RETSIGTYPE user_signal2(int signum)
+static RETSIGTYPE suspend_signal(int signum)
#endif
{
-#ifdef ERTS_SMP
- smp_sig_notify('2');
-#else
- quantify_save_data();
-#endif
+ int res;
+ int buf[1];
+ do {
+ res = read(sig_suspend_fds[0], buf, sizeof(int));
+ } while (res < 0 && errno == EINTR);
}
-#endif
+#endif /* #ifdef ERTS_SMP */
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
@@ -904,9 +937,9 @@ static RETSIGTYPE do_quit(int signum)
/* Disable break */
void erts_set_ignore_break(void) {
- sys_sigset(SIGINT, SIG_IGN);
- sys_sigset(SIGQUIT, SIG_IGN);
- sys_sigset(SIGTSTP, SIG_IGN);
+ sys_signal(SIGINT, SIG_IGN);
+ sys_signal(SIGQUIT, SIG_IGN);
+ sys_signal(SIGTSTP, SIG_IGN);
}
/* Don't use ctrl-c for break handler but let it be
@@ -929,14 +962,14 @@ void erts_replace_intr(void) {
void init_break_handler(void)
{
- sys_sigset(SIGINT, request_break);
+ sys_signal(SIGINT, request_break);
#ifndef ETHR_UNUSABLE_SIGUSRX
- sys_sigset(SIGUSR1, user_signal1);
-#ifdef QUANTIFY
- sys_sigset(SIGUSR2, user_signal2);
-#endif
+ sys_signal(SIGUSR1, user_signal1);
+#ifdef ERTS_SMP
+ sys_signal(ERTS_SYS_SUSPEND_SIGNAL, suspend_signal);
+#endif /* #ifdef ERTS_SMP */
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
- sys_sigset(SIGQUIT, do_quit);
+ sys_signal(SIGQUIT, do_quit);
}
int sys_max_files(void)
@@ -953,8 +986,13 @@ static void block_signals(void)
sys_sigblock(SIGINT);
#ifndef ETHR_UNUSABLE_SIGUSRX
sys_sigblock(SIGUSR1);
+#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+#endif /* #ifndef ERTS_SMP */
+
+#if defined(ERTS_SMP) && !defined(ETHR_UNUSABLE_SIGUSRX)
+ sys_sigblock(ERTS_SYS_SUSPEND_SIGNAL);
#endif
-#endif
+
}
static void unblock_signals(void)
@@ -968,26 +1006,13 @@ static void unblock_signals(void)
#ifndef ETHR_UNUSABLE_SIGUSRX
sys_sigrelease(SIGUSR1);
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+#endif /* #ifndef ERTS_SMP */
+
+#if defined(ERTS_SMP) && !defined(ETHR_UNUSABLE_SIGUSRX)
+ sys_sigrelease(ERTS_SYS_SUSPEND_SIGNAL);
#endif
-}
-/************************** Time stuff **************************/
-#ifdef HAVE_GETHRTIME
-#ifdef GETHRTIME_WITH_CLOCK_GETTIME
-SysHrTime sys_gethrtime(void)
-{
- struct timespec ts;
- long long result;
- if (clock_gettime(CLOCK_MONOTONIC,&ts) != 0) {
- erl_exit(1,"Fatal, could not get clock_monotonic value!, "
- "errno = %d\n", errno);
- }
- result = ((long long) ts.tv_sec) * 1000000000LL +
- ((long long) ts.tv_nsec);
- return (SysHrTime) result;
}
-#endif
-#endif
/************************** OS info *******************************/
@@ -1094,11 +1119,16 @@ void fini_getenv_state(GETENV_STATE *state)
/* Driver interfaces */
static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
+#if FDBLOCK
+static void fd_async(void *);
+static void fd_ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data);
+#endif
static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT,
char **, ErlDrvSizeT);
static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
static int spawn_init(void);
static void fd_stop(ErlDrvData);
+static void fd_flush(ErlDrvData);
static void stop(ErlDrvData);
static void ready_input(ErlDrvData, ErlDrvEvent);
static void ready_output(ErlDrvData, ErlDrvEvent);
@@ -1143,8 +1173,12 @@ struct erl_drv_entry fd_driver_entry = {
fd_control,
NULL,
outputv,
- NULL, /* ready_async */
- NULL, /* flush */
+#if FDBLOCK
+ fd_ready_async, /* ready_async */
+#else
+ NULL,
+#endif
+ fd_flush, /* flush */
NULL, /* call */
NULL, /* event */
ERL_DRV_EXTENDED_MARKER,
@@ -1198,13 +1232,28 @@ static RETSIGTYPE onchld(int signum)
#endif
}
+static int set_blocking_data(struct driver_data *dd) {
+
+ dd->blocking = erts_alloc(ERTS_ALC_T_SYS_BLOCKING, sizeof(ErtsSysBlocking));
+
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, sizeof(ErtsSysBlocking));
+
+ dd->blocking->pdl = driver_pdl_create(dd->port_num);
+ dd->blocking->res = 0;
+ dd->blocking->err = 0;
+ dd->blocking->pkey = driver_async_port_key(dd->port_num);
+
+ return 1;
+}
+
static int set_driver_data(ErlDrvPort port_num,
int ifd,
int ofd,
int packet_bytes,
int read_write,
int exit_status,
- int pid)
+ int pid,
+ int is_blocking)
{
Port *prt;
ErtsSysReportExit *report_exit;
@@ -1236,8 +1285,13 @@ static int set_driver_data(ErlDrvPort port_num,
driver_data[ifd].pid = pid;
driver_data[ifd].alive = 1;
driver_data[ifd].status = 0;
+ driver_data[ifd].terminating = 0;
+ driver_data[ifd].blocking = NULL;
if (read_write & DO_WRITE) {
driver_data[ifd].ofd = ofd;
+ if (is_blocking && FDBLOCK)
+ if (!set_blocking_data(driver_data+ifd))
+ return -1;
if (ifd != ofd)
driver_data[ofd] = driver_data[ifd]; /* structure copy */
} else { /* DO_READ only */
@@ -1253,6 +1307,11 @@ static int set_driver_data(ErlDrvPort port_num,
driver_data[ofd].pid = pid;
driver_data[ofd].alive = 1;
driver_data[ofd].status = 0;
+ driver_data[ofd].terminating = 0;
+ driver_data[ofd].blocking = NULL;
+ if (is_blocking && FDBLOCK)
+ if (!set_blocking_data(driver_data+ofd))
+ return -1;
return(ofd);
}
}
@@ -1262,11 +1321,13 @@ static int spawn_init()
int i;
#if CHLDWTHR
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
+
thr_opts.detached = 0;
thr_opts.suggested_stack_size = 0; /* Smallest possible */
+ thr_opts.name = "child_waiter";
#endif
- sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
+ sys_signal(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
erts_smp_atomic_add_nob(&sys_misc_mem_sz,
@@ -1279,7 +1340,7 @@ static int spawn_init()
sys_sigblock(SIGCHLD);
#endif
- sys_sigset(SIGCHLD, onchld); /* Reap children */
+ sys_signal(SIGCHLD, onchld); /* Reap children */
#if CHLDWTHR
erts_thr_create(&child_waiter_tid, child_waiter, NULL, &thr_opts);
@@ -1745,7 +1806,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
}
res = set_driver_data(port_num, ifd[0], ofd[1], opts->packet_bytes,
- opts->read_write, opts->exit_status, pid);
+ opts->read_write, opts->exit_status, pid, 0);
/* Don't unblock SIGCHLD until now, since the call above must
first complete putting away the info about our new subprocess. */
unblock_signals();
@@ -1830,6 +1891,7 @@ static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
SysDriverOpts* opts)
{
ErlDrvData res;
+ int non_blocking = 0;
if (((opts->read_write & DO_READ) && opts->ifd >= max_files) ||
((opts->read_write & DO_WRITE) && opts->ofd >= max_files))
@@ -1902,6 +1964,20 @@ static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
* case - it can be called with any old pre-existing file descriptors,
* the relations between which (if they're even two) we can only guess
* at - still, we try our best...
+ *
+ * Added note OTP 18: Some systems seem to use stdout/stderr to log data
+ * using unix pipes, so we cannot allow the system to block on a write.
+ * Therefore we use an async thread to write the data to fd's that could
+ * not be set to non-blocking. When no async threads are available we
+ * fall back on the old behaviour.
+ *
+ * Also the guarantee about what is delivered to the OS has changed.
+ * Pre 18 the fd driver did no flushing of data before terminating.
+ * Now it does. This is because we want to be able to guarantee that things
+ * such as escripts and friends really have outputted all data before
+ * terminating. This could potentially block the termination of the system
+ * for a very long time, but if the user wants to terminate fast she should
+ * use erlang:halt with flush=false.
*/
if (opts->read_write & DO_READ) {
@@ -1924,6 +2000,7 @@ static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
imagine a scenario where setting non-blocking mode
here would cause problems - go ahead and do it. */
+ non_blocking = 1;
SET_NONBLOCKING(opts->ofd);
} else { /* output fd is a tty, input fd isn't */
@@ -1966,6 +2043,7 @@ static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
(nfd = open(tty, O_WRONLY)) != -1) {
dup2(nfd, opts->ofd);
close(nfd);
+ non_blocking = 1;
SET_NONBLOCKING(opts->ofd);
}
}
@@ -1974,8 +2052,9 @@ static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
}
CHLD_STAT_LOCK;
res = (ErlDrvData)(long)set_driver_data(port_num, opts->ifd, opts->ofd,
- opts->packet_bytes,
- opts->read_write, 0, -1);
+ opts->packet_bytes,
+ opts->read_write, 0, -1,
+ !non_blocking);
CHLD_STAT_UNLOCK;
return res;
}
@@ -2001,14 +2080,30 @@ static void nbio_stop_fd(ErlDrvPort prt, int fd)
SET_BLOCKING(fd);
}
-static void fd_stop(ErlDrvData fd) /* Does not close the fds */
+static void fd_stop(ErlDrvData ev) /* Does not close the fds */
{
int ofd;
+ int fd = (int)(long)ev;
+ ErlDrvPort prt = driver_data[fd].port_num;
- nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)fd);
- ofd = driver_data[(int)(long)fd].ofd;
- if (ofd != (int)(long)fd && ofd != -1)
- nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)ofd);
+#if FDBLOCK
+ if (driver_data[fd].blocking) {
+ erts_free(ERTS_ALC_T_SYS_BLOCKING,driver_data[fd].blocking);
+ driver_data[fd].blocking = NULL;
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*sizeof(ErtsSysBlocking));
+ }
+#endif
+
+ nbio_stop_fd(prt, fd);
+ ofd = driver_data[fd].ofd;
+ if (ofd != fd && ofd != -1)
+ nbio_stop_fd(prt, ofd);
+}
+
+static void fd_flush(ErlDrvData fd)
+{
+ if (!driver_data[(int)(long)fd].terminating)
+ driver_data[(int)(long)fd].terminating = 1;
}
static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
@@ -2031,8 +2126,8 @@ static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
CHLD_STAT_LOCK;
res = (ErlDrvData)(long)set_driver_data(port_num, fd, fd,
- opts->packet_bytes,
- opts->read_write, 0, -1);
+ opts->packet_bytes,
+ opts->read_write, 0, -1, 0);
CHLD_STAT_UNLOCK;
return res;
}
@@ -2069,6 +2164,7 @@ static void stop(ErlDrvData fd)
}
}
+/* used by fd_driver */
static void outputv(ErlDrvData e, ErlIOVec* ev)
{
int fd = (int)(long)e;
@@ -2094,12 +2190,21 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
ev->iov[0].iov_base = lbp;
ev->iov[0].iov_len = pb;
ev->size += pb;
+
+ if (driver_data[fd].blocking && FDBLOCK)
+ driver_pdl_lock(driver_data[fd].blocking->pdl);
+
if ((sz = driver_sizeq(ix)) > 0) {
driver_enqv(ix, ev, 0);
+
+ if (driver_data[fd].blocking && FDBLOCK)
+ driver_pdl_unlock(driver_data[fd].blocking->pdl);
+
if (sz + ev->size >= (1 << 13))
set_busy_port(ix, 1);
}
- else {
+ else if (!driver_data[fd].blocking || !FDBLOCK) {
+ /* We try to write directly if the fd in non-blocking */
int vsize = ev->vsize > MAX_VSIZE ? MAX_VSIZE : ev->vsize;
n = writev(ofd, (const void *) (ev->iov), vsize);
@@ -2115,10 +2220,22 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
driver_enqv(ix, ev, n); /* n is the skip value */
driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
}
+#if FDBLOCK
+ else {
+ if (ev->size != 0) {
+ driver_enqv(ix, ev, 0);
+ driver_pdl_unlock(driver_data[fd].blocking->pdl);
+ driver_async(ix, &driver_data[fd].blocking->pkey,
+ fd_async, driver_data+fd, NULL);
+ } else {
+ driver_pdl_unlock(driver_data[fd].blocking->pdl);
+ }
+ }
+#endif
/* return 0;*/
}
-
+/* Used by spawn_driver and vanilla driver */
static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
int fd = (int)(long)e;
@@ -2181,6 +2298,23 @@ static int port_inp_failure(ErlDrvPort port_num, int ready_fd, int res)
ASSERT(res <= 0);
(void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
clear_fd_data(ready_fd);
+
+ if (driver_data[ready_fd].blocking && FDBLOCK) {
+ driver_pdl_lock(driver_data[ready_fd].blocking->pdl);
+ if (driver_sizeq(driver_data[ready_fd].port_num) > 0) {
+ driver_pdl_unlock(driver_data[ready_fd].blocking->pdl);
+ /* We have stuff in the output queue, so we just
+ set the state to terminating and wait for fd_async_ready
+ to terminate the port */
+ if (res == 0)
+ driver_data[ready_fd].terminating = 2;
+ else
+ driver_data[ready_fd].terminating = -err;
+ return 0;
+ }
+ driver_pdl_unlock(driver_data[ready_fd].blocking->pdl);
+ }
+
if (res == 0) {
if (driver_data[ready_fd].report_exit) {
CHLD_STAT_LOCK;
@@ -2231,6 +2365,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
port_num = driver_data[fd].port_num;
packet_bytes = driver_data[fd].packet_bytes;
+
if (packet_bytes == 0) {
byte *read_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_READ_BUF,
ERTS_SYS_READ_BUF_SZ);
@@ -2354,6 +2489,8 @@ static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
if ((iv = (struct iovec*) driver_peekq(ix, &vsize)) == NULL) {
driver_select(ix, ready_fd, ERL_DRV_WRITE, 0);
+ if (driver_data[fd].terminating)
+ driver_failure_atom(driver_data[fd].port_num,"normal");
return; /* 0; */
}
vsize = vsize > MAX_VSIZE ? MAX_VSIZE : vsize;
@@ -2379,6 +2516,78 @@ static void stop_select(ErlDrvEvent fd, void* _)
close((int)fd);
}
+#if FDBLOCK
+
+static void
+fd_async(void *async_data)
+{
+ int res;
+ struct driver_data *dd = (struct driver_data*)async_data;
+ SysIOVec *iov0;
+ SysIOVec *iov;
+ int iovlen;
+ int err;
+ /* much of this code is stolen from efile_drv:invoke_writev */
+ driver_pdl_lock(dd->blocking->pdl);
+ iov0 = driver_peekq(dd->port_num, &iovlen);
+ iovlen = iovlen < MAXIOV ? iovlen : MAXIOV;
+ iov = erts_alloc_fnf(ERTS_ALC_T_SYS_WRITE_BUF,
+ sizeof(SysIOVec)*iovlen);
+ if (!iov) {
+ res = -1;
+ err = ENOMEM;
+ driver_pdl_unlock(dd->blocking->pdl);
+ } else {
+ memcpy(iov,iov0,iovlen*sizeof(SysIOVec));
+ driver_pdl_unlock(dd->blocking->pdl);
+
+ res = writev(dd->ofd, iov, iovlen);
+ err = errno;
+
+ erts_free(ERTS_ALC_T_SYS_WRITE_BUF, iov);
+ }
+ dd->blocking->res = res;
+ dd->blocking->err = err;
+}
+
+void fd_ready_async(ErlDrvData drv_data,
+ ErlDrvThreadData thread_data) {
+ struct driver_data *dd = (struct driver_data *)thread_data;
+ ErlDrvPort port_num = dd->port_num;
+
+ ASSERT(dd->blocking);
+ ASSERT(dd == (driver_data + (int)(long)drv_data));
+
+ if (dd->blocking->res > 0) {
+ driver_pdl_lock(dd->blocking->pdl);
+ if (driver_deq(port_num, dd->blocking->res) == 0) {
+ driver_pdl_unlock(dd->blocking->pdl);
+ set_busy_port(port_num, 0);
+ if (dd->terminating) {
+ /* The port has been ordered to terminate
+ from either fd_flush or port_inp_failure */
+ if (dd->terminating == 1)
+ driver_failure_atom(port_num, "normal");
+ else if (dd->terminating == 2)
+ driver_failure_eof(port_num);
+ else if (dd->terminating < 0)
+ driver_failure_posix(port_num, -dd->terminating);
+ return; /* -1; */
+ }
+ } else {
+ driver_pdl_unlock(dd->blocking->pdl);
+ /* still data left to write in queue */
+ driver_async(port_num, &dd->blocking->pkey, fd_async, dd, NULL);
+ return /* 0; */;
+ }
+ } else if (dd->blocking->res < 0) {
+ driver_failure_posix(port_num, dd->blocking->err);
+ return; /* -1; */
+ }
+ return; /* 0; */
+}
+
+#endif
void erts_do_break_handling(void)
{
@@ -2648,18 +2857,30 @@ void sys_preload_end(Preload* p)
/* Nothing */
}
-/* Read a key from console (?) */
-
+/* Read a key from console, used by break.c
+ Here we assume that all schedulers are stopped so that erl_poll
+ does not interfere with the select below.
+*/
int sys_get_key(fd)
int fd;
{
- int c;
+ int c, ret;
unsigned char rbuf[64];
+ fd_set fds;
fflush(stdout); /* Flush query ??? */
- if ((c = read(fd,rbuf,64)) <= 0) {
- return c;
+ FD_ZERO(&fds);
+ FD_SET(fd,&fds);
+
+ ret = select(fd+1, &fds, NULL, NULL, NULL);
+
+ if (ret == 1) {
+ do {
+ c = read(fd,rbuf,64);
+ } while (c < 0 && errno == EAGAIN);
+ if (c <= 0)
+ return c;
}
return rbuf[0];
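
The new sys_get_key() blocks in select() first (safe here because all schedulers are assumed stopped) and then retries the read on EAGAIN, presumably because the tty descriptor may be in non-blocking mode. A small standalone sketch of the same select-then-retry pattern on stdin; error handling is trimmed and the 64-byte buffer size is simply copied from the code above:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
    unsigned char buf[64];
    ssize_t n;
    fd_set fds;

    FD_ZERO(&fds);
    FD_SET(STDIN_FILENO, &fds);
    /* Wait until at least one byte is available. */
    if (select(STDIN_FILENO + 1, &fds, NULL, NULL, NULL) == 1) {
        do {                                   /* fd may be non-blocking */
            n = read(STDIN_FILENO, buf, sizeof(buf));
        } while (n < 0 && errno == EAGAIN);
        if (n > 0)
            printf("first byte: %d\n", buf[0]);
    }
    return 0;
}
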
@@ -2983,13 +3204,6 @@ signal_dispatcher_thread_func(void *unused)
case '1': /* SIGUSR1 */
sigusr1_exit();
break;
-#ifdef QUANTIFY
- case '2': /* SIGUSR2 */
- quantify_save_data(); /* Might take a substantial amount of
- time, but this is a test/debug
- build */
- break;
-#endif
default:
erl_exit(ERTS_ABORT_EXIT,
"signal-dispatcher thread received unknown "
@@ -3007,6 +3221,7 @@ init_smp_sig_notify(void)
{
erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
thr_opts.detached = 1;
+ thr_opts.name = "sys_sig_dispatcher";
if (pipe(sig_notify_fds) < 0) {
erl_exit(ERTS_ABORT_EXIT,
@@ -3021,6 +3236,17 @@ init_smp_sig_notify(void)
NULL,
&thr_opts);
}
+
+static void
+init_smp_sig_suspend(void) {
+ if (pipe(sig_suspend_fds) < 0) {
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to create sig_suspend pipe: %s (%d)\n",
+ erl_errno_id(errno),
+ errno);
+ }
+}
+
#ifdef __DARWIN__
int erts_darwin_main_thread_pipe[2];
@@ -3048,9 +3274,11 @@ erts_sys_main_thread(void)
#endif
smp_sig_notify(0); /* Notify initialized */
- while (1) {
- /* Wait for a signal to arrive... */
+
+ /* Wait for a signal to arrive... */
+
#ifdef __DARWIN__
+ while (1) {
/*
* The wx driver needs to be able to steal the main thread for Cocoa to
* work properly.
@@ -3065,12 +3293,24 @@ erts_sys_main_thread(void)
void* (*func)(void*);
void* arg;
void *resp;
- read(erts_darwin_main_thread_pipe[0],&func,sizeof(void* (*)(void*)));
- read(erts_darwin_main_thread_pipe[0],&arg, sizeof(void*));
+ res = read(erts_darwin_main_thread_pipe[0],&func,sizeof(void* (*)(void*)));
+ if (res != sizeof(void* (*)(void*)))
+ break;
+ res = read(erts_darwin_main_thread_pipe[0],&arg,sizeof(void*));
+ if (res != sizeof(void*))
+ break;
resp = (*func)(arg);
write(erts_darwin_main_thread_result_pipe[1],&resp,sizeof(void *));
}
-#else
+
+ if (res == -1 && errno != EINTR)
+ break;
+ }
+ /* Something broke with the main thread pipe, so we ignore it for now.
+ Most probably erts has closed this pipe and is about to exit. */
+#endif /* #ifdef __DARWIN__ */
+
+ while (1) {
#ifdef DEBUG
int res =
#else
@@ -3079,7 +3319,6 @@ erts_sys_main_thread(void)
select(0, NULL, NULL, NULL, NULL);
ASSERT(res < 0);
ASSERT(errno == EINTR);
-#endif
}
}
@@ -3171,6 +3410,7 @@ erl_sys_args(int* argc, char** argv)
#ifdef ERTS_SMP
init_smp_sig_notify();
+ init_smp_sig_suspend();
#endif
/* Handled arguments have been marked with NULL. Slide arguments
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index cafeab547e..2ffa649767 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -32,7 +32,7 @@ void
erts_sys_init_float(void)
{
# ifdef SIGFPE
- sys_sigset(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
+ sys_signal(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
# endif
}
@@ -667,7 +667,7 @@ static void fpe_sig_handler(int sig)
static void erts_thread_catch_fp_exceptions(void)
{
- sys_sigset(SIGFPE, fpe_sig_handler);
+ sys_signal(SIGFPE, fpe_sig_handler);
unmask_fpe();
}
diff --git a/erts/emulator/sys/unix/sys_time.c b/erts/emulator/sys/unix/sys_time.c
index fcce54a2c4..9fdb1930b7 100644
--- a/erts/emulator/sys/unix/sys_time.c
+++ b/erts/emulator/sys/unix/sys_time.c
@@ -53,9 +53,40 @@
/******************* Routines for time measurement *********************/
-int erts_ticks_per_sec = 0; /* Will be SYS_CLK_TCK in erl_unix_sys.h */
-int erts_ticks_per_sec_wrap = 0; /* Will be SYS_CLK_TCK_WRAP */
-static int ticks_bsr = 0; /* Shift wrapped tick value this much to the right */
+#undef ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
+#undef ERTS_SYS_TIME_INTERNAL_STATE_READ_ONLY__
+
+#if defined(OS_MONOTONIC_TIME_USING_TIMES)
+
+#define ERTS_WRAP_SYS_TIMES 1
+#define ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
+#define ERTS_SYS_TIME_INTERNAL_STATE_READ_ONLY__
+
+/*
+ * Not sure there is still a need to use times(); perhaps support
+ * for it will be dropped soon...
+ *
+ * sys_times() might need to be wrapped and the values shifted right
+ * a bit to cope with faster ticks. This has to be handled dynamically
+ * to start with. A special version that uses the times() return value
+ * as a high-resolution timer could be made to fully utilize the faster
+ * ticks, as on Windows, but for now we settle for this simple workaround.
+ */
+#ifdef ERTS_WRAP_SYS_TIMES
+static clock_t sys_times_wrap(void);
+#define KERNEL_TICKS() (sys_times_wrap() & \
+ ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+#define ERTS_KERNEL_TICK_TO_USEC(TCKS) (((TCKS)*(1000*1000)) \
+ / internal_state.r.o.ticks_per_sec_wrap)
+#else
+
+#define KERNEL_TICKS() (sys_times(&internal_state.w.f.dummy_tms) & \
+ ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+#define ERTS_KERNEL_TICK_TO_USEC(TCKS) (((TCKS)*(1000*1000))/SYS_CLK_TCK)
+#endif
+
+#endif
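
The KERNEL_TICKS() macros above mask the times() return value down to the positive range of clock_t, so the wrap detection later in erts_os_monotonic_time() only has to deal with one well-defined wrap point. A standalone sketch of the masking and the tick-to-microsecond conversion; TICKS_PER_SEC_ASSUMED is an assumption for the example, the VM reads the real value via TICKS_PER_SEC() at init:

#include <stdio.h>
#include <time.h>

#define TICKS_PER_SEC_ASSUMED 100UL  /* assumed HZ; not the real configured value */

/* Keep only the positive range of clock_t, as KERNEL_TICKS() does above. */
static unsigned long mask_ticks(clock_t raw)
{
    return (unsigned long) raw & ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1);
}

static unsigned long ticks_to_usec(unsigned long ticks)
{
    return (ticks * 1000UL * 1000UL) / TICKS_PER_SEC_ASSUMED;
}

int main(void)
{
    unsigned long t = mask_ticks((clock_t) 12345);
    printf("ticks=%lu usec=%lu\n", t, ticks_to_usec(t));
    return 0;
}
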
/*
 * init timers, choose a tick length, and return it.
@@ -63,37 +94,374 @@ static int ticks_bsr = 0; /* Shift wrapped tick value this much to the right */
* does almost everything. Other platforms have to
* emulate Unix in this sense.
*/
-int sys_init_time(void)
+
+ErtsSysTimeData__ erts_sys_time_data__ erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+#if defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+
+#define ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
+
+ErtsMonotonicTime clock_gettime_monotonic_raw(void);
+ErtsMonotonicTime clock_gettime_monotonic_verified(void);
+
+#endif /* defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME) */
+
+#ifdef ERTS_SYS_TIME_INTERNAL_STATE_READ_ONLY__
+struct sys_time_internal_state_read_only__ {
+#if defined(OS_MONOTONIC_TIME_USING_TIMES)
+ int ticks_bsr;
+ int ticks_per_sec_wrap;
+#endif
+};
+#endif
+
+#ifdef ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
+struct sys_time_internal_state_write_freq__ {
+ erts_smp_mtx_t mtx;
+#if defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+ ErtsMonotonicTime last_delivered;
+#endif
+#if defined(OS_MONOTONIC_TIME_USING_TIMES)
+ ErtsMonotonicTime last_tick_count;
+ ErtsMonotonicTime last_tick_wrap_count;
+ ErtsMonotonicTime last_tick_monotonic_time;
+ ErtsMonotonicTime last_timeofday_usec;
+#ifndef ERTS_WRAP_SYS_TIMES
+ SysTimes dummy_tms;
+#endif
+#endif
+};
+#endif
+
+#if defined(ERTS_SYS_TIME_INTERNAL_STATE_READ_ONLY__) \
+ || defined(ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__)
+static struct {
+#ifdef ERTS_SYS_TIME_INTERNAL_STATE_READ_ONLY__
+ union {
+ struct sys_time_internal_state_read_only__ o;
+ char align__[(((sizeof(struct sys_time_internal_state_read_only__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+#endif
+#ifdef ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
+ union {
+ struct sys_time_internal_state_write_freq__ f;
+ char align__[(((sizeof(struct sys_time_internal_state_write_freq__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } w;
+#endif
+} internal_state erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+#endif
+
+void
+sys_init_time(ErtsSysInitTimeResult *init_resp)
{
+#if !defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
+
+ init_resp->have_os_monotonic = 0;
+
+#else /* defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT) */
+
+ int major, minor, build, vsn;
+
+ init_resp->os_monotonic_info.resolution = (Uint64) 1000*1000*1000;
+#if defined(HAVE_CLOCK_GETRES) && defined(MONOTONIC_CLOCK_ID)
+ {
+ struct timespec ts;
+ if (clock_getres(MONOTONIC_CLOCK_ID, &ts) == 0
+ && ts.tv_sec == 0 && ts.tv_nsec != 0) {
+ init_resp->os_monotonic_info.resolution /= ts.tv_nsec;
+ }
+ }
+#endif
+
+#ifdef MONOTONIC_CLOCK_ID_STR
+ init_resp->os_monotonic_info.clock_id = MONOTONIC_CLOCK_ID_STR;
+#else
+ init_resp->os_monotonic_info.clock_id = NULL;
+#endif
+
+ init_resp->os_monotonic_info.locked_use = 0;
+
+#if defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+ init_resp->os_monotonic_info.func = "clock_gettime";
+#elif defined(OS_MONOTONIC_TIME_USING_MACH_CLOCK_GET_TIME)
+ init_resp->os_monotonic_info.func = "clock_get_time";
+#elif defined(OS_MONOTONIC_TIME_USING_GETHRTIME)
+ init_resp->os_monotonic_info.func = "gethrtime";
+#elif defined(OS_MONOTONIC_TIME_USING_TIMES)
+ init_resp->os_monotonic_info.func = "times";
+ init_resp->os_monotonic_info.locked_use = 1;
+ init_resp->os_monotonic_info.resolution = TICKS_PER_SEC();
+#else
+# error Unknown erts_os_monotonic_time() implementation
+#endif
+
+ init_resp->have_os_monotonic = 1;
+
+ os_version(&major, &minor, &build);
+
+ vsn = ERTS_MK_VSN_INT(major, minor, build);
+
+
+#if defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+ if (vsn >= ERTS_MK_VSN_INT(2, 6, 33))
+ erts_sys_time_data__.r.o.os_monotonic_time =
+ clock_gettime_monotonic_raw;
+ else {
+ /*
+ * Linux versions prior to 2.6.33 have a
+	     * known bug that sometimes causes monotonic
+ * time to take small steps backwards.
+ */
+ erts_sys_time_data__.r.o.os_monotonic_time =
+ clock_gettime_monotonic_verified;
+ erts_smp_mtx_init(&internal_state.w.f.mtx,
+ "os_monotonic_time");
+ internal_state.w.f.last_delivered
+ = clock_gettime_monotonic_raw();
+ init_resp->os_monotonic_info.locked_use = 1;
+ }
+#else /* !(defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)) */
+ {
+ char flavor[1024];
+
+ os_flavor(flavor, sizeof(flavor));
+
+ if (sys_strcmp(flavor, "sunos") == 0) {
+ /*
+	     * Don't trust hrtime on multiprocessors
+ * on SunOS prior to SunOS 5.8
+ */
+ if (vsn < ERTS_MK_VSN_INT(5, 8, 0)) {
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)
+ if (sysconf(_SC_NPROCESSORS_CONF) > 1)
+#endif
+ init_resp->have_os_monotonic = 0;
+ }
+ }
+ }
+#endif /* !(defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)) */
+
+#endif /* defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT) */
+
+ init_resp->os_monotonic_time_unit = ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT;
+ init_resp->sys_clock_resolution = SYS_CLOCK_RESOLUTION;
+
/*
- * This (erts_ticks_per_sec) is only for times() (CLK_TCK),
- * the resolution is always one millisecond..
+ * This (erts_sys_time_data__.r.o.ticks_per_sec) is only for
+ * times() (CLK_TCK); the resolution is always one millisecond.
*/
- if ((erts_ticks_per_sec = TICKS_PER_SEC()) < 0)
- erl_exit(1, "Can't get clock ticks/sec\n");
- if (erts_ticks_per_sec >= 1000) {
+ if ((erts_sys_time_data__.r.o.ticks_per_sec = TICKS_PER_SEC()) < 0)
+ erl_exit(ERTS_ABORT_EXIT, "Can't get clock ticks/sec\n");
+
+#if defined(OS_MONOTONIC_TIME_USING_TIMES)
+
+ if (erts_sys_time_data__.r.o.ticks_per_sec >= 1000) {
/* Workaround for beta linux kernels, need to be done in runtime
to make erlang run on both 2.4 and 2.5 kernels. In the future,
the kernel ticks might as
well be used as a high res timer instead, but that's for when the
majority uses kernels with HZ == 1024 */
- ticks_bsr = 3;
+ internal_state.r.o.ticks_bsr = 3;
} else {
- ticks_bsr = 0;
+ internal_state.r.o.ticks_bsr = 0;
}
- erts_ticks_per_sec_wrap = (erts_ticks_per_sec >> ticks_bsr);
- return SYS_CLOCK_RESOLUTION;
+
+ internal_state.r.o.ticks_per_sec_wrap
+ = (erts_sys_time_data__.r.o.ticks_per_sec
+ >> internal_state.r.o.ticks_bsr);
+
+ erts_smp_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time");
+ internal_state.w.f.last_tick_count = KERNEL_TICKS();
+ internal_state.w.f.last_tick_wrap_count = 0;
+ internal_state.w.f.last_tick_monotonic_time
+ = ERTS_KERNEL_TICK_TO_USEC(internal_state.w.f.last_tick_count);
+ {
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ internal_state.w.f.last_timeofday_usec = tv.tv_sec*(1000*1000);
+ internal_state.w.f.last_timeofday_usec += tv.tv_usec;
+ }
+
+#endif /* defined(OS_MONOTONIC_TIME_USING_TIMES) */
+
+}
+
+#if defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
+
+static ERTS_INLINE ErtsMonotonicTime
+clock_gettime_monotonic(void)
+{
+ ErtsMonotonicTime mtime;
+ struct timespec ts;
+
+ if (clock_gettime(MONOTONIC_CLOCK_ID,&ts) != 0) {
+ int err = errno;
+ char *errstr = err ? strerror(err) : "unknown";
+ erl_exit(ERTS_ABORT_EXIT,
+ "clock_gettime(%s, _) failed: %s (%d)\n",
+ MONOTONIC_CLOCK_ID_STR, errstr, err);
+
+ }
+ mtime = (ErtsMonotonicTime) ts.tv_sec;
+ mtime *= (ErtsMonotonicTime) 1000*1000*1000;
+ mtime += (ErtsMonotonicTime) ts.tv_nsec;
+ return mtime;
+}
+
+#if defined(__linux__)
+
+ErtsMonotonicTime clock_gettime_monotonic_verified(void)
+{
+ ErtsMonotonicTime mtime;
+
+ mtime = clock_gettime_monotonic();
+
+ erts_smp_mtx_lock(&internal_state.w.f.mtx);
+ if (mtime < internal_state.w.f.last_delivered)
+ mtime = internal_state.w.f.last_delivered;
+ else
+ internal_state.w.f.last_delivered = mtime;
+ erts_smp_mtx_unlock(&internal_state.w.f.mtx);
+
+ return mtime;
+}
+
+ErtsMonotonicTime clock_gettime_monotonic_raw(void)
+{
+ return clock_gettime_monotonic();
}
-clock_t sys_times_wrap(void)
+#else /* !defined(__linux__) */
+
+ErtsMonotonicTime erts_os_monotonic_time(void)
+{
+ return clock_gettime_monotonic();
+}
+
+#endif /* !defined(__linux__) */
+
+#elif defined(OS_MONOTONIC_TIME_USING_MACH_CLOCK_GET_TIME)
+
+#include <mach/clock.h>
+#include <mach/mach.h>
+
+ErtsMonotonicTime erts_os_monotonic_time(void)
+{
+ ErtsMonotonicTime mtime;
+ kern_return_t res;
+ clock_serv_t clk_srv;
+ mach_timespec_t time_spec;
+ int err;
+
+ host_get_clock_service(mach_host_self(),
+ MONOTONIC_CLOCK_ID,
+ &clk_srv);
+ errno = 0;
+ res = clock_get_time(clk_srv, &time_spec);
+ err = errno;
+ mach_port_deallocate(mach_task_self(), clk_srv);
+ if (res != KERN_SUCCESS) {
+ char *errstr = err ? strerror(err) : "unknown";
+ erl_exit(ERTS_ABORT_EXIT,
+ "clock_get_time(%s, _) failed: %s (%d)\n",
+ MONOTONIC_CLOCK_ID_STR, errstr, err);
+ }
+
+ mtime = (ErtsMonotonicTime) time_spec.tv_sec;
+ mtime *= (ErtsMonotonicTime) 1000*1000*1000;
+ mtime += (ErtsMonotonicTime) time_spec.tv_nsec;
+ return mtime;
+}
+
+#elif defined(OS_MONOTONIC_TIME_USING_TIMES)
+
+static clock_t sys_times_wrap(void)
{
SysTimes dummy;
- clock_t result = (sys_times(&dummy) >> ticks_bsr);
+ clock_t result = (sys_times(&dummy) >> internal_state.r.o.ticks_bsr);
return result;
}
+void
+erts_os_time_offset_finalize(void)
+{
+ erts_smp_mtx_lock(&internal_state.w.f.mtx);
+ internal_state.w.f.last_tick_wrap_count = 0;
+ erts_smp_mtx_unlock(&internal_state.w.f.mtx);
+}
+
+#define ERTS_TIME_EXCEED_TICK_LIMIT(SYS_TIME, TCK_TIME) \
+ (((Uint64) (SYS_TIME)) - (((Uint64) (TCK_TIME)) \
+ - ERTS_KERNEL_TICK_TO_USEC(1)) \
+ > ERTS_KERNEL_TICK_TO_USEC(2))
+
+/* Returns monotonic time in microseconds */
+ErtsMonotonicTime
+erts_os_monotonic_time(void)
+{
+ SysTimeval tv;
+ ErtsMonotonicTime res;
+ ErtsMonotonicTime tick_count;
+ ErtsMonotonicTime tick_count_usec;
+ ErtsMonotonicTime tick_monotonic_time;
+ ErtsMonotonicTime timeofday_usec;
+ ErtsMonotonicTime timeofday_diff_usec;
+
+ erts_smp_mtx_lock(&internal_state.w.f.mtx);
+ tick_count = (ErtsMonotonicTime) KERNEL_TICKS();
+ sys_gettimeofday(&tv);
+
+ if (internal_state.w.f.last_tick_count > tick_count) {
+ internal_state.w.f.last_tick_wrap_count
+ += (((ErtsMonotonicTime) 1) << ((sizeof(clock_t) * 8) - 1));
+ }
+ internal_state.w.f.last_tick_count = tick_count;
+ tick_count += internal_state.w.f.last_tick_wrap_count;
+
+ tick_count_usec = ERTS_KERNEL_TICK_TO_USEC(tick_count);
+
+ timeofday_usec = (ErtsMonotonicTime) tv.tv_sec*(1000*1000);
+ timeofday_usec += (ErtsMonotonicTime) tv.tv_usec;
+ timeofday_diff_usec = timeofday_usec;
+ timeofday_diff_usec -= internal_state.w.f.last_timeofday_usec;
+ internal_state.w.f.last_timeofday_usec = timeofday_usec;
+
+ if (timeofday_diff_usec < 0) {
+	/* timeofday jumped backwards; use tick count only... */
+ tick_monotonic_time = tick_count_usec;
+ }
+ else {
+	/* Use time diff from timeofday if not off by too much... */
+ tick_monotonic_time = internal_state.w.f.last_tick_monotonic_time;
+ tick_monotonic_time += timeofday_diff_usec;
+
+ if (ERTS_TIME_EXCEED_TICK_LIMIT(tick_monotonic_time, tick_count_usec)) {
+ /*
+ * Value off by more than one tick from tick_count, i.e.
+	     * timeofday leaped one way or the other. We use
+ * tick_count_usec as is instead and unfortunately
+ * get lousy precision.
+ */
+ tick_monotonic_time = tick_count_usec;
+ }
+ }
+
+ if (internal_state.w.f.last_tick_monotonic_time < tick_monotonic_time)
+ internal_state.w.f.last_tick_monotonic_time = tick_monotonic_time;
+
+ res = internal_state.w.f.last_tick_monotonic_time;
+
+ erts_smp_mtx_unlock(&internal_state.w.f.mtx);
+
+ return res;
+}
+#endif /* !defined(OS_MONOTONIC_TIME_USING_TIMES) */
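
The times()-based erts_os_monotonic_time() above blends two sources: the wrapped kernel tick count (coarse but monotonic) and the gettimeofday() delta (fine-grained but allowed to jump). A self-contained sketch of that blending rule with all state kept in locals; the 10 ms tick is an assumption, and the tolerance check is an inline approximation of ERTS_TIME_EXCEED_TICK_LIMIT:

#include <stdio.h>

#define TICK_USEC 10000LL   /* assumed 100 Hz kernel ticks */

/* One step: prefer last_mono + timeofday delta, but fall back to the tick
 * clock when timeofday moved backwards or ran more than ~1 tick ahead. */
static long long blend(long long last_mono, long long tick_usec,
                       long long tod_diff_usec)
{
    long long mono;
    if (tod_diff_usec < 0)
        mono = tick_usec;                          /* timeofday jumped backwards */
    else {
        mono = last_mono + tod_diff_usec;
        if (mono - (tick_usec - TICK_USEC) > 2 * TICK_USEC)
            mono = tick_usec;                      /* timeofday leaped; use ticks */
    }
    return mono > last_mono ? mono : last_mono;    /* never step backwards */
}

int main(void)
{
    long long m = 0;
    m = blend(m, 10000, 9000);    /* normal case: follow timeofday (9000)    */
    printf("%lld\n", m);
    m = blend(m, 20000, -500);    /* backwards jump: tick clock wins (20000) */
    printf("%lld\n", m);
    return 0;
}
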
#ifdef HAVE_GETHRVTIME_PROCFS_IOCTL
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 972170d465..5a62b00a68 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -285,7 +285,7 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
erts_smp_mtx_t mtx;
#endif
- erts_smp_atomic32_t timeout;
+ erts_atomic64_t timeout_time;
};
#ifdef ERTS_SMP
@@ -363,6 +363,26 @@ do { \
wait_standby(PS); \
} while(0)
+static ERTS_INLINE void
+init_timeout_time(ErtsPollSet ps)
+{
+ erts_atomic64_init_nob(&ps->timeout_time,
+ (erts_aint64_t) ERTS_MONOTONIC_TIME_MAX);
+}
+
+static ERTS_INLINE void
+set_timeout_time(ErtsPollSet ps, ErtsMonotonicTime time)
+{
+ erts_atomic64_set_relb(&ps->timeout_time,
+ (erts_aint64_t) time);
+}
+
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_time(ErtsPollSet ps)
+{
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&ps->timeout_time);
+}
+
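
The three helpers above publish the poll set's next wakeup time with a release store and read it back with an acquire load, so erts_poll_interrupt_timed() on another thread compares against a consistently published value. A hedged sketch of the same publish/compare pattern using C11 atomics instead of the erts atomic API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t timeout_time = INT64_MAX;   /* "no timeout set" */

static void publish_timeout(int64_t t)
{
    atomic_store_explicit(&timeout_time, t, memory_order_release);
}

static int64_t read_timeout(void)
{
    return atomic_load_explicit(&timeout_time, memory_order_acquire);
}

int main(void)
{
    publish_timeout(123456);
    /* An interrupting thread would only wake the poller if its own
       deadline is earlier than the published one. */
    printf("%lld\n", (long long) read_timeout());
    return 0;
}
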
#define ERTS_POLL_NOT_WOKEN ((erts_aint32_t) 0)
#define ERTS_POLL_WOKEN_IO_READY ((erts_aint32_t) 1)
#define ERTS_POLL_WOKEN_INTR ((erts_aint32_t) 2)
@@ -422,15 +442,29 @@ wakeup_cause(ErtsPollSet ps)
}
static ERTS_INLINE DWORD
-poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
+poll_wait_timeout(ErtsPollSet ps, ErtsMonotonicTime timeout_time)
{
- time_t timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
+ ErtsMonotonicTime current_time, diff_time, timeout;
- if (timeout <= 0) {
+ if (timeout_time == ERTS_POLL_NO_TIMEOUT) {
+ no_timeout:
+ set_timeout_time(ps, ERTS_MONOTONIC_TIME_MIN);
woke_up(ps);
return (DWORD) 0;
}
+ current_time = erts_get_monotonic_time();
+ diff_time = timeout_time - current_time;
+ if (diff_time <= 0)
+ goto no_timeout;
+
+    /* Round up to nearest millisecond */
+ timeout = (ERTS_MONOTONIC_TO_MSEC(diff_time - 1) + 1);
+ if (timeout > INT_MAX)
+ timeout = INT_MAX; /* Also prevents DWORD overflow */
+
+ set_timeout_time(ps, current_time + ERTS_MSEC_TO_MONOTONIC(timeout));
+
ResetEvent(ps->event_io_ready);
/*
* Since we don't know the internals of ResetEvent() we issue
@@ -442,10 +476,6 @@ poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
if (erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
return (DWORD) 0;
- if (timeout > ((time_t) ERTS_AINT32_T_MAX))
- timeout = ERTS_AINT32_T_MAX; /* Also prevents DWORD overflow */
-
- erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
return (DWORD) timeout;
}
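
The rounding above, ERTS_MONOTONIC_TO_MSEC(diff_time - 1) + 1, is a ceiling conversion so that any remaining fraction of a millisecond still produces a non-zero wait. A tiny sketch of the same rounding, assuming a microsecond monotonic unit (the real unit is whatever sys_init_time reported):

#include <stdio.h>

/* Ceiling division of a positive microsecond difference into milliseconds,
 * mirroring (ERTS_MONOTONIC_TO_MSEC(diff - 1) + 1) for diff > 0. */
static long long usec_to_msec_ceil(long long diff_usec)
{
    return (diff_usec - 1) / 1000 + 1;
}

int main(void)
{
    printf("%lld\n", usec_to_msec_ceil(1));     /* -> 1 */
    printf("%lld\n", usec_to_msec_ceil(1000));  /* -> 1 */
    printf("%lld\n", usec_to_msec_ceil(1001));  /* -> 2 */
    return 0;
}
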
@@ -1012,12 +1042,12 @@ void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
void erts_poll_interrupt_timed(ErtsPollSet ps,
int set /* bool */,
- erts_short_time_t msec)
+ ErtsMonotonicTime timeout_time)
{
- HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
+ HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,timeout_time));
if (!set)
reset_interrupt(ps);
- else if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ else if (get_timeout_time(ps) > timeout_time)
set_interrupt(ps);
HARDTRACEF(("Out erts_poll_interrupt_timed"));
}
@@ -1092,7 +1122,7 @@ void erts_poll_controlv(ErtsPollSet ps,
int erts_poll_wait(ErtsPollSet ps,
ErtsPollResFd pr[],
int *len,
- SysTimeval *tvp)
+ ErtsMonotonicTime timeout_time)
{
int no_fds;
DWORD timeout;
@@ -1149,7 +1179,7 @@ int erts_poll_wait(ErtsPollSet ps,
no_fds = ERTS_POLL_MAX_RES;
#endif
- timeout = poll_wait_timeout(ps, tvp);
+ timeout = poll_wait_timeout(ps, timeout_time);
/*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
@@ -1242,7 +1272,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic32_set_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+ set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1326,7 +1356,7 @@ ErtsPollSet erts_poll_create_pollset(void)
#ifdef ERTS_SMP
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
- erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+ init_timeout_time(ps);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 838f0c61eb..33aa88ab5f 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -120,9 +120,6 @@
/*
* For erl_time_sup
*/
-#define HAVE_GETHRTIME
-
-#define sys_init_hrtime() /* Nothing */
#define SYS_CLK_TCK 1000
#define SYS_CLOCK_RESOLUTION 1
@@ -164,18 +161,58 @@ typedef struct {
#if defined (__GNUC__)
typedef unsigned long long Uint64;
typedef long long Sint64;
-
-typedef long long SysHrTime;
+# ifdef ULLONG_MAX
+# define ERTS_UINT64_MAX ULLONG_MAX
+# endif
+# ifdef LLONG_MAX
+# define ERTS_SINT64_MAX LLONG_MAX
+# endif
+# ifdef LLONG_MIN
+# define ERTS_SINT64_MIN LLONG_MIN
+# endif
+
+typedef long long ErtsMonotonicTime;
#else
typedef ULONGLONG Uint64;
typedef LONGLONG Sint64;
-typedef LONGLONG SysHrTime;
+typedef LONGLONG ErtsMonotonicTime;
#endif
-extern int sys_init_time(void);
+#define ERTS_MONOTONIC_TIME_MIN (((ErtsMonotonicTime) 1) << 63)
+#define ERTS_MONOTONIC_TIME_MAX (~ERTS_MONOTONIC_TIME_MIN)
+
+#define ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT 1
+#define ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT 0
+
+struct erts_sys_time_read_only_data__ {
+ ErtsMonotonicTime (*os_monotonic_time)(void);
+};
+
+typedef struct {
+ union {
+ struct erts_sys_time_read_only_data__ o;
+ char align__[(((sizeof(struct erts_sys_time_read_only_data__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+} ErtsSysTimeData__;
+
+extern ErtsSysTimeData__ erts_sys_time_data__;
+
+ERTS_GLB_INLINE ErtsMonotonicTime erts_os_monotonic_time(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_os_monotonic_time(void)
+{
+ return (*erts_sys_time_data__.r.o.os_monotonic_time)();
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
extern void sys_gettimeofday(SysTimeval *tv);
-extern SysHrTime sys_gethrtime(void);
extern clock_t sys_times(SysTimes *buffer);
extern char *win_build_environment(char *);
@@ -236,4 +273,16 @@ typedef long ssize_t;
int init_async(int);
int exit_async(void);
#endif
+
+#define ERTS_HAVE_TRY_CATCH 1
+
+#define ERTS_SYS_TRY_CATCH(EXPR,CATCH) \
+ __try { \
+ EXPR; \
+ } \
+ __except(GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) \
+ { \
+ CATCH; \
+ }
+
#endif
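
ERTS_SYS_TRY_CATCH above wraps MSVC structured exception handling so callers can probe memory that may fault, which is what erts_sys_is_area_readable() in sys.c does directly. A hedged usage sketch, Windows/MSVC only; safe_read_byte() is a hypothetical caller and the sketch assumes the macro definition above is in scope:

#include <windows.h>
/* assumes ERTS_SYS_TRY_CATCH from the header above is visible here */

/* Returns 1 and stores *p in *out if p is readable, 0 if the read faults. */
static int safe_read_byte(const volatile char *p, char *out)
{
    int ok = 1;
    ERTS_SYS_TRY_CATCH(*out = *p,   /* may raise an access violation  */
                       ok = 0);     /* swallow it and report failure  */
    return ok;
}

int main(void)
{
    char c = 'x', byte = 0;
    return safe_read_byte(&c, &byte) ? 0 : 1;   /* readable stack byte -> 0 */
}
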
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 0ded6b274e..cf587af4ac 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -247,6 +247,27 @@ void erl_sys_args(int* argc, char** argv)
#endif
}
+/*
+ * Returns 1 if every byte in the memory area between
+ * start and stop can be read.
+ */
+int
+erts_sys_is_area_readable(char *start, char *stop) {
+ volatile char tmp;
+ __try
+ {
+ while(start < stop) {
+ tmp = *start;
+ start++;
+ }
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ return 0;
+ }
+ return 1;
+}
+
int erts_sys_prepare_crash_dump(int secs)
{
Port *heart_port;
@@ -1392,39 +1413,46 @@ int parse_command(wchar_t* cmd){
return i;
}
-static BOOL need_quotes(wchar_t *str)
-{
- int in_quote = 0;
- int backslashed = 0;
- int naked_space = 0;
- while (*str != L'\0') {
- switch (*str) {
- case L'\\' :
- backslashed = !backslashed;
- break;
- case L'"':
- if (backslashed) {
- backslashed=0;
- } else {
- in_quote = !in_quote;
- }
- break;
- case L' ':
- backslashed = 0;
- if (!(backslashed || in_quote)) {
- naked_space++;
- }
- break;
- default:
- backslashed = 0;
+/*
+ * Translation of command line arguments to the correct format. In the examples
+ * below the '' are not part of the actual string.
+ * 'io:format("hello").' -> 'io:format(\"hello\").'
+ * 'io:format("is anybody in there?").' -> '"io:format(\"is anybody in there?\")."'
+ * 'Just nod if you can hear me.' -> '"Just nod if you can hear me."'
+ * 'Is there ""anyone at home?' -> '"Is there \"\"anyone at home?"'
+ * 'Relax."' -> 'Relax.\"'
+ *
+ * If new == NULL we just calculate the length.
+ *
+ * The reason for having to quote all of these is that CreateProcessW removes
+ * one level of escaping, since it takes a single long command line rather
+ * than the argument vector that Unix uses.
+ */
+static int escape_and_quote(wchar_t *str, wchar_t *new, BOOL *quoted) {
+ int i, j = 0;
+ if (new == NULL)
+ *quoted = FALSE;
+ else if (*quoted)
+ new[j++] = L'"';
+ for ( i = 0; str[i] != L'\0'; i++,j++) {
+ if (str[i] == L' ' && new == NULL && *quoted == FALSE) {
+ *quoted = TRUE;
+ j++;
+ }
+ /* check if we have to escape quotes */
+ if (str[i] == L'"') {
+ if (new) new[j] = L'\\';
+ j++;
}
- ++str;
+ if (new) new[j] = str[i];
+ }
+ if (*quoted) {
+ if (new) new[j] = L'"';
+ j++;
}
- return (naked_space > 0);
+ return j;
}
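
escape_and_quote() is written for a two-pass calling convention: called with new == NULL it only computes the escaped length and the quoting decision, and called again with a buffer of that size it fills it in. A minimal ASCII sketch of the same two-pass pattern; the helper name and the simplified quoting rule (quote when a space is present, backslash-escape double quotes) are illustrative, the real function works on wchar_t with the Windows rules described above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pass 1 (out == NULL): return escaped length, set *quoted.
 * Pass 2 (out != NULL): fill the buffer using the *quoted decision. */
static int escape_len_or_fill(const char *s, char *out, int *quoted)
{
    int i, j = 0;
    if (out == NULL)
        *quoted = (strchr(s, ' ') != NULL);
    if (*quoted)
        { if (out) out[j] = '"'; j++; }      /* leading quote */
    for (i = 0; s[i] != '\0'; i++, j++) {
        if (s[i] == '"') {
            if (out) out[j] = '\\';          /* escape embedded quote */
            j++;
        }
        if (out) out[j] = s[i];
    }
    if (*quoted)
        { if (out) out[j] = '"'; j++; }      /* trailing quote */
    return j;
}

int main(void)
{
    const char *arg = "say \"hi\" there";
    int quoted;
    int len = escape_len_or_fill(arg, NULL, &quoted);   /* pass 1: length */
    char *buf = malloc(len + 1);
    escape_len_or_fill(arg, buf, &quoted);              /* pass 2: fill   */
    buf[len] = '\0';
    printf("%s\n", buf);                                /* "say \"hi\" there" */
    free(buf);
    return 0;
}
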
-
-
/*
*----------------------------------------------------------------------
@@ -1585,31 +1613,24 @@ create_child_process
wcscpy(appname, execPath);
}
if (argv == NULL) {
- BOOL orig_need_q = need_quotes(execPath);
+ BOOL orig_need_q;
wchar_t *ptr;
- int ocl = wcslen(execPath);
+ int ocl = escape_and_quote(execPath, NULL, &orig_need_q);
if (run_cmd) {
newcmdline = (wchar_t *) erts_alloc(ERTS_ALC_T_TMP,
- (ocl + ((orig_need_q) ? 3 : 1)
- + 11)*sizeof(wchar_t));
+ (ocl + 1 + 11)*sizeof(wchar_t));
memcpy(newcmdline,L"cmd.exe /c ",11*sizeof(wchar_t));
ptr = newcmdline + 11;
} else {
newcmdline = (wchar_t *) erts_alloc(ERTS_ALC_T_TMP,
- (ocl + ((orig_need_q) ? 3 : 1))*sizeof(wchar_t));
+ (ocl + 1)*sizeof(wchar_t));
ptr = (wchar_t *) newcmdline;
}
- if (orig_need_q) {
- *ptr++ = L'"';
- }
- memcpy(ptr,execPath,ocl*sizeof(wchar_t));
- ptr += ocl;
- if (orig_need_q) {
- *ptr++ = L'"';
- }
- *ptr = L'\0';
+ ptr += escape_and_quote(execPath, ptr, &orig_need_q);
+ ptr[0] = L'\0';
} else {
- int sum = 1; /* '\0' */
+ int sum = 0;
+ BOOL *qte = NULL;
wchar_t **ar = argv;
wchar_t *n;
wchar_t *save_arg0 = NULL;
@@ -1620,11 +1641,13 @@ create_child_process
if (run_cmd) {
sum += 11; /* cmd.exe /c */
}
+
+ while (*ar != NULL) ar++;
+ qte = erts_alloc(ERTS_ALC_T_TMP, (ar - argv)*sizeof(BOOL));
+
+ ar = argv;
while (*ar != NULL) {
- sum += wcslen(*ar);
- if (need_quotes(*ar)) {
- sum += 2; /* quotes */
- }
+ sum += escape_and_quote(*ar,NULL,qte+(ar - argv));
sum++; /* space */
++ar;
}
@@ -1636,26 +1659,18 @@ create_child_process
n += 11;
}
while (*ar != NULL) {
- int q = need_quotes(*ar);
- sum = wcslen(*ar);
- if (q) {
- *n++ = L'"';
- }
- memcpy(n,*ar,sum*sizeof(wchar_t));
- n += sum;
- if (q) {
- *n++ = L'"';
- }
+ n += escape_and_quote(*ar,n,qte+(ar - argv));
*n++ = L' ';
++ar;
}
- *(n-1) = L'\0';
+ *(n-1) = L'\0'; /* overwrite last space with '\0' */
if (save_arg0 != NULL) {
argv[0] = save_arg0;
}
+ erts_free(ERTS_ALC_T_TMP, qte);
}
- DEBUGF(("Creating child process: %s, createFlags = %d\n", newcmdline, createFlags));
+ DEBUGF((stderr,"Creating child process: %S, createFlags = %d\n", newcmdline, createFlags));
ok = CreateProcessW((wchar_t *) appname,
(wchar_t *) newcmdline,
NULL,
@@ -2190,7 +2205,7 @@ static void fd_stop(ErlDrvData data)
ASSERT(dp->out.flushEvent);
SetEvent(dp->out.flushEvent);
} while (WaitForSingleObject(dp->out.flushReplyEvent, 10) == WAIT_TIMEOUT
- || !(dp->out.flags & DF_THREAD_FLUSHED));
+ && !(dp->out.flags & DF_THREAD_FLUSHED));
}
}
@@ -3157,25 +3172,31 @@ thr_create_prepare_child(void *vtcdp)
void
erts_sys_pre_init(void)
{
+#ifdef USE_THREADS
+ erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+#endif
int_os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
GetVersionEx(&int_os_version);
check_supported_os_version();
+
#ifdef USE_THREADS
- {
- erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+ eid.thread_create_child_func = thr_create_prepare_child;
+ /* Before creation in parent */
+ eid.thread_create_prepare_func = thr_create_prepare;
+ /* After creation in parent */
+ eid.thread_create_parent_func = thr_create_cleanup;
- eid.thread_create_child_func = thr_create_prepare_child;
- /* Before creation in parent */
- eid.thread_create_prepare_func = thr_create_prepare;
- /* After creation in parent */
- eid.thread_create_parent_func = thr_create_cleanup,
+ erts_thr_init(&eid);
+#endif
+
+ erts_init_sys_time_sup();
- erts_thr_init(&eid);
+#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init();
+ erts_lcnt_init();
#endif
- }
#endif
+
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
@@ -3258,9 +3279,9 @@ erts_sys_schedule_interrupt(int set)
#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
+erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time)
{
- erts_check_io_interrupt_timed(set, msec);
+ erts_check_io_interrupt_timed(set, timeout_time);
}
#endif
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index b84c8f85ce..3a10125c81 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -61,11 +61,6 @@
(epoch) = ((ull.QuadPart / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF); \
} while(0)
-static SysHrTime wrap = 0;
-static DWORD last_tick_count = 0;
-static erts_smp_mtx_t wrap_lock;
-static ULONGLONG (WINAPI *pGetTickCount64)(void) = NULL;
-
/* Getting timezone information is a heavy operation, so we want to do this
only once */
@@ -76,17 +71,161 @@ static int days_in_month[2][13] = {
{0,31,28,31,30,31,30,31,31,30,31,30,31},
{0,31,29,31,30,31,30,31,31,30,31,30,31}};
-int
-sys_init_time(void)
+/*
+ * erts_os_monotonic_time()
+ */
+
+struct sys_time_internal_state_read_only__ {
+ ULONGLONG (WINAPI *pGetTickCount64)(void);
+ BOOL (WINAPI *pQueryPerformanceCounter)(LARGE_INTEGER *);
+};
+
+struct sys_time_internal_state_write_freq__ {
+ erts_smp_mtx_t mtime_mtx;
+ ULONGLONG wrap;
+ ULONGLONG last_tick_count;
+};
+
+__declspec(align(ASSUMED_CACHE_LINE_SIZE)) struct {
+ union {
+ struct sys_time_internal_state_read_only__ o;
+ char align__[(((sizeof(struct sys_time_internal_state_read_only__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } r;
+ union {
+ struct sys_time_internal_state_write_freq__ f;
+ char align__[(((sizeof(struct sys_time_internal_state_write_freq__) - 1)
+ / ASSUMED_CACHE_LINE_SIZE) + 1)
+ * ASSUMED_CACHE_LINE_SIZE];
+ } w;
+} internal_state;
+
+__declspec(align(ASSUMED_CACHE_LINE_SIZE)) ErtsSysTimeData__ erts_sys_time_data__;
+
+static ErtsMonotonicTime
+os_monotonic_time_qpc(void)
{
+ LARGE_INTEGER pc;
+
+ if (!(*internal_state.r.o.pQueryPerformanceCounter)(&pc))
+ erl_exit(ERTS_ABORT_EXIT, "QueryPerformanceCounter() failed\n");
+
+ return (ErtsMonotonicTime) pc.QuadPart;
+}
+
+static ErtsMonotonicTime
+os_monotonic_time_gtc32(void)
+{
+ ULONGLONG res, ticks;
+
+ erts_smp_mtx_lock(&internal_state.w.f.mtime_mtx);
+
+ ticks = (ULONGLONG) (GetTickCount() & 0x7FFFFFFF);
+ if (ticks < internal_state.w.f.last_tick_count)
+ internal_state.w.f.wrap += (ULONGLONG) LL_LITERAL(1) << 31;
+ internal_state.w.f.last_tick_count = ticks;
+ res = ticks + internal_state.w.f.wrap;
+
+ erts_smp_mtx_unlock(&internal_state.w.f.mtime_mtx);
+
+ return (ErtsMonotonicTime) res*1000;
+}
+
+static ErtsMonotonicTime
+os_monotonic_time_gtc64(void)
+{
+ ULONGLONG ticks = (*internal_state.r.o.pGetTickCount64)();
+ return (ErtsMonotonicTime) ticks*1000;
+}
+
+/*
+ * Init
+ */
+
+void
+sys_init_time(ErtsSysInitTimeResult *init_resp)
+{
+ ErtsMonotonicTime (*os_mtime_func)(void);
+ ErtsMonotonicTime time_unit;
char kernel_dll_name[] = "kernel32";
HMODULE module;
+ init_resp->os_monotonic_info.clock_id = NULL;
+
module = GetModuleHandle(kernel_dll_name);
- pGetTickCount64 = (module != NULL) ?
- (ULONGLONG (WINAPI *)(void))
- GetProcAddress(module,"GetTickCount64") :
- NULL;
+ if (!module) {
+ get_tick_count:
+ erts_smp_mtx_init(&internal_state.w.f.mtime_mtx,
+ "os_monotonic_time");
+ internal_state.w.f.wrap = 0;
+ internal_state.w.f.last_tick_count = 0;
+
+ init_resp->os_monotonic_info.func = "GetTickCount";
+ init_resp->os_monotonic_info.locked_use = 1;
+ init_resp->os_monotonic_info.resolution = 1000;
+ time_unit = (ErtsMonotonicTime) 1000*1000;
+ os_mtime_func = os_monotonic_time_gtc32;
+ }
+ else {
+ int major, minor, build;
+
+ os_version(&major, &minor, &build);
+
+ if (major < 6) {
+
+ get_tick_count64:
+
+ internal_state.r.o.pGetTickCount64
+ = ((ULONGLONG (WINAPI *)(void))
+ GetProcAddress(module, "GetTickCount64"));
+ if (!internal_state.r.o.pGetTickCount64)
+ goto get_tick_count;
+
+ init_resp->os_monotonic_info.func = "GetTickCount64";
+ init_resp->os_monotonic_info.locked_use = 0;
+ init_resp->os_monotonic_info.resolution = 1000;
+ time_unit = (ErtsMonotonicTime) 1000*1000;
+ os_mtime_func = os_monotonic_time_gtc64;
+ }
+ else { /* Vista or newer... */
+
+ LARGE_INTEGER pf;
+ BOOL (WINAPI *QPF)(LARGE_INTEGER *);
+
+ QPF = ((BOOL (WINAPI *)(LARGE_INTEGER *))
+ GetProcAddress(module, "QueryPerformanceFrequency"));
+ if (!QPF)
+ goto get_tick_count64;
+ if (!(*QPF)(&pf))
+ goto get_tick_count64;
+ /*
+	     * We only use QueryPerformanceCounter() if
+	     * its frequency is at least 1 GHz, in order
+	     * to ensure that the user won't be able to
+	     * observe incorrect ordering between values
+	     * retrieved on different threads.
+ */
+ if (pf.QuadPart < (LONGLONG) 1000*1000*1000)
+ goto get_tick_count64;
+ internal_state.r.o.pQueryPerformanceCounter
+ = ((BOOL (WINAPI *)(LARGE_INTEGER *))
+ GetProcAddress(module, "QueryPerformanceCounter"));
+ if (!internal_state.r.o.pQueryPerformanceCounter)
+ goto get_tick_count64;
+
+ init_resp->os_monotonic_info.func = "QueryPerformanceCounter";
+ init_resp->os_monotonic_info.locked_use = 0;
+ time_unit = (ErtsMonotonicTime) pf.QuadPart;
+ init_resp->os_monotonic_info.resolution = time_unit;
+ os_mtime_func = os_monotonic_time_qpc;
+ }
+ }
+
+ erts_sys_time_data__.r.o.os_monotonic_time = os_mtime_func;
+ init_resp->os_monotonic_time_unit = time_unit;
+ init_resp->have_os_monotonic = 1;
+ init_resp->sys_clock_resolution = 1;
if(GetTimeZoneInformation(&static_tzi) &&
static_tzi.StandardDate.wMonth != 0 &&
@@ -94,9 +233,6 @@ sys_init_time(void)
have_static_tzi = 1;
}
- erts_smp_mtx_init(&wrap_lock, "sys_gethrtime");
-
- return 1;
}
/* Returns a switchtimes for DST as UTC filetimes given data from a
@@ -377,41 +513,6 @@ sys_gettimeofday(SysTimeval *tv)
EPOCH_JULIAN_DIFF);
}
-extern int erts_initialized;
-SysHrTime
-sys_gethrtime(void)
-{
- if (pGetTickCount64 != NULL) {
- return ((SysHrTime) pGetTickCount64()) * LL_LITERAL(1000000);
- } else {
- DWORD ticks;
- SysHrTime res;
- erts_smp_mtx_lock(&wrap_lock);
- ticks = (SysHrTime) (GetTickCount() & 0x7FFFFFFF);
- if (ticks < (SysHrTime) last_tick_count) {
- /* Detect a race that should no longer be here... */
- if ((((SysHrTime) last_tick_count) - ((SysHrTime) ticks)) > 1000) {
- wrap += LL_LITERAL(1) << 31;
- } else {
- /*
- * XXX Debug: Violates locking order, remove all this,
- * after testing!
- */
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Did not wrap when last_tick %d "
- "and tick %d",
- last_tick_count, ticks);
- erts_send_error_to_logger_nogl(dsbufp);
- ticks = last_tick_count;
- }
- }
- last_tick_count = ticks;
- res = ((((LONGLONG) ticks) + wrap) * LL_LITERAL(1000000));
- erts_smp_mtx_unlock(&wrap_lock);
- return res;
- }
-}
-
clock_t
sys_times(SysTimes *buffer) {
clock_t kernel_ticks = (GetTickCount() /
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index dfbe47786a..dd2e2cb504 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -108,6 +108,7 @@ MODULES= \
trace_call_time_SUITE \
scheduler_SUITE \
old_scheduler_SUITE \
+ unique_SUITE \
z_SUITE \
old_mod \
long_timers_test \
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index fbc229bc53..fc9bdae0a0 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -20,6 +20,7 @@
-module(bif_SUITE).
-include_lib("test_server/include/test_server.hrl").
+-include_lib("kernel/include/file.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
@@ -681,8 +682,38 @@ erlang_halt(Config) when is_list(Config) ->
{badrpc,nodedown} = rpc:call(N2, erlang, halt, [0]),
{ok,N3} = slave:start(H, halt_node3),
{badrpc,nodedown} = rpc:call(N3, erlang, halt, [0,[]]),
- ok.
+    % This test triggers a segfault while a crash dump is being written,
+    % to make sure that we can handle it properly.
+ {ok,N4} = slave:start(H, halt_node4),
+ CrashDump = filename:join(proplists:get_value(priv_dir,Config),
+ "segfault_erl_crash.dump"),
+ true = rpc:call(N4, os, putenv, ["ERL_CRASH_DUMP",CrashDump]),
+ false = rpc:call(N4, erts_debug, set_internal_state,
+ [available_internal_state, true]),
+ {badrpc,nodedown} = rpc:call(N4, erts_debug, set_internal_state,
+ [broken_halt, "Validate correct crash dump"]),
+ ok = wait_until_stable_size(CrashDump,-1),
+ {ok, Bin} = file:read_file(CrashDump),
+ case {string:str(binary_to_list(Bin),"\n=end\n"),
+ string:str(binary_to_list(Bin),"\r\n=end\r\n")} of
+ {0,0} -> ct:fail("Could not find end marker in crash dump");
+ _ -> ok
+ end.
+
+wait_until_stable_size(_File,-10) ->
+ {error,enoent};
+wait_until_stable_size(File,PrevSz) ->
+ timer:sleep(250),
+ case file:read_file_info(File) of
+ {error,enoent} ->
+ wait_until_stable_size(File,PrevSz-1);
+ {ok,#file_info{size = PrevSz }} when PrevSz /= -1 ->
+ io:format("Crashdump file size was: ~p (~s)~n",[PrevSz,File]),
+ ok;
+ {ok,#file_info{size = NewSz }} ->
+ wait_until_stable_size(File,NewSz)
+ end.
%% Helpers
diff --git a/erts/emulator/test/long_timers_test.erl b/erts/emulator/test/long_timers_test.erl
index 28a4fba9f6..f381332b51 100644
--- a/erts/emulator/test/long_timers_test.erl
+++ b/erts/emulator/test/long_timers_test.erl
@@ -28,7 +28,7 @@
-define(MAX_TIMEOUT, 60). % Minutes
--define(MAX_LATE, 10*1000). % Milliseconds
+-define(MAX_LATE_MS, 10*1000). % Milliseconds
-define(REG_NAME, '___LONG___TIMERS___TEST___SERVER___').
-define(DRV_NAME, timer_driver).
@@ -75,7 +75,7 @@ check_result() ->
erlang:demonitor(Mon),
receive {'DOWN', Mon, _, _, _} -> ok after 0 -> ok end,
stop_node(Node),
- check(TORs, (timer:now_diff(End, Start) div 1000) - ?MAX_LATE, ok)
+ check(TORs, ms((End - Start) - max_late()), ok)
end.
check([#timeout_rec{timeout = Timeout,
@@ -83,7 +83,7 @@ check([#timeout_rec{timeout = Timeout,
timeout_diff = undefined} | TORs],
NeedRes,
_Ok) when Timeout < NeedRes ->
- io:format("~p timeout = ~p failed! No timeout.~n",
+ io:format("~p timeout = ~p ms failed! No timeout.~n",
[Type, Timeout]),
check(TORs, NeedRes, failed);
check([#timeout_rec{timeout_diff = undefined} | TORs],
@@ -95,7 +95,7 @@ check([#timeout_rec{timeout = Timeout,
timeout_diff = {error, Reason}} | TORs],
NeedRes,
_Ok) ->
- io:format("~p timeout = ~p failed! exit reason ~p~n",
+ io:format("~p timeout = ~p ms failed! exit reason ~p~n",
[Type, Timeout, Reason]),
check(TORs, NeedRes, failed);
check([#timeout_rec{timeout = Timeout,
@@ -103,43 +103,77 @@ check([#timeout_rec{timeout = Timeout,
timeout_diff = TimeoutDiff} | TORs],
NeedRes,
Ok) ->
- case (0 =< TimeoutDiff) and (TimeoutDiff =< ?MAX_LATE) of
- true ->
- io:format("~p timeout = ~p succeded! timeout diff = ~p.~n",
- [Type, Timeout, TimeoutDiff]),
- check(TORs, NeedRes, Ok);
- false ->
- io:format("~p timeout = ~p failed! timeout diff = ~p.~n",
- [Type, Timeout, TimeoutDiff]),
- check(TORs, NeedRes, failed)
- end;
+ {NewOk, SuccessStr} = case ((0 =< TimeoutDiff)
+ andalso (TimeoutDiff =< max_late())) of
+ true -> {Ok, "succeeded"};
+ false -> {failed, "FAILED"}
+ end,
+ io:format("~s timeout = ~s ms ~s! timeout diff = ~s.~n",
+ [type_str(Type),
+ time_str(Timeout),
+ SuccessStr,
+ time_str(TimeoutDiff, erlang:convert_time_unit(1, seconds, native))]),
+ check(TORs, NeedRes, NewOk);
check([], _NeedRes, Ok) ->
Ok.
+type_str(receive_after) -> "receive ... after";
+type_str(bif_timer) -> "BIF timer";
+type_str(driver) -> "driver".
+
+time_str(Time, Unit) ->
+ lists:flatten([time_str(Time), " ", unit_str(Unit)]).
+
+time_str(Time) ->
+ lists:reverse(conv_time_str(lists:reverse(integer_to_list(Time)))).
+
+conv_time_str([X,Y,Z,C|Cs]) when C /= $- ->
+ [X,Y,Z,$`|conv_time_str([C|Cs])];
+conv_time_str(Cs) ->
+ Cs.
+
+unit_str(1) -> "s";
+unit_str(1000) -> "ms";
+unit_str(1000000) -> "us";
+unit_str(1000000000) -> "ns";
+unit_str(Res) when is_integer(Res) -> ["/ ", integer_to_list(Res), " s"];
+unit_str(Res) -> Res.
+
+to_diff(Timeout, Start, Stop) ->
+    %% 'Timeout' in milliseconds
+ %% 'Start', 'Stop', and result in native unit
+ (Stop - Start) - erlang:convert_time_unit(Timeout, milli_seconds, native).
+
+ms(Time) ->
+ erlang:convert_time_unit(Time, native, milli_seconds).
+
+max_late() ->
+ erlang:convert_time_unit(?MAX_LATE_MS, milli_seconds, native).
+
receive_after(Timeout) ->
- Start = now(),
+ Start = erlang:monotonic_time(),
receive
{get_result, ?REG_NAME} ->
?REG_NAME ! #timeout_rec{pid = self(),
type = receive_after,
timeout = Timeout}
after Timeout ->
- Stop = now(),
+ Stop = erlang:monotonic_time(),
receive
{get_result, ?REG_NAME} ->
- TimeoutDiff = ((timer:now_diff(Stop, Start) div 1000)
- - Timeout),
?REG_NAME ! #timeout_rec{pid = self(),
type = receive_after,
timeout = Timeout,
- timeout_diff = TimeoutDiff}
+ timeout_diff = to_diff(Timeout,
+ Start,
+ Stop)}
end
end.
driver(Timeout) ->
Port = open_port({spawn, ?DRV_NAME},[]),
link(Port),
- Start = now(),
+ Start = erlang:monotonic_time(),
erlang:port_command(Port, <<?START_TIMER, Timeout:32>>),
receive
{get_result, ?REG_NAME} ->
@@ -147,38 +181,38 @@ driver(Timeout) ->
type = driver,
timeout = Timeout};
{Port,{data,[?TIMER]}} ->
- Stop = now(),
+ Stop = erlang:monotonic_time(),
unlink(Port),
true = erlang:port_close(Port),
receive
{get_result, ?REG_NAME} ->
- TimeoutDiff = ((timer:now_diff(Stop, Start) div 1000)
- - Timeout),
?REG_NAME ! #timeout_rec{pid = self(),
type = driver,
timeout = Timeout,
- timeout_diff = TimeoutDiff}
+ timeout_diff = to_diff(Timeout,
+ Start,
+ Stop)}
end
end.
bif_timer(Timeout) ->
Tmr = erlang:start_timer(Timeout, self(), ok),
- Start = now(),
+ Start = erlang:monotonic_time(),
receive
{get_result, ?REG_NAME} ->
?REG_NAME ! #timeout_rec{pid = self(),
type = bif_timer,
timeout = Timeout};
{timeout, Tmr, ok} ->
- Stop = now(),
+ Stop = erlang:monotonic_time(),
receive
{get_result, ?REG_NAME} ->
- TimeoutDiff = ((timer:now_diff(Stop, Start) div 1000)
- - Timeout),
?REG_NAME ! #timeout_rec{pid = self(),
type = bif_timer,
timeout = Timeout,
- timeout_diff = TimeoutDiff}
+ timeout_diff = to_diff(Timeout,
+ Start,
+ Stop)}
end
end.
@@ -189,7 +223,7 @@ test(Starter, DrvDir, StartDone) ->
register(?REG_NAME, self()),
{group_leader, GL} = process_info(whereis(net_kernel),group_leader),
group_leader(GL, self()),
- Start = now(),
+ Start = erlang:monotonic_time(),
TORs = lists:map(fun (Min) ->
TO = Min*60*1000,
[#timeout_rec{pid = spawn_opt(
@@ -222,7 +256,7 @@ test(Starter, DrvDir, StartDone) ->
test_loop(TORs, Start) ->
receive
{get_result, ?REG_NAME, Pid} ->
- End = now(),
+ End = erlang:monotonic_time(),
Pid ! {result, ?REG_NAME, get_test_results(TORs), Start, End},
erl_ddll:unload_driver(?DRV_NAME),
erl_ddll:stop(),
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index 888ed8e272..1da08beb8b 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -30,7 +30,7 @@
t_list_comprehension/1,
t_map_sort_literals/1,
t_map_equal/1,
- %t_size/1,
+ t_map_compare/1,
t_map_size/1,
%% Specific Map BIFs
@@ -51,6 +51,10 @@
t_erlang_hash/1,
t_map_encode_decode/1,
+ %% non specific BIF related
+ t_bif_build_and_check/1,
+ t_bif_merge_and_check/1,
+
%% maps module not bifs
t_maps_fold/1,
t_maps_map/1,
@@ -58,6 +62,7 @@
t_maps_without/1,
%% misc
+ t_hashmap_balance/1,
t_pdict/1,
t_ets/1,
t_dets/1,
@@ -66,6 +71,13 @@
-include_lib("stdlib/include/ms_transform.hrl").
+-define(CHECK(Cond,Term),
+ case (catch (Cond)) of
+ true -> true;
+ _ -> io:format("###### CHECK FAILED ######~nINPUT: ~p~n", [Term]),
+ exit(Term)
+ end).
+
suite() -> [].
all() -> [
@@ -75,7 +87,7 @@ all() -> [
t_update_assoc,t_update_exact,
t_guard_bifs, t_guard_sequence, t_guard_update,
t_guard_receive,t_guard_fun, t_list_comprehension,
- t_map_equal,
+ t_map_equal, t_map_compare,
t_map_sort_literals,
%% Specific Map BIFs
@@ -90,12 +102,17 @@ all() -> [
t_erlang_hash, t_map_encode_decode,
t_map_size,
+ %% non specific BIF related
+ t_bif_build_and_check,
+ t_bif_merge_and_check,
+
%% maps module
t_maps_fold, t_maps_map,
t_maps_size, t_maps_without,
%% Other functions
+ t_hashmap_balance,
t_pdict,
t_ets,
t_tracing
@@ -146,17 +163,6 @@ t_build_and_match_literals(Config) when is_list(Config) ->
ok.
-%% Tests size(Map).
-%% not implemented, perhaps it shouldn't be either
-
-%t_size(Config) when is_list(Config) ->
-% 0 = size(#{}),
-% 1 = size(#{a=>1}),
-% 1 = size(#{a=>#{a=>1}}),
-% 2 = size(#{a=>1, b=>2}),
-% 3 = size(#{a=>1, b=>2, b=>"3"}),
-% ok.
-
t_map_size(Config) when is_list(Config) ->
0 = map_size(id(#{})),
1 = map_size(id(#{a=>1})),
@@ -172,12 +178,23 @@ t_map_size(Config) when is_list(Config) ->
true = map_is_size(M#{ "a" => 2}, 2),
false = map_is_size(M#{ "c" => 2}, 2),
+ Ks = [build_key(fun(K) -> <<1,K:32,1>> end,I)||I<-lists:seq(1,100)],
+ ok = build_and_check_size(Ks,0,#{}),
+
%% Error cases.
{'EXIT',{badarg,_}} = (catch map_size([])),
{'EXIT',{badarg,_}} = (catch map_size(<<1,2,3>>)),
{'EXIT',{badarg,_}} = (catch map_size(1)),
ok.
+build_and_check_size([K|Ks],N,M0) ->
+ N = map_size(M0),
+ M1 = M0#{ K => K },
+ build_and_check_size(Ks,N + 1,M1);
+build_and_check_size([],N,M) ->
+ N = map_size(M),
+ ok.
+
map_is_size(M,N) when map_size(M) =:= N -> true;
map_is_size(_,_) -> false.
@@ -432,7 +449,7 @@ t_map_sort_literals(Config) when is_list(Config) ->
true = #{ c => 1, b => 1, a => 1 } < id(#{ b => 1, c => 1, d => 1}),
true = #{ "a" => 1 } < id(#{ <<"a">> => 1}),
false = #{ <<"a">> => 1 } < id(#{ "a" => 1}),
- false = #{ 1 => 1 } < id(#{ 1.0 => 1}),
+ true = #{ 1 => 1 } < id(#{ 1.0 => 1}),
false = #{ 1.0 => 1 } < id(#{ 1 => 1}),
%% value order
@@ -440,16 +457,47 @@ t_map_sort_literals(Config) when is_list(Config) ->
false = #{ a => 2 } < id(#{ a => 1}),
false = #{ a => 2, b => 1 } < id(#{ a => 1, b => 3}),
true = #{ a => 1, b => 1 } < id(#{ a => 1, b => 3}),
+ false = #{ a => 1 } < id(#{ a => 1.0}),
+ false = #{ a => 1.0 } < id(#{ a => 1}),
true = #{ "a" => "hi", b => 134 } == id(#{ b => 134,"a" => "hi"}),
+ %% large maps
+
+ M = maps:from_list([{I,I}||I <- lists:seq(1,500)]),
+
+ %% size order
+ true = M#{ a => 1, b => 2} < id(M#{ a => 1, b => 1, c => 1}),
+ true = M#{ b => 1, a => 1} < id(M#{ c => 1, a => 1, b => 1}),
+ false = M#{ c => 1, b => 1, a => 1} < id(M#{ c => 1, a => 1}),
+
+ %% key order
+ true = M#{ a => 1 } < id(M#{ b => 1}),
+ false = M#{ b => 1 } < id(M#{ a => 1}),
+ true = M#{ a => 1, b => 1, c => 1 } < id(M#{ b => 1, c => 1, d => 1}),
+ true = M#{ b => 1, c => 1, d => 1 } > id(M#{ a => 1, b => 1, c => 1}),
+ true = M#{ c => 1, b => 1, a => 1 } < id(M#{ b => 1, c => 1, d => 1}),
+ true = M#{ "a" => 1 } < id(M#{ <<"a">> => 1}),
+ false = M#{ <<"a">> => 1 } < id(#{ "a" => 1}),
+ true = M#{ 1 => 1 } < id(maps:remove(1,M#{ 1.0 => 1})),
+ false = M#{ 1.0 => 1 } < id(M#{ 1 => 1}),
+
+ %% value order
+ true = M#{ a => 1 } < id(M#{ a => 2}),
+ false = M#{ a => 2 } < id(M#{ a => 1}),
+ false = M#{ a => 2, b => 1 } < id(M#{ a => 1, b => 3}),
+ true = M#{ a => 1, b => 1 } < id(M#{ a => 1, b => 3}),
+ false = M#{ a => 1 } < id(M#{ a => 1.0}),
+ false = M#{ a => 1.0 } < id(M#{ a => 1}),
+
+ true = M#{ "a" => "hi", b => 134 } == id(M#{ b => 134,"a" => "hi"}),
+
%% lists:sort
SortVs = [#{"a"=>1},#{a=>2},#{1=>3},#{<<"a">>=>4}],
[#{1:=ok},#{a:=ok},#{"a":=ok},#{<<"a">>:=ok}] = lists:sort([#{"a"=>ok},#{a=>ok},#{1=>ok},#{<<"a">>=>ok}]),
[#{1:=3},#{a:=2},#{"a":=1},#{<<"a">>:=4}] = lists:sort(SortVs),
[#{1:=3},#{a:=2},#{"a":=1},#{<<"a">>:=4}] = lists:sort(lists:reverse(SortVs)),
-
ok.
t_map_equal(Config) when is_list(Config) ->
@@ -469,27 +517,285 @@ t_map_equal(Config) when is_list(Config) ->
true = id(#{ a => 1, b => 3, c => <<"wat">> }) =:= id(#{ a => 1, b => 3, c=><<"wat">>}),
ok.
+
+t_map_compare(Config) when is_list(Config) ->
+ Seed = erlang:now(),
+ io:format("seed = ~p\n", [Seed]),
+ random:seed(Seed),
+ repeat(100, fun(_) -> float_int_compare() end, []),
+ repeat(100, fun(_) -> recursive_compare() end, []),
+ ok.
+
+float_int_compare() ->
+ Terms = numeric_keys(3),
+ %%io:format("Keys to use: ~p\n", [Terms]),
+ Pairs = lists:map(fun(K) -> list_to_tuple([{K,V} || V <- Terms]) end, Terms),
+ lists:foreach(fun(Size) ->
+ MapGen = fun() -> map_gen(list_to_tuple(Pairs), Size) end,
+ repeat(100, fun do_compare/1, [MapGen, MapGen])
+ end,
+ lists:seq(1,length(Terms))),
+ ok.
+
+numeric_keys(N) ->
+ lists:foldl(fun(_,Acc) ->
+ Int = random:uniform(N*4) - N*2,
+ Float = float(Int),
+ [Int, Float, Float * 0.99, Float * 1.01 | Acc]
+ end,
+ [],
+ lists:seq(1,N)).
+
+
+repeat(0, _, _) ->
+ ok;
+repeat(N, Fun, Arg) ->
+ Fun(Arg),
+ repeat(N-1, Fun, Arg).
+
+copy_term(T) ->
+ Papa = self(),
+ P = spawn_link(fun() -> receive Msg -> Papa ! Msg end end),
+ P ! T,
+ receive R -> R end.
+
+do_compare([Gen1, Gen2]) ->
+ M1 = Gen1(),
+ M2 = Gen2(),
+ %%io:format("Maps to compare: ~p AND ~p\n", [M1, M2]),
+ C = (M1 < M2),
+ Erlang = maps_lessthan(M1, M2),
+ C = Erlang,
+ ?CHECK(M1==M1, M1),
+
+ %% Change one key from int to float (or vice versa) and check compare
+ ML1 = maps:to_list(M1),
+ {K1,V1} = lists:nth(random:uniform(length(ML1)), ML1),
+ case K1 of
+ I when is_integer(I) ->
+ case maps:find(float(I),M1) of
+ error ->
+ M1f = maps:remove(I, maps:put(float(I), V1, M1)),
+ ?CHECK(M1f > M1, [M1f, M1]);
+ _ -> ok
+ end;
+
+ F when is_float(F), round(F) == F ->
+ case maps:find(round(F),M1) of
+ error ->
+ M1i = maps:remove(F, maps:put(round(F), V1, M1)),
+ ?CHECK(M1i < M1, [M1i, M1]);
+ _ -> ok
+ end;
+
+ _ -> ok % skip floats with decimals
+ end,
+
+ ?CHECK(M2 == M2, [M2]).
+
+
+maps_lessthan(M1, M2) ->
+ case {maps:size(M1),maps:size(M2)} of
+ {_S,_S} ->
+ {K1,V1} = lists:unzip(term_sort(maps:to_list(M1))),
+ {K2,V2} = lists:unzip(term_sort(maps:to_list(M2))),
+
+ case erts_internal:cmp_term(K1,K2) of
+ -1 -> true;
+ 0 -> (V1 < V2);
+ 1 -> false
+ end;
+
+ {S1, S2} ->
+ S1 < S2
+ end.
+
+term_sort(L) ->
+ lists:sort(fun(A,B) -> erts_internal:cmp_term(A,B) =< 0 end,
+ L).
+
+
+cmp(T1, T2, Exact) when is_tuple(T1) and is_tuple(T2) ->
+ case {size(T1),size(T2)} of
+ {_S,_S} -> cmp(tuple_to_list(T1), tuple_to_list(T2), Exact);
+ {S1,S2} when S1 < S2 -> -1;
+ {S1,S2} when S1 > S2 -> 1
+ end;
+
+cmp([H1|T1], [H2|T2], Exact) ->
+ case cmp(H1,H2, Exact) of
+ 0 -> cmp(T1,T2, Exact);
+ C -> C
+ end;
+
+cmp(M1, M2, Exact) when is_map(M1) andalso is_map(M2) ->
+ cmp_maps(M1,M2,Exact);
+cmp(M1, M2, Exact) ->
+ cmp_others(M1, M2, Exact).
+
+cmp_maps(M1, M2, Exact) ->
+ case {maps:size(M1),maps:size(M2)} of
+ {_S,_S} ->
+ {K1,V1} = lists:unzip(term_sort(maps:to_list(M1))),
+ {K2,V2} = lists:unzip(term_sort(maps:to_list(M2))),
+
+ case cmp(K1, K2, true) of
+ 0 -> cmp(V1, V2, Exact);
+ C -> C
+ end;
+
+ {S1,S2} when S1 < S2 -> -1;
+ {S1,S2} when S1 > S2 -> 1
+ end.
+
+cmp_others(I, F, true) when is_integer(I), is_float(F) ->
+ -1;
+cmp_others(F, I, true) when is_float(F), is_integer(I) ->
+ 1;
+cmp_others(T1, T2, _) ->
+ case {T1<T2, T1==T2} of
+ {true,false} -> -1;
+ {false,true} -> 0;
+ {false,false} -> 1
+ end.
+
+map_gen(Pairs, Size) ->
+ {_,L} = lists:foldl(fun(_, {Keys, Acc}) ->
+ KI = random:uniform(size(Keys)),
+ K = element(KI,Keys),
+ KV = element(random:uniform(size(K)), K),
+ {erlang:delete_element(KI,Keys), [KV | Acc]}
+ end,
+ {Pairs, []},
+ lists:seq(1,Size)),
+
+ maps:from_list(L).
+
+
+recursive_compare() ->
+ Leafs = {atom, 17, 16.9, 17.1, [], self(), spawn(fun() -> ok end), make_ref(), make_ref()},
+ {A, B} = term_gen_recursive(Leafs, 0, 0),
+ %%io:format("Recursive term A = ~p\n", [A]),
+ %%io:format("Recursive term B = ~p\n", [B]),
+
+ ?CHECK({true,false} =:= case do_cmp(A, B, false) of
+ -1 -> {A<B, A>=B};
+ 0 -> {A==B, A/=B};
+ 1 -> {A>B, A=<B}
+ end,
+ {A,B}),
+ A2 = copy_term(A),
+ ?CHECK(A == A2, {A,A2}),
+ ?CHECK(0 =:= cmp(A, A2, false), {A,A2}),
+
+ B2 = copy_term(B),
+ ?CHECK(B == B2, {B,B2}),
+ ?CHECK(0 =:= cmp(B, B2, false), {B,B2}),
+ ok.
+
+do_cmp(A, B, Exact) ->
+ C = cmp(A, B, Exact),
+ C.
+
+%% Generate two terms {A,B} that may differ only
+%% in float vs integer leaf types.
+term_gen_recursive(Leafs, Flags, Depth) ->
+ MaxDepth = 10,
+ Rnd = case {Flags, Depth} of
+ {_, MaxDepth} -> % Only leafs
+ random:uniform(size(Leafs)) + 3;
+ {0, 0} -> % Only containers
+ random:uniform(3);
+ {0,_} -> % Anything
+ random:uniform(size(Leafs)+3)
+ end,
+ case Rnd of
+ 1 -> % Make map
+ Size = random:uniform(size(Leafs)),
+ lists:foldl(fun(_, {Acc1,Acc2}) ->
+ {K1,K2} = term_gen_recursive(Leafs, Flags,
+ Depth+1),
+ {V1,V2} = term_gen_recursive(Leafs, Flags, Depth+1),
+ %%ok = check_keys(K1,K2, 0),
+ {maps:put(K1,V1, Acc1), maps:put(K2,V2, Acc2)}
+ end,
+ {maps:new(), maps:new()},
+ lists:seq(1,Size));
+ 2 -> % Make cons
+ {Car1,Car2} = term_gen_recursive(Leafs, Flags, Depth+1),
+ {Cdr1,Cdr2} = term_gen_recursive(Leafs, Flags, Depth+1),
+ {[Car1 | Cdr1], [Car2 | Cdr2]};
+ 3 -> % Make tuple
+ Size = random:uniform(size(Leafs)),
+ L = lists:map(fun(_) -> term_gen_recursive(Leafs, Flags, Depth+1) end,
+ lists:seq(1,Size)),
+ {L1, L2} = lists:unzip(L),
+ {list_to_tuple(L1), list_to_tuple(L2)};
+
+ N -> % Make leaf
+ case element(N-3, Leafs) of
+ I when is_integer(I) ->
+ case random:uniform(4) of
+ 1 -> {I, float(I)};
+ 2 -> {float(I), I};
+ _ -> {I,I}
+ end;
+ T -> {T,T}
+ end
+ end.
+
+check_keys(K1, K2, _) when K1 =:= K2 ->
+ case erlang:phash3(K1) =:= erlang:phash3(K2) of
+ true -> ok;
+ false ->
+ io:format("Same keys with different hash values !!!\nK1 = ~p\nK2 = ~p\n", [K1,K2]),
+ error
+ end;
+check_keys(K1, K2, 0) ->
+ case {erlang:phash3(K1), erlang:phash3(K2)} of
+ {H,H} -> check_keys(K1, K2, 1);
+ {_,_} -> ok
+ end;
+check_keys(K1, K2, L) when L < 10 ->
+ case {erlang:phash3([L|K1]), erlang:phash3([L|K2])} of
+ {H,H} -> check_keys(K1, K2, L+1);
+ {_,_} -> ok
+ end;
+check_keys(K1, K2, L) ->
+ io:format("Same hash value at level ~p !!!\nK1 = ~p\nK2 = ~p\n", [L,K1,K2]),
+ error.
+
%% BIFs
t_bif_map_get(Config) when is_list(Config) ->
-
+ %% small map
1 = maps:get(a, #{ a=> 1}),
2 = maps:get(b, #{ a=> 1, b => 2}),
"hi" = maps:get("hello", #{ a=>1, "hello" => "hi"}),
"tuple hi" = maps:get({1,1.0}, #{ a=>a, {1,1.0} => "tuple hi"}),
- M = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
- "v4" = maps:get(<<"k2">>, M#{ <<"k2">> => "v4" }),
+ M0 = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
+ "v4" = maps:get(<<"k2">>, M0#{<<"k2">> => "v4"}),
+
+ %% large map
+ M1 = maps:from_list([{I,I}||I<-lists:seq(1,100)] ++
+ [{a,1},{b,2},{"hello","hi"},{{1,1.0},"tuple hi"},
+ {k1,"v1"},{<<"k2">>,"v3"}]),
+ 1 = maps:get(a, M1),
+ 2 = maps:get(b, M1),
+ "hi" = maps:get("hello", M1),
+ "tuple hi" = maps:get({1,1.0}, M1),
+ "v3" = maps:get(<<"k2">>, M1),
%% error case
{'EXIT',{badarg, [{maps,get,_,_}|_]}} = (catch maps:get(a,[])),
{'EXIT',{badarg, [{maps,get,_,_}|_]}} = (catch maps:get(a,<<>>)),
{'EXIT',{bad_key,[{maps,get,_,_}|_]}} = (catch maps:get({1,1}, #{{1,1.0} => "tuple"})),
{'EXIT',{bad_key,[{maps,get,_,_}|_]}} = (catch maps:get(a,#{})),
- {'EXIT',{bad_key,[{maps,get,_,_}|_]}} = (catch maps:get(a,#{ b=>1, c=>2})),
+ {'EXIT',{bad_key,[{maps,get,_,_}|_]}} = (catch maps:get(a,#{b=>1, c=>2})),
ok.
t_bif_map_find(Config) when is_list(Config) ->
-
+ %% small map
{ok, 1} = maps:find(a, #{ a=> 1}),
{ok, 2} = maps:find(b, #{ a=> 1, b => 2}),
{ok, "int"} = maps:find(1, #{ 1 => "int"}),
@@ -498,8 +804,18 @@ t_bif_map_find(Config) when is_list(Config) ->
{ok, "hi"} = maps:find("hello", #{ a=>1, "hello" => "hi"}),
{ok, "tuple hi"} = maps:find({1,1.0}, #{ a=>a, {1,1.0} => "tuple hi"}),
- M = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
- {ok, "v4"} = maps:find(<<"k2">>, M#{ <<"k2">> => "v4" }),
+ M0 = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
+ {ok, "v4"} = maps:find(<<"k2">>, M0#{ <<"k2">> => "v4" }),
+
+ %% large map
+ M1 = maps:from_list([{I,I}||I<-lists:seq(1,100)] ++
+ [{a,1},{b,2},{"hello","hi"},{{1,1.0},"tuple hi"},
+ {k1,"v1"},{<<"k2">>,"v3"}]),
+ {ok, 1} = maps:find(a, M1),
+ {ok, 2} = maps:find(b, M1),
+ {ok, "hi"} = maps:find("hello", M1),
+ {ok, "tuple hi"} = maps:find({1,1.0}, M1),
+ {ok, "v3"} = maps:find(<<"k2">>, M1),
%% error case
error = maps:find(a,#{}),
@@ -508,7 +824,6 @@ t_bif_map_find(Config) when is_list(Config) ->
error = maps:find(1, #{ 1.0 => "float"}),
error = maps:find({1.0,1}, #{ a=>a, {1,1.0} => "tuple hi"}), % reverse types in tuple key
-
{'EXIT',{badarg,[{maps,find,_,_}|_]}} = (catch maps:find(a,id([]))),
{'EXIT',{badarg,[{maps,find,_,_}|_]}} = (catch maps:find(a,id(<<>>))),
ok.
@@ -542,12 +857,12 @@ t_bif_map_is_key(Config) when is_list(Config) ->
t_bif_map_keys(Config) when is_list(Config) ->
[] = maps:keys(#{}),
- [1,2,3,4,5] = maps:keys(#{ 1 => a, 2 => b, 3 => c, 4 => d, 5 => e}),
- [1,2,3,4,5] = maps:keys(#{ 4 => d, 5 => e, 1 => a, 2 => b, 3 => c}),
+ [1,2,3,4,5] = lists:sort(maps:keys(#{ 1 => a, 2 => b, 3 => c, 4 => d, 5 => e})),
+ [1,2,3,4,5] = lists:sort(maps:keys(#{ 4 => d, 5 => e, 1 => a, 2 => b, 3 => c})),
% values in key order: [4,int,"hi",<<"key">>]
M1 = #{ "hi" => "hello", int => 3, <<"key">> => <<"value">>, 4 => number},
- [4,int,"hi",<<"key">>] = maps:keys(M1),
+ [4,int,"hi",<<"key">>] = lists:sort(maps:keys(M1)),
%% error case
{'EXIT',{badarg,[{maps,keys,_,_}|_]}} = (catch maps:keys(1 bsl 65 + 3)),
@@ -596,33 +911,33 @@ t_bif_map_put(Config) when is_list(Config) ->
M1 = #{ "hi" := "hello"} = maps:put("hi", "hello", #{}),
- ["hi"] = maps:keys(M1),
- ["hello"] = maps:values(M1),
+ true = is_members(["hi"],maps:keys(M1)),
+ true = is_members(["hello"],maps:values(M1)),
M2 = #{ int := 3 } = maps:put(int, 3, M1),
- [int,"hi"] = maps:keys(M2),
- [3,"hello"] = maps:values(M2),
+ true = is_members([int,"hi"],maps:keys(M2)),
+ true = is_members([3,"hello"],maps:values(M2)),
M3 = #{ <<"key">> := <<"value">> } = maps:put(<<"key">>, <<"value">>, M2),
- [int,"hi",<<"key">>] = maps:keys(M3),
- [3,"hello",<<"value">>] = maps:values(M3),
+ true = is_members([int,"hi",<<"key">>],maps:keys(M3)),
+ true = is_members([3,"hello",<<"value">>],maps:values(M3)),
M4 = #{ 18446744073709551629 := wat } = maps:put(18446744073709551629, wat, M3),
- [18446744073709551629,int,"hi",<<"key">>] = maps:keys(M4),
- [wat,3,"hello",<<"value">>] = maps:values(M4),
+ true = is_members([18446744073709551629,int,"hi",<<"key">>],maps:keys(M4)),
+ true = is_members([wat,3,"hello",<<"value">>],maps:values(M4)),
M0 = #{ 4 := number } = M5 = maps:put(4, number, M4),
- [4,18446744073709551629,int,"hi",<<"key">>] = maps:keys(M5),
- [number,wat,3,"hello",<<"value">>] = maps:values(M5),
+ true = is_members([4,18446744073709551629,int,"hi",<<"key">>],maps:keys(M5)),
+ true = is_members([number,wat,3,"hello",<<"value">>],maps:values(M5)),
M6 = #{ <<"key">> := <<"other value">> } = maps:put(<<"key">>, <<"other value">>, M5),
- [4,18446744073709551629,int,"hi",<<"key">>] = maps:keys(M6),
- [number,wat,3,"hello",<<"other value">>] = maps:values(M6),
+ true = is_members([4,18446744073709551629,int,"hi",<<"key">>],maps:keys(M6)),
+ true = is_members([number,wat,3,"hello",<<"other value">>],maps:values(M6)),
%% error case
{'EXIT',{badarg,[{maps,put,_,_}|_]}} = (catch maps:put(1,a,1 bsl 65 + 3)),
@@ -630,7 +945,15 @@ t_bif_map_put(Config) when is_list(Config) ->
{'EXIT',{badarg,[{maps,put,_,_}|_]}} = (catch maps:put(1,a,atom)),
{'EXIT',{badarg,[{maps,put,_,_}|_]}} = (catch maps:put(1,a,[])),
{'EXIT',{badarg,[{maps,put,_,_}|_]}} = (catch maps:put(1,a,<<>>)),
- ok.
+ ok.
+
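+%% Check that Ks and Ls contain exactly the same elements, in any order.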
+is_members(Ks,Ls) when length(Ks) =/= length(Ls) -> false;
+is_members(Ks,Ls) -> is_members_do(Ks,Ls).
+
+is_members_do([],[]) -> true;
+is_members_do([],_) -> false;
+is_members_do([K|Ks],Ls) ->
+ is_members_do(Ks, lists:delete(K,Ls)).
t_bif_map_remove(Config) when is_list(Config) ->
0 = erlang:map_size(maps:remove(some_key, #{})),
@@ -639,20 +962,20 @@ t_bif_map_remove(Config) when is_list(Config) ->
4 => number, 18446744073709551629 => wat},
M1 = maps:remove("hi", M0),
- [4,18446744073709551629,int,<<"key">>] = maps:keys(M1),
- [number,wat,3,<<"value">>] = maps:values(M1),
+ true = is_members([4,18446744073709551629,int,<<"key">>],maps:keys(M1)),
+ true = is_members([number,wat,3,<<"value">>],maps:values(M1)),
M2 = maps:remove(int, M1),
- [4,18446744073709551629,<<"key">>] = maps:keys(M2),
- [number,wat,<<"value">>] = maps:values(M2),
+ true = is_members([4,18446744073709551629,<<"key">>],maps:keys(M2)),
+ true = is_members([number,wat,<<"value">>],maps:values(M2)),
M3 = maps:remove(<<"key">>, M2),
- [4,18446744073709551629] = maps:keys(M3),
- [number,wat] = maps:values(M3),
+ true = is_members([4,18446744073709551629],maps:keys(M3)),
+ true = is_members([number,wat],maps:values(M3)),
M4 = maps:remove(18446744073709551629, M3),
- [4] = maps:keys(M4),
- [number] = maps:values(M4),
+ true = is_members([4],maps:keys(M4)),
+ true = is_members([number],maps:values(M4)),
M5 = maps:remove(4, M4),
[] = maps:keys(M5),
@@ -702,15 +1025,15 @@ t_bif_map_update(Config) when is_list(Config) ->
t_bif_map_values(Config) when is_list(Config) ->
[] = maps:values(#{}),
+ [1] = maps:values(#{a=>1}),
- [a,b,c,d,e] = maps:values(#{ 1 => a, 2 => b, 3 => c, 4 => d, 5 => e}),
- [a,b,c,d,e] = maps:values(#{ 4 => d, 5 => e, 1 => a, 2 => b, 3 => c}),
+ true = is_members([a,b,c,d,e],maps:values(#{ 1 => a, 2 => b, 3 => c, 4 => d, 5 => e})),
+ true = is_members([a,b,c,d,e],maps:values(#{ 4 => d, 5 => e, 1 => a, 2 => b, 3 => c})),
- % values in key order: [4,int,"hi",<<"key">>]
M1 = #{ "hi" => "hello", int => 3, <<"key">> => <<"value">>, 4 => number},
M2 = M1#{ "hi" => "hello2", <<"key">> => <<"value2">> },
- [number,3,"hello2",<<"value2">>] = maps:values(M2),
- [number,3,"hello",<<"value">>] = maps:values(M1),
+ true = is_members([number,3,"hello2",<<"value2">>],maps:values(M2)),
+ true = is_members([number,3,"hello",<<"value">>],maps:values(M1)),
%% error case
{'EXIT',{badarg,[{maps,values,_,_}|_]}} = (catch maps:values(1 bsl 65 + 3)),
@@ -730,61 +1053,61 @@ t_erlang_hash(Config) when is_list(Config) ->
t_bif_erlang_phash2() ->
39679005 = erlang:phash2(#{}),
- 78942764 = erlang:phash2(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 }),
- 37338230 = erlang:phash2(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} }),
- 14363616 = erlang:phash2(#{ 1 => a }),
- 51612236 = erlang:phash2(#{ a => 1 }),
+ 33667975 = erlang:phash2(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 }), % 78942764
+ 95332690 = erlang:phash2(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} }), % 37338230
+ 108954384 = erlang:phash2(#{ 1 => a }), % 14363616
+ 59617982 = erlang:phash2(#{ a => 1 }), % 51612236
- 37468437 = erlang:phash2(#{{} => <<>>}),
- 44049159 = erlang:phash2(#{<<>> => {}}),
+ 42770201 = erlang:phash2(#{{} => <<>>}), % 37468437
+ 71687700 = erlang:phash2(#{<<>> => {}}), % 44049159
M0 = #{ a => 1, "key" => <<"value">> },
M1 = maps:remove("key",M0),
M2 = M1#{ "key" => <<"value">> },
- 118679416 = erlang:phash2(M0),
- 51612236 = erlang:phash2(M1),
- 118679416 = erlang:phash2(M2),
+ 70249457 = erlang:phash2(M0), % 118679416
+ 59617982 = erlang:phash2(M1), % 51612236
+ 70249457 = erlang:phash2(M2), % 118679416
ok.
t_bif_erlang_phash() ->
Sz = 1 bsl 32,
- 268440612 = erlang:phash(#{},Sz),
- 1196461908 = erlang:phash(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 },Sz),
- 3944426064 = erlang:phash(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} },Sz),
- 1394238263 = erlang:phash(#{ 1 => a },Sz),
- 4066388227 = erlang:phash(#{ a => 1 },Sz),
+ 1113425985 = erlang:phash(#{},Sz), % 268440612
+ 1510068139 = erlang:phash(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 },Sz), % 1196461908
+ 3182345590 = erlang:phash(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} },Sz), % 3944426064
+ 2927531828 = erlang:phash(#{ 1 => a },Sz), % 1394238263
+ 1670235874 = erlang:phash(#{ a => 1 },Sz), % 4066388227
- 1578050717 = erlang:phash(#{{} => <<>>},Sz),
- 1578050717 = erlang:phash(#{<<>> => {}},Sz), % yep, broken
+ 3935089469 = erlang:phash(#{{} => <<>>},Sz), % 1578050717
+ 71692856 = erlang:phash(#{<<>> => {}},Sz), % 1578050717
M0 = #{ a => 1, "key" => <<"value">> },
M1 = maps:remove("key",M0),
M2 = M1#{ "key" => <<"value">> },
- 3590546636 = erlang:phash(M0,Sz),
- 4066388227 = erlang:phash(M1,Sz),
- 3590546636 = erlang:phash(M2,Sz),
+ 2620391445 = erlang:phash(M0,Sz), % 3590546636
+ 1670235874 = erlang:phash(M1,Sz), % 4066388227
+ 2620391445 = erlang:phash(M2,Sz), % 3590546636
ok.
t_bif_erlang_hash() ->
Sz = 1 bsl 27 - 1,
- 5158 = erlang:hash(#{},Sz),
- 71555838 = erlang:hash(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 },Sz),
- 5497225 = erlang:hash(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} },Sz),
- 126071654 = erlang:hash(#{ 1 => a },Sz),
- 126426236 = erlang:hash(#{ a => 1 },Sz),
+ 39684169 = erlang:hash(#{},Sz), % 5158
+ 33673142 = erlang:hash(#{ a => 1, "a" => 2, <<"a">> => 3, {a,b} => 4 },Sz), % 71555838
+ 95337869 = erlang:hash(#{ 1 => a, 2 => "a", 3 => <<"a">>, 4 => {a,b} },Sz), % 5497225
+ 108959561 = erlang:hash(#{ 1 => a },Sz), % 126071654
+ 59623150 = erlang:hash(#{ a => 1 },Sz), % 126426236
- 101655720 = erlang:hash(#{{} => <<>>},Sz),
- 101655720 = erlang:hash(#{<<>> => {}},Sz), % yep, broken
+ 42775386 = erlang:hash(#{{} => <<>>},Sz), % 101655720
+ 71692856 = erlang:hash(#{<<>> => {}},Sz), % 101655720
M0 = #{ a => 1, "key" => <<"value">> },
M1 = maps:remove("key",M0),
M2 = M1#{ "key" => <<"value">> },
- 38260486 = erlang:hash(M0,Sz),
- 126426236 = erlang:hash(M1,Sz),
- 38260486 = erlang:hash(M2,Sz),
+ 70254632 = erlang:hash(M0,Sz), % 38260486
+ 59623150 = erlang:hash(M1,Sz), % 126426236
+ 70254632 = erlang:hash(M2,Sz), % 38260486
ok.
@@ -818,14 +1141,21 @@ t_map_encode_decode(Config) when is_list(Config) ->
%% literally #{ "hi" => "value", a=>33, b=>55 } in the internal order
#{ a:=33, b:=55, "hi" := "value"} = erlang:binary_to_term(<<131,116,0,0,0,3,
- 107,0,2,104,105, % "hi" :: list()
+ 107,0,2,104,105, % "hi" :: list()
107,0,5,118,97,108,117,101, % "value" :: list()
- 100,0,1,97, % a :: atom()
- 97,33, % 33 :: integer()
- 100,0,1,98, % b :: atom()
- 97,55 % 55 :: integer()
+ 100,0,1,97, % a :: atom()
+ 97,33, % 33 :: integer()
+ 100,0,1,98, % b :: atom()
+ 97,55 % 55 :: integer()
>>),
+ %% many maps in same binary
+ MapList = lists:foldl(fun(K, [M|_]=Acc) -> [M#{K => K} | Acc] end,
+ [#{}],
+ lists:seq(1,100)),
+ MapList = binary_to_term(term_to_binary(MapList)),
+ MapListR = lists:reverse(MapList),
+ MapListR = binary_to_term(term_to_binary(MapListR)),
%% error cases
%% template: <<131,116,0,0,0,2,100,0,1,97,100,0,1,98,97,1,97,1>>
@@ -856,39 +1186,42 @@ t_map_encode_decode(Config) when is_list(Config) ->
map_encode_decode_and_match([{K,V}|Pairs], EncodedPairs, M0) ->
M1 = maps:put(K,V,M0),
B0 = erlang:term_to_binary(M1),
- Ls = lists:sort(fun(A,B) -> erts_internal:cmp_term(A,B) < 0 end, [{K, erlang:term_to_binary(K), erlang:term_to_binary(V)}|EncodedPairs]),
- %% sort Ks and Vs according to term spec, then match it
- KVbins = lists:foldr(fun({_,Kbin,Vbin}, Acc) -> [Kbin,Vbin | Acc] end, [], Ls),
- ok = match_encoded_map(B0, length(Ls), KVbins),
+ Ls = [{erlang:term_to_binary(K), erlang:term_to_binary(V)}|EncodedPairs],
+ ok = match_encoded_map(B0, length(Ls), Ls),
%% decode and match it
M1 = erlang:binary_to_term(B0),
map_encode_decode_and_match(Pairs,Ls,M1);
map_encode_decode_and_match([],_,_) -> ok.
match_encoded_map(<<131,116,Size:32,Encoded/binary>>,Size,Items) ->
- match_encoded_map(Encoded,Items);
+ match_encoded_map_stripped_size(Encoded,Items,Items);
match_encoded_map(_,_,_) -> no_match_size.
-match_encoded_map(<<>>,[]) -> ok;
-match_encoded_map(Bin,[<<131,Item/binary>>|Items]) ->
- Size = erlang:byte_size(Item),
- <<EncodedTerm:Size/binary, Bin1/binary>> = Bin,
- EncodedTerm = Item, %% Asssert
- match_encoded_map(Bin1,Items).
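+%% Match each encoded key/value pair somewhere in the encoded map, without
+%% assuming any particular serialization order of the pairs.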
+match_encoded_map_stripped_size(<<>>,_,_) -> ok;
+match_encoded_map_stripped_size(B0,[{<<131,K/binary>>,<<131,V/binary>>}|Items],Ls) ->
+ Ksz = byte_size(K),
+ Vsz = byte_size(V),
+ case B0 of
+ <<K:Ksz/binary,V:Vsz/binary,B1/binary>> ->
+ match_encoded_map_stripped_size(B1,Ls,Ls);
+ _ ->
+ match_encoded_map_stripped_size(B0,Items,Ls)
+ end;
+match_encoded_map_stripped_size(_,[],_) -> fail.
t_bif_map_to_list(Config) when is_list(Config) ->
[] = maps:to_list(#{}),
- [{a,1},{b,2}] = maps:to_list(#{a=>1,b=>2}),
- [{a,1},{b,2},{c,3}] = maps:to_list(#{c=>3,a=>1,b=>2}),
- [{a,1},{b,2},{g,3}] = maps:to_list(#{g=>3,a=>1,b=>2}),
- [{a,1},{b,2},{g,3},{"c",4}] = maps:to_list(#{g=>3,a=>1,b=>2,"c"=>4}),
- [{3,v2},{hi,v4},{{hi,3},v5},{"hi",v3},{<<"hi">>,v1}] = maps:to_list(#{
- <<"hi">>=>v1,3=>v2,"hi"=>v3,hi=>v4,{hi,3}=>v5}),
+ [{a,1},{b,2}] = lists:sort(maps:to_list(#{a=>1,b=>2})),
+ [{a,1},{b,2},{c,3}] = lists:sort(maps:to_list(#{c=>3,a=>1,b=>2})),
+ [{a,1},{b,2},{g,3}] = lists:sort(maps:to_list(#{g=>3,a=>1,b=>2})),
+ [{a,1},{b,2},{g,3},{"c",4}] = lists:sort(maps:to_list(#{g=>3,a=>1,b=>2,"c"=>4})),
+ [{3,v2},{hi,v4},{{hi,3},v5},{"hi",v3},{<<"hi">>,v1}] =
+ lists:sort(maps:to_list(#{<<"hi">>=>v1,3=>v2,"hi"=>v3,hi=>v4,{hi,3}=>v5})),
- [{3,v7},{hi,v9},{{hi,3},v10},{"hi",v8},{<<"hi">>,v6}] = maps:to_list(#{
- <<"hi">>=>v1,3=>v2,"hi"=>v3,hi=>v4,{hi,3}=>v5,
- <<"hi">>=>v6,3=>v7,"hi"=>v8,hi=>v9,{hi,3}=>v10}),
+ [{3,v7},{hi,v9},{{hi,3},v10},{"hi",v8},{<<"hi">>,v6}] =
+ lists:sort(maps:to_list(#{<<"hi">>=>v1,3=>v2,"hi"=>v3,hi=>v4,{hi,3}=>v5,
+ <<"hi">>=>v6,3=>v7,"hi"=>v8,hi=>v9,{hi,3}=>v10})),
%% error cases
{'EXIT', {badarg,_}} = (catch maps:to_list(id(a))),
@@ -901,7 +1234,7 @@ t_bif_map_from_list(Config) when is_list(Config) ->
A = maps:from_list([]),
0 = erlang:map_size(A),
- #{a:=1,b:=2} = maps:from_list([{a,1},{b,2}]),
+ #{a:=1,b:=2} = maps:from_list([{a,1},{b,2}]),
#{c:=3,a:=1,b:=2} = maps:from_list([{a,1},{b,2},{c,3}]),
#{g:=3,a:=1,b:=2} = maps:from_list([{a,1},{b,2},{g,3}]),
@@ -923,6 +1256,136 @@ t_bif_map_from_list(Config) when is_list(Config) ->
{'EXIT', {badarg,_}} = (catch maps:from_list(id(42))),
ok.
+t_bif_build_and_check(Config) when is_list(Config) ->
+ ok = check_build_and_remove(750,[
+ fun(K) -> [K,K] end,
+ fun(K) -> [float(K),K] end,
+ fun(K) -> K end,
+ fun(K) -> {1,K} end,
+ fun(K) -> {K} end,
+ fun(K) -> [K|K] end,
+ fun(K) -> [K,1,2,3,4] end,
+ fun(K) -> {K,atom} end,
+ fun(K) -> float(K) end,
+ fun(K) -> integer_to_list(K) end,
+ fun(K) -> list_to_atom(integer_to_list(K)) end,
+ fun(K) -> [K,{K,[K,{K,[K]}]}] end,
+ fun(K) -> <<K:32>> end
+ ]),
+
+ ok.
+
+check_build_and_remove(_,[]) -> ok;
+check_build_and_remove(N,[F|Fs]) ->
+ {M,Ks} = build_and_check(N, maps:new(), F, []),
+ ok = remove_and_check(Ks,M),
+ check_build_and_remove(N,Fs).
+
+build_and_check(0, M0, _, Ks) -> {M0, Ks};
+build_and_check(N, M0, F, Ks) ->
+ K = build_key(F,N),
+ M1 = maps:put(K,K,M0),
+ ok = check_keys_exist([K|Ks], M1),
+ M2 = maps:update(K,v,M1),
+ v = maps:get(K,M2),
+ build_and_check(N-1,M1,F,[K|Ks]).
+
+remove_and_check([],_) -> ok;
+remove_and_check([K|Ks], M0) ->
+ K = maps:get(K,M0),
+ true = maps:is_key(K,M0),
+ M1 = maps:remove(K,M0),
+ false = maps:is_key(K,M1),
+ true = maps:is_key(K,M0),
+ ok = check_keys_exist(Ks,M1),
+ error = maps:find(K,M1),
+ remove_and_check(Ks, M1).
+
+build_key(F,N) when N rem 3 =:= 0 -> F(N);
+build_key(F,N) when N rem 3 =:= 1 -> K = F(N), {K,K};
+build_key(F,N) when N rem 3 =:= 2 -> K = F(N), [K,K].
+
+check_keys_exist([], _) -> ok;
+check_keys_exist([K|Ks],M) ->
+ true = maps:is_key(K,M),
+ check_keys_exist(Ks,M).
+
+t_bif_merge_and_check(Config) when is_list(Config) ->
+ %% simple disjoint ones
+ %% make sure all keys are unique
+ Kss = [[a,b,c,d],
+ [1,2,3,4],
+ [],
+ ["hi"],
+ [e],
+ [build_key(fun(K) -> {small,K} end, I) || I <- lists:seq(1,32)],
+ lists:seq(5, 28),
+ lists:seq(29, 59),
+ [build_key(fun(K) -> integer_to_list(K) end, I) || I <- lists:seq(2000,10000)],
+ [build_key(fun(K) -> <<K:32>> end, I) || I <- lists:seq(1,80)],
+ [build_key(fun(K) -> {<<K:32>>} end, I) || I <- lists:seq(100,1000)]],
+
+
+ KsMs = build_keys_map_pairs(Kss),
+ Cs = [{CKs1,CM1,CKs2,CM2} || {CKs1,CM1} <- KsMs, {CKs2,CM2} <- KsMs],
+ ok = merge_and_check_combo(Cs),
+
+ %% overlapping ones
+
+ KVs1 = [{a,1},{b,2},{c,3}],
+ KVs2 = [{b,3},{c,4},{d,5}],
+ KVs = [{I,I} || I <- lists:seq(1,32)],
+ KVs3 = KVs1 ++ KVs,
+ KVs4 = KVs2 ++ KVs,
+
+ M1 = maps:from_list(KVs1),
+ M2 = maps:from_list(KVs2),
+ M3 = maps:from_list(KVs3),
+ M4 = maps:from_list(KVs4),
+
+ M12 = maps:merge(M1,M2),
+ ok = check_key_values(KVs2 ++ [{a,1}], M12),
+ M21 = maps:merge(M2,M1),
+ ok = check_key_values(KVs1 ++ [{d,5}], M21),
+
+ M34 = maps:merge(M3,M4),
+ ok = check_key_values(KVs4 ++ [{a,1}], M34),
+ M43 = maps:merge(M4,M3),
+ ok = check_key_values(KVs3 ++ [{d,5}], M43),
+
+ M14 = maps:merge(M1,M4),
+ ok = check_key_values(KVs4 ++ [{a,1}], M14),
+ M41 = maps:merge(M4,M1),
+ ok = check_key_values(KVs1 ++ [{d,5}] ++ KVs, M41),
+
+ ok.
+
+check_key_values([],_) -> ok;
+check_key_values([{K,V}|KVs],M) ->
+ V = maps:get(K,M),
+ check_key_values(KVs,M).
+
+merge_and_check_combo([]) -> ok;
+merge_and_check_combo([{Ks1,M1,Ks2,M2}|Cs]) ->
+ M12 = maps:merge(M1,M2),
+ ok = check_keys_exist(Ks1 ++ Ks2, M12),
+ M21 = maps:merge(M2,M1),
+ ok = check_keys_exist(Ks1 ++ Ks2, M21),
+
+ true = M12 =:= M21,
+ M12 = M21,
+
+ merge_and_check_combo(Cs).
+
+build_keys_map_pairs([]) -> [];
+build_keys_map_pairs([Ks|Kss]) ->
+ M = maps:from_list(keys_to_pairs(Ks)),
+ ok = check_keys_exist(Ks, M),
+ [{Ks,M}|build_keys_map_pairs(Kss)].
+
+keys_to_pairs(Ks) -> [{K,K} || K <- Ks].
+
+
%% Maps module, not BIFs
t_maps_fold(_Config) ->
Vs = lists:seq(1,100),
@@ -960,6 +1423,75 @@ t_maps_without(_Config) ->
%% MISC
+
+%% Verify that the number of nodes in hashmaps
+%% of different types and sizes does not deviate too
+%% much from the theoretical model.
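+%%
+%% As a rough worked example, assuming a uniform hash: with 10000 keys the
+%% model used in hashmap_balance/1 below predicts about 0.4 * 10000 = 4000
+%% internal nodes with a standard deviation of sqrt(10000) / 3 ~= 33, so
+%% the (MaxDiff < 6) assertion accepts up to roughly 4000 + 6*33 ~= 4200
+%% nodes at that size.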
+t_hashmap_balance(_Config) ->
+ io:format("Integer keys\n", []),
+ hashmap_balance(fun(I) -> I end),
+ io:format("Float keys\n", []),
+ hashmap_balance(fun(I) -> float(I) end),
+ io:format("String keys\n", []),
+ hashmap_balance(fun(I) -> integer_to_list(I) end),
+ io:format("Binary (big) keys\n", []),
+ hashmap_balance(fun(I) -> <<I:16/big>> end),
+ io:format("Binary (little) keys\n", []),
+ hashmap_balance(fun(I) -> <<I:16/little>> end),
+ io:format("Atom keys\n", []),
+ erts_debug:set_internal_state(available_internal_state, true),
+ hashmap_balance(fun(I) -> erts_debug:get_internal_state({atom,I}) end),
+ erts_debug:set_internal_state(available_internal_state, false),
+
+ ok.
+
+hashmap_balance(KeyFun) ->
+ F = fun(I, {M0,Max0}) ->
+ Key = KeyFun(I),
+ M1 = M0#{Key => Key},
+ Max1 = case erts_internal:map_type(M1) of
+ hashmap ->
+ Nodes = hashmap_nodes(M1),
+ Avg = maps:size(M1) * 0.4,
+ StdDev = math:sqrt(maps:size(M1)) / 3,
+ SD_diff = abs(Nodes - Avg) / StdDev,
+ %%io:format("~p keys: ~p nodes avg=~p SD_diff=~p\n",
+ %% [maps:size(M1), Nodes, Avg, SD_diff]),
+ {MaxDiff0, _} = Max0,
+ case {Nodes > Avg, SD_diff > MaxDiff0} of
+ {true, true} -> {SD_diff, M1};
+ _ -> Max0
+ end;
+
+ flatmap -> Max0
+ end,
+ {M1, Max1}
+ end,
+
+ {_,{MaxDiff,MaxMap}} = lists:foldl(F,
+ {#{}, {0, 0}},
+ lists:seq(1,10000)),
+ io:format("Max std dev diff ~p for map of size ~p (nodes=~p, flatsize=~p)\n",
+ [MaxDiff, maps:size(MaxMap), hashmap_nodes(MaxMap), erts_debug:flat_size(MaxMap)]),
+
+ true = (MaxDiff < 6), % The probability of this line failing is about 0.000000001
+ % for a uniform hash. I've set the probability this "high" for now
+ % to detect flaws in our make_internal_hash.
+ % Hard limit is 15 (see hashmap_over_estimated_heap_size).
+ ok.
+
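+%% Count the internal nodes (bitmap and array nodes) of a hashmap, as
+%% reported by erts_debug:map_info/1.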
+hashmap_nodes(M) ->
+ Info = erts_debug:map_info(M),
+ lists:foldl(fun(Tpl,Acc) ->
+ case element(1,Tpl) of
+ bitmaps -> Acc + element(2,Tpl);
+ arrays -> Acc + element(2,Tpl);
+ _ -> Acc
+ end
+ end,
+ 0,
+ Info).
+
t_pdict(_Config) ->
put(#{ a => b, b => a},#{ c => d}),
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index fdce157abc..d3c884689f 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -30,6 +30,7 @@
-export([fpe/1]).
-export([otp_9422/1]).
-export([faulty_seq_trace/1, do_faulty_seq_trace/0]).
+-export([maps/1]).
-export([runner/2, loop_runner/3]).
-export([f1/1, f2/2, f3/2, fn/1, fn/2, fn/3]).
-export([do_boxed_and_small/0]).
@@ -62,7 +63,8 @@ all() ->
moving_labels,
faulty_seq_trace,
empty_list,
- otp_9422];
+ otp_9422,
+ maps];
true -> [not_run]
end.
@@ -899,6 +901,74 @@ fpe(Config) when is_list(Config) ->
_ -> ok
end.
+maps(Config) when is_list(Config) ->
+ {ok,#{},[],[]} = erlang:match_spec_test(#{}, [{'_',[],['$_']}], table),
+ {ok,#{},[],[]} = erlang:match_spec_test(#{}, [{#{},[],['$_']}], table),
+ {ok,false,[],[]} =
+ erlang:match_spec_test(#{}, [{not_a_map,[],['$_']}], table),
+ {ok,bar,[],[]} =
+ erlang:match_spec_test(#{foo => bar},
+ [{#{foo => '$1'},[],['$1']}],
+ table),
+ {ok,false,[],[]} =
+ erlang:match_spec_test(#{foo => bar},
+ [{#{foo => qux},[],[qux]}],
+ table),
+ {ok,false,[],[]} =
+ erlang:match_spec_test(#{}, [{#{foo => '_'},[],[foo]}], table),
+ {error,_} =
+ erlang:match_spec_test(#{}, [{#{'$1' => '_'},[],[foo]}], table),
+ {ok,bar,[],[]} =
+ erlang:match_spec_test({#{foo => bar}},
+ [{{#{foo => '$1'}},[],['$1']}],
+ table),
+ {ok,#{foo := 3},[],[]} =
+ erlang:match_spec_test({}, [{{},[],[#{foo => {'+',1,2}}]}], table),
+
+ {ok,"camembert",[],[]} =
+ erlang:match_spec_test(#{b => "camembert",c => "cabécou"},
+ [{#{b => '$1',c => "cabécou"},[],['$1']}], table),
+
+ {ok,#{a :="camembert",b := "hi"},[],[]} =
+ erlang:match_spec_test(#{<<"b">> =>"camembert","c"=>"cabécou", "wat"=>"hi", b=><<"other">>},
+ [{#{<<"b">> => '$1',"wat" => '$2'},[],[#{a=>'$1',b=>'$2'}]}],
+ table),
+ %% large maps
+
+ Ls0 = [{I,<<I:32>>}||I <- lists:seq(1,415)],
+ M0 = maps:from_list(Ls0),
+ M1 = #{a=>1,b=>2,c=>3,d=>4},
+
+ R1 = M0#{263 := #{ a=> 3 }},
+ Ms1 = [{M1#{c:='$1'},[],[M0#{263 := #{a => '$1'}}]}],
+
+ {ok,R1,[],[]} = erlang:match_spec_test(M1,Ms1,table),
+
+ Ms2 = [{M0#{63:='$1', 19:='$2'},[],[M0#{19:='$1', 63:='$2'}]}],
+ R2 = M0#{63 := maps:get(19,M0), 19 := maps:get(63,M0) },
+ {ok,R2,[],[]} = erlang:match_spec_test(M0,Ms2,table),
+
+ ok = maps_check_loop(M1),
+ ok = maps_check_loop(M0),
+ M2 = maps:from_list([{integer_to_list(K),V} || {K,V} <- Ls0]),
+ ok = maps_check_loop(M2),
+ ok.
+
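+%% Walk through the keys of M, binding one more key per step to a fresh
+%% '$N' variable in the match-spec head and returning it at the mirrored
+%% key in the body, checking the result of match_spec_test/3 at each step.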
+maps_check_loop(M) ->
+ Ks = maps:keys(M),
+ maps_check_loop(M,M,M,M,Ks,lists:reverse(Ks),1).
+
+maps_check_loop(Orig,M0,MsM0,Rm0,[K|Ks],[Rk|Rks],Ix) ->
+ MsK = list_to_atom([$$]++integer_to_list(Ix)),
+ MsM1 = MsM0#{K := MsK},
+ Rm1 = Rm0#{Rk := MsK},
+ M1 = M0#{Rk := maps:get(K,MsM0)},
+ Ms = [{MsM1,[],[Rm1]}],
+ {ok,M1,[],[]} = erlang:match_spec_test(Orig,Ms,table),
+ maps_check_loop(Orig,M1,MsM1,Rm1,Ks,Rks,Ix+1);
+maps_check_loop(_,_,_,_,[],[],_) -> ok.
+
+
empty_list(Config) when is_list(Config) ->
Val=[{'$1',[], [{message,'$1'},{message,{caller}},{return_trace}]}],
%% Did crash debug VM in faulty assert:
diff --git a/erts/emulator/test/module_info_SUITE.erl b/erts/emulator/test/module_info_SUITE.erl
index 8a63d9fe3e..f3986f0c4f 100644
--- a/erts/emulator/test/module_info_SUITE.erl
+++ b/erts/emulator/test/module_info_SUITE.erl
@@ -24,7 +24,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
- exports/1,functions/1,native/1]).
+ exports/1,functions/1,native/1,info/1]).
%%-compile(native).
@@ -52,8 +52,8 @@ end_per_group(_GroupName, Config) ->
Config.
-modules() ->
- [exports, functions, native].
+modules() ->
+ [exports, functions, native, info].
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog = ?t:timetrap(?t:minutes(3)),
@@ -122,6 +122,22 @@ native_proj({Name,Arity,Addr}) ->
native_filter(Set) ->
sofs:no_elements(Set) =/= 1.
+%% Test that the module info of this module is correct. Use
+%% erlang:get_module_info(?MODULE) to avoid compiler optimization tricks.
+info(Config) when is_list(Config) ->
+ Info = erlang:get_module_info(?MODULE),
+ All = all_exported(),
+ {ok,{?MODULE,MD5}} = beam_lib:md5(code:which(?MODULE)),
+ {module, ?MODULE} = lists:keyfind(module, 1, Info),
+ {md5, MD5} = lists:keyfind(md5, 1, Info),
+ {exports, Exports} = lists:keyfind(exports, 1, Info),
+ All = lists:sort(Exports),
+ {attributes, Attrs} = lists:keyfind(attributes, 1, Info),
+ {vsn,_} = lists:keyfind(vsn, 1, Attrs),
+ {compile, Compile} = lists:keyfind(compile, 1, Info),
+ {options,_} = lists:keyfind(options, 1, Compile),
+ ok.
+
%% Helper functions (local).
add_arity(L) ->
diff --git a/erts/emulator/test/monitor_SUITE.erl b/erts/emulator/test/monitor_SUITE.erl
index aec59867d8..07e2862b2a 100644
--- a/erts/emulator/test/monitor_SUITE.erl
+++ b/erts/emulator/test/monitor_SUITE.erl
@@ -26,7 +26,8 @@
case_1/1, case_1a/1, case_2/1, case_2a/1, mon_e_1/1, demon_e_1/1, demon_1/1,
demon_2/1, demon_3/1, demonitor_flush/1,
local_remove_monitor/1, remote_remove_monitor/1, mon_1/1, mon_2/1,
- large_exit/1, list_cleanup/1, mixer/1, named_down/1, otp_5827/1]).
+ large_exit/1, list_cleanup/1, mixer/1, named_down/1, otp_5827/1,
+ monitor_time_offset/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -38,7 +39,8 @@ all() ->
[case_1, case_1a, case_2, case_2a, mon_e_1, demon_e_1,
demon_1, mon_1, mon_2, demon_2, demon_3,
demonitor_flush, {group, remove_monitor}, large_exit,
- list_cleanup, mixer, named_down, otp_5827].
+ list_cleanup, mixer, named_down, otp_5827,
+ monitor_time_offset].
groups() ->
[{remove_monitor, [],
@@ -59,7 +61,7 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog=?t:timetrap(?t:minutes(15)),
- [{watchdog, Dog}|Config].
+ [{watchdog, Dog},{testcase, Func}|Config].
end_per_testcase(_Func, Config) ->
Dog=?config(watchdog, Config),
@@ -837,6 +839,89 @@ otp_5827(Config) when is_list(Config) ->
?line ?t:fail("erlang:monitor/2 hangs")
end.
+monitor_time_offset(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config, "+C single_time_warp"),
+ Me = self(),
+ PMs = lists:map(fun (_) ->
+ Pid = spawn(Node,
+ fun () ->
+ check_monitor_time_offset(Me)
+ end),
+ {Pid, erlang:monitor(process, Pid)}
+ end,
+ lists:seq(1, 100)),
+ lists:foreach(fun ({P, _M}) ->
+ P ! check_no_change_message
+ end, PMs),
+ lists:foreach(fun ({P, M}) ->
+ receive
+ {no_change_message_received, P} ->
+ ok;
+ {'DOWN', M, process, P, Reason} ->
+ ?t:fail(Reason)
+ end
+ end, PMs),
+ preliminary = rpc:call(Node, erlang, system_flag, [time_offset, finalize]),
+ lists:foreach(fun ({P, M}) ->
+ receive
+ {change_messages_received, P} ->
+ erlang:demonitor(M, [flush]);
+ {'DOWN', M, process, P, Reason} ->
+ ?t:fail(Reason)
+ end
+ end, PMs),
+ stop_node(Node),
+ ok.
+
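+%% Set up a number of time_offset monitors, demonitor a few of them, and
+%% check that each remaining monitor gets exactly one 'CHANGE' message
+%% once the test case above finalizes the preliminary time offset.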
+check_monitor_time_offset(Leader) ->
+ Mon1 = erlang:monitor(time_offset, clock_service),
+ Mon2 = erlang:monitor(time_offset, clock_service),
+ Mon3 = erlang:monitor(time_offset, clock_service),
+ Mon4 = erlang:monitor(time_offset, clock_service),
+
+ erlang:demonitor(Mon2, [flush]),
+
+ Mon5 = erlang:monitor(time_offset, clock_service),
+ Mon6 = erlang:monitor(time_offset, clock_service),
+ Mon7 = erlang:monitor(time_offset, clock_service),
+
+ receive check_no_change_message -> ok end,
+ receive
+ {'CHANGE', _, time_offset, clock_service, _} ->
+ exit(unexpected_change_message_received)
+ after 0 ->
+ Leader ! {no_change_message_received, self()}
+ end,
+ receive after 100 -> ok end,
+ erlang:demonitor(Mon4, [flush]),
+ receive
+ {'CHANGE', Mon3, time_offset, clock_service, _} ->
+ ok
+ end,
+ receive
+ {'CHANGE', Mon6, time_offset, clock_service, _} ->
+ ok
+ end,
+ erlang:demonitor(Mon5, [flush]),
+ receive
+ {'CHANGE', Mon7, time_offset, clock_service, _} ->
+ ok
+ end,
+ receive
+ {'CHANGE', Mon1, time_offset, clock_service, _} ->
+ ok
+ end,
+ receive
+ {'CHANGE', _, time_offset, clock_service, _} ->
+ exit(unexpected_change_message_received)
+ after 1000 ->
+ ok
+ end,
+ Leader ! {change_messages_received, self()}.
+
+%%
+%% ...
+%%
wait_for_m(_,_,0) ->
exit(monitor_wait_timeout);
@@ -959,3 +1044,25 @@ generate(_Fun, 0) ->
[];
generate(Fun, N) ->
[Fun() | generate(Fun, N-1)].
+
+start_node(Config) ->
+ start_node(Config, "").
+
+start_node(Config, Args) ->
+ TestCase = ?config(testcase, Config),
+ PA = filename:dirname(code:which(?MODULE)),
+ ESTime = erlang:monotonic_time(1) + erlang:time_offset(1),
+ Unique = erlang:unique_integer([positive]),
+ Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(TestCase)
+ ++ "-"
+ ++ integer_to_list(ESTime)
+ ++ "-"
+ ++ integer_to_list(Unique)),
+ test_server:start_node(Name,
+ slave,
+ [{args, "-pa " ++ PA ++ " " ++ Args}]).
+
+stop_node(Node) ->
+ test_server:stop_node(Node).
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 4560077a51..b0624fb8c1 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -451,7 +451,7 @@ maps(Config) when is_list(Config) ->
M = maps_from_list_nif(Pairs),
R = {RIs,Is} = sorted_list_from_maps_nif(M),
io:format("Pairs: ~p~nMap: ~p~nReturned: ~p~n", [lists:sort(Pairs),M,R]),
- Is = lists:sort(Pairs),
+ true = (lists:sort(Is) =:= lists:sort(Pairs)),
Is = lists:reverse(RIs),
#{} = maps_from_list_nif([]),
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 85544db2ab..5a3be84825 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1717,8 +1717,9 @@ static ERL_NIF_TERM sorted_list_from_maps_nif(ErlNifEnv* env, int argc, const ER
return enif_make_int(env, __LINE__);
cnt = 0;
+ next_ret = 1;
while(enif_map_iterator_get_pair(env,&iter_f,&key,&value)) {
- if (cnt && !next_ret)
+ if (!next_ret)
return enif_make_int(env, __LINE__);
list_f = enif_make_list_cell(env, enif_make_tuple2(env, key, value), list_f);
next_ret = enif_map_iterator_next(env,&iter_f);
@@ -1731,8 +1732,9 @@ static ERL_NIF_TERM sorted_list_from_maps_nif(ErlNifEnv* env, int argc, const ER
return enif_make_int(env, __LINE__);
cnt = 0;
+ prev_ret = 1;
while(enif_map_iterator_get_pair(env,&iter_b,&key,&value)) {
- if (cnt && !prev_ret)
+ if (!prev_ret)
return enif_make_int(env, __LINE__);
/* Test that iter_f can step "backwards" */
@@ -1744,6 +1746,7 @@ static ERL_NIF_TERM sorted_list_from_maps_nif(ErlNifEnv* env, int argc, const ER
list_b = enif_make_list_cell(env, enif_make_tuple2(env, key, value), list_b);
prev_ret = enif_map_iterator_prev(env,&iter_b);
+ cnt++;
}
if (cnt) {
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 9083545060..6bbf93b7d7 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -1407,6 +1407,12 @@ spawn_executable(Config) when is_list(Config) ->
run_echo_args(SpaceDir,[ExactFile2,"hello world","dlrow olleh"]),
[ExactFile2,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,[binary, ExactFile2,"hello world","dlrow olleh"]),
+
+ [ExactFile2,"hello \"world\"","\"dlrow\" olleh"] =
+ run_echo_args(SpaceDir,[binary, ExactFile2,"hello \"world\"","\"dlrow\" olleh"]),
+ [ExactFile2,"hello \"world\"","\"dlrow\" olleh"] =
+ run_echo_args(SpaceDir,[binary, ExactFile2,"hello \"world\"","\"dlrow\" olleh"]),
+
[ExactFile2] = run_echo_args(SpaceDir,[default]),
[ExactFile2,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,[switch_order,ExactFile2,"hello world",
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index a0a8a9c42c..43f7ac7f7c 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -34,7 +34,14 @@
bad_univ_to_local/1, bad_local_to_univ/1,
univ_to_seconds/1, seconds_to_univ/1,
consistency/1,
- now_unique/1, now_update/1, timestamp/1]).
+ now_unique/1, now_update/1, timestamp/1,
+ time_warp_modes/1,
+ monotonic_time_monotonicity/1,
+ time_unit_conversion/1,
+ signed_time_unit_conversion/1,
+ erlang_timestamp/1]).
+
+-export([init_per_testcase/2, end_per_testcase/2]).
-export([local_to_univ_utc/1]).
@@ -56,6 +63,12 @@
-define(dst_timezone, 2).
+init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ [{testcase, Func}|Config].
+
+end_per_testcase(_Func, _Config) ->
+ ok.
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
@@ -63,7 +76,12 @@ all() ->
bad_univ_to_local, bad_local_to_univ,
univ_to_seconds, seconds_to_univ,
consistency,
- {group, now}, timestamp].
+ {group, now}, timestamp,
+ time_warp_modes,
+ monotonic_time_monotonicity,
+ time_unit_conversion,
+ signed_time_unit_conversion,
+ erlang_timestamp].
groups() ->
[{now, [], [now_unique, now_update]}].
@@ -420,6 +438,368 @@ now_update1(N) when N > 0 ->
now_update1(0) ->
?line test_server:fail().
+time_warp_modes(Config) when is_list(Config) ->
+ %% All time warp modes always supported in
+ %% combination with no time correction...
+ check_time_warp_mode(Config, false, no_time_warp),
+ check_time_warp_mode(Config, false, single_time_warp),
+ check_time_warp_mode(Config, false, multi_time_warp),
+
+ erts_debug:set_internal_state(available_internal_state, true),
+ try
+ case erts_debug:get_internal_state({check_time_config,
+ true, no_time_warp}) of
+ false -> ok;
+ true -> check_time_warp_mode(Config, true, no_time_warp)
+ end,
+ case erts_debug:get_internal_state({check_time_config,
+ true, single_time_warp}) of
+ false -> ok;
+ true -> check_time_warp_mode(Config, true, single_time_warp)
+ end,
+ case erts_debug:get_internal_state({check_time_config,
+ true, multi_time_warp}) of
+ false -> ok;
+ true -> check_time_warp_mode(Config, true, multi_time_warp)
+ end
+ after
+ erts_debug:set_internal_state(available_internal_state, false)
+ end.
+
+check_time_warp_mode(Config, TimeCorrection, TimeWarpMode) ->
+ io:format("~n~n~n***** Testing TimeCorrection=~p TimeWarpMode=~p *****~n",
+ [TimeCorrection, TimeWarpMode]),
+ Mon = erlang:monitor(time_offset, clock_service),
+ _ = erlang:time_offset(),
+ Start = erlang:monotonic_time(1000),
+ MonotonicityTimeout = 2000,
+ {ok, Node} = start_node(Config,
+ "+c " ++ atom_to_list(TimeCorrection)
+ ++ " +C " ++ atom_to_list(TimeWarpMode)),
+ StartTime = rpc:call(Node, erlang, system_info, [start_time]),
+ Me = self(),
+ MonotincityTestStarted = make_ref(),
+ MonotincityTestDone = make_ref(),
+ spawn_link(Node,
+ fun () ->
+ Me ! MonotincityTestStarted,
+ cmp_times(erlang:start_timer(MonotonicityTimeout,
+ self(),
+ timeout),
+ erlang:monotonic_time()),
+ Me ! MonotincityTestDone
+ end),
+ receive MonotincityTestStarted -> ok end,
+ check_time_offset(Node, TimeWarpMode),
+ TimeWarpMode = rpc:call(Node, erlang, system_info, [time_warp_mode]),
+ TimeCorrection = rpc:call(Node, erlang, system_info, [time_correction]),
+ receive MonotincityTestDone -> ok end,
+ MonotonicTime = rpc:call(Node, erlang, monotonic_time, []),
+ MonotonicTimeUnit = rpc:call(Node,
+ erlang,
+ convert_time_unit,
+ [1, seconds, native]),
+ UpMilliSeconds = erlang:convert_time_unit(MonotonicTime - StartTime,
+ MonotonicTimeUnit,
+ milli_seconds),
+ io:format("UpMilliSeconds=~p~n", [UpMilliSeconds]),
+ End = erlang:monotonic_time(milli_seconds),
+ stop_node(Node),
+ try
+ true = (UpMilliSeconds > (98*MonotonicityTimeout) div 100),
+ true = (UpMilliSeconds < (102*(End-Start)) div 100)
+ catch
+ error:_ ->
+ io:format("Uptime inconsistency", []),
+ case {TimeCorrection, erlang:system_info(time_correction)} of
+ {true, true} ->
+ ?t:fail(uptime_inconsistency);
+ {true, false} ->
+ _ = erlang:time_offset(),
+ receive
+ {'CHANGE', Mon, time_offset, clock_service, _} ->
+ ignore
+ after 1000 ->
+ ?t:fail(uptime_inconsistency)
+ end;
+ _ ->
+ ignore
+ end
+ end,
+ erlang:demonitor(Mon, [flush]),
+ ok.
+
+check_time_offset(Node, no_time_warp) ->
+ final = rpc:call(Node, erlang, system_info, [time_offset]),
+ final = rpc:call(Node, erlang, system_flag, [time_offset, finalize]),
+ final = rpc:call(Node, erlang, system_info, [time_offset]);
+check_time_offset(Node, single_time_warp) ->
+ preliminary = rpc:call(Node, erlang, system_info, [time_offset]),
+ preliminary = rpc:call(Node, erlang, system_flag, [time_offset, finalize]),
+ final = rpc:call(Node, erlang, system_info, [time_offset]),
+ final = rpc:call(Node, erlang, system_flag, [time_offset, finalize]);
+check_time_offset(Node, multi_time_warp) ->
+ volatile = rpc:call(Node, erlang, system_info, [time_offset]),
+ volatile = rpc:call(Node, erlang, system_flag, [time_offset, finalize]),
+ volatile = rpc:call(Node, erlang, system_info, [time_offset]).
+
+monotonic_time_monotonicity(Config) when is_list(Config) ->
+ Done = erlang:start_timer(10000,self(),timeout),
+ cmp_times(Done, erlang:monotonic_time()).
+
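+%% Assert that erlang:monotonic_time/0 never decreases, repeatedly, until
+%% the Done timer fires.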
+cmp_times(Done, X0) ->
+ X1 = erlang:monotonic_time(),
+ X2 = erlang:monotonic_time(),
+ X3 = erlang:monotonic_time(),
+ X4 = erlang:monotonic_time(),
+ X5 = erlang:monotonic_time(),
+ true = (X0 =< X1),
+ true = (X1 =< X2),
+ true = (X2 =< X3),
+ true = (X3 =< X4),
+ true = (X4 =< X5),
+ receive
+ {timeout, Done, timeout} ->
+ ok
+ after 0 ->
+ cmp_times(Done, X5)
+ end.
+
+-define(CHK_RES_CONVS_TIMEOUT, 400).
+
+time_unit_conversion(Config) when is_list(Config) ->
+ Mon = erlang:monitor(time_offset, clock_service),
+ start_check_res_convs(Mon, 1000000000000),
+ start_check_res_convs(Mon, 2333333333333),
+ start_check_res_convs(Mon, 5732678356789),
+ erlang:demonitor(Mon, [flush]).
+
+start_check_res_convs(Mon, Res) ->
+ io:format("Checking ~p time_unit~n", [Res]),
+ check_res_convs(Mon,
+ erlang:start_timer(?CHK_RES_CONVS_TIMEOUT,
+ self(),
+ timeout),
+ Res).
+
+
+check_res_convs(Mon, Done, Res) ->
+ receive
+ {timeout, Done, timeout} ->
+ case Res div 10 of
+ 0 ->
+ ok;
+ NewRes ->
+ start_check_res_convs(Mon, NewRes)
+ end
+ after 0 ->
+ do_check_res_convs(Mon, Done, Res)
+ end.
+
+do_check_res_convs(Mon, Done, Res) ->
+ TStart = erlang:monotonic_time(),
+ T = erlang:monotonic_time(Res),
+ TEnd = erlang:monotonic_time(),
+ TMin = erlang:convert_time_unit(TStart, native, Res),
+ TMax = erlang:convert_time_unit(TEnd, native, Res),
+ %io:format("~p =< ~p =< ~p~n", [TMin, T, TEnd]),
+ true = (TMin =< T),
+ true = (TMax >= T),
+ check_time_offset_res_conv(Mon, Res),
+ check_res_convs(Mon, Done, Res).
+
+
+check_time_offset_res_conv(Mon, Res) ->
+ TORes = erlang:time_offset(Res),
+ TO = erlang:time_offset(),
+ case erlang:convert_time_unit(TO, native, Res) of
+ TORes ->
+ ok;
+ TORes2 ->
+ case check_time_offset_change(Mon, TO, 1000) of
+ {TO, false} ->
+ ?t:fail({time_unit_conversion_inconsistency,
+ TO, TORes, TORes2});
+ {_NewTO, true} ->
+ ?t:format("time_offset changed", []),
+ check_time_offset_res_conv(Mon, Res)
+ end
+ end.
+
+signed_time_unit_conversion(Config) when is_list(Config) ->
+ chk_strc(1000000000, 1000000),
+ chk_strc(1000000000, 1000),
+ chk_strc(1000000000, 1),
+ chk_strc(1000000, 1000),
+ chk_strc(1000000, 1),
+ chk_strc(1000, 1),
+ chk_strc(4711, 17),
+ chk_strc(1 bsl 10, 1),
+ chk_strc(1 bsl 16, 10),
+ chk_strc(1 bsl 17, 1 bsl 8),
+ chk_strc((1 bsl 17) + 1, (1 bsl 8) - 1),
+ chk_strc(1 bsl 17, 11),
+ ok.
+
+chk_strc(Res0, Res1) ->
+ case (Res0 /= Res1) andalso (Res0 =< 1000000) andalso (Res1 =< 1000000) of
+ true ->
+ {FromRes, ToRes} = case Res0 > Res1 of
+ true -> {Res0, Res1};
+ false -> {Res1, Res0}
+ end,
+ MinFromValuesPerToValue = FromRes div ToRes,
+ MaxFromValuesPerToValue = ((FromRes-1) div ToRes)+1,
+ io:format("~p -> ~p [~p, ~p]~n",
+ [FromRes, ToRes,
+ MinFromValuesPerToValue, MaxFromValuesPerToValue]),
+ chk_values_per_value(FromRes, ToRes,
+ -10*FromRes, 10*FromRes,
+ MinFromValuesPerToValue,
+ MaxFromValuesPerToValue,
+ undefined, MinFromValuesPerToValue);
+ _ ->
+ ok
+ end,
+ chk_random_values(Res0, Res1),
+ chk_random_values(Res1, Res0),
+ ok.
+
+chk_random_values(FR, TR) ->
+% case (FR rem TR == 0) orelse (TR rem FR == 0) of
+% true ->
+ io:format("rand values ~p -> ~p~n", [FR, TR]),
+ random:seed(268438039, 268440479, 268439161),
+ Values = lists:map(fun (_) -> random:uniform(1 bsl 65) - (1 bsl 64) end,
+ lists:seq(1, 100000)),
+ CheckFun = fun (V) ->
+ CV = erlang:convert_time_unit(V, FR, TR),
+ case {(FR*CV) div TR =< V,
+ (FR*(CV+1)) div TR >= V} of
+ {true, true} ->
+ ok;
+ Failure ->
+ ?t:fail({Failure, CV, V, FR, TR})
+ end
+ end,
+ lists:foreach(CheckFun, Values).%;
+% false -> ok
+% end.
+
+
+chk_values_per_value(_FromRes, _ToRes,
+ EndValue, EndValue,
+ MinFromValuesPerToValue, MaxFromValuesPerToValue,
+ _ToValue, FromValueCount) ->
+% io:format("~p [~p]~n", [EndValue, FromValueCount]),
+ case ((MinFromValuesPerToValue =< FromValueCount)
+ andalso (FromValueCount =< MaxFromValuesPerToValue)) of
+ false ->
+ ?t:fail({MinFromValuesPerToValue,
+ FromValueCount,
+ MaxFromValuesPerToValue});
+ true ->
+ ok
+ end;
+chk_values_per_value(FromRes, ToRes, Value, EndValue,
+ MinFromValuesPerToValue, MaxFromValuesPerToValue,
+ ToValue, FromValueCount) ->
+ case erlang:convert_time_unit(Value, FromRes, ToRes) of
+ ToValue ->
+ chk_values_per_value(FromRes, ToRes,
+ Value+1, EndValue,
+ MinFromValuesPerToValue,
+ MaxFromValuesPerToValue,
+ ToValue, FromValueCount+1);
+ NewToValue ->
+ case ((MinFromValuesPerToValue =< FromValueCount)
+ andalso (FromValueCount =< MaxFromValuesPerToValue)) of
+ false ->
+ ?t:fail({MinFromValuesPerToValue,
+ FromValueCount,
+ MaxFromValuesPerToValue});
+ true ->
+% io:format("~p -> ~p [~p]~n",
+% [Value, NewToValue, FromValueCount]),
+ chk_values_per_value(FromRes, ToRes,
+ Value+1, EndValue,
+ MinFromValuesPerToValue,
+ MaxFromValuesPerToValue,
+ NewToValue, 1)
+ end
+ end.
+
+erlang_timestamp(Config) when is_list(Config) ->
+ Mon = erlang:monitor(time_offset, clock_service),
+ {TO, _} = check_time_offset_change(Mon,
+ erlang:time_offset(),
+ 0),
+ Done = erlang:start_timer(10000,self(),timeout),
+ ok = check_erlang_timestamp(Done, Mon, TO).
+
+check_erlang_timestamp(Done, Mon, TO) ->
+ receive
+ {timeout, Done, timeout} ->
+ erlang:demonitor(Mon, [flush]),
+ ok
+ after 0 ->
+ do_check_erlang_timestamp(Done, Mon, TO)
+ end.
+
+do_check_erlang_timestamp(Done, Mon, TO) ->
+ MinMon = erlang:monotonic_time(),
+ {MegaSec, Sec, MicroSec} = erlang:timestamp(),
+ MaxMon = erlang:monotonic_time(),
+ TsMin = erlang:convert_time_unit(MinMon+TO,
+ native,
+ micro_seconds),
+ TsMax = erlang:convert_time_unit(MaxMon+TO,
+ native,
+ micro_seconds),
+ TsTime = (MegaSec*1000000+Sec)*1000000+MicroSec,
+ case (TsMin =< TsTime) andalso (TsTime =< TsMax) of
+ true ->
+ NewTO = case erlang:time_offset() of
+ TO ->
+ TO;
+ _ ->
+ check_time_offset_change(Mon, TO, 0)
+ end,
+ check_erlang_timestamp(Done, Mon, NewTO);
+ false ->
+ io:format("TsMin=~p TsTime=~p TsMax=~p~n", [TsMin, TsTime, TsMax]),
+ ?t:format("Detected inconsistency; "
+ "checking for time_offset change...", []),
+ case check_time_offset_change(Mon, TO, 1000) of
+ {TO, false} ->
+ ?t:fail(timestamp_inconsistency);
+ {NewTO, true} ->
+ ?t:format("time_offset changed", []),
+ check_erlang_timestamp(Done, Mon, NewTO)
+ end
+ end.
+
+check_time_offset_change(Mon, TO, Wait) ->
+ process_changed_time_offset(Mon, TO, false, Wait).
+
+process_changed_time_offset(Mon, TO, Changed, Wait) ->
+ receive
+ {'CHANGE', Mon, time_offset, clock_service, NewTO} ->
+ process_changed_time_offset(Mon, NewTO, true, Wait)
+ after Wait ->
+ case erlang:time_offset() of
+ TO ->
+ {TO, Changed};
+ _OtherTO ->
+ receive
+ {'CHANGE', Mon, time_offset, clock_service, NewTO} ->
+ process_changed_time_offset(Mon, NewTO, true, Wait)
+ end
+ end
+ end.
+
+
+
%% Returns the test data: a list of {Utc, Local} tuples.
test_data() ->
@@ -554,4 +934,25 @@ bad_dates() ->
{{1996, 4, 30}, {12, 0, -1}}, % Sec
{{1996, 4, 30}, {12, 0, 60}}].
-
+
+start_node(Config) ->
+ start_node(Config, "").
+
+start_node(Config, Args) ->
+ TestCase = ?config(testcase, Config),
+ PA = filename:dirname(code:which(?MODULE)),
+ ESTime = erlang:monotonic_time(1) + erlang:time_offset(1),
+ Unique = erlang:unique_integer([positive]),
+ Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(TestCase)
+ ++ "-"
+ ++ integer_to_list(ESTime)
+ ++ "-"
+ ++ integer_to_list(Unique)),
+ test_server:start_node(Name,
+ slave,
+ [{args, "-pa " ++ PA ++ " " ++ Args}]).
+
+stop_node(Node) ->
+ test_server:stop_node(Node).
diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl
index c28224729d..da19be3424 100644
--- a/erts/emulator/test/timer_bif_SUITE.erl
+++ b/erts/emulator/test/timer_bif_SUITE.erl
@@ -238,6 +238,7 @@ cleanup(Config) when is_list(Config) ->
?line wait_until(fun () -> process_is_cleaned_up(P1) end),
?line T1 = erlang:start_timer(10000, P1, "hej"),
?line T2 = erlang:send_after(10000, P1, "hej"),
+ receive after 1000 -> ok end,
?line Mem = mem(),
?line false = erlang:read_timer(T1),
?line false = erlang:read_timer(T2),
@@ -250,6 +251,7 @@ cleanup(Config) when is_list(Config) ->
?line true = is_integer(erlang:read_timer(T3)),
?line true = is_integer(erlang:read_timer(T4)),
?line wait_until(fun () -> process_is_cleaned_up(P2) end),
+ receive after 1000 -> ok end,
?line false = erlang:read_timer(T3),
?line false = erlang:read_timer(T4),
?line Mem = mem(),
@@ -455,10 +457,18 @@ registered_process(Config) when is_list(Config) ->
?line ok.
mem() ->
- AA = erlang:system_info(allocated_areas),
- {value,{bif_timer,Mem}} = lists:keysearch(bif_timer, 1, AA),
- Mem.
-
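+ %% Sum the memory (in bytes) of all ETS tables owned by the BIF timer servers.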
+ TSrvs = erts_internal:get_bif_timer_servers(),
+ lists:foldl(fun (Tab, Sz) ->
+ case lists:member(ets:info(Tab, owner), TSrvs) of
+ true ->
+ ets:info(Tab, memory) + Sz;
+ false ->
+ Sz
+ end
+ end,
+ 0,
+ ets:all())*erlang:system_info({wordsize,external}).
+
process_is_cleaned_up(P) when is_pid(P) ->
undefined == erts_debug:get_internal_state({process_status, P}).
diff --git a/erts/emulator/test/trace_bif_SUITE.erl b/erts/emulator/test/trace_bif_SUITE.erl
index 2c78aa394f..063e348836 100644
--- a/erts/emulator/test/trace_bif_SUITE.erl
+++ b/erts/emulator/test/trace_bif_SUITE.erl
@@ -260,7 +260,9 @@ bif_process() ->
apply(erlang, Name, Args),
bif_process();
{do_time_bif} ->
- _ = time(), %Assignment tells compiler to keep call.
+ %% Match the return value to ensure that the time() call
+ %% is not optimized away.
+ {_,_,_} = time(),
bif_process();
{do_statistics_bif} ->
statistics(runtime),
diff --git a/erts/emulator/test/unique_SUITE.erl b/erts/emulator/test/unique_SUITE.erl
new file mode 100644
index 0000000000..5ad6e59272
--- /dev/null
+++ b/erts/emulator/test/unique_SUITE.erl
@@ -0,0 +1,390 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2014. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(unique_SUITE).
+
+-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
+ init_per_group/2,end_per_group/2,
+ init_per_testcase/2,end_per_testcase/2]).
+-export([unique_monotonic_integer_white_box/1,
+ unique_integer_white_box/1]).
+
+-include_lib("test_server/include/test_server.hrl").
+
+%-define(P(V), V).
+-define(P(V), print_ret_val(?FILE, ?LINE, V)).
+
+-define(PRINT(V), print_ret_val(?FILE, ?LINE, V)).
+
+
+init_per_testcase(Case, Config) ->
+ ?line Dog=test_server:timetrap(test_server:minutes(2)),
+ [{watchdog, Dog}, {testcase, Case}|Config].
+
+end_per_testcase(_, Config) ->
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [unique_monotonic_integer_white_box,
+ unique_integer_white_box].
+
+groups() ->
+ [].
+
+init_per_suite(Config) ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ Config.
+
+end_per_suite(_Config) ->
+ erts_debug:set_internal_state(available_internal_state, false),
+ ok.
+
+init_per_group(_GroupName, Config) ->
+ Config.
+
+end_per_group(_GroupName, Config) ->
+ Config.
+
+%%
+%%
+%% Unique counter white box test case
+%%
+%%
+
+unique_monotonic_integer_white_box(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config),
+ TestServer = self(),
+ Success = make_ref(),
+ %% Run this in a separate node, so we don't mess up
+ %% the system when moving the strict monotonic counter
+ %% around in a non-strict monotonic way...
+ Test = spawn(Node,
+ fun () ->
+ unique_monotonic_integer_white_box_test(TestServer, Success)
+ end),
+ Mon = erlang:monitor(process, Test),
+ receive
+ {'DOWN', Mon, process, Test, Error} ->
+ ?t:fail(Error);
+ Success ->
+ ok
+ end,
+ erlang:demonitor(Mon, [flush]),
+ stop_node(Node),
+ ok.
+
+set_unique_monotonic_integer_state(MinCounter, NextValue) ->
+ true = erts_debug:set_internal_state(unique_monotonic_integer_state,
+ NextValue-MinCounter-1).
+
+
+
+unique_monotonic_integer_white_box_test(TestServer, Success) ->
+ erts_debug:set_internal_state(available_internal_state, true),
+
+ WordSize = erlang:system_info({wordsize, internal}),
+ SmallBits = WordSize*8 - 4,
+
+ MinSmall = -1*(1 bsl (SmallBits-1)),
+ MaxSmall = (1 bsl (SmallBits-1))-1,
+ %% Make sure we got small sizes correct...
+ 0 = erts_debug:size(MinSmall),
+ false = 0 =:= erts_debug:size(MinSmall-1),
+ 0 = erts_debug:size(MaxSmall),
+ false = 0 =:= erts_debug:size(MaxSmall+1),
+
+ ?PRINT({min_small, MinSmall}),
+ ?PRINT({max_small, MaxSmall}),
+
+ MinSint64 = -1*(1 bsl 63),
+ MaxSint64 = (1 bsl 63)-1,
+
+ ?PRINT({min_Sint64, MinSint64}),
+ ?PRINT({max_Sint64, MaxSint64}),
+
+ MinCounter = erts_debug:get_internal_state(min_unique_monotonic_integer),
+ MaxCounter = MinCounter + (1 bsl 64) - 1,
+
+ ?PRINT({min_counter, MinCounter}),
+ ?PRINT({max_counter, MaxCounter}),
+
+ case WordSize of
+ 4 ->
+ MinCounter = MinSint64;
+ 8 ->
+ MinCounter = MinSmall
+ end,
+
+ StartState = erts_debug:get_internal_state(unique_monotonic_integer_state),
+
+ %% Verify that we get expected results over all internal limits...
+
+ case MinCounter < MinSmall of
+ false ->
+ 8 = WordSize,
+ ok;
+ true ->
+ 4 = WordSize,
+ ?PRINT(over_min_small),
+ set_unique_monotonic_integer_state(MinCounter, MinSmall-2),
+ true = (?P(erlang:unique_integer([monotonic])) == MinSmall - 2),
+ true = (?P(erlang:unique_integer([monotonic])) == MinSmall - 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MinSmall),
+ true = (?P(erlang:unique_integer([monotonic])) == MinSmall + 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MinSmall + 2),
+ garbage_collect(),
+ ok
+ end,
+
+ ?PRINT(over_zero), %% Not really an interesting limit, but...
+ set_unique_monotonic_integer_state(MinCounter, -2),
+ true = (?P(erlang:unique_integer([monotonic])) == -2),
+ true = (?P(erlang:unique_integer([monotonic])) == -1),
+ true = (?P(erlang:unique_integer([monotonic])) == 0),
+ true = (?P(erlang:unique_integer([monotonic])) == 1),
+ true = (?P(erlang:unique_integer([monotonic])) == 2),
+ garbage_collect(),
+
+ ?PRINT(over_max_small),
+ set_unique_monotonic_integer_state(MinCounter, MaxSmall-2),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSmall - 2),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSmall - 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSmall),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSmall + 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSmall + 2),
+ garbage_collect(),
+
+ case MaxCounter > MaxSint64 of
+ false ->
+ 4 = WordSize,
+ ok;
+ true ->
+ 8 = WordSize,
+ ?PRINT(over_max_sint64),
+ set_unique_monotonic_integer_state(MinCounter, MaxSint64-2),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSint64 - 2),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSint64 - 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSint64),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSint64 + 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxSint64 + 2),
+ garbage_collect()
+ end,
+
+ ?PRINT(over_max_min_counter),
+ set_unique_monotonic_integer_state(MinCounter, if MaxCounter == MaxSint64 ->
+ MaxCounter-2;
+ true ->
+ MinCounter-3
+ end),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxCounter - 2),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxCounter - 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MaxCounter),
+ true = (?P(erlang:unique_integer([monotonic])) == MinCounter),
+ true = (?P(erlang:unique_integer([monotonic])) == MinCounter + 1),
+ true = (?P(erlang:unique_integer([monotonic])) == MinCounter + 2),
+ garbage_collect(),
+
+ %% Restore initial state and hope we didn't mess it up for the
+ %% system...
+ true = erts_debug:set_internal_state(unique_monotonic_integer_state,
+ StartState),
+
+ TestServer ! Success.
+
+%%
+%%
+%% Unique integer white box test case
+%%
+%%
+
+-record(uniqint_info, {min_int,
+ max_int,
+ max_small,
+ schedulers,
+ sched_bits}).
+
+unique_integer_white_box(Config) when is_list(Config) ->
+ UinqintInfo = init_uniqint_info(),
+ #uniqint_info{min_int = MinInt,
+ max_int = MaxInt,
+ max_small = MaxSmall} = UinqintInfo,
+ io:format("****************************************************~n", []),
+ io:format("*** Around MIN_UNIQ_INT ~p ***~n", [MinInt]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around(MinInt, UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around 0 ***~n", []),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around(0, UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around MAX_SMALL ~p ***~n", [MaxSmall]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around(MaxSmall, UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around 2^64+MIN_UNIQ_INT ~p ***~n", [(1 bsl 64)+MinInt]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around((1 bsl 64)+MinInt, UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around 2^64 ~p~n", [(1 bsl 64)]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around((1 bsl 64), UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around 2^64-MIN_UNIQ_INT ~p ***~n", [(1 bsl 64)-MinInt]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around((1 bsl 64)-MinInt, UinqintInfo),
+ io:format("****************************************************~n", []),
+ io:format("*** Around MAX_UNIQ_INT ~p ***~n", [MaxInt]),
+ io:format("****************************************************~n", []),
+ check_unique_integer_around(MaxInt, UinqintInfo),
+ ok.
+
+
+%%% Internal unique_integer_white_box/1 test case
+
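+%% Smallest number of bits needed to represent all values 0..NoScheds.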
+calc_sched_bits(NoScheds, Shift) when NoScheds < 1 bsl Shift ->
+ Shift;
+calc_sched_bits(NoScheds, Shift) ->
+ calc_sched_bits(NoScheds, Shift+1).
+
+init_uniqint_info() ->
+ SmallBits = erlang:system_info({wordsize, internal})*8-4,
+ io:format("SmallBits=~p~n", [SmallBits]),
+ Schedulers = erlang:system_info(schedulers),
+ io:format("Schedulers=~p~n", [Schedulers]),
+ MinSmall = -1*(1 bsl (SmallBits-1)),
+ io:format("MinSmall=~p~n", [MinSmall]),
+ MaxSmall = (1 bsl (SmallBits-1))-1,
+ io:format("MaxSmall=~p~n", [MaxSmall]),
+ SchedBits = calc_sched_bits(Schedulers, 0),
+ io:format("SchedBits=~p~n", [SchedBits]),
+ MaxInt = ((((1 bsl 64) - 1) bsl SchedBits) bor Schedulers) + MinSmall,
+ io:format("MaxInt=~p~n", [MaxInt]),
+ #uniqint_info{min_int = MinSmall,
+ max_int = MaxInt,
+ max_small = MaxSmall,
+ schedulers = Schedulers,
+ sched_bits = SchedBits}.
+
+valid_uniqint(Int, #uniqint_info{min_int = MinInt} = UinqintInfo) when Int < MinInt ->
+ valid_uniqint(MinInt, UinqintInfo);
+valid_uniqint(Int, #uniqint_info{min_int = MinInt,
+ sched_bits = SchedBits,
+ schedulers = Scheds}) ->
+ Int1 = Int - MinInt,
+ {Inc, ThreadNo} = case Int1 band ((1 bsl SchedBits) - 1) of
+ TN when TN > Scheds ->
+ {1, Scheds};
+ TN ->
+ {0, TN}
+ end,
+ Counter = ((Int1 bsr SchedBits) + Inc) rem (1 bsl 64),
+ ((Counter bsl SchedBits) bor ThreadNo) + MinInt.
+
+smaller_valid_uniqint(Int, UinqintInfo) ->
+ Cand = Int-1,
+ case valid_uniqint(Cand, UinqintInfo) of
+ RI when RI < Int ->
+ RI;
+ _ ->
+ smaller_valid_uniqint(Cand, UinqintInfo)
+ end.
+
+int32_to_bigendian_list(Int) ->
+ 0 = Int bsr 32,
+ [(Int bsr 24) band 16#ff,
+ (Int bsr 16) band 16#ff,
+ (Int bsr 8) band 16#ff,
+ Int band 16#ff].
+
+mk_uniqint(Int, #uniqint_info {min_int = MinInt,
+ sched_bits = SchedBits} = _UinqintInfo) ->
+ Int1 = Int - MinInt,
+ ThrId = Int1 band ((1 bsl SchedBits) - 1),
+ Value = (Int1 bsr SchedBits) band ((1 bsl 64) - 1),
+ 0 = Int1 bsr (SchedBits + 64),
+ _NodeName = atom_to_list(node()),
+ Make = {make_unique_integer, ThrId, Value},
+ %% erlang:display(Make),
+ Res = erts_debug:get_internal_state(Make),
+ %% erlang:display({uniq_int, Res}),
+ Res.
+
+check_uniqint(Int, UinqintInfo) ->
+ UniqInt = mk_uniqint(Int, UinqintInfo),
+ io:format("UniqInt=~p ", [UniqInt]),
+ case UniqInt =:= Int of
+ true ->
+ io:format("OK~n~n", []);
+ false ->
+ io:format("result UniqInt=~p FAILED~n", [UniqInt]),
+ exit(badres)
+ end.
+
+check_unique_integer_around(Int, #uniqint_info{min_int = MinInt,
+ max_int = MaxInt} = UinqintInfo) ->
+ {Start, End} = case {Int =< MinInt+100, Int >= MaxInt-100} of
+ {true, false} ->
+ {MinInt, MinInt+100};
+ {false, false} ->
+ {smaller_valid_uniqint(Int-100, UinqintInfo),
+ valid_uniqint(Int+100, UinqintInfo)};
+ {false, true} ->
+ {MaxInt-100, MaxInt}
+ end,
+ lists:foldl(fun (I, OldRefInt) ->
+ RefInt = valid_uniqint(I, UinqintInfo),
+ case OldRefInt =:= RefInt of
+ true ->
+ ok;
+ false ->
+ check_uniqint(RefInt, UinqintInfo)
+ end,
+ RefInt
+ end,
+ none,
+ lists:seq(Start, End)).
+
+
+%% helpers
+
+print_ret_val(File, Line, Value) ->
+ io:format("~s:~p: ~p~n", [File, Line, Value]),
+ Value.
+
+start_node(Config) ->
+ start_node(Config, []).
+start_node(Config, Opts) when is_list(Config), is_list(Opts) ->
+ ?line Pa = filename:dirname(code:which(?MODULE)),
+ ?line A = erlang:monotonic_time(1) + erlang:time_offset(1),
+ ?line B = erlang:unique_integer([positive]),
+ ?line Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(?config(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(A)
+ ++ "-"
+ ++ integer_to_list(B)),
+ ?line ?t:start_node(Name, slave, [{args, Opts++" -pa "++Pa}]).
+
+stop_node(Node) ->
+ ?t:stop_node(Node).
diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0
index b3507bdba7..f79e3ff634 100644
--- a/erts/emulator/valgrind/suppress.patched.3.6.0
+++ b/erts/emulator/valgrind/suppress.patched.3.6.0
@@ -273,6 +273,11 @@ obj:*/ssleay.*
fun:AES_cbc_encrypt
...
}
+{
+ crypto RC4 can do harmless word aligned read past end of input
+ Memcheck:Addr8
+ fun:RC4
+}
{
erts_bits_init_state; Why is this needed?
diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard
index a4da31a61d..b3c77119fb 100644
--- a/erts/emulator/valgrind/suppress.standard
+++ b/erts/emulator/valgrind/suppress.standard
@@ -260,6 +260,11 @@ obj:*/ssleay.*
fun:AES_cbc_encrypt
...
}
+{
+ crypto RC4 can do harmless word aligned read past end of input
+ Memcheck:Addr8
+ fun:RC4
+}
{
Prebuilt constant terms in os_info_init (PossiblyLost)