Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 65
-rw-r--r--  erts/emulator/beam/atom.c | 10
-rw-r--r--  erts/emulator/beam/atom.names | 1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 48
-rw-r--r--  erts/emulator/beam/beam_bp.h | 2
-rw-r--r--  erts/emulator/beam/beam_debug.c | 2
-rw-r--r--  erts/emulator/beam/beam_emu.c | 30
-rw-r--r--  erts/emulator/beam/beam_load.c | 5
-rw-r--r--  erts/emulator/beam/bif.c | 4
-rw-r--r--  erts/emulator/beam/big.c | 96
-rw-r--r--  erts/emulator/beam/big.h | 4
-rw-r--r--  erts/emulator/beam/binary.c | 22
-rw-r--r--  erts/emulator/beam/break.c | 32
-rw-r--r--  erts/emulator/beam/copy.c | 110
-rw-r--r--  erts/emulator/beam/decl.h | 55
-rw-r--r--  erts/emulator/beam/dist.c | 26
-rw-r--r--  erts/emulator/beam/elib_malloc.c | 2334
-rw-r--r--  erts/emulator/beam/elib_stat.h | 45
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 210
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 8
-rw-r--r--  erts/emulator/beam/erl_async.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 9
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 177
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 2
-rw-r--r--  erts/emulator/beam/erl_bits.c | 12
-rw-r--r--  erts/emulator/beam/erl_db.c | 319
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 24
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 31
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 32
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 5
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 78
-rw-r--r--  erts/emulator/beam/erl_fun.c | 22
-rw-r--r--  erts/emulator/beam/erl_fun.h | 4
-rw-r--r--  erts/emulator/beam/erl_gc.c | 502
-rw-r--r--  erts/emulator/beam/erl_init.c | 116
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 110
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 18
-rw-r--r--  erts/emulator/beam/erl_message.c | 153
-rw-r--r--  erts/emulator/beam/erl_message.h | 25
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 46
-rw-r--r--  erts/emulator/beam/erl_nif.h | 13
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 21
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 101
-rw-r--r--  erts/emulator/beam/erl_obsolete.c | 186
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 7
-rw-r--r--  erts/emulator/beam/erl_process.c | 1911
-rw-r--r--  erts/emulator/beam/erl_process.h | 100
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 350
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 46
-rw-r--r--  erts/emulator/beam/erl_smp.h | 222
-rw-r--r--  erts/emulator/beam/erl_term.h | 15
-rw-r--r--  erts/emulator/beam/erl_threads.h | 509
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 20
-rw-r--r--  erts/emulator/beam/export.c | 8
-rw-r--r--  erts/emulator/beam/external.c | 150
-rw-r--r--  erts/emulator/beam/external.h | 2
-rw-r--r--  erts/emulator/beam/global.h | 30
-rw-r--r--  erts/emulator/beam/io.c | 30
-rw-r--r--  erts/emulator/beam/packet_parser.c | 2
-rw-r--r--  erts/emulator/beam/register.c | 7
-rw-r--r--  erts/emulator/beam/sys.h | 59
-rw-r--r--  erts/emulator/beam/utils.c | 36
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 2
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 10
-rw-r--r--  erts/emulator/drivers/unix/mem_drv.c | 145
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 6
-rw-r--r--  erts/emulator/drivers/win32/mem_drv.c | 141
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 5
-rw-r--r--  erts/emulator/hipe/hipe_bif1.c | 49
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 4
-rw-r--r--  erts/emulator/obsolete/driver.h | 263
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 39
-rw-r--r--  erts/emulator/sys/unix/sys.c | 26
-rw-r--r--  erts/emulator/sys/unix/sys_float.c | 17
-rw-r--r--  erts/emulator/sys/win32/sys.c | 34
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/decode_packet_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/float_SUITE.erl | 29
-rw-r--r--  erts/emulator/test/hash_SUITE.erl | 8
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 168
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 177
-rw-r--r--  erts/emulator/test/obsolete_SUITE.erl | 123
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/Makefile.src | 33
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/erl_threads.c | 302
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/testcase_driver.c | 262
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/testcase_driver.h | 57
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 361
-rw-r--r--  erts/emulator/test/send_term_SUITE.erl | 67
-rw-r--r--  erts/emulator/test/send_term_SUITE_data/send_term_drv.c | 218
-rw-r--r--  erts/emulator/test/time_SUITE.erl | 37
93 files changed, 4708 insertions, 6509 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index eca6121a1e..ed0d1b3fa6 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -199,6 +199,14 @@ MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+ifeq ($(TARGET),win32)
+LIB_PREFIX=
+LIB_SUFFIX=.lib
+else
+LIB_PREFIX=lib
+LIB_SUFFIX=.a
+endif
+
OMIT_OMIT_FP=no
ifeq (@EMU_LOCK_CHECKING@,yes)
@@ -279,16 +287,11 @@ endif
ifeq ($(TARGET),win32)
LIBS += -L$(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE) -lepcre
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/epcre.lib
else
-LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a
-DEPLIBS += \
- $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/liberts_internal.a
-# rem liberts_internal.a
+LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
-ELIB_FLAGS = -DENABLE_ELIB_MALLOC -DELIB_ALLOC_IS_CLIB -DELIB_HEAP_SBRK
+DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
PERFCTR_PATH=@PERFCTR_PATH@
USE_PERFCTR=@USE_PERFCTR@
@@ -305,7 +308,19 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER),$(ORG_THR_LIBS))
+ifneq ($(strip $(THR_LIB_NAME)),)
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
+ $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
+else
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
+endif
+
+THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
+ $(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
+
+LIBS += $(THR_LIBS)
+
+ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
ERTS_INTERNAL_LIB=erts_internal
@@ -317,7 +332,9 @@ ERTS_INTERNAL_LIB=erts_internal
endif
endif
-LIBS += $(THR_LIBS) -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+
+endif # erts_internal_r
LIBS += @LIBRT@
@@ -441,8 +458,6 @@ release_spec: all
ifeq ($(ERLANG_OSTYPE), unix)
$(INSTALL_PROGRAM) $(BINDIR)/$(CS_EXECUTABLE) $(RELSYSDIR)/bin
endif
- $(INSTALL_DIR) $(RELEASE_PATH)/usr/include/obsolete
- $(INSTALL_DATA) obsolete/driver.h $(RELEASE_PATH)/usr/include/obsolete
endif
endif
@@ -636,9 +651,6 @@ $(BINDIR)/$(CS_EXECUTABLE): $(CS_SRC)
$(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \
$(CS_CFLAGS) $(COMMON_INCLUDES) $(CS_SRC) $(CS_LIBS)
-$(OBJDIR)/%.elib.o: beam/%.c
- $(CC) $(ELIB_FLAGS) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/%.kp.o: sys/common/%.c
$(CC) -DERTS_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
@@ -647,9 +659,6 @@ $(OBJDIR)/%.nkp.o: sys/common/%.c
ifeq ($(GCC),yes)
-$(OBJDIR)/erl_obsolete.o: beam/erl_obsolete.c
- $(CC) $(subst -Wstrict-prototypes, , $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS))) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/erl_goodfit_alloc.o: beam/erl_goodfit_alloc.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CFLAGS)) $(INCLUDES) -c $< -o $@
endif
@@ -724,7 +733,7 @@ RUN_OBJS = \
$(OBJDIR)/erl_fun.o $(OBJDIR)/erl_bif_port.o \
$(OBJDIR)/erl_term.o $(OBJDIR)/erl_node_tables.o \
$(OBJDIR)/erl_monitors.o $(OBJDIR)/erl_process_dump.o \
- $(OBJDIR)/erl_obsolete.o $(OBJDIR)/erl_bif_timer.o \
+ $(OBJDIR)/erl_bif_timer.o \
$(OBJDIR)/erl_drv_thread.o $(OBJDIR)/erl_bif_chksum.o \
$(OBJDIR)/erl_bif_re.o $(OBJDIR)/erl_unicode.o \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
@@ -748,15 +757,13 @@ OS_OBJS = \
$(OBJDIR)/sys_time.o \
$(OBJDIR)/sys_interrupt.o \
$(OBJDIR)/sys_env.o \
- $(OBJDIR)/dosmap.o \
- $(OBJDIR)/elib_malloc.o
+ $(OBJDIR)/dosmap.o
else
OS_OBJS = \
$(OBJDIR)/sys.o \
$(OBJDIR)/driver_tab.o \
$(OBJDIR)/unix_efile.o \
$(OBJDIR)/gzio.o \
- $(OBJDIR)/elib_malloc.o \
$(OBJDIR)/elib_memmove.o
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
@@ -820,16 +827,6 @@ BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
OBJS = $(BASE_OBJS) $(DRV_OBJS)
-ELIB_C_FILES = beam/elib_malloc.c \
- beam/elib_memmove.c \
- beam/erl_bif_info.c \
- beam/utils.c \
- beam/erl_alloc.c
-
-MOD_OBJS_ELIB = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(ELIB_C_FILES)))
-OBJS_ELIB = $(patsubst %.o,%.elib.o,$(MOD_OBJS_ELIB)) \
- $(filter-out $(MOD_OBJS_ELIB),$(OBJS))
-
########################################
# HiPE section
@@ -921,10 +918,6 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \
$(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(LIBS)
-$(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB): $(INIT_OBJS) $(OBJS_ELIB) $(DEPLIBS)
- $(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB) \
- $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS_ELIB) $(LIBS)
-
endif
#
@@ -1019,7 +1012,7 @@ depend:
$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \
| $(SED_DEPEND) >> $(TARGET)/depend.mk
ifneq ($(TARGET),win32)
- $(DEP_CC) $(DEP_FLAGS) $(ELIB_FLAGS) $(ELIB_C_FILES) \
+ $(DEP_CC) $(DEP_FLAGS) $(ELIB_C_FILES) \
| $(SED_ELIB_DEPEND) >> $(TARGET)/depend.mk
endif
ifdef HIPE_ENABLED
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index e2a79d6e4f..6b3c106a97 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -41,8 +41,7 @@ static erts_smp_rwmtx_t atom_table_lock;
#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
-#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
- "atom_tab")
+
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
@@ -304,12 +303,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_smp_atomic_init(&atom_put_ops, 0);
#endif
- atom_init_lock();
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
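
The atom.c change above swaps the plain rwlock initializer for the option-based one so that the atom table lock can be tuned for read-mostly access. A minimal sketch of the resulting initialization, using only identifiers that appear in the hunk (the surrounding function is elided):

    /* Sketch: configure a long-lived, read-optimized rwlock for the atom
     * table.  All names are taken from the hunk above. */
    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
    rwmtx_opt.type  = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;  /* lookups dominate writes */
    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;           /* lock exists for the VM lifetime */
    erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");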
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 28f69b9460..0815cdbc7f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -432,6 +432,7 @@ atom raw
atom re
atom re_pattern
atom re_run_trap
+atom read_concurrency
atom ready_input
atom ready_output
atom ready_async
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 596ad9a010..6ae9736141 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -337,7 +337,6 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
ep->code[0] == BIF_ARG_1 &&
ep->code[4] != 0) {
ep->address = (void *) ep->code[4];
- ep->code[3] = 0;
ep->code[4] = 0;
}
}
@@ -402,7 +401,7 @@ check_process_code(Process* rp, Module* modp)
BeamInstr* end;
Eterm* sp;
#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp;
+ struct erl_off_heap_header* oh;
int done_gc = 0;
#endif
@@ -470,27 +469,30 @@ check_process_code(Process* rp, Module* modp)
#ifndef HYBRID /* FIND ME! */
rescan:
- for (funp = MSO(rp).funs; funp; funp = funp->next) {
- BeamInstr* fun_code;
-
- fun_code = funp->fe->address;
-
- if (INSIDE((BeamInstr *) funp->fe->address)) {
- if (done_gc) {
- return am_true;
- } else {
- /*
- * Try to get rid of this fun by garbage collecting.
- * Clear both fvalue and ftrace to make sure they
- * don't hold any funs.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- goto rescan;
+ for (oh = MSO(rp).first; oh; oh = oh->next) {
+ if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
+ ErlFunThing* funp = (ErlFunThing*) oh;
+ BeamInstr* fun_code;
+
+ fun_code = funp->fe->address;
+
+ if (INSIDE((BeamInstr *) funp->fe->address)) {
+ if (done_gc) {
+ return am_true;
+ } else {
+ /*
+ * Try to get rid of this fun by garbage collecting.
+ * Clear both fvalue and ftrace to make sure they
+ * don't hold any funs.
+ */
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ done_gc = 1;
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ goto rescan;
+ }
}
}
}
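
Several files in this series replace the three per-type off-heap lists (mso, funs, externals) with one list threaded through a common header. The hunks rely on that header exposing at least a thing_word and a next pointer; the loop below is a condensed sketch of the traversal pattern used in check_process_code() above, not a substitute for the real declaration in erl_message.h:

    /* Sketch: walk the unified off-heap list of a process and dispatch on
     * the subtag carried in thing_word.  Identifiers as in the hunk above. */
    struct erl_off_heap_header* oh;
    for (oh = MSO(rp).first; oh; oh = oh->next) {
        if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
            ErlFunThing* funp = (ErlFunThing*) oh;
            /* inspect funp->fe->address, as the rescan loop does above */
        }
    }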
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index b5d5b3c203..ebc171078d 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -123,7 +123,7 @@ typedef struct {
Uint ms;
Uint s;
Uint us;
- Uint *pc;
+ BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
extern erts_smp_spinlock_t erts_bp_lock;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 23b267d5cd..b0bf14b94f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -125,7 +125,7 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
BIF_ERROR(p, BADARG);
}
-#if 0 /* XXX:PaN - not used */
+#if 0 /* Kept for conveninence when hard debugging. */
void debug_dump_code(BeamInstr *I, int num)
{
BeamInstr *code_ptr = I;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index c0680086aa..8a0e12dd4f 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3385,12 +3385,12 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = num_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(c_p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
new_binary = make_binary(pb);
goto do_bits_sub_bin;
}
@@ -3486,13 +3486,13 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = tmp_arg1;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(c_p).overhead += tmp_arg1 / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), tmp_arg1 / sizeof(Eterm));
StoreBifResult(2, make_binary(pb));
}
@@ -4373,7 +4373,7 @@ apply_bif_or_nif_epilogue:
ASSERT(is_CP((BeamInstr)(ep->code)));
ASSERT(is_internal_pid(c_p->tracer_proc) ||
is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp); /* XXX:PaN - code in lower range on halfword */
+ E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
E[1] = am_true; /* Process tracer */
E[0] = make_cp(ep->code);
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
@@ -4463,7 +4463,7 @@ apply_bif_or_nif_epilogue:
E -= 2;
E[0] = make_cp(I);
E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = (BeamInstr *) make_cp(beam_return_time_trace);
+ c_p->cp = beam_return_time_trace;
}
}
@@ -4493,20 +4493,20 @@ apply_bif_or_nif_epilogue:
BeamInstr real_I;
Uint32 flags;
Eterm tracer_pid;
- BeamInstr *cpp;
+ Uint* cpp;
int return_to_trace = 0, need = 0;
flags = 0;
SWAPOUT;
reg[0] = r(0);
if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = (BeamInstr*)&E[2];
+ cpp = &E[2];
} else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
- cpp = (BeamInstr*)&E[0];
+ cpp = &E[0];
} else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
return_to_trace = !0;
- cpp = (BeamInstr*)&E[0];
+ cpp = &E[0];
} else {
cpp = NULL;
}
@@ -4889,7 +4889,7 @@ apply_bif_or_nif_epilogue:
neg_o_reds = -c_p->def_arg_reg[4];
FCALLS = c_p->fcalls;
SWAPIN;
- switch( c_p->def_arg_reg[3] ) { /* XXX:PaN - Halfword wont work with hipe yet... */
+ switch( c_p->def_arg_reg[3] ) { /* Halfword wont work with hipe yet! */
case HIPE_MODE_SWITCH_RES_RETURN:
ASSERT(is_value(reg[0]));
MoveReturn(reg[0], r(0));
@@ -6323,8 +6323,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
erts_refc_inc(&fe->refc, 2);
funp->thing_word = HEADER_FUN;
#ifndef HYBRID /* FIND ME! */
- funp->next = MSO(p).funs;
- MSO(p).funs = funp;
+ funp->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) funp;
#endif
funp->fe = fe;
funp->num_free = num_free;
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index fb5e964937..df5602b040 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -1568,7 +1568,8 @@ load_code(LoaderState* stp)
case 0: /* Floating point number */
{
Eterm* hp;
-#if !defined(ARCH_64) || HALFWORD_HEAP /* XXX:PaN - Should use ARCH_64 variant instead */
+/* XXX:PaN - Halfword should use ARCH_64 variant instead */
+#if !defined(ARCH_64) || HALFWORD_HEAP
Uint high, low;
# endif
last_op->a[arg].val = new_literal(stp, &hp,
@@ -1935,7 +1936,7 @@ load_code(LoaderState* stp)
}
code[ci++] = (BeamInstr) stp->import[i].bf;
break;
- case 'P': /* Byte offset into tuple */ /* XXX:PaN - * sizeof(Eterm or Eterm *) ? */
+ case 'P': /* Byte offset into tuple */
VerifyTag(stp, tag, TAG_u);
tmp = tmp_op->a[arg].val;
code[ci++] = (BeamInstr) ((tmp_op->a[arg].val+1) * sizeof(Eterm));
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 506bf383ca..6e9755ad48 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3611,11 +3611,11 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_pid_header(1);
- etp->next = MSO(BIF_P).externals;
+ etp->next = MSO(BIF_P).first;
etp->node = enp;
etp->data.ui[0] = make_pid_data(c, b);
- MSO(BIF_P).externals = etp;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
erts_deref_dist_entry(dep);
BIF_RET(make_external_pid(etp));
}
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 90d3a0304a..ff15d834ab 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1509,14 +1509,14 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
{
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
@@ -1539,7 +1539,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
@@ -1549,7 +1549,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
else
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
@@ -1881,6 +1881,9 @@ term_to_Uint(Eterm term, Uint *up)
int
term_to_UWord(Eterm term, UWord *up)
{
+#if SIZEOF_VOID_P == ERTS_SIZEOF_ETERM
+ return term_to_Uint(term,up);
+#else
if (is_small(term)) {
Sint i = signed_val(term);
if (i < 0) {
@@ -1903,7 +1906,47 @@ term_to_UWord(Eterm term, UWord *up)
return 0;
}
while (xl-- > 0) {
- uval |= ((Uint)(*xr++)) << n;
+ uval |= ((UWord)(*xr++)) << n;
+ n += D_EXP;
+ }
+ *up = uval;
+ return 1;
+ } else {
+ *up = BADARG;
+ return 0;
+ }
+#endif
+}
+
+int
+term_to_Uint64(Eterm term, Uint64 *up)
+{
+#if SIZEOF_VOID_P == 8
+ return term_to_UWord(term,up);
+#else
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i < 0) {
+ *up = BADARG;
+ return 0;
+ }
+ *up = (Uint64) i;
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ Uint64 uval = 0;
+ int n = 0;
+
+ if (big_sign(term)) {
+ *up = BADARG;
+ return 0;
+ } else if (xl*D_EXP > sizeof(Uint64)*8) {
+ *up = SYSTEM_LIMIT;
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint64)(*xr++)) << n;
n += D_EXP;
}
*up = uval;
@@ -1912,8 +1955,10 @@ term_to_UWord(Eterm term, UWord *up)
*up = BADARG;
return 0;
}
+#endif
}
+
int term_to_Sint(Eterm term, Sint *sp)
{
if (is_small(term)) {
@@ -1948,6 +1993,47 @@ int term_to_Sint(Eterm term, Sint *sp)
}
}
+#if HAVE_INT64
+int term_to_Sint64(Eterm term, Sint64 *sp)
+{
+#if ERTS_SIZEOF_ETERM == 8
+ return term_to_Sint(term, sp);
+#else
+ if (is_small(term)) {
+ *sp = signed_val(term);
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ int sign = big_sign(term);
+ Uint64 uval = 0;
+ int n = 0;
+
+ if (xl*D_EXP > sizeof(Uint64)*8) {
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint64)(*xr++)) << n;
+ n += D_EXP;
+ }
+ if (sign) {
+ uval = -uval;
+ if ((Sint64)uval > 0)
+ return 0;
+ } else {
+ if ((Sint64)uval < 0)
+ return 0;
+ }
+ *sp = uval;
+ return 1;
+ } else {
+ return 0;
+ }
+#endif
+}
+#endif /* HAVE_INT64 */
+
+
/*
** Add and subtract
*/
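
big.c gains 64-bit extraction helpers next to the existing term_to_Uint()/term_to_UWord(). On failure term_to_Uint64() stores BADARG (negative or non-integer) or SYSTEM_LIMIT (too large) in *up and returns 0, while term_to_Sint64() simply returns 0. A hypothetical caller (the helper name below is illustrative, not part of the patch) would use it like this:

    /* Sketch: decode an Erlang integer term into an unsigned 64-bit value. */
    static int get_millis64(Eterm arg, Uint64 *msp)   /* hypothetical helper */
    {
        Uint64 ms;
        if (!term_to_Uint64(arg, &ms))
            return 0;   /* ms now holds BADARG or SYSTEM_LIMIT; *msp untouched */
        *msp = ms;
        return 1;
    }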
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 56f3be372a..25466cd3c2 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -152,6 +152,10 @@ byte* big_to_bytes(Eterm, byte*);
int term_to_Uint(Eterm, Uint*);
int term_to_UWord(Eterm, UWord*);
int term_to_Sint(Eterm, Sint*);
+#if HAVE_INT64
+int term_to_Uint64(Eterm, Uint64*);
+int term_to_Sint64(Eterm, Sint64*);
+#endif
Uint32 big_to_uint32(Eterm b);
int term_equals_2pow32(Eterm);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index c68392fad4..8ee8fbcb29 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -88,8 +88,8 @@ new_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -97,7 +97,7 @@ new_binary(Process *p, byte *buf, int len)
/*
* Miscellanous updates. Return the tagged binary.
*/
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
}
@@ -127,8 +127,8 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -136,7 +136,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
/*
* Miscellanous updates. Return the tagged binary.
*/
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
}
@@ -487,16 +487,6 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-void
-erts_cleanup_mso(ProcBin* pb)
-{
- while (pb != NULL) {
- ProcBin* next = pb->next;
- if (erts_refc_dectest(&pb->val->refc, 0) == 0)
- erts_bin_free(pb->val);
- pb = next;
- }
-}
/*
* Local functions.
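
new_binary() and erts_new_mso_binary() now link the ProcBin onto the unified off-heap list and account for its size through OH_OVERHEAD instead of writing the removed mso/overhead fields directly. A condensed sketch of the repeated pattern (the helper itself is illustrative, not part of the patch):

    /* Sketch: attach a freshly built ProcBin to a process' off-heap list
     * and record its size for the GC accounting, as in the hunks above. */
    static void link_proc_bin(Process *p, ProcBin *pb)
    {
        pb->next = MSO(p).first;
        MSO(p).first = (struct erl_off_heap_header*) pb;
        OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
    }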
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 857cb177c8..f339e19761 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -611,29 +611,29 @@ static void
bin_check(void)
{
Process *rp;
- ProcBin *bp;
- int i, printed;
+ struct erl_off_heap_header* hdr;
+ int i, printed = 0;
for (i=0; i < erts_max_processes; i++) {
if ((rp = process_tab[i]) == NULL)
continue;
- if (!(bp = rp->off_heap.mso))
- continue;
- printed = 0;
- while (bp) {
- if (printed == 0) {
- erts_printf("Process %T holding binary data \n", rp->id);
- printed = 1;
+ for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
+ if (hdr->thing_word == HEADER_PROC_BIN) {
+ ProcBin *bp = (ProcBin*) hdr;
+ if (!printed) {
+ erts_printf("Process %T holding binary data \n", rp->id);
+ printed = 1;
+ }
+ erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
+ (unsigned long)bp->val,
+ (long)bp->val->orig_size,
+ erts_smp_atomic_read(&bp->val->refc));
}
- erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
- (unsigned long)bp->val,
- (long)bp->val->orig_size,
- erts_smp_atomic_read(&bp->val->refc));
-
- bp = bp->next;
}
- if (printed == 1)
+ if (printed) {
erts_printf("--------------------------------------\n");
+ printed = 0;
+ }
}
/* db_bin_check() has to be rewritten for the AVL trees... */
/*db_bin_check();*/
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 521a1b1788..8bee47232e 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -314,10 +314,10 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
+ pb->next = off_heap->first;
pb->flags = 0;
- off_heap->mso = pb;
- off_heap->overhead += pb->size / sizeof(Eterm);
+ off_heap->first = (struct erl_off_heap_header*) pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
break;
case SUB_BINARY_SUBTAG:
@@ -363,10 +363,10 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
to->val = from->val;
erts_refc_inc(&to->val->refc, 2);
to->bytes = from->bytes + offset;
- to->next = off_heap->mso;
+ to->next = off_heap->first;
to->flags = 0;
- off_heap->mso = to;
- off_heap->overhead += to->size / sizeof(Eterm);
+ off_heap->first = (struct erl_off_heap_header*) to;
+ OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
}
*argp = make_binary(hbot);
if (extra_bytes != 0) {
@@ -396,8 +396,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
#ifndef HYBRID /* FIND ME! */
funp = (ErlFunThing *) tp;
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*) funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
*argp = make_fun(tp);
@@ -416,8 +416,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*htop++ = *objp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
+ etp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)etp;
erts_refc_inc(&etp->node->refc, 2);
*argp = make_external(tp);
@@ -650,9 +650,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
erts_refc_inc(&pb->val->refc, 2);
- pb->next = erts_global_offheap.mso;
- erts_global_offheap.mso = pb;
- erts_global_offheap.overhead += pb->size / sizeof(Eterm);
+ pb->next = erts_global_offheap.first;
+ erts_global_offheap.first = pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
continue;
}
@@ -672,9 +672,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
while (i--) {
*hp++ = *objp++;
}
-#ifndef HYBRID // FIND ME!
- funp->next = erts_global_offheap.funs;
- erts_global_offheap.funs = funp;
+#ifndef HYBRID /* FIND ME! */
+ funp->next = erts_global_offheap.first;
+ erts_global_offheap.first = funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
for (i = k; i < j; i++) {
@@ -718,8 +718,8 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
- etp->next = erts_global_offheap.externals;
- erts_global_offheap.externals = etp;
+ etp->next = erts_global_offheap.first;
+ erts_global_offheap.first = etp;
erts_refc_inc(&etp->node->refc, 2);
continue;
}
@@ -775,9 +775,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
to_bin->size = real_size;
to_bin->val = from_bin->val;
to_bin->bytes = from_bin->bytes + sub_offset;
- to_bin->next = erts_global_offheap.mso;
- erts_global_offheap.mso = to_bin;
- erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
+ to_bin->next = erts_global_offheap.first;
+ erts_global_offheap.first = to_bin;
+ OH_OVERHEAD(&erts_global_offheap, to_bin->size / sizeof(Eterm));
res_binary=make_binary(to_bin);
hp += PROC_BIN_SIZE;
}
@@ -910,57 +910,43 @@ copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
break;
case REFC_BINARY_SUBTAG:
{
- ProcBin* pb = (ProcBin *) (hp-1);
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
+ ProcBin* pb = (ProcBin *) (tp-1);
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
- off_heap->mso = pb;
- off_heap->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
- break;
+ goto off_heap_common;
+
case FUN_SUBTAG:
{
-#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp = (ErlFunThing *) (hp-1);
-#endif
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
-#ifndef HYBRID /* FIND ME! */
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ ErlFunThing* funp = (ErlFunThing *) (tp-1);
erts_refc_inc(&funp->fe->refc, 2);
-#endif
}
- break;
+ goto off_heap_common;
+
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing* etp = (ExternalThing *) (hp-1);
+ ExternalThing* etp = (ExternalThing *) (tp-1);
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_common:
+ {
+ struct erl_off_heap_header* ohh = (struct erl_off_heap_header*)(hp-1);
int tari = thing_arityval(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohh->next = off_heap->first;
+ off_heap->first = ohh;
}
break;
default:
{
int tari = header_arity(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
@@ -991,7 +977,7 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
for (bp=first; bp!=NULL; bp=bp->next) {
move_one_frag(hpp, bp->mem, bp->used_size, off_heap);
- off_heap->overhead += bp->off_heap.overhead;
+ OH_OVERHEAD(off_heap, bp->off_heap.overhead);
}
hp_end = *hpp;
for (hp=hp_start; hp<hp_end; ++hp) {
@@ -1029,12 +1015,6 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
static void
move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
{
- union {
- Uint *up;
- ProcBin *pbp;
- ErlFunThing *efp;
- ExternalThing *etp;
- } ohe;
Eterm* ptr = src;
Eterm* end = ptr + src_sz;
Eterm dummy_ref;
@@ -1046,23 +1026,17 @@ move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
val = *ptr;
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
+ struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- ohe.up = hp;
MOVE_BOXED(ptr, val, hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
case REFC_BINARY_SUBTAG:
- ohe.pbp->next = off_heap->mso;
- off_heap->mso = ohe.pbp;
- break;
case FUN_SUBTAG:
- ohe.efp->next = off_heap->funs;
- off_heap->funs = ohe.efp;
- break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- ohe.etp->next = off_heap->externals;
- off_heap->externals = ohe.etp;
+ hdr->next = off_heap->first;
+ off_heap->first = hdr;
break;
}
}
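
In copy_shallow() the three off-heap cases now bump their reference counts up front (reading the source object through tp-1) and then fall through to one shared block that copies the words and links the copied header; move_one_frag() drops its pointer union for the same reason. A sketch of the shared linking step (the helper name is illustrative):

    /* Sketch: link a just-copied or just-moved boxed object onto the
     * destination off-heap list.  Works for REFC_BINARY, FUN and the
     * EXTERNAL_* subtags alike, since they all begin with the common header. */
    static void link_copied_header(ErlOffHeap* off_heap, Eterm* dst_hdr)
    {
        struct erl_off_heap_header* hdr = (struct erl_off_heap_header*) dst_hdr;
        hdr->next = off_heap->first;
        off_heap->first = hdr;
    }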
diff --git a/erts/emulator/beam/decl.h b/erts/emulator/beam/decl.h
deleted file mode 100644
index da1be29d53..0000000000
--- a/erts/emulator/beam/decl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef __DECL_H__
-#define __DECL_H__
-
-#if defined(__STDC__) || defined(_MSC_VER)
-#define EXTERN_FUNCTION(t, f, x) extern t f x
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#elif defined(__cplusplus)
-#define EXTERN_FUNCTION(f, x) extern "C" { f x }
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#else
-#define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-#define FUNCTION(t, f, x) t f (/*x*/)
-#define _DOTS_
-#define _VOID_
-#endif
-
-/*
-** Example of declarations
-**
-** EXTERN_FUNCTION(void, foo, (int, int, char));
-** FUNCTION(void, bar, (int, char));
-**
-** struct funcs {
-** FUNCTION(int*, (*f1), (int, int));
-** FUNCTION(void, (*f2), (int, char));
-** FUNCTION(void, (*f3), (_VOID_));
-** FUNCTION(int, (*f4), (char*, _DOTS_));
-** };
-**
-*/
-
-#endif
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index d465017949..16b6aeac3f 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -922,12 +922,8 @@ int erts_net_message(Port *prt,
UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
/* Thanks to Luke Gorrie */
- off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- off_heap.funs = NULL;
-#endif
+ off_heap.first = NULL;
off_heap.overhead = 0;
- off_heap.externals = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -1434,16 +1430,8 @@ int erts_net_message(Port *prt,
}
}
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
@@ -1453,16 +1441,8 @@ int erts_net_message(Port *prt,
return 0;
data_error:
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
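
erts_net_message() now initializes its temporary off-heap record with a single first pointer and releases it on both exit paths with one erts_cleanup_offheap() call, replacing the separate erts_cleanup_mso()/erts_cleanup_funs()/erts_cleanup_externals() walks. A sketch of the resulting lifecycle, showing only the calls visible in the hunks (decoding itself is elided):

    /* Sketch: local off-heap record used while decoding a distribution
     * control message; one list and one cleanup call for every object kind. */
    ErlOffHeap off_heap;
    off_heap.first = NULL;       /* replaces the mso/funs/externals fields */
    off_heap.overhead = 0;
    /* ... decoding links ProcBins, funs and external things onto off_heap.first ... */
    erts_cleanup_offheap(&off_heap);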
diff --git a/erts/emulator/beam/elib_malloc.c b/erts/emulator/beam/elib_malloc.c
deleted file mode 100644
index b18c48d8d6..0000000000
--- a/erts/emulator/beam/elib_malloc.c
+++ /dev/null
@@ -1,2334 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Description: Faster malloc().
-*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-
-#ifdef ENABLE_ELIB_MALLOC
-
-#undef THREAD_SAFE_ELIB_MALLOC
-#ifdef USE_THREADS
-#define THREAD_SAFE_ELIB_MALLOC 1
-#else
-#define THREAD_SAFE_ELIB_MALLOC 0
-#endif
-
-#include "erl_driver.h"
-#include "erl_threads.h"
-#include "elib_stat.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-/* To avoid clobbering of names becaure of reclaim on VxWorks,
- we undefine all possible malloc, calloc etc. */
-#undef malloc
-#undef calloc
-#undef free
-#undef realloc
-
-#define ELIB_INLINE /* inline all possible functions */
-
-#ifndef ELIB_ALIGN
-#define ELIB_ALIGN sizeof(double)
-#endif
-
-#ifndef ELIB_HEAP_SIZE
-#define ELIB_HEAP_SIZE (64*1024) /* Default 64K */
-#endif
-
-#ifndef ELIB_HEAP_INCREAMENT
-#define ELIB_HEAP_INCREAMENT (32*1024) /* Default 32K */
-#endif
-
-#ifndef ELIB_FAILURE
-#define ELIB_FAILURE abort()
-#endif
-
-#undef ASSERT
-#ifdef DEBUG
-#define ASSERT(B) \
- ((void) ((B) ? 1 : (fprintf(stderr, "%s:%d: Assertion failed: %s\n", \
- __FILE__, __LINE__, #B), abort(), 0)))
-#else
-#define ASSERT(B) ((void) 1)
-#endif
-
-#ifndef USE_RECURSIVE_MALLOC_MUTEX
-#define USE_RECURSIVE_MALLOC_MUTEX 0
-#endif
-
-#if USE_RECURSIVE_MALLOC_MUTEX
-static erts_mtx_t malloc_mutex = ERTS_REC_MTX_INITER;
-#else /* #if USE_RECURSIVE_MALLOC_MUTEX */
-static erts_mtx_t malloc_mutex = ERTS_MTX_INITER;
-#if THREAD_SAFE_ELIB_MALLOC
-static erts_cnd_t malloc_cond = ERTS_CND_INITER;
-#endif
-#endif /* #if USE_RECURSIVE_MALLOC_MUTEX */
-
-typedef unsigned long EWord; /* Assume 32-bit in this implementation */
-typedef unsigned short EHalfWord; /* Assume 16-bit in this implementation */
-typedef unsigned char EByte; /* Assume 8-bit byte */
-
-
-#define elib_printf fprintf
-#define elib_putc fputc
-
-
-#if defined(__STDC__) || defined(__WIN32__)
-#define CONCAT(x,y) x##y
-#else
-#define CONCAT(x,y) x/**/y
-#endif
-
-
-#ifdef ELIB_DEBUG
-#define ELIB_PREFIX(fun, args) CONCAT(elib__,fun) args
-#else
-#define ELIB_PREFIX(fun, args) CONCAT(elib_,fun) args
-#endif
-
-#if defined(__STDC__)
-void *ELIB_PREFIX(malloc, (size_t));
-void *ELIB_PREFIX(calloc, (size_t, size_t));
-void ELIB_PREFIX(cfree, (EWord *));
-void ELIB_PREFIX(free, (EWord *));
-void *ELIB_PREFIX(realloc, (EWord *, size_t));
-void* ELIB_PREFIX(memresize, (EWord *, int));
-void* ELIB_PREFIX(memalign, (int, int));
-void* ELIB_PREFIX(valloc, (int));
-void* ELIB_PREFIX(pvalloc, (int));
-int ELIB_PREFIX(memsize, (EWord *));
-/* Extern interfaces used by VxWorks */
-size_t elib_sizeof(void *);
-void elib_init(EWord *, EWord);
-void elib_force_init(EWord *, EWord);
-#endif
-
-#if defined(__STDC__)
-/* define prototypes for missing */
-void* memalign(size_t a, size_t s);
-void* pvalloc(size_t nb);
-void* memresize(void *p, int nb);
-int memsize(void *p);
-#endif
-
-/* bytes to pages */
-#define PAGES(x) (((x)+page_size-1) / page_size)
-#define PAGE_ALIGN(p) ((char*)((((EWord)(p))+page_size-1)&~(page_size-1)))
-
-/* bytes to words */
-#define WORDS(x) (((x)+sizeof(EWord)-1) / sizeof(EWord))
-
-/* Align an address */
-#define ALIGN(p) ((EWord*)((((EWord)(p)+ELIB_ALIGN-1)&~(ELIB_ALIGN-1))))
-
-/* Calculate the size needed to keep alignment */
-
-#define ALIGN_BSZ(nb) ((nb+sizeof(EWord)+ELIB_ALIGN-1) & ~(ELIB_ALIGN-1))
-
-#define ALIGN_WSZ(nb) WORDS(ALIGN_BSZ(nb))
-
-#define ALIGN_SIZE(nb) (ALIGN_WSZ(nb) - 1)
-
-
-/* PARAMETERS */
-
-#if defined(ELIB_HEAP_SBRK)
-
-#undef PAGE_SIZE
-
-/* Get the system page size (NEED MORE DEFINES HERE) */
-#ifdef _SC_PAGESIZE
-#define PAGE_SIZE sysconf(_SC_PAGESIZE)
-#elif defined(_MSC_VER)
-# ifdef _M_ALPHA
-# define PAGE_SIZE 0x2000
-# else
-# define PAGE_SIZE 0x1000
-# endif
-#else
-#define PAGE_SIZE getpagesize()
-#endif
-
-#define ELIB_EXPAND(need) expand_sbrk(need)
-static FUNCTION(int, expand_sbrk, (EWord));
-
-#elif defined(ELIB_HEAP_FIXED)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-static EWord fix_heap[WORDS(ELIB_HEAP_SIZE)];
-
-#elif defined(ELIB_HEAP_USER)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-
-#else
-
-#error "ELIB HEAP TYPE NOT SET"
-
-#endif
-
-
-#define STAT_ALLOCED_BLOCK(SZ) \
-do { \
- tot_allocated += (SZ); \
- if (max_allocated < tot_allocated) \
- max_allocated = tot_allocated; \
-} while (0)
-
-#define STAT_FREED_BLOCK(SZ) \
-do { \
- tot_allocated -= (SZ); \
-} while (0)
-
-static int max_allocated = 0;
-static int tot_allocated = 0;
-static EWord* eheap; /* Align heap start */
-static EWord* eheap_top; /* Point to end of heap */
-EWord page_size = 0; /* Set by elib_init */
-
-#if defined(ELIB_DEBUG) || defined(DEBUG)
-#define ALIGN_CHECK(a, p) \
- do { \
- if ((EWord)(p) & (a-1)) { \
- elib_printf(stderr, \
- "RUNTIME ERROR: bad alignment (0x%lx:%d:%d)\n", \
- (unsigned long) (p), (int) a, __LINE__); \
- ELIB_FAILURE; \
- } \
- } while(0)
-#define ELIB_ALIGN_CHECK(p) ALIGN_CHECK(ELIB_ALIGN, p)
-#else
-#define ALIGN_CHECK(a, p)
-#define ELIB_ALIGN_CHECK(p)
-#endif
-
-#define DYNAMIC 32
-
-/*
-** Free block layout
-** 1 1 30
-** +--------------------------+
-** |F|P| Size |
-** +--------------------------+
-**
-** Where F is the free bit
-** P is the free above bit
-** Size is messured in words and does not include the hdr word
-**
-** If block is on the free list the size is also stored last in the block.
-**
-*/
-typedef struct _free_block FreeBlock;
-struct _free_block {
- EWord hdr;
- Uint flags;
- FreeBlock* parent;
- FreeBlock* left;
- FreeBlock* right;
- EWord v[1];
-};
-
-typedef struct _allocated_block {
- EWord hdr;
- EWord v[5];
-} AllocatedBlock;
-
-
-/*
- * Interface to tree routines.
- */
-typedef Uint Block_t;
-
-static Block_t* get_free_block(Uint);
-static void link_free_block(Block_t *);
-static void unlink_free_block(Block_t *del);
-
-#define FREE_BIT 0x80000000
-#define FREE_ABOVE_BIT 0x40000000
-#define SIZE_MASK 0x3fffffff /* 2^30 words = 2^32 bytes */
-
-/* Work on both FreeBlock and AllocatedBlock */
-#define SIZEOF(p) ((p)->hdr & SIZE_MASK)
-#define IS_FREE(p) (((p)->hdr & FREE_BIT) != 0)
-#define IS_FREE_ABOVE(p) (((p)->hdr & FREE_ABOVE_BIT) != 0)
-
-/* Given that we have a free block above find its size */
-#define SIZEOF_ABOVE(p) *(((EWord*) (p)) - 1)
-
-#define MIN_BLOCK_SIZE (sizeof(FreeBlock)/sizeof(EWord))
-#define MIN_WORD_SIZE (MIN_BLOCK_SIZE-1)
-#define MIN_BYTE_SIZE (sizeof(FreeBlock)-sizeof(EWord))
-
-#define MIN_ALIGN_SIZE ALIGN_SIZE(MIN_BYTE_SIZE)
-
-
-static AllocatedBlock* heap_head = 0;
-static AllocatedBlock* heap_tail = 0;
-static EWord eheap_size = 0;
-
-static int heap_locked;
-
-static int elib_need_init = 1;
-#if THREAD_SAFE_ELIB_MALLOC
-static int elib_is_initing = 0;
-#endif
-
-typedef FreeBlock RBTree_t;
-
-static RBTree_t* root = NULL;
-
-
-static FUNCTION(void, deallocate, (AllocatedBlock*, int));
-
-/*
- * Unlink a free block
- */
-
-#define mark_allocated(p, szp) do { \
- (p)->hdr = ((p)->hdr & FREE_ABOVE_BIT) | (szp); \
- (p)->v[szp] &= ~FREE_ABOVE_BIT; \
- } while(0)
-
-#define mark_free(p, szp) do { \
- (p)->hdr = FREE_BIT | (szp); \
- ((FreeBlock *)p)->v[szp-sizeof(FreeBlock)/sizeof(EWord)+1] = (szp); \
- } while(0)
-
-#if 0
-/* Help macros to log2 */
-#define LOG_1(x) (((x) > 1) ? 1 : 0)
-#define LOG_2(x) (((x) > 3) ? 2+LOG_1((x) >> 2) : LOG_1(x))
-#define LOG_4(x) (((x) > 15) ? 4+LOG_2((x) >> 4) : LOG_2(x))
-#define LOG_8(x) (((x) > 255) ? 8+LOG_4((x)>>8) : LOG_4(x))
-#define LOG_16(x) (((x) > 65535) ? 16+LOG_8((x)>>16) : LOG_8(x))
-
-#define log2(x) LOG_16(x)
-#endif
-
-/*
- * Split a block to be allocated.
- * Mark block as ALLOCATED and clear
- * FREE_ABOVE_BIT on next block
- *
- * nw is SIZE aligned and szp is SIZE aligned + 1
- */
-static void
-split_block(FreeBlock* p, EWord nw, EWord szp)
-{
- EWord szq;
- FreeBlock* q;
-
- szq = szp - nw;
- /* Preserve FREE_ABOVE bit in p->hdr !!! */
-
- if (szq >= MIN_ALIGN_SIZE+1) {
- szq--;
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | nw;
-
- q = (FreeBlock*) (((EWord*) p) + nw + 1);
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- q = (FreeBlock*) (((EWord*) q) + szq + 1);
- q->hdr |= FREE_ABOVE_BIT;
- }
- else {
- mark_allocated((AllocatedBlock*)p, szp);
- }
-}
-
-/*
- * Find a free block
- */
-static FreeBlock*
-alloc_block(EWord nw)
-{
- for (;;) {
- FreeBlock* p = (FreeBlock *) get_free_block(nw);
-
- if (p != NULL) {
- return p;
- } else if (ELIB_EXPAND(nw+MIN_WORD_SIZE)) {
- return 0;
- }
- }
-}
-
-
-size_t elib_sizeof(void *p)
-{
- AllocatedBlock* pp;
-
- if (p != 0) {
- pp = (AllocatedBlock*) (((char *)p)-1);
- return SIZEOF(pp);
- }
- return 0;
-}
-
-static void locked_elib_init(EWord*, EWord);
-static void init_elib_malloc(EWord*, EWord);
-
-/*
-** Initialize the elib
-** The addr and sz is only used when compiled with EXPAND_ADDR
-*/
-/* Not static, this is used by VxWorks */
-void elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
- erts_mtx_lock(&malloc_mutex);
- locked_elib_init(addr, sz);
- erts_mtx_unlock(&malloc_mutex);
-}
-
-static void locked_elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- {
- static erts_tid_t initer_tid;
-
- if(elib_is_initing) {
-
- if(erts_equal_tids(initer_tid, erts_thr_self()))
- return;
-
- /* Wait until initializing thread is done with initialization */
-
- while(elib_need_init)
- erts_cnd_wait(&malloc_cond, &malloc_mutex);
-
- return;
- }
- else {
- initer_tid = erts_thr_self();
- elib_is_initing = 1;
- }
- }
-#else
- if(elib_is_initing)
- return;
- elib_is_initing = 1;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- /* Do the actual initialization of the malloc implementation */
- init_elib_malloc(addr, sz);
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_unlock(&malloc_mutex);
-#endif
-
- /* Recursive calls to malloc are allowed here... */
- erts_mtx_set_forksafe(&malloc_mutex);
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_lock(&malloc_mutex);
- elib_is_initing = 0;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- elib_need_init = 0;
-
-#if THREAD_SAFE_ELIB_MALLOC && !USE_RECURSIVE_MALLOC_MUTEX
- erts_cnd_broadcast(&malloc_cond);
-#endif
-
-}
-
-static void init_elib_malloc(EWord* addr, EWord sz)
-{
- int i;
- FreeBlock* freep;
- EWord tmp_sz;
-#ifdef ELIB_HEAP_SBRK
- char* top;
- EWord n;
-#endif
-
- max_allocated = 0;
- tot_allocated = 0;
- root = NULL;
-
- /* Get the page size (may involve system call!!!) */
- page_size = PAGE_SIZE;
-
-#if defined(ELIB_HEAP_SBRK)
- sz = PAGES(ELIB_HEAP_SIZE)*page_size;
-
- if ((top = (char*) sbrk(0)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(0)");
- ELIB_FAILURE;
- }
- n = PAGE_ALIGN(top) - top;
- if ((top = (char*) sbrk(n)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(n)");
- ELIB_FAILURE;
- }
- if ((eheap = (EWord*) sbrk(sz)) == (EWord*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(SIZE)");
- ELIB_FAILURE;
- }
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_FIXED)
- eheap = fix_heap;
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_USER)
- eheap = addr;
- sz = WORDS(sz);
-#else
- return -1;
-#endif
- eheap_size = 0;
-
- /* Make sure that the first word of the heap_head is aligned */
- addr = ALIGN(eheap+1);
- sz -= ((addr - 1) - eheap); /* Subtract unusable size */
- eheap_top = eheap = addr - 1; /* Set new aligned heap start */
-
- eheap_top[sz-1] = 0; /* Heap stop mark */
-
- addr = eheap;
- heap_head = (AllocatedBlock*) addr;
- heap_head->hdr = MIN_ALIGN_SIZE;
- for (i = 0; i < MIN_ALIGN_SIZE; i++)
- heap_head->v[i] = 0;
-
- addr += (MIN_ALIGN_SIZE+1);
- freep = (FreeBlock*) addr;
- tmp_sz = sz - (((MIN_ALIGN_SIZE+1) + MIN_BLOCK_SIZE) + 1 + 1);
- mark_free(freep, tmp_sz);
- link_free_block((Block_t *) freep);
-
- /* No need to align heap tail */
- heap_tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- heap_tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- heap_tail->v[0] = 0;
- heap_tail->v[1] = 0;
- heap_tail->v[2] = 0;
-
- eheap_top += sz;
- eheap_size += sz;
-
- heap_locked = 0;
-}
-
-#ifdef ELIB_HEAP_USER
-void elib_force_init(EWord* addr, EWord sz)
-{
- elib_need_init = 1;
- elib_init(addr,sz);
-}
-#endif
-
-#ifdef ELIB_HEAP_SBRK
-
-/*
-** need in number of words (should include head and tail words)
-*/
-static int expand_sbrk(EWord sz)
-{
- EWord* p;
- EWord bytes = sz * sizeof(EWord);
- EWord size;
- AllocatedBlock* tail;
-
- if (bytes < ELIB_HEAP_SIZE)
- size = PAGES(ELIB_HEAP_INCREAMENT)*page_size;
- else
- size = PAGES(bytes)*page_size;
-
- if ((p = (EWord*) sbrk(size)) == ((EWord*) -1))
- return -1;
-
- if (p != eheap_top) {
- elib_printf(stderr, "panic: sbrk moved\n");
- ELIB_FAILURE;
- }
-
- sz = WORDS(size);
-
- /* Set new endof heap marker and a new heap tail */
- eheap_top[sz-1] = 0;
-
- tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- tail->v[0] = 0;
- tail->v[1] = 0;
- tail->v[2] = 0;
-
- /* Patch old tail with new appended size */
- heap_tail->hdr = (heap_tail->hdr & FREE_ABOVE_BIT) |
- (MIN_WORD_SIZE+1+(sz-MIN_BLOCK_SIZE-1));
- deallocate(heap_tail, 0);
-
- heap_tail = tail;
-
- eheap_size += sz;
- eheap_top += sz;
-
- return 0;
-}
-
-#endif /* ELIB_HEAP_SBRK */
-
-
-/*
-** Scan heap and check for corrupted heap
-*/
-int elib_check_heap(void)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- if (heap_locked) {
- elib_printf(stderr, "heap is locked no info avaiable\n");
- return 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (p->v[sz-1] != sz) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- p = (AllocatedBlock*) (p->v + sz);
- if (!IS_FREE_ABOVE(p)) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- }
- else
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 1;
-}
-
-/*
-** Load the byte vector pointed to by v of length vsz
-** with a heap image
-** The scale is defined by vsz and the current heap size
-** free = 0, full = 255
-**
-**
-*/
-int elib_heap_map(EByte* v, int vsz)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int gsz = eheap_size / vsz; /* The granuality used */
- int fsz = 0;
- int usz = 0;
-
- if (gsz == 0)
- return -1; /* too good reolution */
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- fsz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = (255*usz)/gsz;
- fsz -= (gsz - usz);
- usz = 0;
- while(fsz >= gsz) {
- *v++ = 0;
- fsz -= gsz;
- }
- }
- }
- else {
- usz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = 255 - (255*fsz)/gsz;
- usz -= (gsz - fsz);
- fsz = 0;
- while(usz >= gsz) {
- *v++ = 255;
- usz -= gsz;
- }
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Generate a histogram of free/allocated blocks
-** Count granuality of 10 gives
-** (0-10],(10-100],(100-1000],(1000-10000] ...
-** (0-2], (2-4], (4-8], (8-16], ....
-*/
-static int i_logb(EWord size, int base)
-{
- int lg = 0;
- while(size >= base) {
- size /= base;
- lg++;
- }
- return lg;
-}
-
-int elib_histo(EWord* vf, EWord* va, int vsz, int base)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int i;
- int linear;
-
- if ((vsz <= 1) || (vf == 0 && va == 0))
- return -1;
-
- if (base < 0) {
- linear = 1;
- base = -base;
- }
- else
- linear = 0;
-
- if (base <= 1)
- return -1;
-
- if (vf != 0) {
- for (i = 0; i < vsz; i++)
- vf[i] = 0;
- }
- if (va != 0) {
- for (i = 0; i < vsz; i++)
- va[i] = 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (vf != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- vf[vsz-1]++;
- else
- vf[val]++;
- }
- }
- else {
- if (va != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- va[vsz-1]++;
- else
- va[val]++;
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Fill the info structure with actual values
-** Total
-** Allocated
-** Free
-** maxMaxFree
-*/
-void elib_stat(struct elib_stat* info)
-{
- EWord blks = 0;
- EWord sz_free = 0;
- EWord sz_alloc = 0;
- EWord sz_max_free = 0;
- EWord sz_min_used = 0x7fffffff;
- EWord sz;
- EWord num_free = 0;
- AllocatedBlock* p = heap_head;
-
- info->mem_total = eheap_size;
-
- p = (AllocatedBlock*) (p->v + SIZEOF(p));
-
- while((sz = SIZEOF(p)) != 0) {
- blks++;
- if (IS_FREE(p)) {
- if (sz > sz_max_free)
- sz_max_free = sz;
- sz_free += sz;
- ++num_free;
- }
- else {
- if (sz < sz_min_used)
- sz_min_used = sz;
- sz_alloc += sz;
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- info->mem_blocks = blks;
- info->free_blocks = num_free;
- info->mem_alloc = sz_alloc;
- info->mem_free = sz_free;
- info->min_used = sz_min_used;
- info->max_free = sz_max_free;
- info->mem_max_alloc = max_allocated;
- ASSERT(sz_alloc == tot_allocated);
-}
-
-/*
-** Dump the heap
-*/
-void elib_heap_dump(char* label)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- elib_printf(stderr, "HEAP DUMP (%s)\n", label);
- if (!elib_check_heap())
- return;
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- elib_printf(stderr, "%p: FREE, size = %d\n", p, (int) sz);
- }
- else {
- elib_printf(stderr, "%p: USED, size = %d %s\n", p, (int) sz,
- IS_FREE_ABOVE(p)?"(FREE ABOVE)":"");
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
-}
-
-/*
-** Scan heaps and count:
-** free_size, allocated_size, max_free_block
-*/
-void elib_statistics(void* to)
-{
- struct elib_stat info;
- EWord frag;
-
- if (!elib_check_heap())
- return;
-
- elib_stat(&info);
-
- frag = 1000 - ((1000 * info.max_free) / info.mem_free);
-
- elib_printf(to, "Heap Statistics: total(%d), blocks(%d), frag(%d.%d%%)\n",
- info.mem_total, info.mem_blocks,
- (int) frag/10, (int) frag % 10);
-
- elib_printf(to, " allocated(%d), free(%d), "
- "free_blocks(%d)\n",
- info.mem_alloc, info.mem_free,info.free_blocks);
- elib_printf(to, " max_free(%d), min_used(%d)\n",
- info.max_free, info.min_used);
-}
-
-/*
-** Allocate a least nb bytes with alignment a
-** Algorithm:
-** 1) Try locate a block which match exacly among the by direct index.
-** 2) Try using a fix block of greater size
-** 3) Try locate a block by searching in lists where block sizes
-** X may vary between 2^i < X <= 2^(i+1)
-**
-** Reset memory to zero if clear is true
-*/
-static AllocatedBlock* allocate(EWord nb, EWord a, int clear)
-{
- FreeBlock* p;
- EWord nw;
-
- if (a == ELIB_ALIGN) {
- /*
- * Common case: Called by malloc(), realloc(), calloc().
- */
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
-
- if ((p = alloc_block(nw)) == 0)
- return NULL;
- } else {
- /*
- * Special case: Called by memalign().
- */
- EWord asz, szp, szq, tmpsz;
- FreeBlock *q;
-
- if ((p = alloc_block((1+MIN_ALIGN_SIZE)*sizeof(EWord)+a-1+nb)) == 0)
- return NULL;
-
- asz = a - ((EWord) ((AllocatedBlock *)p)->v) % a;
-
- if (asz != a) {
- /* Enforce the alignment requirement by cutting of a free
- block at the beginning of the block. */
-
- if (asz < (1+MIN_ALIGN_SIZE)*sizeof(EWord) && !IS_FREE_ABOVE(p)) {
- /* Not enough room to cut of a free block;
- increase align size */
- asz += (((1+MIN_ALIGN_SIZE)*sizeof(EWord) + a - 1)/a)*a;
- }
-
- szq = ALIGN_SIZE(asz - sizeof(EWord));
- szp = SIZEOF(p) - szq - 1;
-
- q = p;
- p = (FreeBlock*) (((EWord*) q) + szq + 1);
- p->hdr = FREE_ABOVE_BIT | FREE_BIT | szp;
-
- if (IS_FREE_ABOVE(q)) { /* This should not be possible I think,
- but just in case... */
- tmpsz = SIZEOF_ABOVE(q) + 1;
- szq += tmpsz;
- q = (FreeBlock*) (((EWord*) q) - tmpsz);
- unlink_free_block((Block_t *) q);
- q->hdr = (q->hdr & FREE_ABOVE_BIT) | FREE_BIT | szq;
- }
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- } /* else already had the correct alignment */
-
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
- }
-
- split_block(p, nw, SIZEOF(p));
-
- STAT_ALLOCED_BLOCK(SIZEOF(p));
-
- if (clear) {
- EWord* pp = ((AllocatedBlock*)p)->v;
-
- while(nw--)
- *pp++ = 0;
- }
-
- return (AllocatedBlock*) p;
-}
-
-
-/*
-** Deallocate memory pointed to by p
-** 1. Merge with block above if this block is free
-** 2. Merge with block below if this block is free
-** Link the block to the correct free list
-**
-** p points to the block header!
-**
-*/
-static void deallocate(AllocatedBlock* p, int stat_count)
-{
- FreeBlock* q;
- EWord szq;
- EWord szp;
-
- szp = SIZEOF(p);
-
- if (stat_count)
- STAT_FREED_BLOCK(SIZEOF(p));
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- q = (FreeBlock*) ( ((EWord*) p) - szq - 1);
- unlink_free_block((Block_t *) q);
-
- p = (AllocatedBlock*) q;
- szp += (szq + 1);
- }
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
- else
- q->hdr |= FREE_ABOVE_BIT;
-
- /* The block above p can NEVER be free !!! */
- p->hdr = FREE_BIT | szp;
- p->v[szp-1] = szp;
-
- link_free_block((Block_t *) p);
-}
-
-/*
-** Reallocate memory
-** If preserve is true then data is moved if neccesary
-*/
-static AllocatedBlock* reallocate(AllocatedBlock* p, EWord nb, int preserve)
-{
- EWord szp;
- EWord szq;
- EWord sz;
- EWord nw;
- FreeBlock* q;
-
- if (nb < MIN_BYTE_SIZE)
- nw = MIN_ALIGN_SIZE;
- else
- nw = ALIGN_SIZE(nb);
-
- sz = szp = SIZEOF(p);
-
- STAT_FREED_BLOCK(szp);
-
- /* Merge with block below */
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
-
- if (nw <= szp) {
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- else {
- EWord* dp = p->v;
- AllocatedBlock* npp;
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- if (szq + szp + 1 >= nw) {
- q = (FreeBlock*) (((EWord*) p) - szq - 1);
- unlink_free_block((Block_t * )q);
- szp += (szq + 1);
- p = (AllocatedBlock*) q;
-
- if (preserve) {
- EWord* pp = p->v;
- while(sz--)
- *pp++ = *dp++;
- }
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- }
-
- /*
- * Update p so that allocate() and deallocate() works.
-	 * (Note that allocate() may call expand_sbrk(), which
-	 * in turn calls deallocate().)
- */
-
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | szp;
- p->v[szp] &= ~FREE_ABOVE_BIT;
-
- npp = allocate(nb, ELIB_ALIGN, 0);
- if(npp == NULL)
- return NULL;
- if (preserve) {
- EWord* pp = npp->v;
- while(sz--)
- *pp++ = *dp++;
- }
- deallocate(p, 0);
- return npp;
- }
-}
-
-/*
-** What malloc() and friends should do (and return) when the heap is
-** exhausted. [sverkerw]
-*/
-static void* heap_exhausted(void)
-{
- /* Choose behaviour */
-#if 0
- /* Crash-and-burn --- leave a usable corpse (hopefully) */
- abort();
-#endif
- /* The usual ANSI-compliant behaviour */
- return NULL;
-}
-
-/*
-** Allocate size bytes of memory
-*/
-void* ELIB_PREFIX(malloc, (size_t nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-void* ELIB_PREFIX(calloc, (size_t nelem, size_t size))
-{
- void *res;
- int nb;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if ((nb = nelem * size) == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 1)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
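Note that the wrapper above computes nelem * size in a plain int, so a large product can overflow silently; a defensive calloc normally guards the multiplication first. A minimal sketch of such a guard (hypothetical helper, not part of elib):

#include <stddef.h>
#include <errno.h>

/* Overflow-checked nelem * size; returns 0 on success, -1 on overflow. */
static int checked_mul(size_t nelem, size_t size, size_t *res)
{
    if (size != 0 && nelem > (size_t) -1 / size) {
        errno = ENOMEM;
        return -1;
    }
    *res = nelem * size;
    return 0;
}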
-
-/*
-** Free memory allocated by malloc
-*/
-
-void ELIB_PREFIX(free, (EWord* p))
-{
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0)
- deallocate((AllocatedBlock*)(p-1), 1);
-
- erts_mtx_unlock(&malloc_mutex);
-}
-
-void ELIB_PREFIX(cfree, (EWord* p))
-{
- ELIB_PREFIX(free, (p));
-}
-
-
-/*
-** Realloc the memory allocated in p to nb number of bytes
-**
-*/
-
-void* ELIB_PREFIX(realloc, (EWord* p, size_t nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 1)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-/*
-** Resize the memory area pointed to by p with nb number of bytes
-*/
-void* ELIB_PREFIX(memresize, (EWord* p, int nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-/* Create aligned memory; a must be a power of 2 !!! */
-
-void* ELIB_PREFIX(memalign, (int a, int nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0 || a <= 0)
- res = NULL;
- else if ((p = allocate(nb, a, 0)) != 0) {
- ALIGN_CHECK(a, p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-void* ELIB_PREFIX(valloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, nb));
-}
-
-
-void* ELIB_PREFIX(pvalloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, PAGES(nb)*page_size));
-}
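valloc() returns page-aligned memory, and pvalloc() additionally rounds the size up to whole pages (PAGES(nb)*page_size above). For a power-of-two page size the rounding is a single mask operation; a minimal sketch:

#include <stddef.h>

/* Round nb up to a multiple of page_size (page_size must be a power of two). */
static size_t round_to_pages(size_t nb, size_t page_size)
{
    return (nb + page_size - 1) & ~(page_size - 1);
}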
-/* Return memory size for pointer p in bytes */
-
-int ELIB_PREFIX(memsize, (p))
-EWord* p;
-{
- return SIZEOF((AllocatedBlock*)(p-1))*4;
-}
-
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG LIBRARY
-** --------------------------------------------------------------------------
-*/
-
-#ifdef ELIB_DEBUG
-
-#define IN_HEAP(p) (((p) >= (char*) eheap) && (p) < (char*) eheap_top)
-/*
-** ptr_to_block: return the pointer to heap block pointed into by ptr
-** Returns 0 if not pointing into a block
-*/
-
-static EWord* ptr_to_block(char* ptr)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- while((sz = SIZEOF(p)) != 0) {
- if ((ptr >= (char*) p->v) && (ptr < (char*)(p->v+sz)))
- return p->v;
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Validate a pointer
-** returns:
-** 0  - if ptr points into an allocated heap block
-** 1  - if ptr points into the heap but not into any block
-** -1 - if ptr points outside the heap
-**
-*/
-static int check_pointer(char* ptr)
-{
- if (IN_HEAP(ptr)) {
- if (ptr_to_block(ptr) == 0)
- return 1;
- return 0;
- }
- return -1;
-}
-
-/*
-** Validate a memory area
-** returns:
-** 0 - if area is included in a block
-** -1 - if area overlaps a heap block boundary
-** 1 - if area is outside heap
-*/
-static int check_area(char* ptr, int n)
-{
- if (IN_HEAP(ptr)) {
- if (IN_HEAP(ptr+n-1)) {
- EWord* p1 = ptr_to_block(ptr);
- EWord* p2 = ptr_to_block(ptr+n-1);
-
- if (p1 == p2)
- return (p1 == 0) ? -1 : 0;
- return -1;
- }
- }
- else if (IN_HEAP(ptr+n-1))
- return -1;
- return 1;
-}
-
-/*
-** Check if a block write will overwrite heap block
-*/
-static void check_write(char* ptr, int n, char* file, int line, char* fun)
-{
- if (check_area(ptr, n) == -1) {
- elib_printf(stderr, "RUNTIME ERROR: %s heap overwrite\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-}
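check_write() is intended to be called from the debug wrappers before any write into the heap; a typical hook is a macro that records the call site, along these lines (hypothetical macro, assuming <string.h> for memcpy; not part of the original debug header):

#include <string.h>

/* Validate the destination area, then perform the copy. */
#define CHK_MEMCPY(dst, src, n) \
    (check_write((char *) (dst), (n), __FILE__, __LINE__, "memcpy"), \
     memcpy((dst), (src), (n)))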
-
-/*
-** Check if a pointer is an allocated object
-*/
-static void check_allocated_block(char* ptr, char* file, int line, char* fun)
-{
- EWord* q;
-
- if (!IN_HEAP(ptr) || ((q=ptr_to_block(ptr)) == 0) || (ptr != (char*) q)) {
- elib_printf(stderr, "RUNTIME ERROR: %s non heap pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
- if (IS_FREE((AllocatedBlock*)(q-1))) {
- elib_printf(stderr, "RUNTIME ERROR: %s free pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
-}
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG VERSIONS (COMPILED WITH THE ELIB.H)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_dbg_malloc(int n, char* file, int line)
-{
- return elib__malloc(n);
-}
-
-void* elib_dbg_calloc(int n, int s, char* file, int line)
-{
- return elib__calloc(n, s);
-}
-
-void* elib_dbg_realloc(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_realloc");
- return elib__realloc(p, n);
-}
-
-void elib_dbg_free(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__free(p);
-}
-
-void elib_dbg_cfree(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__cfree(p);
-}
-
-void* elib_dbg_memalign(int a, int n, char* file, int line)
-{
- return elib__memalign(a, n);
-}
-
-void* elib_dbg_valloc(int n, char* file, int line)
-{
- return elib__valloc(n);
-}
-
-void* elib_dbg_pvalloc(int n, char* file, int line)
-{
- return elib__pvalloc(n);
-}
-
-void* elib_dbg_memresize(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_memresize");
- return elib__memresize(p, n);
-}
-
-int elib_dbg_memsize(void* p, char* file, int line)
-{
- check_allocated_block(p, file, line, "elib_memsize");
- return elib__memsize(p);
-}
-
-/*
-** --------------------------------------------------------------------------
-** LINK TIME FUNCTIONS (NOT COMPILED CALLS)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_malloc(int n)
-{
- return elib_dbg_malloc(n, "", -1);
-}
-
-void* elib_calloc(int n, int s)
-{
- return elib_dbg_calloc(n, s, "", -1);
-}
-
-void* elib_realloc(EWord* p, int n)
-{
- return elib_dbg_realloc(p, n, "", -1);
-}
-
-void elib_free(EWord* p)
-{
- elib_dbg_free(p, "", -1);
-}
-
-void elib_cfree(EWord* p)
-{
- elib_dbg_cfree(p, "", -1);
-}
-
-void* elib_memalign(int a, int n)
-{
- return elib_dbg_memalign(a, n, "", -1);
-}
-
-void* elib_valloc(int n)
-{
- return elib_dbg_valloc(n, "", -1);
-}
-
-void* elib_pvalloc(int n)
-{
- return elib_dbg_pvalloc(n, "", -1);
-}
-
-void* elib_memresize(EWord* p, int n)
-{
- return elib_dbg_memresize(p, n, "", -1);
-}
-
-
-int elib_memsize(EWord* p)
-{
- return elib_dbg_memsize(p, "", -1);
-}
-
-#endif /* ELIB_DEBUG */
-
-/*
-** --------------------------------------------------------------------------
-** Map c library functions to elib
-** --------------------------------------------------------------------------
-*/
-
-#if defined(ELIB_ALLOC_IS_CLIB)
-void* malloc(size_t nb)
-{
- return elib_malloc(nb);
-}
-
-void* calloc(size_t nelem, size_t size)
-{
- return elib_calloc(nelem, size);
-}
-
-
-void free(void *p)
-{
- elib_free(p);
-}
-
-void cfree(void *p)
-{
- elib_cfree(p);
-}
-
-void* realloc(void* p, size_t nb)
-{
- return elib_realloc(p, nb);
-}
-
-
-void* memalign(size_t a, size_t s)
-{
- return elib_memalign(a, s);
-}
-
-void* valloc(size_t nb)
-{
- return elib_valloc(nb);
-}
-
-void* pvalloc(size_t nb)
-{
- return elib_pvalloc(nb);
-}
-
-#if 0
-void* memresize(void* p, int nb)
-{
- return elib_memresize(p, nb);
-}
-
-int memsize(void* p)
-{
- return elib_memsize(p);
-}
-#endif
-#endif /* ELIB_ALLOC_IS_CLIB */
-
-#endif /* ENABLE_ELIB_MALLOC */
-
-void elib_ensure_initialized(void)
-{
-#ifdef ENABLE_ELIB_MALLOC
-#ifndef ELIB_DONT_INITIALIZE
- elib_init(NULL, 0);
-#endif
-#endif
-}
-
-#ifdef ENABLE_ELIB_MALLOC
-/**
- ** A slightly modified version of the "address order best fit" algorithm
- ** used in erl_bestfit_alloc.c. Comments refer to that implementation.
- **/
-
-/*
- * Description: A combined "address order best fit"/"best fit" allocator
- * based on a Red-Black (binary search) Tree. The search,
- * insert, and delete operations are all O(log n) operations
- * on a Red-Black Tree. In the "address order best fit" case
- * n equals number of free blocks, and in the "best fit" case
- * n equals number of distinct sizes of free blocks. Red-Black
- * Trees are described in "Introduction to Algorithms", by
- * Thomas H. Cormen, Charles E. Leiserson, and
- *              Ronald L. Rivest.
- *
- * This module is a callback-module for erl_alloc_util.c
- *
- * Author: Rickard Green
- */
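"Address order best fit" keys the tree on (size, address): a free block sorts before another if it is smaller, or equally large but at a lower address, which is exactly the comparison link_free_block() below performs inline. Written out as a stand-alone comparator (a sketch with hypothetical parameters):

#include <stddef.h>
#include <stdint.h>

/* qsort-style comparator for (size, address) ordering:
 * primary key is the block size, secondary key is the block address. */
static int aobf_cmp(size_t sz_a, const void *blk_a,
                    size_t sz_b, const void *blk_b)
{
    if (sz_a != sz_b)
        return sz_a < sz_b ? -1 : 1;
    if (blk_a != blk_b)
        return (uintptr_t) blk_a < (uintptr_t) blk_b ? -1 : 1;
    return 0;
}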
-
-#ifdef DEBUG
-#if 0
-#define HARD_DEBUG
-#endif
-#else
-#undef HARD_DEBUG
-#endif
-
-#define SZ_MASK SIZE_MASK
-#define FLG_MASK (~(SZ_MASK))
-
-#define BLK_SZ(B) (*((Block_t *) (B)) & SZ_MASK)
-
-#define TREE_NODE_FLG (((Uint) 1) << 0)
-#define RED_FLG (((Uint) 1) << 1)
-#ifdef HARD_DEBUG
-# define LEFT_VISITED_FLG (((Uint) 1) << 2)
-# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
-#endif
-
-#define IS_TREE_NODE(N) (((RBTree_t *) (N))->flags & TREE_NODE_FLG)
-#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((RBTree_t *) (N))))
-
-#define SET_TREE_NODE(N) (((RBTree_t *) (N))->flags |= TREE_NODE_FLG)
-#define SET_LIST_ELEM(N) (((RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
-
-#define IS_RED(N) (((RBTree_t *) (N)) \
- && ((RBTree_t *) (N))->flags & RED_FLG)
-#define IS_BLACK(N) (!IS_RED(((RBTree_t *) (N))))
-
-#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
-#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
-
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
-#if 1
-#define RBT_ASSERT ASSERT
-#else
-#define RBT_ASSERT(x)
-#endif
-
-
-#ifdef HARD_DEBUG
-static RBTree_t * check_tree(Uint);
-#endif
-
-#ifdef ERTS_INLINE
-# ifndef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 1
-# endif
-#else
-# if defined(__GNUC__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline__
-# elif defined(__WIN32__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline
-# else
-# define ERTS_CAN_INLINE 0
-# define ERTS_INLINE
-# endif
-#endif
-
-/* Types... */
-#if 0
-typedef struct RBTree_t_ RBTree_t;
-
-struct RBTree_t_ {
- Block_t hdr;
- Uint flags;
- RBTree_t *parent;
- RBTree_t *left;
- RBTree_t *right;
-};
-#endif
-
-#if 0
-typedef struct {
- RBTree_t t;
- RBTree_t *next;
-} RBTreeList_t;
-
-#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
-#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
-#endif
-
-#ifdef DEBUG
-
-/* Destroy all tree fields */
-#define DESTROY_TREE_NODE(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTree_t) - sizeof(Block_t)))
-
-/* Destroy all tree and list fields */
-#define DESTROY_LIST_ELEM(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTreeList_t) - sizeof(Block_t)))
-
-#else
-
-#define DESTROY_TREE_NODE(N)
-#define DESTROY_LIST_ELEM(N)
-
-#endif
-
-
-/*
- * Red-Black Tree operations needed
- */
-
-static ERTS_INLINE void
-left_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->right;
- x->right = y->left;
- if (y->left)
- y->left->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- y->left = x;
- x->parent = y;
-}
-
-static ERTS_INLINE void
-right_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->left;
- x->left = y->right;
- if (y->right)
- y->right->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->right)
- x->parent->right = y;
- else {
- RBT_ASSERT(x == x->parent->left);
- x->parent->left = y;
- }
- y->right = x;
- x->parent = y;
-}
-
-
-/*
- * Replace node x with node y
- * NOTE: block header of y is not changed
- */
-static ERTS_INLINE void
-replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
-{
-
- if (!x->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- if (x->left) {
- RBT_ASSERT(x->left->parent == x);
- x->left->parent = y;
- }
- if (x->right) {
- RBT_ASSERT(x->right->parent == x);
- x->right->parent = y;
- }
-
- y->flags = x->flags;
- y->parent = x->parent;
- y->right = x->right;
- y->left = x->left;
-
- DESTROY_TREE_NODE(x);
-
-}
-
-static void
-tree_insert_fixup(RBTree_t *blk)
-{
- RBTree_t *x = blk, *y;
-
- /*
- * Rearrange the tree so that it satisfies the Red-Black Tree properties
- */
-
- RBT_ASSERT(x != root && IS_RED(x->parent));
- do {
-
- /*
- * x and its parent are both red. Move the red pair up the tree
- * until we get to the root or until we can separate them.
- */
-
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(x->parent->parent);
-
- if (x->parent == x->parent->parent->left) {
- y = x->parent->parent->right;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->right) {
- x = x->parent;
- left_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->left->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- right_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->right));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- else {
- RBT_ASSERT(x->parent == x->parent->parent->right);
- y = x->parent->parent->left;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->left) {
- x = x->parent;
- right_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->right->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- left_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->left));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- } while (x != root && IS_RED(x->parent));
-
- SET_BLACK(root);
-}
-
-static void
-unlink_free_block(Block_t *del)
-{
- Uint spliced_is_black;
- RBTree_t *x, *y, *z = (RBTree_t *) del;
- RBTree_t null_x; /* null_x is used to get the fixup started when we
- splice out a node without children. */
-
- null_x.parent = NULL;
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
- /* Remove node from tree... */
-
- /* Find node to splice out */
- if (!z->left || !z->right)
- y = z;
- else
-	/* Set y to z's successor */
- for(y = z->right; y->left; y = y->left);
- /* splice out y */
- x = y->left ? y->left : y->right;
- spliced_is_black = IS_BLACK(y);
- if (x) {
- x->parent = y->parent;
- }
- else if (!x && spliced_is_black) {
- x = &null_x;
- x->flags = 0;
- SET_BLACK(x);
- x->right = x->left = NULL;
- x->parent = y->parent;
- y->left = x;
- }
-
- if (!y->parent) {
- RBT_ASSERT(root == y);
- root = x;
- }
- else if (y == y->parent->left)
- y->parent->left = x;
- else {
- RBT_ASSERT(y == y->parent->right);
- y->parent->right = x;
- }
- if (y != z) {
- /* We spliced out the successor of z; replace z by the successor */
- replace(&root, z, y);
- }
-
- if (spliced_is_black) {
- /* We removed a black node which makes the resulting tree
- violate the Red-Black Tree properties. Fixup tree... */
-
- while (IS_BLACK(x) && x->parent) {
-
- /*
- * x has an "extra black" which we move up the tree
- * until we reach the root or until we can get rid of it.
- *
-	     * y is the sibling of x
- */
-
- if (x == x->parent->left) {
- y = x->parent->right;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- left_rotate(&root, x->parent);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->right)) {
- SET_BLACK(y->left);
- SET_RED(y);
- right_rotate(&root, y);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
-
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->right);
- SET_BLACK(y->right);
- left_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- else {
- RBT_ASSERT(x == x->parent->right);
- y = x->parent->left;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- right_rotate(&root, x->parent);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->left)) {
- SET_BLACK(y->right);
- SET_RED(y);
- left_rotate(&root, y);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->left);
- SET_BLACK(y->left);
- right_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- }
- SET_BLACK(x);
-
- if (null_x.parent) {
- if (null_x.parent->left == &null_x)
- null_x.parent->left = NULL;
- else {
- RBT_ASSERT(null_x.parent->right == &null_x);
- null_x.parent->right = NULL;
- }
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- else if (root == &null_x) {
- root = NULL;
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- }
-
-
- DESTROY_TREE_NODE(del);
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
-}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * "Address order best fit" specific callbacks. *
-\* */
-
-static void
-link_free_block(Block_t *block)
-{
- RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
-
- blk->flags = 0;
- blk->left = NULL;
- blk->right = NULL;
-
- if (!root) {
- blk->parent = NULL;
- SET_BLACK(blk);
- root = blk;
- } else {
- RBTree_t *x = root;
- while (1) {
- Uint size;
-
- size = BLK_SZ(x);
-
- if (blk_sz < size || (blk_sz == size && blk < x)) {
- if (!x->left) {
- blk->parent = x;
- x->left = blk;
- break;
- }
- x = x->left;
- }
- else {
- if (!x->right) {
- blk->parent = x;
- x->right = blk;
- break;
- }
- x = x->right;
- }
-
- }
-
- /* Insert block into size tree */
- RBT_ASSERT(blk->parent);
-
- SET_RED(blk);
- if (IS_RED(blk->parent)) {
- tree_insert_fixup(blk);
- }
- }
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-}
-
-
-static Block_t *
-get_free_block(Uint size)
-{
- RBTree_t *x = root;
- RBTree_t *blk = NULL;
- Uint blk_sz;
-
- while (x) {
- blk_sz = BLK_SZ(x);
- if (blk_sz < size) {
- x = x->right;
- }
- else {
- blk = x;
- x = x->left;
- }
- }
-
- if (!blk)
- return NULL;
-
-#ifdef HARD_DEBUG
- ASSERT(blk == check_tree(size));
-#endif
-
- unlink_free_block((Block_t *) blk);
-
- return (Block_t *) blk;
-}
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Debug functions *
-\* */
-
-
-#ifdef HARD_DEBUG
-
-#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
-#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
-
-#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
-#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
-
-#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
-#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
-
-
-#if 0
-# define PRINT_TREE
-#else
-# undef PRINT_TREE
-#endif
-
-#ifdef PRINT_TREE
-static void print_tree(void);
-#endif
-
-/*
- * Checks that the order between parent and children is correct,
- * and that the Red-Black Tree properties are satisfied. If size > 0,
- * check_tree() returns a node that satisfies "best fit" resp.
- * "address order best fit".
- *
- * The Red-Black Tree properties are:
- * 1. Every node is either red or black.
- * 2. Every leaf (NIL) is black.
- * 3. If a node is red, then both its children are black.
- * 4. Every simple path from a node to a descendant leaf
- * contains the same number of black nodes.
- */
-
-static RBTree_t *
-check_tree(Uint size)
-{
- RBTree_t *res = NULL;
- Sint blacks;
- Sint curr_blacks;
- RBTree_t *x;
-
-#ifdef PRINT_TREE
- print_tree();
-#endif
-
- if (!root)
- return res;
-
- x = root;
- ASSERT(IS_BLACK(x));
- ASSERT(!x->parent);
- curr_blacks = 1;
- blacks = -1;
-
- while (x) {
- if (!IS_LEFT_VISITED(x)) {
- SET_LEFT_VISITED(x);
- if (x->left) {
- x = x->left;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
- if (!IS_RIGHT_VISITED(x)) {
- SET_RIGHT_VISITED(x);
- if (x->right) {
- x = x->right;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
-
- if (IS_RED(x)) {
- ASSERT(IS_BLACK(x->right));
- ASSERT(IS_BLACK(x->left));
- }
-
- ASSERT(x->parent || x == root);
-
- if (x->left) {
- ASSERT(x->left->parent == x);
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
- || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
- }
-
- if (x->right) {
- ASSERT(x->right->parent == x);
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
- || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
- }
-
- if (size && BLK_SZ(x) >= size) {
- if (!res
- || BLK_SZ(x) < BLK_SZ(res)
- || (BLK_SZ(x) == BLK_SZ(res) && x < res))
- res = x;
- }
-
- UNSET_LEFT_VISITED(x);
- UNSET_RIGHT_VISITED(x);
- if (IS_BLACK(x))
- curr_blacks--;
- x = x->parent;
-
- }
-
- ASSERT(curr_blacks == 0);
-
- UNSET_LEFT_VISITED(root);
- UNSET_RIGHT_VISITED(root);
-
- return res;
-
-}
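The four properties can also be spot-checked with a small recursive validator; check_tree() above avoids recursion by walking with the VISITED flags instead. A minimal sketch over a hypothetical node type:

struct rbt_node {
    struct rbt_node *left;
    struct rbt_node *right;
    int is_red;
};

/* Returns the black-height of the subtree rooted at n,
 * or -1 if a Red-Black property is violated. */
static int black_height(const struct rbt_node *n)
{
    int lh, rh;
    if (!n)
        return 1;                        /* NIL leaves count as black */
    if (n->is_red &&
        ((n->left && n->left->is_red) || (n->right && n->right->is_red)))
        return -1;                       /* red node with a red child */
    lh = black_height(n->left);
    rh = black_height(n->right);
    if (lh < 0 || rh < 0 || lh != rh)
        return -1;                       /* unequal black counts */
    return lh + (n->is_red ? 0 : 1);
}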
-
-
-#ifdef PRINT_TREE
-#define INDENT_STEP 2
-
-#include <stdio.h>
-
-static void
-print_tree_aux(RBTree_t *x, int indent)
-{
- int i;
-
- if (!x) {
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "BLACK: nil\r\n");
- }
- else {
- print_tree_aux(x->right, indent + INDENT_STEP);
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
- IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x),
- (Uint) x);
- print_tree_aux(x->left, indent + INDENT_STEP);
- }
-}
-
-
-static void
-print_tree(void)
-{
-    fprintf(stderr, " --- Size-Address tree begin ---\r\n");
-    print_tree_aux(root, 0);
-    fprintf(stderr, " --- Size-Address tree end ---\r\n");
-}
-
-#endif
-
-#endif
-
-#endif /* ENABLE_ELIB_MALLOC */
diff --git a/erts/emulator/beam/elib_stat.h b/erts/emulator/beam/elib_stat.h
deleted file mode 100644
index d8c7f31737..0000000000
--- a/erts/emulator/beam/elib_stat.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Interface to elib statistics
-**
-*/
-#ifndef __ELIB_STAT_H__
-#define __ELIB_STAT_H__
-
-struct elib_stat {
- int mem_total; /* Number of heap words */
-    int mem_blocks;   /* Number of blocks */
- int mem_alloc; /* Number of words in use */
- int mem_free; /* Number of words free */
- int min_used; /* Size of the smallest block used */
- int max_free; /* Size of the largest free block */
- int free_blocks; /* Number of fragments in free list */
- int mem_max_alloc;/* Max number of words in use */
-};
-
-EXTERN_FUNCTION(void, elib_statistics, (void*));
-EXTERN_FUNCTION(int, elib_check_heap, (_VOID_));
-EXTERN_FUNCTION(void, elib_heap_dump, (char*));
-EXTERN_FUNCTION(void, elib_stat, (struct elib_stat*));
-EXTERN_FUNCTION(int, elib_heap_map, (unsigned char*, int));
-EXTERN_FUNCTION(int, elib_histo, (unsigned long*, unsigned long*, int, int));
-
-#endif
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 16ae643ed9..07b4167b27 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -38,9 +38,6 @@
#include "erl_bits.h"
#include "erl_instrument.h"
#include "erl_mseg.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "erl_version.h"
-#endif
#include "erl_monitors.h"
#include "erl_bif_timer.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
@@ -64,8 +61,15 @@
#ifdef DEBUG
static Uint install_debug_functions(void);
+#if 0
+#define HARD_DEBUG
+#ifdef __GNUC__
+#warning "* * * * * * * * * * * * * *"
+#warning "* HARD DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * *"
+#endif
+#endif
#endif
-extern void elib_ensure_initialized(void);
ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
@@ -391,6 +395,10 @@ refuse_af_strategy(struct au_init *init)
static void init_thr_ix(int static_ixs);
+#ifdef HARD_DEBUG
+static void hdbg_init(void);
+#endif
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
@@ -406,6 +414,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
ERTS_DEFAULT_ALCU_INIT
};
+#ifdef HARD_DEBUG
+ hdbg_init();
+#endif
+
erts_sys_alloc_init();
init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
@@ -2305,13 +2317,8 @@ erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
l = 0;
as[l] = am_atom_put("e", 1);
ts[l++] = am_true;
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2465,11 +2472,7 @@ erts_allocator_info(int to, void *arg)
case ERTS_ALC_A_SYSTEM: {
SysAllocStat sas;
erts_print(to, arg, "option e: true\n");
-#ifdef ELIB_ALLOC_IS_CLIB
- erts_print(to, arg, "option m: elib\n");
-#else
erts_print(to, arg, "option m: libc\n");
-#endif
sys_alloc_stat(&sas);
if(sas.trim_threshold >= 0)
erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
@@ -2573,13 +2576,8 @@ erts_allocator_options(void *proc)
switch (a) {
case ERTS_ALC_A_SYSTEM:
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2650,23 +2648,7 @@ erts_allocator_options(void *proc)
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
-#if defined(ELIB_ALLOC_IS_CLIB)
- {
- Eterm version;
- int i;
- int ver[5];
- i = sscanf(ERLANG_VERSION,
- "%d.%d.%d.%d.%d",
- &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]);
-
- version = NIL;
- for(i--; i >= 0; i--)
- version = erts_bld_cons(hpp, szp, make_small(ver[i]), version);
-
- res = erts_bld_tuple(hpp, szp, 4,
- am_elib_malloc, version, features, settings);
- }
-#elif defined(__GLIBC__)
+#if defined(__GLIBC__)
{
Eterm AM_glibc = am_atom_put("glibc", 5);
Eterm version;
@@ -2862,12 +2844,10 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0a:
- if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_lock((ethr_mutex *) a1);
break;
case 0xf0b:
- if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
@@ -2883,16 +2863,13 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0e:
- if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_cond_broadcast((ethr_cond *) a1);
break;
case 0xf0f: {
int res;
do {
res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
} while (res == EINTR);
- if (res != 0)
- ERTS_ALC_TEST_ABORT;
break;
}
case 0xf10: {
@@ -2939,8 +2916,11 @@ unsigned long erts_alc_test(unsigned long op,
#undef PRINT_OPS
#endif
-
+#ifdef HARD_DEBUG
+#define FENCE_SZ (4*sizeof(UWord))
+#else
#define FENCE_SZ (3*sizeof(UWord))
+#endif
#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
@@ -2962,6 +2942,104 @@ unsigned long erts_alc_test(unsigned long op,
#define GET_TYPE_OF_PATTERN(P) \
(((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+#ifdef HARD_DEBUG
+
+#define ERL_ALC_HDBG_MAX_MBLK 100000
+#define ERTS_ALC_O_CHECK -1
+
+typedef struct hdbg_mblk_ hdbg_mblk;
+struct hdbg_mblk_ {
+ hdbg_mblk *next;
+ hdbg_mblk *prev;
+ void *p;
+ Uint s;
+ ErtsAlcType_t n;
+};
+
+static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
+
+static hdbg_mblk *free_hdbg_mblks;
+static hdbg_mblk *used_hdbg_mblks;
+static erts_mtx_t hdbg_mblk_mtx;
+
+static void
+hdbg_init(void)
+{
+ int i;
+ for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
+ hdbg_mblks[i].next = &hdbg_mblks[i+1];
+ hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
+ free_hdbg_mblks = &hdbg_mblks[0];
+ used_hdbg_mblks = NULL;
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+}
+
+static void *check_memory_fence(void *ptr,
+ Uint *size,
+ ErtsAlcType_t n,
+ int func);
+void erts_hdbg_chk_blks(void);
+
+void
+erts_hdbg_chk_blks(void)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
+ Uint sz;
+ check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
+ ASSERT(sz == mblk->s);
+ }
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+static hdbg_mblk *
+hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ mblk = free_hdbg_mblks;
+ if (!mblk) {
+ erts_fprintf(stderr,
+ "Ran out of debug blocks; please increase "
+ "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
+ ERL_ALC_HDBG_MAX_MBLK);
+ abort();
+ }
+ free_hdbg_mblks = mblk->next;
+
+ mblk->p = p;
+ mblk->s = s;
+ mblk->n = n;
+
+ mblk->next = used_hdbg_mblks;
+ mblk->prev = NULL;
+ if (used_hdbg_mblks)
+ used_hdbg_mblks->prev = mblk;
+ used_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+ return (void *) mblk;
+}
+
+static void
+hdbg_free(hdbg_mblk *mblk)
+{
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ if (mblk->next)
+ mblk->next->prev = mblk->prev;
+ if (mblk->prev)
+ mblk->prev->next = mblk->next;
+ else
+ used_hdbg_mblks = mblk->next;
+
+ mblk->next = free_hdbg_mblks;
+ free_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+#endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
@@ -3007,17 +3085,28 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
UWord *ui_ptr;
UWord pattern;
+#ifdef HARD_DEBUG
+ hdbg_mblk **mblkpp;
+#endif
if (!ptr)
return NULL;
ui_ptr = (UWord *) ptr;
pattern = MK_PATTERN(n);
-
+
+#ifdef HARD_DEBUG
+ mblkpp = (hdbg_mblk **) ui_ptr++;
+#endif
+
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+#ifdef HARD_DEBUG
+ *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
+#endif
+
return (void *) ui_ptr;
}
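With HARD_DEBUG the fence gains a fourth word holding a pointer back to the block's hdbg_mblk record, so a debug block is laid out roughly as [mblk][size][pre-pattern][payload][post-pattern]. The non-HARD_DEBUG layout reduces to the following sketch (simplified to size_t words, hypothetical names):

#include <stddef.h>
#include <string.h>

#define PRE_PATTERN ((size_t) 0xABCDEF97u)

/* Caller must have allocated size + 3*sizeof(size_t) bytes at raw. */
static void *put_fence(void *raw, size_t size)
{
    size_t *w = raw;
    w[0] = size;
    w[1] = PRE_PATTERN;
    memcpy((char *) &w[2] + size, &w[1], sizeof(size_t)); /* post fence */
    return &w[2];                                         /* payload */
}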
@@ -3029,6 +3118,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
UWord pre_pattern;
UWord post_pattern;
UWord *ui_ptr;
+#ifdef HARD_DEBUG
+ hdbg_mblk *mblk;
+#endif
if (!ptr)
return NULL;
@@ -3036,6 +3128,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
+#ifdef HARD_DEBUG
+ mblk = (hdbg_mblk *) *(--ui_ptr);
+#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
@@ -3091,6 +3186,17 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
}
+#ifdef HARD_DEBUG
+ switch (func) {
+ case ERTS_ALC_O_REALLOC:
+ case ERTS_ALC_O_FREE:
+ hdbg_free(mblk);
+ break;
+ default:
+ break;
+ }
+#endif
+
return (void *) ui_ptr;
}
@@ -3103,6 +3209,10 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint dsize;
void *res;
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
res = (*real_af->alloc)(n, real_af->extra, dsize);
@@ -3132,13 +3242,17 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
dsize = size + FENCE_SZ;
dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
if (old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
-
+
res = set_memory_fence(res, size, n);
#ifdef PRINT_OPS
@@ -3168,6 +3282,10 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
}
static Uint
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6f88bbe5b8..7df9f19af0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -212,7 +212,8 @@ type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
# INFO_DSBUF has to use the SYSTEM allocator; otherwise, a deadlock might occur
-type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
+type SCHDLR_SLP_INFO LONG_LIVED SYSTEM scheduler_sleep_info
type RUNQS LONG_LIVED SYSTEM run_queues
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type DDLL_HANDLE STANDARD SYSTEM ddll_handle
@@ -246,6 +247,7 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
+type RDR_GRPS_MAP LONG_LIVED SYSTEM reader_groups_map
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -269,7 +271,9 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+if threads
-type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+type ETHR_STD STANDARD SYSTEM ethread_standard
+type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
+type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
+ifnot smp
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index be691317ee..12c7631448 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -70,7 +70,6 @@ static ErlAsync* async_ready_list = NULL;
/* Detach from driver */
static void async_detach(DE_Handle* dh)
{
- /* XXX:PaN what should happen here? we want to unload the driver or??? */
return;
}
@@ -176,7 +175,6 @@ int exit_async()
static void async_add(ErlAsync* a, AsyncQueue* q)
{
- /* XXX:PaN Is this still necessary when ports lock drivers? */
if (is_internal_port(a->port)) {
ERTS_LC_ASSERT(erts_drvportid2port(a->port));
/* make sure the driver will stay around */
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 82f1e06e8e..024ff2a684 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -2511,13 +2511,13 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
pb = (ProcBin *) HAlloc(BIF_P, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = target_size;
- pb->next = MSO(BIF_P).mso;
- MSO(BIF_P).mso = pb;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
pb->val = save;
pb->bytes = t;
pb->flags = 0;
- MSO(BIF_P).overhead += target_size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(BIF_P)), target_size / sizeof(Eterm));
BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR);
BIF_RET(make_binary(pb));
@@ -2551,7 +2551,8 @@ BIF_RETTYPE binary_referenced_byte_size_1(BIF_ALIST_1)
}
pb = (ProcBin *) binary_val(bin);
if (pb->thing_word == HEADER_PROC_BIN) {
- res = erts_make_integer((Uint) pb->val->orig_size, BIF_P); /* XXX:PaN Halfword? orig_size is a long */
+ /* XXX:PaN - Halfword - orig_size is a long, we should handle that */
+ res = erts_make_integer((Uint) pb->val->orig_size, BIF_P);
} else { /* heap binary */
res = erts_make_integer((Uint) ((ErlHeapBin *) pb)->size, BIF_P);
}
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 9d5f0d9c02..2c2e283f65 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1646,7 +1646,8 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
if (save_name != NULL) {
*save_name = mkatom(q->name);
}
- /* XXX:PaN Future locking problems? Don't dare to let go of the diver_list lock here!*/
+ /* Future locking problems? Don't dare to let go of the
+       driver_list lock here! */
if (q->finish) {
int fpe_was_unmasked = erts_block_fpe();
(*(q->finish))();
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 48cda52612..dace5b9297 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -38,9 +38,6 @@
#include "erl_instrument.h"
#include "dist.h"
#include "erl_gc.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "elib_stat.h"
-#endif
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -122,22 +119,26 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#endif
static Eterm
-bld_bin_list(Uint **hpp, Uint *szp, ProcBin* pb)
+bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
+ struct erl_off_heap_header* ohh;
Eterm res = NIL;
Eterm tuple;
- for (; pb; pb = pb->next) {
- Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
- Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
-
- if (szp)
- *szp += 4+2;
- if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
- tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
- res = CONS(*hpp + 4, tuple, res);
- *hpp += 4+2;
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (ohh->thing_word == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin*) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
+ Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
}
}
return res;
@@ -176,10 +177,10 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
Eterm tup;
Eterm r = (IS_CONST(mon->ref)
? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->ref));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
Eterm p = (IS_CONST(mon->pid)
? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->pid));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -240,7 +241,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
Eterm old_res, targets = NIL;
Eterm p = (IS_CONST(lnk->pid)
? lnk->pid
- : STORE_NC(&(pllc->hp), &MSO(pllc->p).externals, lnk->pid));
+ : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
targets = make_small(ERTS_LINK_REFC(lnk));
} else if (ERTS_LINK_ROOT(lnk) != NULL) {
@@ -1225,7 +1226,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1258,9 +1259,7 @@ process_info_aux(Process *BIF_P,
else {
/* Monitor by pid. Build {process, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp,
- &MSO(BIF_P).externals,
- mic.mi[i].entity);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, pid);
hp += 3;
res = CONS(hp, t, res);
@@ -1282,7 +1281,7 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1491,7 +1490,7 @@ process_info_aux(Process *BIF_P,
case am_group_leader: {
int sz = NC_HEAP_SIZE(rp->group_leader);
hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P).externals, rp->group_leader);
+ res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
break;
}
@@ -1516,9 +1515,9 @@ process_info_aux(Process *BIF_P,
case am_binary: {
Uint sz = 3;
- (void) bld_bin_list(NULL, &sz, MSO(rp).mso);
+ (void) bld_bin_list(NULL, &sz, &MSO(rp));
hp = HAlloc(BIF_P, sz);
- res = bld_bin_list(&hp, NULL, MSO(rp).mso);
+ res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
@@ -2124,86 +2123,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
}
else if (BIF_ARG_1 == am_elib_malloc) {
-#ifdef ELIB_ALLOC_IS_CLIB
- struct elib_stat stat;
- DECL_AM(heap_size);
- DECL_AM(max_alloced_size);
- DECL_AM(alloced_size);
- DECL_AM(free_size);
- DECL_AM(no_alloced_blocks);
- DECL_AM(no_free_blocks);
- DECL_AM(smallest_alloced_block);
- DECL_AM(largest_free_block);
- Eterm atoms[8];
- Eterm ints[8];
- Uint **hpp;
- Uint sz;
- Uint *szp;
- int length;
-#ifdef DEBUG
- Uint *endp;
-#endif
-
- elib_stat(&stat);
-
- /* First find out the heap size needed ... */
- hpp = NULL;
- szp = &sz;
- sz = 0;
-
- build_elib_malloc_term:
- length = 0;
- atoms[length] = AM_heap_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_total*sizeof(Uint));
- atoms[length] = AM_max_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_max_alloc*sizeof(Uint));
- atoms[length] = AM_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_alloc*sizeof(Uint));
- atoms[length] = AM_free_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_free*sizeof(Uint));
- atoms[length] = AM_no_alloced_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.mem_blocks);
- atoms[length] = AM_no_free_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.free_blocks);
- atoms[length] = AM_smallest_alloced_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.min_used*sizeof(Uint));
- atoms[length] = AM_largest_free_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.max_free*sizeof(Uint));
-
-
-
- ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
- ASSERT(length <= sizeof(ints)/sizeof(Eterm));
-
- res = erts_bld_2tup_list(hpp, szp, length, atoms, ints);
-
- if (szp) {
- /* ... and then build the term */
- hp = HAlloc(BIF_P, sz);
-#ifdef DEBUG
- endp = hp + sz;
-#endif
-
- szp = NULL;
- hpp = &hp;
- goto build_elib_malloc_term;
- }
-
-#ifdef DEBUG
- ASSERT(endp == hp);
-#endif
-
-#else /* #ifdef ELIB_ALLOC_IS_CLIB */
- res = am_false;
-#endif /* #ifdef ELIB_ALLOC_IS_CLIB */
-
- BIF_RET(res);
+ /* To be removed in R15 */
+ BIF_RET(am_false);
}
else if (BIF_ARG_1 == am_os_version) {
int major, minor, build;
@@ -2314,6 +2235,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
res = erts_get_cpu_topology_term(BIF_P, am_used);
BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
+ } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
+ if (erts_update_cpu_info()) {
+ ERTS_DECL_AM(changed);
+ BIF_RET(AM_changed);
+ }
+ else {
+ ERTS_DECL_AM(unchanged);
+ BIF_RET(AM_unchanged);
+ }
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
} else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
register unsigned high asm("%l0");
@@ -2385,7 +2315,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
/* Arguments that are unusual follow ... */
else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
- int no = erts_get_cpu_configured(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_configured(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2394,7 +2327,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
- int no = erts_get_cpu_online(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_online(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2403,7 +2339,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
- int no = erts_get_cpu_available(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_available(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2563,6 +2502,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_sched_stat_term(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
BIF_RET(erts_nif_taints(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_reader_groups_map(BIF_P));
}
BIF_ERROR(BIF_P, BADARG);
@@ -2678,7 +2619,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -2698,7 +2639,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, item);
hp += 3;
res = CONS(hp, t, res);
@@ -3426,6 +3367,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
return erts_fake_scheduler_bindings(BIF_P, tp[2]);
}
+ else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
+ Sint groups;
+ if (is_not_small(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ groups = signed_val(tp[2]);
+ if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
+ }
break;
}
default:
@@ -3730,8 +3681,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- ethr_atomic_read(&stats->tries, (long *)&tries);
- ethr_atomic_read(&stats->colls, (long *)&colls);
+ tries = (unsigned long) ethr_atomic_read(&stats->tries);
+ colls = (unsigned long) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 443cac9033..0509e51a6f 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -2161,7 +2161,7 @@ trace_delivered_1(BIF_ALIST_1)
#ifdef ERTS_SMP
bp = new_message_buffer(REF_THING_SIZE + 4);
hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap.externals, ref);
+ msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
#else
hp = HAlloc(BIF_P, 4);
msg_ref = ref;
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index defe18c92b..88d2c06246 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1335,12 +1335,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = used_size_in_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
- MSO(c_p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
/*
* Now allocate the sub binary and set its size to include the
@@ -1506,12 +1506,12 @@ erts_bs_init_writable(Process* p, Eterm sz)
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
/*
* Now allocate the sub binary.
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index cbdaa459de..b0369a402b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -78,11 +78,19 @@ enum DbIterSafety {
** The main meta table, containing all ets tables.
*/
#ifdef ERTS_SMP
-# define META_MAIN_TAB_LOCK_CNT 16
-static union {
- erts_smp_spinlock_t lck;
- byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+
+typedef union {
+ erts_smp_rwmtx_t rwmtx;
+ byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+
#endif
static struct {
union {
@@ -104,17 +112,13 @@ static struct {
#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
+static ERTS_INLINE erts_smp_rwmtx_t *
+get_meta_main_tab_lock(unsigned slot)
{
#ifdef ERTS_SMP
- erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
-#endif
-}
-
-static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+ return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#else
+ return NULL;
#endif
}
@@ -166,7 +170,8 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3 /* record write access */
+ LCK_WRITE_REC=3, /* record write access */
+ LCK_NONE=4
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -214,17 +219,17 @@ Export ets_select_continue_exp;
*/
static Export ets_delete_continue_exp;
-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
{
- if (tb != NULL) {
+ if (tb != NULL && kind != LCK_READ) {
erts_refc_inc(&tb->common.ref, 2);
}
return tb;
}
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
{
- if (!erts_refc_dectest(&tb->common.ref, 0)) {
+ if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -256,12 +261,19 @@ static ERTS_INLINE DbTable* db_unref(DbTable* tb)
return tb;
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+ char *rwname, char* fixname)
{
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+#endif
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+ rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
@@ -297,7 +309,7 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
- (void) db_ref(tb);
+ (void) db_ref(tb, kind);
#ifdef ERTS_SMP
db_lock_take_over_ref(tb, kind);
#endif
@@ -331,7 +343,7 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb); /* May delete table... */
+ (void) db_unref(tb, kind); /* May delete table... */
}
@@ -349,32 +361,49 @@ static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
}
static ERTS_INLINE
-DbTable* db_get_table(Process *p,
- Eterm id,
- int what,
- db_lock_kind_t kind)
+DbTable* db_get_table_aux(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind,
+ int meta_already_locked)
{
DbTable *tb = NULL;
+ erts_smp_rwmtx_t *mtl = NULL;
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- meta_main_tab_lock(slot);
+ if (!meta_already_locked) {
+ mtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rlock(mtl);
+ }
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ else {
+ erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+ || erts_lc_rwmtx_is_rwlocked(test_mtl));
+ }
+#endif
if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
/* SMP: inc to prevent a race between unlock of meta_main_tab_lock
* and the table locking outside the meta_main_tab_lock
*/
- tb = db_ref(meta_main_tab[slot].u.tb);
+ tb = db_ref(meta_main_tab[slot].u.tb, kind);
}
- meta_main_tab_unlock(slot);
}
else if (is_atom(id)) {
- erts_smp_rwmtx_t* rwlock;
- struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
- erts_smp_rwmtx_rlock(rwlock);
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+ if (!meta_already_locked)
+ erts_smp_rwmtx_rlock(mtl);
+ else{
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ || erts_lc_rwmtx_is_rwlocked(mtl));
+ mtl = NULL;
+ }
+
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb);
+ tb = db_ref(bucket->pu.tb, kind);
}
}
else { /* multi */
@@ -382,23 +411,33 @@ DbTable* db_get_table(Process *p,
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
break;
}
}
}
}
- erts_smp_rwmtx_runlock(rwlock);
}
if (tb) {
db_lock_take_over_ref(tb, kind);
- if (tb->common.id == id && ((tb->common.status & what) != 0 ||
- p->id == tb->common.owner)) {
- return tb;
+ if (tb->common.id != id
+ || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ db_unlock(tb, kind);
+ tb = NULL;
}
- db_unlock(tb, kind);
}
- return NULL;
+ if (mtl)
+ erts_smp_rwmtx_runlock(mtl);
+ return tb;
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ return db_get_table_aux(p, id, what, kind, 0);
}
/* Requires meta_main_tab_locks[slot] locked.
@@ -413,15 +452,15 @@ static ERTS_INLINE void free_slot(int slot)
erts_smp_spin_unlock(&meta_main_tab_main_lock);
}
-static int insert_named_tab(Eterm name_atom, DbTable* tb)
+static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-
- erts_smp_rwmtx_rwlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -468,17 +507,32 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.id;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
- erts_smp_rwmtx_rwlock(rwlock);
+#ifdef ERTS_SMP
+ if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+
if (bucket->pu.tb == NULL) {
goto done;
}
@@ -529,7 +583,8 @@ static int remove_named_tab(Eterm name_atom)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -1133,6 +1188,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1141,34 +1197,65 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+
+ if (is_not_atom(BIF_ARG_2)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (is_not_atom(BIF_ARG_2)) {
- goto badarg;
+ (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+
+ if (is_small(BIF_ARG_1)) {
+ Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+ lck2 = get_meta_main_tab_lock(slot);
+ }
+ else if (is_atom(BIF_ARG_1)) {
+ (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (lck1 == lck2)
+ lck2 = NULL;
+ else if (lck1 > lck2) {
+ erts_smp_rwmtx_t *tmp = lck1;
+ lck1 = lck2;
+ lck2 = tmp;
+ }
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
+ erts_smp_rwmtx_rwlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwlock(lck2);
+
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ if (!tb)
+ goto badarg;
+
if (is_not_atom(tb->common.id)) { /* Not a named table */
tb->common.the_name = BIF_ARG_2;
goto done;
}
- if (!insert_named_tab(BIF_ARG_2,tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
goto badarg;
- }
- if (!remove_named_tab(tb->common.id)) {
+
+ if (!remove_named_tab(tb, 1))
erl_exit(1,"Could not find named tab %s", tb->common.id);
- }
tb->common.id = tb->common.the_name = BIF_ARG_2;
done:
ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
- db_unlock(tb, LCK_WRITE);
+ if (tb)
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
@@ -1189,10 +1276,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked;
+ int is_named, is_fine_locked, frequent_read;
int cret;
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
+ erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1205,6 +1293,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = 1;
is_named = 0;
is_fine_locked = 0;
+ frequent_read = 0;
heir = am_none;
heir_data = (UWord) am_undefined;
@@ -1238,6 +1327,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 0;
} else break;
}
+ else if (tp[1] == am_read_concurrency) {
+ if (tp[2] == am_true) {
+ frequent_read = 1;
+ } else if (tp[2] == am_false) {
+ frequent_read = 0;
+ } else break;
+ }
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
heir_data = am_undefined;
@@ -1286,6 +1382,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+#ifdef ERTS_SMP
+ if (frequent_read && !(status & DB_PRIVATE))
+ status |= DB_FREQ_READ;
+#endif
+
/* we create table outside any table lock
* and take the unusual cost of destroying the table if it
* fails to find a slot
@@ -1308,7 +1409,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- db_init_lock(tb, "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
+ "db_tab", "db_tab_fix");
tb->common.keypos = keypos;
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -1351,15 +1453,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.id = ret;
tb->common.slot = slot; /* store slot for erase */
- meta_main_tab_lock(slot);
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
meta_main_tab[slot].u.tb = tb;
ASSERT(IS_SLOT_ALIVE(slot));
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
- if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
- meta_main_tab_lock(slot);
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
free_slot(slot);
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
db_lock_take_over_ref(tb,LCK_WRITE);
free_heir_data(tb);
@@ -1499,6 +1603,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
int trap;
DbTable* tb;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1520,13 +1625,23 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
/* We must keep the slot, to be found by db_proc_dead() if process dies */
MARK_SLOT_DEAD(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
- if (is_atom(tb->common.id)) {
- remove_named_tab(tb->common.id);
- }
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
if (tb->common.owner != BIF_P->id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
@@ -1919,14 +2034,15 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
previous = NIL;
j = 0;
for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
- meta_main_tab_lock(i);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
j++;
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
}
- meta_main_tab_unlock(i);
+ erts_smp_rwmtx_runlock(mmtl);
}
HRelease(BIF_P, hendp, hp);
BIF_RET(previous);
@@ -2630,12 +2746,30 @@ void init_db(void)
size_t size;
#ifdef ERTS_SMP
- for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
- erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
+ (sizeof(erts_meta_main_tab_lock_t)
+ * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
+
+ if ((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
+ meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
+ ((((Uint) meta_main_tab_locks)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
+
+ for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+ erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+ "meta_main_tab_slot", make_small(i));
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i));
}
#endif
@@ -2895,12 +3029,12 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb); /* while unlocked */
+ db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb);
+ tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -3008,12 +3142,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
db_lock_take_over_ref(tb, LCK_WRITE);
@@ -3045,7 +3180,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
tb->common.status |= DB_DELETE;
if (is_atom(tb->common.id))
- remove_named_tab(tb->common.id);
+ remove_named_tab(tb, 0);
free_heir_data(tb);
free_fixations_locked(tb);
@@ -3095,12 +3230,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb);
+ tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int reds;
DbFixation** pp;
@@ -3274,6 +3410,7 @@ unlocked:
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ db_ref(tb, LCK_WRITE); /* LCK_WRITE need it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3386,6 +3523,7 @@ static int free_table_cont(Process *p,
int clean_meta_tab)
{
Eterm result;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
if (!first) {
@@ -3411,9 +3549,20 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
free_slot(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
if (clean_meta_tab) {
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
@@ -3421,7 +3570,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb);
+ db_unref(tb, LCK_NONE);
BUMP_REDS(p, 100);
return 0;
}
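Note on the erl_db.c changes above: db_ref()/db_unref() now take the lock kind so plain read locks no longer touch the shared table reference counter, and the meta_main_tab spinlocks become frequent-read rwlocks that are ordered before the per-table lock. Where code already holds a table lock and then needs a meta lock (remove_named_tab, ets_delete_1, free_table_cont), it uses the try-lock/back-out sequence seen three times above rather than blocking out of order. A minimal sketch of that pattern, with a hypothetical helper name; it assumes the caller holds tb->common.rwlock exclusively and an extra reference on tb, so the table cannot be freed while the table lock is briefly released:

    /* Hypothetical helper illustrating the retry pattern used above. */
    static void
    lock_meta_with_table_locked(erts_smp_rwmtx_t *meta_lck, DbTable *tb)
    {
    #ifdef ERTS_SMP
        if (erts_smp_rwmtx_tryrwlock(meta_lck) == EBUSY) {
            /* Meta locks order before table locks, so we must not block on
             * meta_lck while holding the table lock; back out, take the
             * locks in order, and re-acquire the table lock. The caller's
             * extra refc keeps the table alive across the gap. */
            erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
            erts_smp_rwmtx_rwlock(meta_lck);
            erts_smp_rwmtx_rwlock(&tb->common.rwlock);
        }
    #endif
    }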
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 124129a371..5abd2e50fa 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -623,12 +623,16 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation is protected by the same lock:
@@ -2741,6 +2745,7 @@ static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
ASSERT(&oldp->dbterm == handle->dbterm);
if (handle->mustResize) {
+ ErlOffHeap tmp_offheap;
Eterm* top;
Eterm copy;
DbTerm* newDbTerm;
@@ -2751,18 +2756,15 @@ static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
- newDbTerm->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
/* make a flat copy */
top = DBTERM_BUF(newDbTerm);
copy = copy_struct(make_tuple(handle->dbterm->tpl),
handle->new_size,
- &top, &newDbTerm->off_heap);
+ &top, &tmp_offheap);
+ newDbTerm->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
WUNLOCK_HASH(lck);
@@ -2805,7 +2807,11 @@ void db_foreach_offheap_hash(DbTable *tbl,
for (i = 0; i < nactive; i++) {
list = BUCKET(tb,i);
while(list != 0) {
- (*func)(&(list->dbterm.off_heap), arg);
+ ErlOffHeap tmp_offheap;
+ tmp_offheap.first = list->dbterm.first_oh;
+ tmp_offheap.overhead = 0;
+ (*func)(&tmp_offheap, arg);
+ list->dbterm.first_oh = tmp_offheap.first;
list = list->next;
}
}
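The db_create_hash() hunk above is where the new {read_concurrency, true} table option (parsed as am_read_concurrency in ets_new_2 earlier in this diff) takes effect: unless the table is private, the option sets DB_FREQ_READ in the table status, and both the table rwlock (db_init_lock) and the fine-grained bucket locks then request the frequent-read rwlock variant. A minimal sketch of the lock-type selection, using only calls that appear in the hunks above:

    /* Sketch: choose the rwlock flavour from the table status. */
    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
    if (tb->common.type & DB_FREQ_READ)         /* {read_concurrency, true} */
        rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
    erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
                              "db_hash_slot", make_small(i));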
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index b6b3cabafe..5644e85f97 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1081,11 +1081,12 @@ static int db_select_continue_tree(Process *p,
static int db_select_tree(Process *p, DbTable *tbl,
Eterm pattern, int reverse, Eterm *ret)
{
+ /* Strategy: Traverse backwards to build resulting list from tail to head */
DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1293,7 +1294,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_count_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1395,7 +1396,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1636,7 +1637,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
DbTableTree *tb = &tbl->tree;
struct select_delete_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1817,10 +1818,14 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
void (*func)(ErlOffHeap *, void *),
void * arg)
{
+ ErlOffHeap tmp_offheap;
if(!tdbt)
return;
do_db_tree_foreach_offheap(tdbt->left, func, arg);
- (*func)(&(tdbt->dbterm.off_heap), arg);
+ tmp_offheap.first = tdbt->dbterm.first_oh;
+ tmp_offheap.overhead = 0;
+ (*func)(&tmp_offheap, arg);
+ tdbt->dbterm.first_oh = tmp_offheap.first;
do_db_tree_foreach_offheap(tdbt->right, func, arg);
}
@@ -2574,6 +2579,7 @@ static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle
static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
{
if (handle->mustResize) {
+ ErlOffHeap tmp_offheap;
Eterm* top;
Eterm copy;
DbTerm* newDbTerm;
@@ -2588,18 +2594,15 @@ static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
- newDbTerm->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
/* make a flat copy */
top = DBTERM_BUF(newDbTerm);
copy = copy_struct(make_tuple(handle->dbterm->tpl),
handle->new_size,
- &top, &newDbTerm->off_heap);
+ &top, &tmp_offheap);
+ newDbTerm->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
db_free_term_data(handle->dbterm);
@@ -2628,7 +2631,7 @@ static void traverse_backwards(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
@@ -2666,7 +2669,7 @@ static void traverse_forward(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 93a47eb76f..2f34561234 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -363,12 +363,7 @@ static ErtsMatchPseudoProcess *match_pseudo_process;
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
{
- if (mpsp->process.mbuf
- || mpsp->process.off_heap.mso
-#ifndef HYBRID /* FIND ME! */
- || mpsp->process.off_heap.funs
-#endif
- || mpsp->process.off_heap.externals) {
+ if (mpsp->process.mbuf || mpsp->process.off_heap.first) {
erts_cleanup_empty_process(&mpsp->process);
}
#ifdef DEBUG
@@ -1738,8 +1733,7 @@ restart:
FAIL();
ep = termp;
break;
- case matchArrayBind: /* When the array size is unknown. */ /* XXX:PaN - where does
- this array come from? */
+ case matchArrayBind: /* When the array size is unknown. */
n = *pc++;
hp[n] = dpm_array_to_list(psp, termp, arity);
break;
@@ -2457,9 +2451,13 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
DbTerm* p;
Eterm copy;
Eterm *top;
+ ErlOffHeap tmp_offheap;
if (old != 0) {
- erts_cleanup_offheap(&old->off_heap);
+ tmp_offheap.first = old->first_oh;
+ tmp_offheap.overhead = 0;
+ erts_cleanup_offheap(&tmp_offheap);
+ old->first_oh = tmp_offheap.first;
if (size == old->size) {
p = old;
} else {
@@ -2495,15 +2493,12 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
p = (DbTerm*) ((void *)(((char *) structp) + offset));
}
p->size = size;
- p->off_heap.mso = NULL;
- p->off_heap.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
top = DBTERM_BUF(p);
- copy = copy_struct(obj, size, &top, &p->off_heap);
+ copy = copy_struct(obj, size, &top, &tmp_offheap);
+ p->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(p,tuple_val(copy));
return structp;
@@ -2512,7 +2507,10 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
void db_free_term_data(DbTerm* p)
{
- erts_cleanup_offheap(&p->off_heap);
+ ErlOffHeap tmp_offheap;
+ tmp_offheap.first = p->first_oh;
+ tmp_offheap.overhead = 0;
+ erts_cleanup_offheap(&tmp_offheap);
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 382e5dceb5..0f333e8b34 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -57,7 +57,7 @@
* A datatype for a database entry stored out of a process heap
*/
typedef struct db_term {
- ErlOffHeap off_heap; /* Off heap data for term. */
+ struct erl_off_heap_header* first_oh; /* Off heap data for term. */
Uint size; /* Size of term in "words" */
Eterm tpl[1]; /* Untagged "constant pointer" to top tuple */
/* (assumed to be first in buffer) */
@@ -240,8 +240,9 @@ typedef struct db_table_common {
#define DB_DUPLICATE_BAG (1 << 8)
#define DB_ORDERED_SET (1 << 9)
#define DB_DELETE (1 << 10) /* table is being deleted */
+#define DB_FREQ_READ (1 << 11)
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
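The DbTerm change above shrinks each stored term's header from a full ErlOffHeap to just the list head (first_oh), since overhead accounting now lives with the owning process. Code that still needs an ErlOffHeap* (erts_cleanup_offheap, copy_struct, the foreach callbacks) wraps the head in a temporary on the stack and writes the possibly updated head back, as in db_get_term() and db_foreach_offheap_hash() above. A minimal sketch of that wrapper pattern:

    /* Sketch: adopt the term's off-heap list, run a helper, write back. */
    ErlOffHeap tmp_offheap;
    tmp_offheap.first = dbterm->first_oh;   /* list head stored in the term  */
    tmp_offheap.overhead = 0;               /* per-term overhead not tracked */
    erts_cleanup_offheap(&tmp_offheap);     /* or copy_struct()/a callback   */
    dbterm->first_oh = tmp_offheap.first;   /* helper may have relinked it   */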
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index aa37edafd1..d42820ddf3 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -186,10 +186,9 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_mutex_trylock()");
- return res;
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_trylock()");
+ return ethr_mutex_trylock(&dmtx->mtx);
#else
return 0;
#endif
@@ -199,9 +198,9 @@ void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_lock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_lock()");
+ ethr_mutex_lock(&dmtx->mtx);
#endif
}
@@ -209,9 +208,9 @@ void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_unlock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+ ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -256,9 +255,9 @@ void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_signal()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_signal()");
+ ethr_cond_signal(&dcnd->cnd);
#endif
}
@@ -266,9 +265,9 @@ void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_broadcast()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_broadcast()");
+ ethr_cond_broadcast(&dcnd->cnd);
#endif
}
@@ -277,18 +276,13 @@ void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res;
if (!dcnd || !dmtx) {
- res = EINVAL;
- error:
- fatal_error(res, "erl_drv_cond_wait()");
+ fatal_error(EINVAL, "erl_drv_cond_wait()");
}
while (1) {
- res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
if (res == 0)
break;
- if (res != EINTR)
- goto error;
}
#endif
}
@@ -333,10 +327,9 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
+ return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -346,9 +339,9 @@ void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+ ethr_rwmutex_rlock(&drwlck->rwmtx);
#endif
}
@@ -356,9 +349,9 @@ void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_runlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -366,10 +359,9 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrwlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
+ return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -379,9 +371,9 @@ void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+ ethr_rwmutex_rwlock(&drwlck->rwmtx);
#endif
}
@@ -389,9 +381,9 @@ void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwunlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
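The erl_drv_* wrappers above used to fetch an error code from the ethr layer and call fatal_error() themselves; after this change the ethr functions are assumed to abort internally on real errors, so the wrappers only guard against a NULL handle and pass the call through. For the try-lock variants the caller-visible contract stays the same (0 on success, EBUSY when the lock is already held). A short usage sketch:

    /* Sketch: trylock-then-block on a driver mutex. */
    if (erl_drv_mutex_trylock(dmtx) == EBUSY) {
        /* contended: fall back to a blocking lock */
        erl_drv_mutex_lock(dmtx);
    }
    /* ... critical section ... */
    erl_drv_mutex_unlock(dmtx);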
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 15d9538301..5dce5ad262 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -37,8 +37,6 @@ static erts_smp_rwmtx_t erts_fun_table_lock;
#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
-#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
- "fun_tab")
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -57,8 +55,12 @@ void
erts_init_fun_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
- erts_fun_init_lock();
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
@@ -192,20 +194,6 @@ erts_erase_fun_entry(ErlFunEntry* fe)
erts_fun_write_unlock();
}
-#ifndef HYBRID /* FIND ME! */
-void
-erts_cleanup_funs(ErlFunThing* funp)
-{
- while (funp) {
- ErlFunEntry* fe = funp->fe;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- funp = funp->next;
- }
-}
-#endif
-
void
erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end)
{
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 944d4b3df5..2f165afa06 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -53,10 +53,10 @@ typedef struct erl_fun_entry {
typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
+ ErlFunEntry* fe; /* Pointer to fun entry. */
#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* next; /* Next fun in mso list. */
+ struct erl_off_heap_header* next;
#endif
- ErlFunEntry* fe; /* Pointer to fun entry. */
#ifdef HIPE
UWord* native_address; /* Native code for the fun. */
#endif
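Moving fe before next in ErlFunThing above is part of a wider layout change in this commit: ProcBin, ErlFunThing and ExternalThing are arranged so that thing_word, size (where relevant) and next sit at the same offsets, letting them share one process-wide off-heap list (MSO(p).first) walked by the new sweep_off_heap() in erl_gc.c below; the offsetof() asserts added to erts_init_gc() check exactly that. Roughly, the common header implied by those asserts looks like this (the real definition lives in erl_message.h, changed elsewhere in this diff):

    /* Approximate shape of the shared off-heap header; every off-heap
     * object starts this way so one list can hold all three kinds. */
    struct erl_off_heap_header {
        Eterm thing_word;                    /* header word / subtag        */
        Uint  size;                          /* meaningful for binaries     */
        struct erl_off_heap_header* next;    /* next object in the MSO list */
    };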
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a19e090f1e..0f4d2a2ef9 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -110,9 +110,7 @@ static Uint adjust_after_fullsweep(Process *p, int size_before,
int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
-static void sweep_proc_bins(Process *p, int fullsweep);
-static void sweep_proc_funs(Process *p, int fullsweep);
-static void sweep_proc_externals(Process *p, int fullsweep);
+static void sweep_off_heap(Process *p, int fullsweep);
static void offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
@@ -145,6 +143,16 @@ erts_init_gc(void)
{
int i = 0;
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+
erts_smp_spinlock_init(&info_lck, "gc_info");
garbage_cols = 0;
reclaimed = 0;
@@ -286,25 +294,14 @@ erts_offset_heap_ptr(Eterm* hp, Uint sz, Sint offs,
offset_heap_ptr(hp, sz, offs, (char *) low, ((char *)high)-((char *)low));
}
+
#define ptr_within(ptr, low, high) ((ptr) < (high) && (ptr) >= (low))
void
erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
{
- if (ohp->mso && ptr_within((Eterm *)ohp->mso, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->mso;
- *uptr += offs;
- }
-
-#ifndef HYBRID /* FIND ME! */
- if (ohp->funs && ptr_within((Eterm *)ohp->funs, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->funs;
- *uptr += offs;
- }
-#endif
-
- if (ohp->externals && ptr_within((Eterm *)ohp->externals, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->externals;
+ if (ohp->first && ptr_within((Eterm *)ohp->first, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->first;
*uptr += offs;
}
}
@@ -504,14 +501,8 @@ erts_garbage_collect_hibernate(Process* p)
cleanup_rootset(&rootset);
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
/*
@@ -752,7 +743,10 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* is large enough.
*/
- if (OLD_HEAP(p) && mature <= OLD_HEND(p) - OLD_HTOP(p)) {
+ if (OLD_HEAP(p) &&
+ ((mature <= OLD_HEND(p) - OLD_HTOP(p)) &&
+ ((BIN_VHEAP_MATURE(p) < ( BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)))) &&
+ ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
ErlMessage *msgp;
Uint size_after;
Uint need_after;
@@ -1041,15 +1035,8 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
OLD_HTOP(p) = old_htop;
HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
- if (MSO(p).mso) {
- sweep_proc_bins(p, 0);
- }
-
- if (MSO(p).funs) {
- sweep_proc_funs(p, 0);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 0);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 0);
}
#ifdef HARDDEBUG
@@ -1271,17 +1258,11 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
}
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
- if (OLD_HEAP(p) != NULL) {
+ if (OLD_HEAP(p) != NULL) {
ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
OLD_HEAP(p),
(OLD_HEND(p) - OLD_HEAP(p)) * sizeof(Eterm));
@@ -1305,6 +1286,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
HIGH_WATER(p) = HEAP_TOP(p);
ErtsGcQuickSanityCheck(p);
+
/*
* Copy newly received message onto the end of the new heap.
*/
@@ -2003,8 +1985,8 @@ shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
HEAP_SIZE(p) = new_sz;
}
-static Uint
-do_next_vheap_size(Uint vheap, Uint vheap_sz) {
+static Uint64
+do_next_vheap_size(Uint64 vheap, Uint64 vheap_sz) {
/* grow
*
@@ -2021,131 +2003,53 @@ do_next_vheap_size(Uint vheap, Uint vheap_sz) {
* ----------------------
*/
- if (vheap > (Uint) (vheap_sz*3/4)) {
+ if ((Uint64) vheap/3 > (Uint64) (vheap_sz/4)) {
+ Uint64 new_vheap_sz = vheap_sz;
- while(vheap > (Uint) (vheap_sz*3/4)) {
- vheap_sz = vheap_sz*2;
+ while((Uint64) vheap/3 > (Uint64) (vheap_sz/4)) {
+ /* the golden ratio = 1.618 */
+ new_vheap_sz = (Uint64) vheap_sz * 1.618;
+ if (new_vheap_sz < vheap_sz ) {
+ return vheap_sz;
+ }
+ vheap_sz = new_vheap_sz;
}
- return erts_next_heap_size(vheap_sz, 0);
+ return vheap_sz;
}
- if (vheap < (Uint) (vheap_sz/4)) {
- return erts_next_heap_size((Uint) (vheap_sz / 2), 0);
+ if (vheap < (Uint64) (vheap_sz/4)) {
+ return (vheap_sz >> 1);
}
return vheap_sz;
}
-static Uint
-next_vheap_size(Process* p, Uint vheap, Uint vheap_sz) {
- vheap_sz = do_next_vheap_size(vheap, vheap_sz);
- return vheap_sz < p->min_vheap_size ? p->min_vheap_size : vheap_sz;
-}
-
-static void
-sweep_proc_externals(Process *p, int fullsweep)
-{
- ExternalThing** prev;
- ExternalThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).externals;
- ptr = MSO(p).externals;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- ExternalThing* ro = external_thing_ptr(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- erts_deref_node_entry(ptr->node);
- *prev = ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
-}
-
-static void
-sweep_proc_funs(Process *p, int fullsweep)
-{
- ErlFunThing** prev;
- ErlFunThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).funs;
- ptr = MSO(p).funs;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- ErlFunThing* ro = (ErlFunThing *) fun_val(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- ErlFunEntry* fe = ptr->fe;
-
- *prev = ptr = ptr->next;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- }
- }
- ASSERT(*prev == NULL);
+static Uint64
+next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz) {
+ Uint64 new_vheap_sz = do_next_vheap_size(vheap, vheap_sz);
+ return new_vheap_sz < p->min_vheap_size ? p->min_vheap_size : new_vheap_sz;
}
struct shrink_cand_data {
- ProcBin* new_candidates;
- ProcBin* new_candidates_end;
- ProcBin* old_candidates;
+ struct erl_off_heap_header* new_candidates;
+ struct erl_off_heap_header* new_candidates_end;
+ struct erl_off_heap_header* old_candidates;
Uint no_of_candidates;
Uint no_of_active;
};
static ERTS_INLINE void
link_live_proc_bin(struct shrink_cand_data *shrink,
- ProcBin ***prevppp,
- ProcBin **pbpp,
+ struct erl_off_heap_header*** prevppp,
+ struct erl_off_heap_header** currpp,
int new_heap)
{
- ProcBin *pbp = *pbpp;
-
- *pbpp = pbp->next;
+ ProcBin *pbp = (ProcBin*) *currpp;
+ ASSERT(**prevppp == *currpp);
+ *currpp = pbp->next;
if (pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)) {
ASSERT(((pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
== (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
@@ -2162,15 +2066,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
if (unused >= 8) { /* A shrink candidate; save in candidate list */
+ **prevppp = pbp->next;
if (new_heap) {
if (!shrink->new_candidates)
- shrink->new_candidates_end = pbp;
+ shrink->new_candidates_end = (struct erl_off_heap_header*)pbp;
pbp->next = shrink->new_candidates;
- shrink->new_candidates = pbp;
+ shrink->new_candidates = (struct erl_off_heap_header*)pbp;
}
else {
pbp->next = shrink->old_candidates;
- shrink->old_candidates = pbp;
+ shrink->old_candidates = (struct erl_off_heap_header*)pbp;
}
shrink->no_of_candidates++;
return;
@@ -2178,83 +2083,117 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
}
}
- /* Not a shrink candidate; keep in original mso list */
- **prevppp = pbp;
+ /* Not a shrink candidate; keep in original mso list */
*prevppp = &pbp->next;
-
}
-static void
-sweep_proc_bins(Process *p, int fullsweep)
+static void
+sweep_off_heap(Process *p, int fullsweep)
{
struct shrink_cand_data shrink = {0};
- ProcBin** prev;
- ProcBin* ptr;
- Binary* bptr;
- char* oh = NULL;
- Uint oh_size = 0;
- Uint bin_vheap = 0;
+ struct erl_off_heap_header* ptr;
+ struct erl_off_heap_header** prev;
+ char* oheap = NULL;
+ Uint oheap_sz = 0;
+ Uint64 bin_vheap = 0;
+#ifdef DEBUG
+ int seen_mature = 0;
+#endif
if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
+ oheap = (char *) OLD_HEAP(p);
+ oheap_sz = (char *) OLD_HEND(p) - oheap;
}
BIN_OLD_VHEAP(p) = 0;
- prev = &MSO(p).mso;
- ptr = MSO(p).mso;
+ prev = &MSO(p).first;
+ ptr = MSO(p).first;
- /*
- * Note: In R7 we no longer force a fullsweep when we find binaries
- * on the old heap. The reason is that with the introduction of the
- * bit syntax we can expect binaries to be used a lot more. Note that
- * in earlier releases a brand new binary (or any other term) could
- * be put on the old heap during a gen-gc fullsweep, but this is
- * no longer the case in R7.
+ /* First part of the list will reside on the (old) new-heap.
+ * Keep if moved, otherwise deref.
*/
while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- bin_vheap += ptr->size / sizeof(Eterm);
- ptr = (ProcBin*) binary_val(*ppt);
- link_live_proc_bin(&shrink,
- &prev,
- &ptr,
- !in_area(ptr, oh, oh_size));
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
- link_live_proc_bin(&shrink, &prev, &ptr, 0);
- } else { /* Object has not been moved - deref it */
-
- *prev = ptr->next;
- bptr = ptr->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free(bptr);
- ptr = *prev;
- }
+ if (IS_MOVED_BOXED(ptr->thing_word)) {
+ ASSERT(!in_area(ptr, oheap, oheap_sz));
+ *prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ int to_new_heap = !in_area(ptr, oheap, oheap_sz);
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ if (to_new_heap) {
+ bin_vheap += ptr->size / sizeof(Eterm);
+ } else {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ }
+ link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
+ }
+ else {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
+ }
+ else if (!in_area(ptr, oheap, oheap_sz)) {
+ /* garbage */
+ switch (thing_subtag(ptr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ Binary* bptr = ((ProcBin*)ptr)->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ break;
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ break;
+ }
+ default:
+ ASSERT(is_external_header(ptr->thing_word));
+ erts_deref_node_entry(((ExternalThing*)ptr)->node);
+ }
+ *prev = ptr = ptr->next;
+ }
+ else break; /* and let old-heap loop continue */
}
- if (BIN_OLD_VHEAP(p) >= BIN_OLD_VHEAP_SZ(p)) {
- FLAGS(p) |= F_NEED_FULLSWEEP;
+ /* The rest of the list resides on old-heap, and we just did a
+ * generational collection - keep objects in list.
+ */
+ while (ptr) {
+ ASSERT(in_area(ptr, oheap, oheap_sz));
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ link_live_proc_bin(&shrink, &prev, &ptr, 0);
+ }
+ else {
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
}
- BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
- BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, BIN_OLD_VHEAP(p), BIN_OLD_VHEAP_SZ(p));
- MSO(p).overhead = bin_vheap;
+ if (fullsweep) {
+ BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, BIN_OLD_VHEAP(p) + MSO(p).overhead, BIN_OLD_VHEAP_SZ(p));
+ }
+ BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
+ MSO(p).overhead = bin_vheap;
+ BIN_VHEAP_MATURE(p) = bin_vheap;
/*
* If we got any shrink candidates, check them out.
*/
if (shrink.no_of_candidates) {
- ProcBin *candlist[] = {shrink.new_candidates, shrink.old_candidates};
+ ProcBin *candlist[] = { (ProcBin*)shrink.new_candidates,
+ (ProcBin*)shrink.old_candidates };
Uint leave_unused = 0;
int i;
@@ -2266,21 +2205,21 @@ sweep_proc_bins(Process *p, int fullsweep)
}
for (i = 0; i < sizeof(candlist)/sizeof(candlist[0]); i++) {
-
- for (ptr = candlist[i]; ptr; ptr = ptr->next) {
- Uint new_size = ptr->size;
+ ProcBin* pb;
+ for (pb = candlist[i]; pb; pb = (ProcBin*)pb->next) {
+ Uint new_size = pb->size;
if (leave_unused) {
new_size += (new_size * 100) / leave_unused;
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
- if (new_size + 8 >= ptr->val->orig_size)
+ if (new_size + 8 >= pb->val->orig_size)
continue;
}
- ptr->val = erts_bin_realloc(ptr->val, new_size);
- ptr->val->orig_size = new_size;
- ptr->bytes = (byte *) ptr->val->orig_bytes;
+ pb->val = erts_bin_realloc(pb->val, new_size);
+ pb->val->orig_size = new_size;
+ pb->bytes = (byte *) pb->val->orig_bytes;
}
}
@@ -2289,21 +2228,20 @@ sweep_proc_bins(Process *p, int fullsweep)
* We now potentially have the mso list divided into three lists:
* - shrink candidates on new heap (inactive writable with unused data)
* - shrink candidates on old heap (inactive writable with unused data)
- * - other binaries (read only + active writable ...)
+ * - other binaries (read only + active writable ...) + funs and externals
*
* Put them back together: new candidates -> other -> old candidates
* This order will ensure that the list only refers from new
* generation to old and never from old to new *which is important*.
*/
if (shrink.new_candidates) {
- if (prev == &MSO(p).mso) /* empty other binaries list */
+ if (prev == &MSO(p).first) /* empty other binaries list */
prev = &shrink.new_candidates_end->next;
else
- shrink.new_candidates_end->next = MSO(p).mso;
- MSO(p).mso = shrink.new_candidates;
+ shrink.new_candidates_end->next = MSO(p).first;
+ MSO(p).first = shrink.new_candidates;
}
}
-
*prev = shrink.old_candidates;
}
@@ -2334,15 +2272,17 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
tari = thing_arityval(val);
switch (thing_subtag(val)) {
case REFC_BINARY_SUBTAG:
+ case FUN_SUBTAG:
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
{
- ProcBin* pb = (ProcBin*) hp;
- Eterm** uptr = (Eterm **) (void *) &pb->next;
+ struct erl_off_heap_header* oh = (struct erl_off_heap_header*) hp;
- if (*uptr && in_area((Eterm *)pb->next, area, area_size)) {
+ if (in_area(oh->next, area, area_size)) {
+ Eterm** uptr = (Eterm **) (void *) &oh->next;
*uptr += offs; /* Patch the mso chain */
}
- sz -= tari;
- hp += tari + 1;
}
break;
case BIN_MATCHSTATE_SUBTAG:
@@ -2353,40 +2293,11 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
mb->orig = offset_ptr(mb->orig, offs);
mb->base = binary_bytes(mb->orig);
}
- sz -= tari;
- hp += tari + 1;
}
break;
- case FUN_SUBTAG:
- {
- ErlFunThing* funp = (ErlFunThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &funp->next;
-
- if (*uptr && in_area((Eterm *)funp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- {
- ExternalThing* etp = (ExternalThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &etp->next;
-
- if (*uptr && in_area((Eterm *)etp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- default:
- sz -= tari;
- hp += tari + 1;
}
+ sz -= tari;
+ hp += tari + 1;
break;
}
default:
@@ -2423,18 +2334,8 @@ offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
static void
offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
{
- if (MSO(p).mso && in_area((Eterm *)MSO(p).mso, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).mso;
- *uptr += offs;
- }
-
- if (MSO(p).funs && in_area((Eterm *)MSO(p).funs, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).funs;
- *uptr += offs;
- }
-
- if (MSO(p).externals && in_area((Eterm *)MSO(p).externals, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).externals;
+ if (MSO(p).first && in_area((Eterm *)MSO(p).first, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).first;
*uptr += offs;
}
}
@@ -2555,8 +2456,8 @@ do { \
__FILE__, __LINE__, #EXP); \
} while (0)
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
-# define ERTS_EXTERNAL_VISITED_BIT ((Eterm) 1 << 31)
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+# define ERTS_OFFHEAP_VISITED_BIT ((Eterm) 1 << 31)
#endif
@@ -2566,62 +2467,45 @@ erts_check_off_heap2(Process *p, Eterm *htop)
Eterm *oheap = (Eterm *) OLD_HEAP(p);
Eterm *ohtop = (Eterm *) OLD_HTOP(p);
int old;
- ProcBin *pb;
- ErlFunThing *eft;
- ExternalThing *et;
+ union erl_off_heap_ptr u;
old = 0;
- for (pb = MSO(p).mso; pb; pb = pb->next) {
- Eterm *ptr = (Eterm *) pb;
- long refc = erts_refc_read(&pb->val->refc, 1);
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
+ long refc;
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ refc = erts_refc_read(&u.pb->val->refc, 1);
+ break;
+ case FUN_SUBTAG:
+ refc = erts_refc_read(&u.fun->fe->refc, 1);
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ refc = erts_refc_read(&u.ext->node->refc, 1);
+ break;
+ default:
+ ASSERT(!!"erts_check_off_heap2: Invalid thing_word");
+ }
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+ ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
+ u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
+#endif
if (old) {
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= u.ep && u.ep < ohtop);
}
- else if (oheap <= ptr && ptr < ohtop)
+ else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
}
}
- old = 0;
- for (eft = MSO(p).funs; eft; eft = eft->next) {
- Eterm *ptr = (Eterm *) eft;
- long refc = erts_refc_read(&eft->fe->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
- }
-
- old = 0;
- for (et = MSO(p).externals; et; et = et->next) {
- Eterm *ptr = (Eterm *) et;
- long refc = erts_refc_read(&et->node->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- ERTS_CHK_OFFHEAP_ASSERT(!(et->header & ERTS_EXTERNAL_VISITED_BIT));
-#endif
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- et->header |= ERTS_EXTERNAL_VISITED_BIT;
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
+ u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
#endif
- }
-
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- for (et = MSO(p).externals; et; et = et->next)
- et->header &= ~ERTS_EXTERNAL_VISITED_BIT;
-#endif
-
}
void
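Two things change in the binary virtual-heap sizing above: growth is now geometric by roughly the golden ratio instead of doubling, and the trigger test is rewritten from vheap > vheap_sz*3/4 to vheap/3 > vheap_sz/4, which fires at the same ~75% utilization but cannot overflow now that the counters are Uint64; minor collections additionally require headroom on the binary old-vheap before being attempted. A small worked example of the growth loop (numbers invented for illustration):

    Uint64 vheap = 800, vheap_sz = 1000;              /* invented numbers   */
    /* 800/3 = 266 > 1000/4 = 250 -> grow once: 1000 * 1.618 = 1618         */
    /* 800/3 = 266 > 1618/4 = 404 is false     -> loop ends                 */
    vheap_sz = do_next_vheap_size(vheap, vheap_sz);   /* result: 1618,      */
                                                      /* clamped from below */
                                                      /* by min_vheap_size  */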
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4a4507b212..14bd10b42c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,6 +78,7 @@ static erts_tid_t main_thread;
erts_cpu_info_t *erts_cpuinfo;
+int erts_reader_groups;
int erts_use_sender_punish;
/*
@@ -110,6 +111,7 @@ int erts_compat_rel;
static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
+static int max_reader_groups;
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -505,6 +507,7 @@ void erts_usage(void)
ERTS_MIN_COMPAT_REL, this_rel_num());
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -538,6 +541,50 @@ void erts_usage(void)
erl_exit(-1, "");
}
+#ifdef USE_THREADS
+/*
+ * allocators for thread lib
+ */
+
+static void *ethr_std_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
+}
+static void *ethr_std_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
+}
+static void ethr_std_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_STD, ptr);
+}
+static void *ethr_sl_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
+}
+static void *ethr_sl_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
+}
+static void ethr_sl_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_SL, ptr);
+}
+static void *ethr_ll_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
+}
+static void *ethr_ll_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
+}
+static void ethr_ll_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_LL, ptr);
+}
+
+#endif
+
static void
early_init(int *argc, char **argv) /*
* Only put things here which are
@@ -615,9 +662,15 @@ early_init(int *argc, char **argv) /*
? ncpuavail
: (ncpuonln > 0 ? ncpuonln : no_schedulers));
+#ifdef ERTS_SMP
+ erts_max_main_threads = no_schedulers_online;
+#endif
+
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ max_reader_groups = ERTS_MAX_READER_GROUPS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -627,6 +680,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ char *arg = get_arg(sub_param+1, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_reader_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_reader_groups < 0) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %d\n",
+ max_reader_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -699,6 +770,36 @@ early_init(int *argc, char **argv) /*
erts_early_init_scheduling(); /* Require allocators */
erts_init_utils(); /* Require allocators */
+#ifdef USE_THREADS
+ {
+ erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
+ elid.mem.std.alloc = ethr_std_alloc;
+ elid.mem.std.realloc = ethr_std_realloc;
+ elid.mem.std.free = ethr_std_free;
+ elid.mem.sl.alloc = ethr_sl_alloc;
+ elid.mem.sl.realloc = ethr_sl_realloc;
+ elid.mem.sl.free = ethr_sl_free;
+ elid.mem.ll.alloc = ethr_ll_alloc;
+ elid.mem.ll.realloc = ethr_ll_realloc;
+ elid.mem.ll.free = ethr_ll_free;
+
+#ifdef ERTS_SMP
+ elid.main_threads = erts_max_main_threads;
+#else
+ elid.main_threads = 1;
+#endif
+ elid.reader_groups = (elid.main_threads > 1
+ ? elid.main_threads
+ : 0);
+ if (max_reader_groups <= 1)
+ elid.reader_groups = 0;
+ if (elid.reader_groups > max_reader_groups)
+ elid.reader_groups = max_reader_groups;
+ erts_reader_groups = elid.reader_groups;
+
+ erts_thr_late_init(&elid);
+ }
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
@@ -1193,9 +1294,17 @@ erl_start(int argc, char **argv)
erts_async_thread_suggested_stack_size));
break;
- case 'r':
- erts_ets_realloc_always_moves = 1;
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ get_arg(sub_param+1, argv[i+1], &i);
+ /* already handled */
+ }
+ else {
+ erts_ets_realloc_always_moves = 1;
+ }
break;
+ }
case 'n': /* XXX obsolete */
break;
case 'c':
@@ -1280,6 +1389,7 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
+ erts_thr_set_main_status(1, 1);
set_main_stack_size();
process_main();
#endif
@@ -1353,7 +1463,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
+#if defined(USE_THREADS)
exit_async();
#endif
#if HAVE_ERTS_MSEG
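The new erts_reader_groups setting introduced above bounds how many reader groups the frequent-read rwlocks spread their reader counters over: more groups means less reader contention but more memory and costlier write-locking. It defaults to the number of online schedulers, capped by the limit given with the -rg argument (typically written +rg on the erl command line, per the usage text above); a limit of 0 or 1 effectively disables the grouping. A minimal sketch of the clamping done in early_init() above:

    /* Sketch of the reader-group selection in early_init(). */
    int groups = no_schedulers_online > 1 ? no_schedulers_online : 0;
    if (max_reader_groups <= 1)
        groups = 0;                    /* grouping disabled                 */
    if (groups > max_reader_groups)
        groups = max_reader_groups;    /* honour the -rg limit              */
    erts_reader_groups = groups;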
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index cee470ae37..d6138fa4e4 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -96,10 +96,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_status", "pid" },
{ "proc_tab", NULL },
{ "ports_snapshot", NULL },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
{ "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -119,9 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "child_status", NULL },
#endif
#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
+ { "sys_driver_data_lock", NULL },
#endif
- { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
@@ -153,6 +153,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "instr", NULL },
{ "fix_alloc", "index" },
{ "alcu_allocator", "index" },
+ { "alcu_delayed_free", "index" },
{ "mseg", NULL },
#ifdef HALFWORD_HEAP
{ "pmmap", NULL },
@@ -177,17 +178,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
+ { "run_queue_sleep_list", "address" },
#endif
{ "alloc_thr_ix_lock", NULL },
#ifdef ERTS_SMP
- { "proc_lck_wtr_alloc", NULL },
+ { "proc_lck_qs_alloc", NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
- { "mtrace_buf", NULL }
+ { "mtrace_buf", NULL },
+ { "erts_alloc_hard_debug", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -199,6 +202,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
& ERTS_LC_FLG_LT_ALL \
& ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+static __decl_noreturn void __noreturn lc_abort(void);
+
static char *
lock_type(Uint16 flags)
{
@@ -222,7 +227,7 @@ rw_op_str(Uint16 flags)
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
erts_fprintf(stderr, "\nInternal error\n");
- abort();
+ lc_abort();
default:
break;
}
@@ -271,28 +276,18 @@ static erts_lc_free_block_t *free_blocks;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
-#define ERTS_LC_LOCK ethr_spin_lock
-#define ERTS_LC_UNLOCK ethr_spin_unlock
-#else
-static ethr_mutex free_blocks_lock;
-#define ERTS_LC_LOCK ethr_mutex_lock
-#define ERTS_LC_UNLOCK ethr_mutex_unlock
-#endif
static ERTS_INLINE void
lc_lock(void)
{
- if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_lock(&free_blocks_lock);
}
static ERTS_INLINE void
lc_unlock(void)
{
- if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_unlock(&free_blocks_lock);
}
static ERTS_INLINE void lc_free(void *p)
@@ -313,7 +308,7 @@ static void *lc_core_alloc(void)
{
lc_unlock();
erts_fprintf(stderr, "Lock checker out of memory!\n");
- abort();
+ lc_abort();
}
#else
@@ -327,7 +322,7 @@ static void *lc_core_alloc(void)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- abort();
+ lc_abort();
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -367,11 +362,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- abort();
+ lc_abort();
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
@@ -513,7 +508,7 @@ uninitialized_lock(void)
{
erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
static void
@@ -523,7 +518,7 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -533,7 +528,7 @@ unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -541,7 +536,7 @@ unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -550,7 +545,7 @@ lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
print_lock("Lock order violation occured when locking ", lck, "!\n");
print_curr_locks(l_lcks);
print_lock_order();
- abort();
+ lc_abort();
}
static void
@@ -561,7 +556,7 @@ type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
print_lock(op, lck, "!\n");
ASSERT(l_lcks);
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -613,7 +608,7 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
}
}
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -621,7 +616,7 @@ unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -629,7 +624,7 @@ unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *l
{
print_lock("Unrequire on ", lck, " lock not required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -637,7 +632,7 @@ require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -645,7 +640,7 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
@@ -658,13 +653,23 @@ thread_exit_handler(void)
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
destroy_locked_locks(l_lcks);
/* erts_tsd_set(locks_key, NULL); */
}
}
+static __decl_noreturn void
+lc_abort(void)
+{
+#ifdef __WIN32__
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
void
erts_lc_set_thread_name(char *thread_name)
{
@@ -676,7 +681,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
}
}
@@ -686,7 +691,7 @@ erts_lc_assert_failed(char *file, int line, char *assertion)
erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
file, line, assertion);
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
return 0;
}
@@ -699,7 +704,7 @@ void erts_lc_fail(char *fmt, ...)
va_end(args);
erts_fprintf(stderr, "\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
@@ -719,7 +724,7 @@ erts_lc_get_lock_order_id(char *name)
"(update erl_lock_check.c)\n",
name);
}
- abort();
+ lc_abort();
return (Sint16) -1;
}
@@ -895,6 +900,25 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
}
+void
+erts_lc_check_no_locked_of_type(Uint16 flags)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
+ if (l_lck->flags & flags) {
+ erts_fprintf(stderr,
+ "Locked lock of type %s found which isn't "
+ "allowed here!\n",
+ lock_type(l_lck->flags));
+ print_curr_locks(l_lcks);
+ lc_abort();
+ }
+ }
+ }
+}
+
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
@@ -1234,7 +1258,6 @@ erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
lck->id = erts_lc_get_lock_order_id(name);
- /* XXX:PaN What to do with the extra information? */
lck->extra = make_boxed(&lck->extra);
lck->flags = flags;
lck->inited = ERTS_LC_INITITALIZED;
@@ -1283,13 +1306,8 @@ erts_lc_init(void)
free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-#ifdef ETHR_HAVE_NATIVE_LOCKS
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- abort();
-#else
- if (ethr_mutex_init(&free_blocks_lock) != 0)
- abort();
-#endif
+ lc_abort();
erts_tsd_key_create(&locks_key);
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d5e2ede9ac..0372e6850d 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -77,6 +77,7 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
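The new erts_lc_check_no_locked_of_type() declared above lets lock-checker builds assert that the calling thread holds no lock of a given type before entering a section where that would be illegal. A minimal sketch of a call site, assuming the ERTS_LC_FLG_LT_SPINLOCK flag already used in the lock-order table; the wrapper function itself is hypothetical:

    #ifdef ERTS_ENABLE_LOCK_CHECK
    /* Hypothetical call site: before a blocking operation, assert that
     * this thread holds no spinlocks (aborts via lc_abort() otherwise). */
    static void
    hypothetical_pre_block_check(void)
    {
        erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_SPINLOCK);
    }
    #endif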
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 26028aeefc..239773f366 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -166,8 +166,8 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
int i;
type = lcnt_lock_type(lock->flag);
- ethr_atomic_read(&lock->r_state, &r_state);
- ethr_atomic_read(&lock->w_state, &w_state);
+ r_state = ethr_atomic_read(&lock->r_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (lock->flag & flag) {
@@ -394,10 +394,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
ASSERT(eltd);
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (option & ERTS_LCNT_LO_WRITE) {
- ethr_atomic_read(&lock->r_state, &r_state);
+ r_state = ethr_atomic_read(&lock->r_state);
ethr_atomic_inc( &lock->w_state);
}
if (option & ERTS_LCNT_LO_READ) {
@@ -423,7 +423,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
eltd = lcnt_get_thread_data();
@@ -478,7 +478,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
}
@@ -522,12 +522,12 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
/* flowstate */
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
ethr_atomic_dec( &lock->flowstate);
/* write state */
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ASSERT(w_state > 0)
#endif
ethr_atomic_dec(&lock->w_state);
@@ -558,7 +558,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
if (res != EBUSY) {
#ifdef DEBUG
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
#endif
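The erl_lock_count.c hunks above all track one ethread API change: ethr_atomic_read() now returns the value instead of writing it through an out-parameter. A minimal sketch of the new calling convention, assuming the long-valued atomics used elsewhere in these hunks; the helper name is hypothetical:

    /* Hypothetical helper showing the new return-value convention. */
    static long
    hypothetical_read_w_state(erts_lcnt_lock_t *lock)
    {
        /* Old (removed) style: ethr_atomic_read(&lock->w_state, &w_state);
         * New (added) style: the value is the return value. */
        return ethr_atomic_read(&lock->w_state);
    }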
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index b63f3df7df..82f272d28a 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -30,6 +30,7 @@
#include "erl_message.h"
#include "erl_process.h"
#include "erl_nmgc.h"
+#include "erl_binary.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
ErlMessage,
@@ -164,16 +165,25 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
void
erts_cleanup_offheap(ErlOffHeap *offheap)
{
- if (offheap->mso) {
- erts_cleanup_mso(offheap->mso);
- }
-#ifndef HYBRID /* FIND ME! */
- if (offheap->funs) {
- erts_cleanup_funs(offheap->funs);
- }
-#endif
- if (offheap->externals) {
- erts_cleanup_externals(offheap->externals);
+ union erl_off_heap_ptr u;
+
+ for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
+ erts_bin_free(u.pb->val);
+ }
+ break;
+ case FUN_SUBTAG:
+ if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+ erts_erase_fun_entry(u.fun->fe);
+ }
+ break;
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ erts_deref_node_entry(u.ext->node);
+ break;
+ }
}
}
@@ -201,40 +211,16 @@ link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
MBUF_SIZE(proc) += bp->used_size;
FLAGS(proc) |= F_FORCE_GC;
- /* Move any binaries into the process */
- if (bp->off_heap.mso != NULL) {
- ProcBin** next_p = &bp->off_heap.mso;
+ /* Move any off_heap's into the process */
+ if (bp->off_heap.first != NULL) {
+ struct erl_off_heap_header** next_p = &bp->off_heap.first;
while (*next_p != NULL) {
next_p = &((*next_p)->next);
}
- *next_p = MSO(proc).mso;
- MSO(proc).mso = bp->off_heap.mso;
- bp->off_heap.mso = NULL;
- MSO(proc).overhead += bp->off_heap.overhead;
- }
-
- /* Move any funs into the process */
-#ifndef HYBRID
- if (bp->off_heap.funs != NULL) {
- ErlFunThing** next_p = &bp->off_heap.funs;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).funs;
- MSO(proc).funs = bp->off_heap.funs;
- bp->off_heap.funs = NULL;
- }
-#endif
-
- /* Move any external things into the process */
- if (bp->off_heap.externals != NULL) {
- ExternalThing** next_p = &bp->off_heap.externals;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).externals;
- MSO(proc).externals = bp->off_heap.externals;
- bp->off_heap.externals = NULL;
+ *next_p = MSO(proc).first;
+ MSO(proc).first = bp->off_heap.first;
+ bp->off_heap.first = NULL;
+ OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
}
}
}
@@ -506,19 +492,7 @@ erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
- /* Unions for typecasts avoids warnings about type-punned pointers and aliasing */
- union {
- Uint** upp;
- ProcBin **pbpp;
- ErlFunThing **efpp;
- ExternalThing **etpp;
- } oh_list_pp, oh_el_next_pp;
- union {
- Uint *up;
- ProcBin *pbp;
- ErlFunThing *efp;
- ExternalThing *etp;
- } oh_el_p;
+ struct erl_off_heap_header* oh;
Eterm term, token, *fhp, *hp;
Sint offs;
Uint sz;
@@ -561,7 +535,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
goto copy_done;
}
- off_heap->overhead += bp->off_heap.overhead;
+ OH_OVERHEAD(off_heap, bp->off_heap.overhead);
sz = bp->used_size;
ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
@@ -571,9 +545,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
hp = *hpp;
offs = hp - fhp;
- oh_list_pp.upp = NULL;
- oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
- oh_el_p.up = NULL; /* Shut up compiler warning */
+ oh = NULL;
while (sz--) {
Uint cpy_sz;
Eterm val = *fhp++;
@@ -593,25 +565,11 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case ARITYVAL_SUBTAG:
break;
case REFC_BINARY_SUBTAG:
- oh_list_pp.pbpp = &off_heap->mso;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case FUN_SUBTAG:
-#ifndef HYBRID
- oh_list_pp.efpp = &off_heap->funs;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
-#endif
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- oh_list_pp.etpp = &off_heap->externals;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.etpp = &(oh_el_p.etp)->next;
+ oh = (struct erl_off_heap_header*) (hp-1);
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
@@ -641,44 +599,13 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case 1: *hp++ = *fhp++;
default: break;
}
- if (oh_list_pp.upp) {
-#ifdef HARD_DEBUG
- Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
-#endif
+ if (oh) {
/* Add to offheap list */
- *oh_el_next_pp.upp = *oh_list_pp.upp;
- *oh_list_pp.upp = oh_el_p.up;
- ASSERT(*hpp <= oh_el_p.up);
- ASSERT(hp > oh_el_p.up);
-#ifdef HARD_DEBUG
- switch (val & _HEADER_SUBTAG_MASK) {
- case REFC_BINARY_SUBTAG:
- ASSERT(off_heap->mso == *oh_list_pp.pbpp);
- ASSERT(off_heap->mso->next
- == (ProcBin *) dbg_old_oh_list_p);
- break;
-#ifndef HYBRID
- case FUN_SUBTAG:
- ASSERT(off_heap->funs == *oh_list_pp.efpp);
- ASSERT(off_heap->funs->next
- == (ErlFunThing *) dbg_old_oh_list_p);
- break;
-#endif
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- ASSERT(off_heap->externals
- == *oh_list_pp.etpp);
- ASSERT(off_heap->externals->next
- == (ExternalThing *) dbg_old_oh_list_p);
- break;
- default:
- ASSERT(0);
- }
-#endif
- oh_list_pp.upp = NULL;
-
-
+ oh->next = off_heap->first;
+ off_heap->first = oh;
+ ASSERT(*hpp <= (Eterm*)oh);
+ ASSERT(hp > (Eterm*)oh);
+ oh = NULL;
}
break;
}
@@ -765,11 +692,7 @@ copy_done:
#endif
- bp->off_heap.mso = NULL;
-#ifndef HYBRID
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
free_message_buffer(bp);
msg->data.heap_frag = NULL;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index f478572ac2..5aca0db6fe 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -28,13 +28,22 @@ struct external_thing_;
* but is stored outside of any heap.
*/
-typedef struct erl_off_heap {
- struct proc_bin* mso; /* List of associated binaries. */
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs; /* List of funs. */
+struct erl_off_heap_header {
+ Eterm thing_word;
+ Uint size;
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
#endif
- struct external_thing_* externals; /* List of external things. */
- int overhead; /* Administrative overhead (used to force GC). */
+ struct erl_off_heap_header* next;
+};
+
+#define OH_OVERHEAD(oh, size) do { \
+ (oh)->overhead += size; \
+} while(0)
+
+typedef struct erl_off_heap {
+ struct erl_off_heap_header* first;
+ Uint64 overhead; /* Administrative overhead (used to force GC). */
} ErlOffHeap;
#include "external.h"
@@ -201,9 +210,7 @@ do { \
(HEAP_FRAG_P)->next = NULL; \
(HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
(HEAP_FRAG_P)->used_size = (DATA_WORDS); \
- (HEAP_FRAG_P)->off_heap.mso = NULL; \
- (HEAP_FRAG_P)->off_heap.funs = NULL; \
- (HEAP_FRAG_P)->off_heap.externals = NULL; \
+ (HEAP_FRAG_P)->off_heap.first = NULL; \
(HEAP_FRAG_P)->off_heap.overhead = 0; \
} while (0)
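erl_message.h now replaces the three per-type lists (mso, funs, externals) with a single chain of struct erl_off_heap_header reached via the first field, which is why erl_message.c above switches on thing_subtag() while walking one list. A minimal traversal sketch following that pattern; the counting helper is hypothetical:

    /* Hypothetical helper: count the off-heap things on one ErlOffHeap chain. */
    static Uint
    hypothetical_count_offheap(ErlOffHeap *oh)
    {
        Uint n = 0;
        struct erl_off_heap_header *hdr;
        for (hdr = oh->first; hdr; hdr = hdr->next) {
            /* Each entry starts with a tagged thing_word; REFC_BINARY_SUBTAG,
             * FUN_SUBTAG and the external subtags are the cases handled in
             * erts_cleanup_offheap() above. */
            n++;
        }
        return n;
    }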
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 9cf55ee319..b1478758a1 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -585,9 +585,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
Uint16 port;
erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_set_forksafe(&mtrace_buf_mutex);
erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
- erts_mtx_set_forksafe(&mtrace_op_mutex);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 3d63fa1caf..1dd9c8bd4a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -258,9 +258,7 @@ void enif_free_env(ErlNifEnv* env)
static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
{
- oh->mso = NULL;
- oh->externals = NULL;
- oh->funs = NULL;
+ oh->first = NULL;
oh->overhead = 0;
}
@@ -364,7 +362,7 @@ ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* oh)
{
- return oh->mso != NULL || oh->funs != NULL || oh->externals != NULL;
+ return oh->first != NULL;
}
#endif
@@ -627,13 +625,13 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = bptr->orig_size;
- pb->next = MSO(env->proc).mso;
- MSO(env->proc).mso = pb;
+ pb->next = MSO(env->proc).first;
+ MSO(env->proc).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(env->proc).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
bin_term = make_binary(pb);
if (erts_refc_read(&bptr->refc, 1) == 1) {
/* Total ownership transfer */
@@ -752,7 +750,19 @@ int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
#endif
}
-int enif_get_double(ErlNifEnv* env, Eterm term, double* dp)
+#if HAVE_INT64 && SIZEOF_LONG != 8
+int enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip)
+{
+ return term_to_Sint64(term, ip);
+}
+
+int enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip)
+{
+ return term_to_Uint64(term, ip);
+}
+#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
+
+int enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
{
FloatDef f;
if (is_not_float(term)) {
@@ -819,6 +829,26 @@ ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
}
+#if HAVE_INT64 && SIZEOF_LONG != 8
+ERL_NIF_TERM enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i)
+{
+ Uint* hp;
+ Uint need = 0;
+ erts_bld_sint64(NULL, &need, i);
+ hp = alloc_heap(env, need);
+ return erts_bld_sint64(&hp, NULL, i);
+}
+
+ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
+{
+ Uint* hp;
+ Uint need = 0;
+ erts_bld_uint64(NULL, &need, i);
+ hp = alloc_heap(env, need);
+ return erts_bld_uint64(&hp, NULL, i);
+}
+#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
+
ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
{
Eterm* hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 936f03bce1..ee3a7cd5f4 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -66,6 +66,19 @@
extern "C" {
#endif
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef unsigned __int64 ErlNifUInt64;
+typedef __int64 ErlNifSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlNifUInt64;
+typedef long ErlNifSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlNifUInt64;
+typedef long long ErlNifSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
#ifdef HALFWORD_HEAP_EMULATOR
typedef unsigned int ERL_NIF_TERM;
#else
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index ef4e9580b0..eca506593d 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -122,6 +122,12 @@ ERL_NIF_API_FUNC_DECL(ErlNifPid*,enif_self,(ErlNifEnv* caller_env, ErlNifPid* pi
ERL_NIF_API_FUNC_DECL(int,enif_get_local_pid,(ErlNifEnv* env, ERL_NIF_TERM, ErlNifPid* pid));
ERL_NIF_API_FUNC_DECL(void,enif_keep_resource,(void* obj));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* obj,const void* data, size_t size));
+#if SIZEOF_LONG != 8
+ERL_NIF_API_FUNC_DECL(int,enif_get_int64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifSInt64* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_uint64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifUInt64* ip));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int64,(ErlNifEnv*, ErlNifSInt64));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
+#endif
/*
** Add last to keep compatibility on Windows!!!
@@ -230,6 +236,13 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* o
# define enif_get_local_pid ERL_NIF_API_FUNC_MACRO(enif_get_local_pid)
# define enif_keep_resource ERL_NIF_API_FUNC_MACRO(enif_keep_resource)
# define enif_make_resource_binary ERL_NIF_API_FUNC_MACRO(enif_make_resource_binary)
+#if SIZEOF_LONG != 8
+# define enif_get_int64 ERL_NIF_API_FUNC_MACRO(enif_get_int64)
+# define enif_get_uint64 ERL_NIF_API_FUNC_MACRO(enif_get_uint64)
+# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
+# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
+#endif
+
#endif
#ifndef enif_make_list1
@@ -253,5 +266,13 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* o
# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
+
+#if SIZEOF_LONG == 8
+# define enif_get_int64 enif_get_long
+# define enif_get_uint64 enif_get_ulong
+# define enif_make_int64 enif_make_long
+# define enif_make_uint64 enif_make_ulong
+#endif
+
#endif
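The erl_nif additions above expose 64-bit integer conversions to NIF authors (mapped to the long variants when SIZEOF_LONG == 8). A minimal sketch of a NIF using them, assuming the usual erl_nif.h boilerplate; the function and its registration are hypothetical:

    #include "erl_nif.h"

    /* Hypothetical NIF: add two 64-bit integers passed from Erlang. */
    static ERL_NIF_TERM
    add64_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifSInt64 a, b;
        if (!enif_get_int64(env, argv[0], &a) ||
            !enif_get_int64(env, argv[1], &b)) {
            return enif_make_badarg(env);
        }
        return enif_make_int64(env, a + b);
    }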
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 5865d33138..e430b4ad77 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -80,6 +80,8 @@ dist_table_alloc(void *dep_tmpl)
Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
return dep_tmpl;
@@ -92,7 +94,7 @@ dist_table_alloc(void *dep_tmpl)
dep->prev = NULL;
erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -580,6 +582,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
+
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ res = hash_get(&erts_node_table, (void *) &ne);
+ if (res && res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ if (res)
+ return res;
+
erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
@@ -696,8 +710,12 @@ erts_set_this_node(Eterm sysname, Uint creation)
void erts_init_node_tables(void)
{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
HashFunctions f;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
f.alloc = (HALLOC_FUN) dist_table_alloc;
@@ -719,9 +737,10 @@ void erts_init_node_tables(void)
erts_this_dist_entry->prev = NULL;
erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
- erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
+ &rwmtx_opt,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
erts_this_dist_entry->sysname = am_Noname;
erts_this_dist_entry->cid = NIL;
erts_this_dist_entry->connection_id = 0;
@@ -772,8 +791,8 @@ void erts_init_node_tables(void)
(void) hash_put(&erts_node_table, (void *) erts_this_node);
- erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
- erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
references_atoms_need_init = 1;
}
@@ -1087,30 +1106,24 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
- if(oh->externals) {
- ExternalThing *etp = oh->externals;
- while (etp) {
- insert_node(etp->node, type, id);
- etp = etp->next;
- }
- }
+ union erl_off_heap_ptr u;
+ struct insert_offheap2_arg a;
+ a.type = BIN_REF;
- if(oh->mso) {
- ProcBin *pb;
- struct insert_offheap2_arg a;
- a.type = BIN_REF;
- for(pb = oh->mso; pb; pb = pb->next) {
- if(IsMatchProgBinary(pb->val)) {
+ for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if(IsMatchProgBinary(u.pb->val)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == pb->val) {
+ if(ib->bin_val == u.pb->val) {
insert_bin = 0;
break;
}
if (insert_bin) {
#if HALFWORD_HEAP
- UWord val = (UWord) pb->val;
+ UWord val = (UWord) u.pb->val;
DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
#else
DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
@@ -1124,13 +1137,13 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
a.id = erts_bld_uword(&hp, NULL, (UWord) val);
#else
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) pb->val);
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
#endif
- erts_match_prog_foreach_offheap(pb->val,
+ erts_match_prog_foreach_offheap(u.pb->val,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = pb->val;
+ nib->bin_val = u.pb->val;
nib->next = inserted_bins;
inserted_bins = nib;
#if HALFWORD_HEAP
@@ -1139,15 +1152,16 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
#endif
}
- }
+ }
+ break;
+ case FUN_SUBTAG:
+ break; /* No need to */
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ insert_node(u.ext->node, type, id);
+ break;
}
}
-
-#if 0
- if(oh->funs) {
- /* No need to */
- }
-#endif
}
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
@@ -1289,6 +1303,7 @@ setup_reference_table(void)
for (i = 0; i < erts_max_processes; i++)
if (process_tab[i]) {
ErlMessage *msg;
+
/* Insert Heap */
insert_offheap(&(process_tab[i]->off_heap),
HEAP_REF,
@@ -1375,21 +1390,22 @@ setup_reference_table(void)
{ /* Add binaries stored elsewhere ... */
ErlOffHeap oh;
- ProcBin pb[2] = {{0},{0}};
- ProcBin *mso = NULL;
+ ProcBin pb[2];
int i = 0;
Binary *default_match_spec;
Binary *default_meta_match_spec;
- /* Only the ProcBin members val and next will be inspected
+ oh.first = NULL;
+ /* Only the ProcBin members thing_word, val and next will be inspected
(by insert_offheap()) */
#undef ADD_BINARY
-#define ADD_BINARY(Bin) \
- if ((Bin)) { \
- pb[i].val = (Bin); \
- pb[i].next = mso; \
- mso = &pb[i]; \
- i++; \
+#define ADD_BINARY(Bin) \
+ if ((Bin)) { \
+ pb[i].thing_word = REFC_BINARY_SUBTAG; \
+ pb[i].val = (Bin); \
+ pb[i].next = oh.first; \
+ oh.first = (struct erl_off_heap_header*) &pb[i]; \
+ i++; \
}
erts_get_default_trace_pattern(NULL,
@@ -1401,11 +1417,6 @@ setup_reference_table(void)
ADD_BINARY(default_match_spec);
ADD_BINARY(default_meta_match_spec);
- oh.mso = mso;
- oh.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- oh.funs = NULL;
-#endif
insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef ADD_BINARY
}
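The erts_find_or_insert_node() change above adds a read-locked fast path before falling back to the write lock, paired with FREQUENT_READ rwmutexes for the node and dist tables. A minimal sketch of the same read-then-write-lock pattern in isolation; the Hash table type and the write-unlock call are assumed from the same API family, and the wrapper is hypothetical:

    /* Hypothetical read-mostly lookup: try under the read lock first,
     * take the write lock only on a miss. */
    static void *
    hypothetical_find_or_insert(Hash *tab, erts_smp_rwmtx_t *mtx, void *tmpl)
    {
        void *res;
        erts_smp_rwmtx_rlock(mtx);
        res = hash_get(tab, tmpl);
        erts_smp_rwmtx_runlock(mtx);
        if (res)
            return res;
        erts_smp_rwmtx_rwlock(mtx);
        res = hash_put(tab, tmpl);   /* inserts if still missing */
        erts_smp_rwmtx_rwunlock(mtx);
        return res;
    }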
diff --git a/erts/emulator/beam/erl_obsolete.c b/erts/emulator/beam/erl_obsolete.c
deleted file mode 100644
index 9c5a7c7ff9..0000000000
--- a/erts/emulator/beam/erl_obsolete.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * *
- * ------------------------- OBSOLETE! DO NOT USE! ------------------------- *
- * *
-\* */
-
-/* cut from ../obsolete/driver.h (since it doesn't mix well with other
- * headers from the emulator).
- */
-#ifdef __WIN32__
-#ifdef CONST
-# undef CONST
-#endif
-#endif
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-/*
- * These functions implement the thread interface in ../obsolete/driver.h.
- * Do *not* use this interface! Within the emulator, use the erl_threads.h,
- * erl_smp.h, or ethread.h interface. From a driver use the thread interface
- * in erl_driver.h.
- */
-
-erl_mutex_t
-erts_mutex_create(void)
-{
- return (erl_mutex_t) erl_drv_mutex_create(NULL);
-}
-
-int
-erts_mutex_destroy(erl_mutex_t mtx)
-{
- erl_drv_mutex_destroy((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_lock(erl_mutex_t mtx)
-{
- erl_drv_mutex_lock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_unlock(erl_mutex_t mtx)
-{
- erl_drv_mutex_unlock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-erl_cond_t
-erts_cond_create(void)
-{
- return (erl_cond_t) erl_drv_cond_create(NULL);
-}
-
-int
-erts_cond_destroy(erl_cond_t cnd)
-{
- erl_drv_cond_destroy((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_signal(erl_cond_t cnd)
-{
- erl_drv_cond_signal((ErlDrvCond *) cnd);
- return 0;
-}
-
-int
-erts_cond_broadcast(erl_cond_t cnd)
-{
- erl_drv_cond_broadcast((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_wait(erl_cond_t cnd, erl_mutex_t mtx)
-{
- erl_drv_cond_wait((ErlDrvCond *) cnd, (ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_cond_timedwait(erl_cond_t cnd, erl_mutex_t mtx, long ms)
-{
- return ENOTSUP;
-}
-
-int
-erts_thread_create(erl_thread_t *tid,
- void* (*func)(void*),
- void* arg,
- int detached)
-{
- if (detached)
- return ENOTSUP;
- return erl_drv_thread_create(NULL, (ErlDrvTid *) tid, func, arg, NULL);
-}
-
-erl_thread_t
-erts_thread_self(void)
-{
- return (erl_thread_t) erl_drv_thread_self();
-}
-
-void
-erts_thread_exit(void *res)
-{
- erl_drv_thread_exit(res);
-}
-
-int
-erts_thread_join(erl_thread_t tid, void **respp)
-{
- return erl_drv_thread_join((ErlDrvTid) tid, respp);
-}
-
-int
-erts_thread_kill(erl_thread_t tid)
-{
- return ENOTSUP;
-}
-
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 967a14f0d1..c10724b951 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -625,6 +625,7 @@ erts_port_task_schedule(Eterm id,
if (!enq_port) {
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
}
else {
enqueue_port(runq, pp);
@@ -634,9 +635,10 @@ erts_port_task_schedule(Eterm id,
profile_runnable_port(pp, am_active);
}
+ erts_smp_runq_unlock(runq);
+
erts_smp_notify_inc_runq(runq);
}
- erts_smp_runq_unlock(runq);
return 0;
}
@@ -944,8 +946,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
- erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
}
#endif
port_was_enqueued = 1;
@@ -1112,7 +1114,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
dequeue_port(from_rq, prt);
erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
enqueue_port(to_rq, prt);
- erts_smp_notify_inc_runq(to_rq);
return ERTS_MIGRATE_SUCCESS;
}
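The erl_port_task.c hunks above reorder the run-queue unlock and the wakeup so the queue lock is never held while another scheduler is being notified. A minimal sketch of the ordering the patch moves to, using the names from the hunks; the wrapper function is hypothetical:

    /* Hypothetical enqueue path: release the run-queue lock before
     * notifying, so the wakeup never runs with the lock held. */
    static void
    hypothetical_enqueue_and_notify(ErtsRunQueue *runq, Port *pp)
    {
        enqueue_port(runq, pp);
        erts_smp_runq_unlock(runq);
        erts_smp_notify_inc_runq(runq);
    }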
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 41031f5468..761096e9ad 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -46,7 +46,13 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
+ (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
+#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
#define ERTS_WAKEUP_OTHER_DEC 10
@@ -106,6 +112,10 @@ Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
+#ifdef ERTS_SMP
+Uint erts_max_main_threads;
+#endif
+
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -116,16 +126,34 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHED_CHANGING_ONLINE 1
-#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+
+#ifndef DEBUG
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+
+#else
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+
+#endif
+
static struct {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- int changing;
int online;
int curr_online;
int wait_curr_online;
+ erts_smp_atomic_t changing;
erts_smp_atomic_t active;
struct {
erts_smp_atomic_t ongoing;
@@ -231,6 +259,17 @@ typedef union {
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_SMP
+
+typedef union {
+ ErtsSchedulerSleepInfo ssi;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
+} ErtsAlignedSchedulerSleepInfo;
+
+static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+
+#endif
+
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -288,8 +327,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_RUNQ_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -353,6 +399,11 @@ static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size
#endif
+static int reader_group_lookup(int logical);
+static void create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata,
+ int *cpudata_size);
+static void destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata);
+
static void early_cpu_bind_init(void);
static void late_cpu_bind_init(void);
@@ -582,6 +633,76 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
+void
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+{
+ switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_sys_schedule_interrupt(1);
+ break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_tse_set(ssi->event);
+ break;
+ case 0:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ long aux_work;
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
+ aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
+ erts_sched_poke(ssi);
+ }
+}
+#endif
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+blockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = erts_smp_atomic_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+ erts_check_children();
+ }
+#endif
+ }
+ return aux_work;
+}
+
+#endif
+
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+nonblockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+
+ }
+}
+#endif
+
static void
prepare_for_block(void *vrq)
{
@@ -634,9 +755,33 @@ erts_active_schedulers(void)
return as;
}
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+#else
+ return !erts_port_task_have_outstanding_io_tasks();
+#endif
+}
+
#ifdef ERTS_SMP
static ERTS_INLINE void
+sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+}
+
+static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -676,7 +821,11 @@ empty_runq(ErtsRunQueue *rq)
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 <= empty && empty < erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
erts_smp_atomic_inc(&no_empty_run_queues);
}
@@ -689,107 +838,314 @@ non_empty_runq(ErtsRunQueue *rq)
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 < empty && empty <= erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
erts_smp_atomic_dec(&no_empty_run_queues);
}
}
-static ERTS_INLINE int
-sched_spin_wake(ErtsRunQueue *rq)
+static long
+sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = 0;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_atomic_inc(&rq->spin_wake);
- return 1;
- }
- return 0;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
+ return oflgs;
}
-static ERTS_INLINE int
-sched_spin_wake_all(ErtsRunQueue *rq)
+static long
+sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = ERTS_SSI_FLG_WAITING;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0)
- erts_smp_atomic_add(&rq->spin_wake, val);
- return val;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ } while (oflgs & ERTS_SSI_FLG_WAITING);
+ return oflgs;
}
+static long
+sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+{
+ long oflgs;
+ long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+
+ if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ }
+}
+
+#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
+ (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
+ != ERTS_SSI_FLG_WAITING)
+
static void
-sched_sys_wait(Uint no, ErtsRunQueue *rq)
+scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
- long dt;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ long flgs;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ erts_smp_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
+
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+
+ /*
+ * If all schedulers are waiting, one of them *should*
+ * be waiting in erl_sys_schedule()
+ */
+
+ if (!prepare_for_sys_schedule()) {
+
+ sched_waiting(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ tse_wait:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ tse_blockable_aux_work:
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- sched_waiting_sys(no, rq);
+ while (1) {
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
- erl_sys_schedule(1); /* Might give us something to do */
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto tse_blockable_aux_work;
+ }
+#endif
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_runq_unlock(rq);
}
- }
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ erts_smp_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
}
else {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ long dt;
+
+ erts_smp_atomic_set(&function_calls, 0);
+ *fcalls = 0;
+
+ sched_waiting_sys(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+
+ while (spincount-- > 0) {
+
+ sys_poll_aux_work:
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ sys_aux_work:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ }
+
+ /*
+ * If we got new I/O tasks we aren't allowed to
+ * call erl_sys_schedule() until it is handled.
+ */
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to continue checking for I/O...
+ */
+ if (!prepare_for_sys_schedule()) {
+ spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ goto tse_wait;
+ }
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
- if (!erts_port_task_have_outstanding_io_tasks()) {
-#endif
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to wait in erl_sys_schedule() after all...
+ */
+ if (prepare_for_sys_schedule())
+ goto do_sys_schedule;
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ else {
+ do_sys_schedule:
erts_sys_schedule_interrupt(0);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING))
+ goto sys_locked_woken;
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
+
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+
erts_smp_runq_unlock(rq);
erl_sys_schedule(0);
@@ -797,134 +1153,103 @@ sched_sys_wait(Uint no, ErtsRunQueue *rq)
dt = do_time_read_and_reset();
if (dt) bump_timer(dt);
- erts_smp_runq_lock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ sys_woken:
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ sched_active_sys(esdp->no, rq);
}
}
-#endif
- sched_active_sys(no, rq);
-}
-
-static void
-sched_cnd_wait(Uint no, ErtsRunQueue *rq)
-{
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
-
- sched_waiting(no, rq);
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
-#else
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_mtx_unlock(&rq->mtx);
-
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_mtx_unlock(&rq->mtx);
- }
- }
-
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val == 0) {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
- }
- else {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- }
-#endif
-
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- sched_active(no, rq);
}
-static void
-wake_one_scheduler(void)
-{
- ASSERT(erts_common_run_queue);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
- if (erts_common_run_queue->waiting) {
- if (!sched_spin_wake(erts_common_run_queue)) {
- if (erts_common_run_queue->waiting == -1) /* One scheduler waiting
- and doing so in
- sys_schedule */
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&erts_common_run_queue->cnd);
- }
+static ERTS_INLINE long
+ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ /* reset all flags but suspended */
+ long oflgs;
+ long nflgs = 0;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return oflgs;
+ nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ xflgs = oflgs;
}
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq, int incq, int one)
{
- ASSERT(!erts_common_run_queue);
- ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->waiting && !rq->woken) {
- if (!sched_spin_wake(rq)) {
- if (rq->waiting < 0)
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&rq->cnd);
+ int res;
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ /*
+ * The unlocked run queue is not strictly necessary
+ * from a thread safety or deadlock prevention
+ * perspective. It will, however, cost us performance
+ * if it is locked during wakeup of another scheduler,
+ * so all code *should* handle this without having
+ * the lock on the run queue.
+ */
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+
+ sl = &rq->sleepers;
+
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi)
+ erts_smp_spin_unlock(&sl->lock);
+ else if (one) {
+ long flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
}
- rq->woken = 1;
- if (incq)
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ res = sl->list != NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+
+ if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
non_empty_runq(rq);
}
+ else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
static void
wake_all_schedulers(void)
{
- if (erts_common_run_queue) {
- erts_smp_runq_lock(erts_common_run_queue);
- if (erts_common_run_queue->waiting) {
- if (erts_common_run_queue->waiting < 0)
- erts_sys_schedule_interrupt(1);
- sched_spin_wake_all(erts_common_run_queue);
- erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
- }
- erts_smp_runq_unlock(erts_common_run_queue);
- }
+ if (erts_common_run_queue)
+ wake_scheduler(erts_common_run_queue, 0, 0);
else {
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- wake_scheduler(rq, 0);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
}
}
}
@@ -939,14 +1264,14 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
wrq = ERTS_RUNQ_IX(ix);
iflgs = erts_smp_atomic_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
- erts_smp_xrunq_lock(crq, wrq);
if (activate) {
if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0);
- erts_smp_xrunq_unlock(crq, wrq);
+ wake_scheduler(wrq, 0, 1);
return 1;
}
return 0;
@@ -992,15 +1317,13 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- wake_one_scheduler();
- else
- wake_scheduler(runq, 1);
+ if (runq)
+ wake_scheduler(runq, 1, 1);
#endif
}
void
-erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -1146,20 +1469,23 @@ static void
evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
{
Port *prt;
+ int notify_to_rq = 0;
int prio;
int prt_locked = 0;
int rq_locked = 0;
int evac_rq_locked = 1;
+ ErtsMigrateResult mres;
erts_smp_runq_lock(evac_rq);
+ erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
-
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1187,9 +1513,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
/* Evacuate scheduled ports */
prt = evac_rq->ports.start;
while (prt) {
- (void) erts_port_migrate(prt, &prt_locked,
+ mres = erts_port_migrate(prt, &prt_locked,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (prt_locked)
erts_smp_port_unlock(prt);
if (!evac_rq_locked) {
@@ -1218,9 +1546,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
goto end_of_proc;
}
- (void) erts_proc_migrate(proc, &proc_locks,
+ mres = erts_proc_migrate(proc, &proc_locks,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (proc_locks)
erts_smp_proc_unlock(proc, proc_locks);
if (!evac_rq_locked) {
@@ -1252,10 +1582,13 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (rq_locked)
erts_smp_runq_unlock(rq);
- if (!evac_rq_locked)
- erts_smp_runq_lock(evac_rq);
- wake_scheduler(evac_rq, 0);
- erts_smp_runq_unlock(evac_rq);
+ if (evac_rq_locked)
+ erts_smp_runq_unlock(evac_rq);
+
+ if (notify_to_rq)
+ smp_notify_inc_runq(rq);
+
+ wake_scheduler(evac_rq, 0, 1);
}
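
evacuate_run_queue() now remembers whether any port or process actually migrated (notify_to_rq) and defers both smp_notify_inc_runq(rq) and the wake of the evacuated queue until every run queue lock has been dropped, so a woken scheduler does not immediately block on a lock the waker still holds. A small sketch of that collect-under-lock, signal-after-unlock pattern (queue_t and add_work_and_notify are made-up names; pthreads stand in for the erts_smp_* primitives):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t mtx;
        pthread_cond_t  cnd;
        int             work;       /* waiters recheck this under mtx */
    } queue_t;

    static void add_work_and_notify(queue_t *q, int n)
    {
        int notify = 0;
        pthread_mutex_lock(&q->mtx);
        if (n > 0) {
            q->work += n;
            notify = 1;             /* record the decision under the lock ... */
        }
        pthread_mutex_unlock(&q->mtx);
        if (notify)
            pthread_cond_signal(&q->cnd);   /* ... signal after unlocking */
    }
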
static int
@@ -1483,31 +1816,6 @@ try_steal_task(ErtsRunQueue *rq)
return res;
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
- ERTS_SCHEDULER_IX(i)->check_children = 1;
- if (!erts_common_run_queue)
- wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
- erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
- }
- if (ongoing_multi_scheduling_block()) {
- /* Also blocked schedulers need to check children */
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- for (i = 0; i < erts_no_schedulers; i++)
- ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- if (erts_common_run_queue)
- wake_all_schedulers();
-}
-#endif
-
/* Run queue balancing */
typedef struct {
@@ -2100,6 +2408,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
+ erts_no_run_queues = n;
+
for (ix = 0; ix < n; ix++) {
int pix, rix;
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
@@ -2114,8 +2424,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
- erts_smp_atomic_init(&rq->spin_waiter, 0);
- erts_smp_atomic_init(&rq->spin_wake, 0);
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
rq->waiting = 0;
rq->woken = 0;
@@ -2166,7 +2478,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
- erts_no_run_queues = n;
#ifdef ERTS_SMP
@@ -2181,9 +2492,34 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#endif
+ n = (int) no_schedulers;
+ erts_no_schedulers = n;
+
+#ifdef ERTS_SMP
+ /* Create and initialize scheduler sleep info */
+
+ aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
+ (sizeof(ErtsAlignedSchedulerSleepInfo)
+ *(n+1)));
+ if ((((Uint) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) == 0)
+ aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
+ ((((Uint) aligned_sched_sleep_info)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+#if 0 /* no need to initialize these... */
+ ssi->next = NULL;
+ ssi->prev = NULL;
+#endif
+ erts_smp_atomic_init(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_thread_func */
+ erts_smp_atomic_init(&ssi->aux_work, 0);
+ }
+#endif
+
/* Create and initialize scheduler specific data */
- n = (int) no_schedulers;
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
@@ -2200,6 +2536,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
#if HALFWORD_HEAP
/* Registers need to be heap allocated (correct memory range) for tracing to work */
@@ -2228,11 +2565,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->check_children = 0;
- esdp->blocked_check_children = 0;
-#endif
- erts_smp_atomic_init(&esdp->suspended, 0);
erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2241,7 +2573,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- schdlr_sspnd.changing = 0;
+ erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
@@ -2264,7 +2596,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2275,7 +2608,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.wait_curr_online = no_schedulers_online;
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
erts_smp_atomic_init(&doing_sys_schedule, 0);
@@ -2423,13 +2757,115 @@ susp_sched_resume_block(void *unused)
}
static void
+scheduler_ix_resume_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long oflgs;
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ if (oflgs == xflgs) {
+ erts_sched_finish_poke(ssi, oflgs);
+ break;
+ }
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+}
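
scheduler_ix_resume_wake() clears a suspended scheduler's sleep state with a compare-and-swap loop: on failure it retries with the freshly observed flag value, and it gives up as soon as the suspended bit is no longer set. A stand-alone version of that retry shape in C11 atomics, using the same bit positions as the ERTS_SSI_FLG_* macros added in erl_process.h (the sleep-type bit is left out for brevity; resume_wake is an illustrative name):

    #include <stdatomic.h>

    #define FLG_SLEEPING  (1L << 0)
    #define FLG_WAITING   (1L << 3)
    #define FLG_SUSPENDED (1L << 4)

    /* Clear the whole sleep state iff it still looks the way we expect;
     * stop retrying once somebody else has cleared SUSPENDED for us. */
    static long resume_wake(atomic_long *flags)
    {
        long expected = FLG_SLEEPING | FLG_WAITING | FLG_SUSPENDED;
        while (!atomic_compare_exchange_weak(flags, &expected, 0L)) {
            if (!(expected & FLG_SUSPENDED))
                break;              /* already resumed; nothing left to do */
            /* retry; 'expected' now holds the value we just observed */
        }
        return expected;            /* last observed flag value */
    }
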
+
+static long
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = xpct;
+
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+
+ return oflgs;
+}
+
+static long
+sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
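
sched_spin_suspended() and the other spin loops added here burn at most spincount iterations, but yield the thread every ERTS_SCHED_SPIN_UNTIL_YIELD rounds so a spinning scheduler cannot starve the thread it is waiting for on an oversubscribed host. The same cadence in isolation (bounded_spin and SPIN_UNTIL_YIELD are illustrative; sched_yield() is the POSIX stand-in for erts_thr_yield()):

    #include <sched.h>
    #include <stdatomic.h>

    #define SPIN_UNTIL_YIELD 1000        /* illustrative budget per yield */

    /* Spin until *stop becomes nonzero or the budget runs out; yield
     * periodically instead of monopolizing the CPU the whole time. */
    static int bounded_spin(atomic_int *stop, int spincount)
    {
        int until_yield = SPIN_UNTIL_YIELD;
        while (spincount-- > 0) {
            if (atomic_load(stop))
                return 1;                /* condition met while spinning */
            if (--until_yield == 0) {
                until_yield = SPIN_UNTIL_YIELD;
                sched_yield();
            }
        }
        return 0;                        /* caller should block for real now */
    }
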
+
+static long
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ }
+}
+
+static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
+ long flgs;
+ int changing;
long no = (long) esdp->no;
ErtsRunQueue *rq = esdp->run_queue;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
int wake = 0;
+ int reset_read_group = 0;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
/*
* Schedulers may be suspended in two different ways:
@@ -2451,113 +2887,151 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(erts_cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ reset_read_group = 1;
}
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (reset_read_group)
+ erts_smp_rwmtx_set_reader_group(0);
+
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(0, 0);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1)
- schdlr_sspnd.changing = 0;
- }
-
- while (1) {
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children;
- erts_smp_runq_lock(esdp->run_queue);
- check_children = esdp->check_children;
- esdp->check_children = 0;
- erts_smp_runq_unlock(esdp->run_queue);
- if (check_children) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_check_children();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
}
-#endif
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
- int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
- curr_online = 0;
- changed = 1;
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
- curr_online = 1;
- changed = 1;
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
}
- if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
- schdlr_sspnd.changing = 0;
- }
- if (wake) {
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ blockable_aux_work:
+ blockable_aux_work(esdp, ssi, aux_work);
+#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (1) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ while (1) {
+ long flgs;
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->blocked_check_children)
- break;
+ flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto blockable_aux_work;
+ }
#endif
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ }
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
- break;
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->blocked_check_children = 0;
-#endif
+ active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && schdlr_sspnd.online == active_schedulers) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- }
+ ASSERT(no <= schdlr_sspnd.online);
+ ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED
- && schdlr_sspnd.online == active_schedulers) {
- schdlr_sspnd.changing = 0;
}
+
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ ASSERT(curr_online);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(1, (int) esdp->no);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
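
A suspended scheduler now sleeps in three stages: sched_spin_suspended() spins for a while, sched_set_suspended_sleeptype() resets the thread event and then publishes ERTS_SSI_FLG_TSE_SLEEPING with a compare-and-swap, and only then does the scheduler block in erts_tse_wait(), retrying on EINTR. Resetting the event before the flag transition is what prevents a lost wakeup. A condition-variable analogue of the wait/wake pair (event_t, sleeper_wait and sleeper_wake are made-up names, not the erts_tse API):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t mtx;
        pthread_cond_t  cnd;
        bool            wake;       /* set by the waker, consumed by the sleeper */
    } event_t;

    static void sleeper_wait(event_t *e)
    {
        pthread_mutex_lock(&e->mtx);
        while (!e->wake)            /* rechecking tolerates spurious wakeups */
            pthread_cond_wait(&e->cnd, &e->mtx);
        e->wake = false;
        pthread_mutex_unlock(&e->mtx);
    }

    static void sleeper_wake(event_t *e)
    {
        pthread_mutex_lock(&e->mtx);
        e->wake = true;
        pthread_mutex_unlock(&e->mtx);
        pthread_cond_signal(&e->cnd);
    }
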
@@ -2622,8 +3096,10 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
+ long changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (yield_allowed && schdlr_sspnd.changing)
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
*active = *online = schdlr_sspnd.online;
@@ -2643,6 +3119,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
+ long changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -2652,7 +3129,8 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
else {
@@ -2661,17 +3139,19 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
schdlr_sspnd.online = no;
if (no > online) {
int ix;
schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block())
- /* No schedulers to resume */;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
else if (erts_common_run_queue) {
for (ix = online; ix < no; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 0);
+ scheduler_ix_resume_wake(ix);
}
else {
if (plocks) {
@@ -2685,6 +3165,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/*
* Spread evacuation paths among all online
@@ -2699,7 +3180,6 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
res = ERTS_SCHDLR_SSPND_DONE;
}
else /* if (no < online) */ {
@@ -2716,12 +3196,17 @@ erts_set_schedulers_online(Process *p,
schdlr_sspnd.wait_curr_online = no+1;
}
- if (ongoing_multi_scheduling_block())
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- else if (erts_common_run_queue) {
+ if (ongoing_multi_scheduling_block()) {
for (ix = no; ix < online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 1);
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++) {
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
wake_all_schedulers();
}
else {
@@ -2748,7 +3233,10 @@ erts_set_schedulers_online(Process *p,
erts_smp_atomic_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq, 0, 1);
+ }
}
}
@@ -2762,6 +3250,12 @@ erts_set_schedulers_online(Process *p,
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -2776,11 +3270,12 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
+ long changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
@@ -2794,19 +3289,22 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
else {
+ int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
- if (schdlr_sspnd.online == 1) {
+ if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
if (p->scheduler_data->no == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 1;
@@ -2820,17 +3318,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ for (ix = 1; ix < online; ix++)
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
erts_smp_atomic_set(&balance_info.used_runqs, 1);
- for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
erts_smp_runq_unlock(rq);
}
@@ -2855,6 +3355,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -2898,7 +3405,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.msb.procs)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
#ifdef DEBUG
ERTS_FOREACH_RUNQ(rq,
{
@@ -2925,13 +3432,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
- schdlr_sspnd.changing = 0;
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
else {
int online = schdlr_sspnd.online;
@@ -2948,6 +3455,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/* Spread evacuation paths among all online run queues */
@@ -2963,7 +3471,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -3035,18 +3542,34 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+#ifdef ERTS_SMP
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+
+ if (no <= erts_max_main_threads) {
+ erts_thr_set_main_status(1, (int) no);
+ if (erts_reader_groups) {
+ int rg = (int) no;
+ if (rg > erts_reader_groups)
+ rg = (((int) no) - 1) % erts_reader_groups + 1;
+ erts_smp_rwmtx_set_reader_group(rg);
+ }
+ }
+
erts_proc_lock_prepare_proc_lock_waiter();
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+
+
#endif
erts_register_blockable_thread();
#ifdef HIPE
@@ -3055,30 +3578,30 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
- schdlr_sspnd.curr_online--;
+ if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ }
- if (((ErtsSchedulerData *) vesdp)->no != 1) {
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- schdlr_sspnd.changing = 0;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ if (((ErtsSchedulerData *) vesdp)->no == 1) {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
}
- }
- else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- schdlr_sspnd.changing = 0;
- else {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- ASSERT(!schdlr_sspnd.changing);
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
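
When reader groups are enabled, each scheduler thread derives its reader group from its scheduler number: rg = ((no - 1) % erts_reader_groups) + 1. For no <= erts_reader_groups this is just no itself, so the two-branch form in the patch and the plain formula agree. A few example values (groups = 4 is an assumed figure):

    #include <stdio.h>

    int main(void)
    {
        int groups = 4;                          /* illustrative group count */
        for (int no = 1; no <= 10; no++)         /* 1-based scheduler numbers */
            printf("scheduler %2d -> reader group %d\n",
                   no, (no - 1) % groups + 1);
        return 0;                                /* groups cycle 1..4 round-robin */
    }
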
@@ -3428,6 +3951,7 @@ processor_order_cmp(const void *vx, const void *vy)
static void
check_cpu_bind(ErtsSchedulerData *esdp)
{
+ int rg = 0;
int res;
int cpu_id;
erts_smp_runq_unlock(esdp->run_queue);
@@ -3446,19 +3970,25 @@ check_cpu_bind(ErtsSchedulerData *esdp)
goto unbind;
}
}
- else if (cpu_id < 0 && scheduler2cpu_map[esdp->no].bound_id >= 0) {
+ else if (cpu_id < 0) {
unbind:
/* Get rid of old binding */
res = erts_unbind_from_cpu(erts_cpuinfo);
if (res == 0)
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
- else {
+ else if (res != -ENOTSUP) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Scheduler %d failed to unbind from cpu %d: %s\n",
(int) esdp->no, cpu_id, erl_errno_id(-res));
erts_send_error_to_logger_nogl(dsbufp);
}
}
+ if (erts_reader_groups) {
+ if (esdp->cpu_id >= 0)
+ rg = reader_group_lookup(esdp->cpu_id);
+ else
+ rg = (((int) esdp->no) - 1) % erts_reader_groups + 1;
+ }
erts_smp_runq_lock(esdp->run_queue);
#ifdef ERTS_SMP
if (erts_common_run_queue)
@@ -3469,6 +3999,8 @@ check_cpu_bind(ErtsSchedulerData *esdp)
#endif
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (erts_reader_groups)
+ erts_smp_rwmtx_set_reader_group(rg);
}
static void
@@ -3497,11 +4029,13 @@ signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
wake_all_schedulers();
}
else {
- ERTS_FOREACH_RUNQ(rq,
- {
+ for (s_ix = 0; s_ix < erts_no_run_queues; s_ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(s_ix);
+ erts_smp_runq_lock(rq);
rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- wake_scheduler(rq, 0);
- });
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
+ };
}
#else
check_cpu_bind(erts_get_scheduler_data());
@@ -3541,6 +4075,490 @@ erts_init_scheduler_bind_type(char *how)
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
+/*
+ * reader groups map
+ */
+
+typedef struct {
+ int level[ERTS_TOPOLOGY_MAX_DEPTH+1];
+} erts_avail_cput;
+
+typedef struct {
+ int *map;
+ int size;
+ int groups;
+} erts_reader_groups_map_test;
+
+typedef struct {
+ int id;
+ int sub_levels;
+ int reader_groups;
+} erts_rg_count_t;
+
+typedef struct {
+ int logical;
+ int reader_group;
+} erts_reader_groups_map_t;
+
+typedef struct {
+ erts_reader_groups_map_t *map;
+ int map_size;
+ int logical_processors;
+ int groups;
+} erts_make_reader_groups_map_test;
+
+static int reader_groups_available_cpu_check;
+static int reader_groups_logical_processors;
+static int reader_groups_map_size;
+static erts_reader_groups_map_t *reader_groups_map;
+
+#define ERTS_TOPOLOGY_RG ERTS_TOPOLOGY_MAX_DEPTH
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test);
+
+static Eterm
+get_reader_groups_map(Process *c_p,
+ erts_reader_groups_map_t *map,
+ int map_size,
+ int logical_processors)
+{
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm res = NIL, tuple;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, logical_processors*(2+3));
+#ifdef DEBUG
+ endp = hp + logical_processors*(2+3);
+#endif
+ for (i = map_size - 1; i >= 0; i--) {
+ if (map[i].logical >= 0) {
+ tuple = TUPLE2(hp,
+ make_small(map[i].logical),
+ make_small(map[i].reader_group));
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == endp);
+ return res;
+}
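
get_reader_groups_map() sizes its HAlloc() call from the standard heap costs of the terms it builds: each list element needs 2 words for the cons cell plus 3 words for the {Logical, ReaderGroup} pair (tuple header plus two elements), hence logical_processors*(2+3). The arithmetic in isolation (the processor count is an assumed figure):

    #include <stdio.h>

    int main(void)
    {
        int logical_processors = 8;                     /* illustrative */
        int words_per_element  = 2 /* cons */ + 3 /* 2-tuple */;
        printf("heap words reserved: %d\n",
               logical_processors * words_per_element); /* prints 40 */
        return 0;
    }
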
+
+Eterm
+erts_debug_reader_groups_map(Process *c_p, int groups)
+{
+ Eterm res;
+ erts_make_reader_groups_map_test test;
+
+ test.groups = groups;
+ make_reader_groups_map(&test);
+ if (!test.map)
+ res = NIL;
+ else {
+ res = get_reader_groups_map(c_p,
+ test.map,
+ test.map_size,
+ test.logical_processors);
+ erts_free(ERTS_ALC_T_TMP, test.map);
+ }
+ return res;
+}
+
+
+Eterm
+erts_get_reader_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ res = get_reader_groups_map(c_p,
+ reader_groups_map,
+ reader_groups_map_size,
+ reader_groups_logical_processors);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
+static void
+make_available_cpu_topology(erts_avail_cput *no,
+ erts_avail_cput *avail,
+ erts_cpu_topology_t *cpudata,
+ int *size,
+ int test)
+{
+ int len = *size;
+ erts_cpu_topology_t last;
+ int a, i, j;
+
+ no->level[ERTS_TOPOLOGY_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_CORE] = -1;
+ no->level[ERTS_TOPOLOGY_THREAD] = -1;
+ no->level[ERTS_TOPOLOGY_LOGICAL] = -1;
+
+ last.node = INT_MIN;
+ last.processor = INT_MIN;
+ last.processor_node = INT_MIN;
+ last.core = INT_MIN;
+ last.thread = INT_MIN;
+ last.logical = INT_MIN;
+
+ a = 0;
+
+ for (i = 0; i < len; i++) {
+
+ if (!test && !erts_is_cpu_available(erts_cpuinfo, cpudata[i].logical))
+ continue;
+
+ if (last.node != cpudata[i].node)
+ goto node;
+ if (last.processor != cpudata[i].processor)
+ goto processor;
+ if (last.processor_node != cpudata[i].processor_node)
+ goto processor_node;
+ if (last.core != cpudata[i].core)
+ goto core;
+ ASSERT(last.thread != cpudata[i].thread);
+ goto thread;
+
+ node:
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ processor:
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ processor_node:
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ core:
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ thread:
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ for (j = 0; j < ERTS_TOPOLOGY_LOGICAL; j++)
+ avail[a].level[j] = no->level[j];
+
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL] = cpudata[i].logical;
+ avail[a].level[ERTS_TOPOLOGY_RG] = 0;
+
+ ASSERT(last.logical != cpudata[a].logical);
+
+ last = cpudata[i];
+ a++;
+ }
+
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ *size = a;
+}
+
+static int
+reader_group_lookup(int logical)
+{
+ int start = logical % reader_groups_map_size;
+ int ix = start;
+
+ do {
+ if (reader_groups_map[ix].logical == logical) {
+ ASSERT(reader_groups_map[ix].reader_group > 0);
+ return reader_groups_map[ix].reader_group;
+ }
+ ix++;
+ if (ix == reader_groups_map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+}
+
+static void
+reader_group_insert(erts_reader_groups_map_t *map, int map_size,
+ int logical, int reader_group)
+{
+ int start = logical % map_size;
+ int ix = start;
+
+ do {
+ if (map[ix].logical < 0) {
+ map[ix].logical = logical;
+ map[ix].reader_group = reader_group;
+ return;
+ }
+ ix++;
+ if (ix == map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+}
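
reader_group_lookup() and reader_group_insert() form a small open-addressed hash keyed on the logical CPU id with linear probing; the map is sized to roughly twice the number of entries (avail_sz*2+1) so probe chains stay short, and a full lap without a hit is treated as fatal. The same structure as a stand-alone program (slot_t, insert, lookup and the "key < 0 means empty" convention are illustrative choices):

    #include <stdio.h>

    #define MAP_SZ 17                         /* ~2x the entry count, as in the patch */

    typedef struct { int key, val; } slot_t;  /* key < 0 means "empty" */
    static slot_t map[MAP_SZ];

    static void insert(int key, int val)
    {
        int start = key % MAP_SZ, ix = start;
        do {
            if (map[ix].key < 0) {
                map[ix].key = key;
                map[ix].val = val;
                return;
            }
            ix = (ix + 1) % MAP_SZ;           /* probe the next slot on collision */
        } while (ix != start);                /* table full: the patch aborts here */
    }

    static int lookup(int key)
    {
        int start = key % MAP_SZ, ix = start;
        do {
            if (map[ix].key == key)
                return map[ix].val;
            ix = (ix + 1) % MAP_SZ;
        } while (ix != start);
        return -1;                            /* not found: the patch aborts here */
    }

    int main(void)
    {
        for (int ix = 0; ix < MAP_SZ; ix++)
            map[ix].key = -1;
        insert(0, 1);
        insert(17, 2);                        /* 0 and 17 both land in slot 0 */
        printf("%d %d\n", lookup(0), lookup(17));  /* prints: 1 2 */
        return 0;
    }
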
+
+
+static int
+sub_levels(erts_rg_count_t *rgc, int level, int aix, int avail_sz, erts_avail_cput *avail)
+{
+ int sub_level = level+1;
+ int last = -1;
+ rgc->sub_levels = 0;
+
+ do {
+ if (last != avail[aix].level[sub_level]) {
+ rgc->sub_levels++;
+ last = avail[aix].level[sub_level];
+ }
+ aix++;
+ }
+ while (aix < avail_sz && rgc->id == avail[aix].level[level]);
+ rgc->reader_groups = 0;
+ return aix;
+}
+
+static int
+write_reader_groups(int *rgp, erts_rg_count_t *rgcp,
+ int level, int a,
+ int avail_sz, erts_avail_cput *avail)
+{
+ int rg = *rgp;
+ int sub_level = level+1;
+ int sl_per_gr = rgcp->sub_levels / rgcp->reader_groups;
+ int xsl = rgcp->sub_levels % rgcp->reader_groups;
+ int sls = 0;
+ int last = -1;
+ int xsl_rg_lim = (rgcp->reader_groups - xsl) + rg + 1;
+
+ ASSERT(level < 0 || avail[a].level[level] == rgcp->id);
+
+ do {
+ if (last != avail[a].level[sub_level]) {
+ if (!sls) {
+ sls = sl_per_gr;
+ rg++;
+ if (rg >= xsl_rg_lim)
+ sls++;
+ }
+ last = avail[a].level[sub_level];
+ sls--;
+ }
+ avail[a].level[ERTS_TOPOLOGY_RG] = rg;
+ a++;
+ } while (a < avail_sz && (level < 0
+ || avail[a].level[level] == rgcp->id));
+
+ ASSERT(rgcp->reader_groups == rg - *rgp);
+
+ *rgp = rg;
+
+ return a;
+}
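
write_reader_groups() spreads rgcp->sub_levels sub-entries over rgcp->reader_groups groups as evenly as possible: every group gets sub_levels / reader_groups entries and the last sub_levels % reader_groups groups get one extra (that is what the xsl_rg_lim test arranges). Worked numbers (10 cores over 4 groups is an assumed example):

    #include <stdio.h>

    int main(void)
    {
        int S = 10, G = 4;                   /* 10 cores into 4 reader groups */
        int per_group = S / G;               /* 2 */
        int extra     = S % G;               /* the last 2 groups get one more */
        for (int g = 1; g <= G; g++)
            printf("group %d: %d cores\n", g, per_group + (g > G - extra));
        return 0;                            /* sizes 2, 2, 3, 3 */
    }
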
+
+static int
+rg_count_sub_levels_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ if (x->sub_levels != y->sub_levels)
+ return y->sub_levels - x->sub_levels;
+ return x->id - y->id;
+}
+
+static int
+rg_count_id_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ return x->id - y->id;
+}
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test)
+{
+ int i, spread_level, avail_sz;
+ erts_avail_cput no, *avail;
+ erts_cpu_topology_t *cpudata;
+ erts_reader_groups_map_t *map;
+ int map_sz;
+ int groups = erts_reader_groups;
+
+ if (test) {
+ test->map = NULL;
+ test->map_size = 0;
+ groups = test->groups;
+ }
+
+ if (!groups)
+ return;
+
+ if (!test) {
+ if (reader_groups_map)
+ erts_free(ERTS_ALC_T_RDR_GRPS_MAP, reader_groups_map);
+
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &avail_sz);
+
+ if (!cpudata)
+ return;
+
+ cpu_bind_order_sort(cpudata,
+ avail_sz,
+ ERTS_CPU_BIND_NO_SPREAD,
+ 1);
+
+ avail = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(erts_avail_cput)*avail_sz);
+
+ make_available_cpu_topology(&no, avail, cpudata,
+ &avail_sz, test != NULL);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ map_sz = avail_sz*2+1;
+
+ if (test) {
+ map = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ test->map = map;
+ test->map_size = map_sz;
+ test->logical_processors = avail_sz;
+ }
+ else {
+ map = erts_alloc(ERTS_ALC_T_RDR_GRPS_MAP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ reader_groups_map = map;
+ reader_groups_logical_processors = avail_sz;
+ reader_groups_map_size = map_sz;
+
+ }
+
+ for (i = 0; i < map_sz; i++) {
+ map[i].logical = -1;
+ map[i].reader_group = 0;
+ }
+
+ spread_level = ERTS_TOPOLOGY_CORE;
+ for (i = ERTS_TOPOLOGY_NODE; i < ERTS_TOPOLOGY_THREAD; i++) {
+ if (no.level[i] > groups) {
+ spread_level = i;
+ break;
+ }
+ }
+
+ if (no.level[spread_level] <= groups) {
+ int a, rg, last = -1;
+ rg = 0;
+ ASSERT(spread_level == ERTS_TOPOLOGY_CORE);
+ for (a = 0; a < avail_sz; a++) {
+ if (last != avail[a].level[spread_level]) {
+ rg++;
+ last = avail[a].level[spread_level];
+ }
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ rg);
+ }
+ }
+ else { /* groups < no.level[spread_level] */
+ erts_rg_count_t *rg_count;
+ int a, rg, tl, toplevels;
+
+ tl = spread_level-1;
+
+ if (spread_level == ERTS_TOPOLOGY_NODE)
+ toplevels = 1;
+ else
+ toplevels = no.level[tl];
+
+ rg_count = erts_alloc(ERTS_ALC_T_TMP,
+ toplevels*sizeof(erts_rg_count_t));
+
+ if (toplevels == 1) {
+ rg_count[0].id = 0;
+ rg_count[0].sub_levels = no.level[spread_level];
+ rg_count[0].reader_groups = groups;
+ }
+ else {
+ int rgs_per_tl, rgs;
+ rgs = groups;
+ rgs_per_tl = rgs / toplevels;
+
+ a = 0;
+ for (i = 0; i < toplevels; i++) {
+ rg_count[i].id = avail[a].level[tl];
+ a = sub_levels(&rg_count[i], tl, a, avail_sz, avail);
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_sub_levels_compare);
+
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels < rgs_per_tl) {
+ rg_count[i].reader_groups = rg_count[i].sub_levels;
+ rgs -= rg_count[i].sub_levels;
+ }
+ else {
+ rg_count[i].reader_groups = rgs_per_tl;
+ rgs -= rgs_per_tl;
+ }
+ }
+
+ while (rgs > 0) {
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels == rg_count[i].reader_groups)
+ break;
+ else {
+ rg_count[i].reader_groups++;
+ if (--rgs == 0)
+ break;
+ }
+ }
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_id_compare);
+ }
+
+ a = i = rg = 0;
+ while (a < avail_sz) {
+ a = write_reader_groups(&rg, &rg_count[i], tl,
+ a, avail_sz, avail);
+ i++;
+ }
+
+ ASSERT(groups == rg);
+
+ for (a = 0; a < avail_sz; a++)
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ avail[a].level[ERTS_TOPOLOGY_RG]);
+
+ erts_free(ERTS_ALC_T_TMP, rg_count);
+ }
+
+ erts_free(ERTS_ALC_T_TMP, avail);
+}
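
make_reader_groups_map() first picks the topology level to spread over: the outermost level whose entry count exceeds the requested number of groups, defaulting to the core level. If even that level has no more entries than groups, each entry simply becomes its own group; otherwise the groups are split across the level above it and balanced by the two qsort passes. The level selection on its own (the level counts are assumed values):

    #include <stdio.h>

    enum { NODE, PROCESSOR, PROCESSOR_NODE, CORE, THREAD, NLEVELS };

    int main(void)
    {
        int level_count[NLEVELS] = { 2, 4, 4, 16, 32 };  /* illustrative topology */
        int groups = 8;
        int spread_level = CORE;                         /* default, as in the patch */
        for (int i = NODE; i < THREAD; i++) {
            if (level_count[i] > groups) {
                spread_level = i;
                break;
            }
        }
        printf("spread level = %d (CORE = %d)\n", spread_level, (int) CORE);
        return 0;                                        /* picks CORE: 16 > 8 */
    }
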
+
+/*
+ * CPU topology
+ */
+
typedef struct {
int *id;
int used;
@@ -4054,6 +5072,8 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
sizeof(erts_cpu_topology_t)*cpudata_size);
}
+ make_reader_groups_map(NULL);
+
signal_schedulers_bind_change(cpudata, cpudata_size);
done:
@@ -4457,6 +5477,11 @@ early_cpu_bind_init(void)
cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
+ reader_groups_available_cpu_check = 1;
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
system_cpudata_size)) {
@@ -4492,6 +5517,8 @@ late_cpu_bind_init(void)
: ERTS_CPU_BIND_NONE);
}
+ make_reader_groups_map(NULL);
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -4502,6 +5529,39 @@ late_cpu_bind_init(void)
}
}
+int
+erts_update_cpu_info(void)
+{
+ int changed;
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ changed = erts_cpu_info_update(erts_cpuinfo);
+ if (changed) {
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+
+ system_cpudata_size = erts_get_cpu_topology_size(erts_cpuinfo);
+ system_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(erts_cpu_topology_t)
+ * system_cpudata_size));
+
+ if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
+ || ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
+ system_cpudata_size)) {
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+ system_cpudata = NULL;
+ system_cpudata_size = 0;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+ ASSERT(cpudata);
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+ destroy_tmp_cpu_topology_copy(cpudata);
+ }
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ return changed;
+}
+
#ifdef ERTS_SMP
static void
@@ -5357,7 +6417,7 @@ dequeue_process(ErtsRunQueue *runq, Process *p)
}
/* schedule a process */
-static ERTS_INLINE void
+static ERTS_INLINE ErtsRunQueue *
internal_add_to_runq(ErtsRunQueue *runq, Process *p)
{
Uint32 prev_status = p->status;
@@ -5368,12 +6428,12 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return;
+ return NULL;
else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
ASSERT(p->status != P_SUSPENDED);
ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return;
+ return NULL;
}
ASSERT(!p->scheduler_data);
#endif
@@ -5412,20 +6472,23 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
profile_runnable_proc(p, am_active);
}
- smp_notify_inc_runq(add_runq);
-
if (add_runq != runq)
erts_smp_runq_unlock(add_runq);
+
+ return add_runq;
}
void
erts_add_to_runq(Process *p)
{
+ ErtsRunQueue *notify_runq;
ErtsRunQueue *runq = erts_get_runq_proc(p);
erts_smp_runq_lock(runq);
- internal_add_to_runq(runq, p);
+ notify_runq = internal_add_to_runq(runq, p);
erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(notify_runq);
+
}
/* Possibly remove a scheduled process we need to suspend */
@@ -5564,8 +6627,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
p->run_queue = to_rq;
enqueue_process(to_rq, p);
- smp_notify_inc_runq(to_rq);
-
return ERTS_MIGRATE_SUCCESS;
}
#endif /* ERTS_SMP */
@@ -5762,30 +6823,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
return old_value;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- return 0;
-}
-
-#else
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- return !erts_port_task_have_outstanding_io_tasks();
-}
-
-#endif
-
/* note that P_RUNNING is only set so that we don't try to remove
** running processes from the schedule queue if they exit - a running
** process not being in the schedule queue!!
@@ -5920,8 +6957,11 @@ Process *schedule(Process *p, int calls)
p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ ErtsRunQueue *notify_runq;
p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
+ if (notify_runq != rq)
+ smp_notify_inc_runq(notify_runq);
}
#endif
@@ -5989,7 +7029,10 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
@@ -5998,12 +7041,21 @@ Process *schedule(Process *p, int calls)
}
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->check_children) {
- esdp->check_children = 0;
- erts_smp_runq_unlock(rq);
- erts_check_children();
- erts_smp_runq_lock(rq);
+#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
+ {
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work) {
+ erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+ erts_smp_runq_lock(rq);
+ }
}
#endif
@@ -6035,7 +7087,10 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
}
@@ -6052,17 +7107,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (prepare_for_sys_schedule()) {
- erts_smp_atomic_set(&function_calls, 0);
- fcalls = 0;
- sched_sys_wait(esdp->no, rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- else {
- /* If all schedulers are waiting, one of them *should*
- be waiting in erl_sys_schedule() */
- sched_cnd_wait(esdp->no, rq);
- }
+ scheduler_wait(&fcalls, esdp, rq);
non_empty_runq(rq);
@@ -6124,7 +7169,7 @@ Process *schedule(Process *p, int calls)
else {
if (erts_common_run_queue) {
if (erts_common_run_queue->waiting)
- wake_one_scheduler();
+ wake_scheduler(erts_common_run_queue, 0, 1);
}
else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
@@ -6439,8 +7484,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
- smp_notify_inc_runq(rq);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(rq);
}
static void
@@ -6682,7 +7727,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq;
+ ErtsRunQueue *rq, *notify_runq;
Process *p;
Sint arity; /* Number of arguments. */
#ifndef HYBRID
@@ -6761,11 +7806,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/*
* Must initialize binary lists here before copying binaries to process.
*/
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
heap_need +=
@@ -6799,6 +7840,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->bin_vheap_sz = p->min_vheap_size;
p->bin_old_vheap_sz = p->min_vheap_size;
p->bin_old_vheap = 0;
+ p->bin_vheap_mature = 0;
/* No need to initialize p->fcalls. */
@@ -6855,7 +7897,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->group_leader =
IS_CONST(parent->group_leader)
? parent->group_leader
- : STORE_NC(&p->htop, &p->off_heap.externals, parent->group_leader);
+ : STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
@@ -6999,10 +8041,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
p->status = P_WAITING;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(notify_runq);
+
res = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
@@ -7049,6 +8093,7 @@ void erts_init_empty_process(Process *p)
p->bin_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
+ p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
p->u.ptimer = NULL;
p->bound_runq = NULL;
@@ -7056,11 +8101,7 @@ void erts_init_empty_process(Process *p)
memset(&(p->u.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
p->reg = NULL;
p->heap_sz = 0;
@@ -7207,11 +8248,7 @@ erts_debug_verify_clean_empty_process(Process* p)
/* Thing that erts_cleanup_empty_process() cleans up */
- ASSERT(p->off_heap.mso == NULL);
-#ifndef HYBRID /* FIND ME! */
- ASSERT(p->off_heap.funs == NULL);
-#endif
- ASSERT(p->off_heap.externals == NULL);
+ ASSERT(p->off_heap.first == NULL);
ASSERT(p->off_heap.overhead == 0);
ASSERT(p->mbuf == NULL);
@@ -7225,11 +8262,7 @@ erts_cleanup_empty_process(Process* p)
/* We only check fields that are known to be used... */
erts_cleanup_offheap(&p->off_heap);
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
if (p->mbuf != NULL) {
@@ -7266,7 +8299,7 @@ delete_process(Process* p)
* The mso list should not be used anymore, but if it is, make sure that
* we'll notice.
*/
- p->off_heap.mso = (void *) 0x8DEFFACD;
+ p->off_heap.first = (void *) 0x8DEFFACD;
if (p->arg_reg != p->def_arg_reg) {
erts_free(ERTS_ALC_T_ARG_REG, p->arg_reg);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8f9f7f004e..e49710a7ed 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -89,6 +89,7 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
+extern Uint erts_max_main_threads;
#include "erl_bits.h"
#endif
@@ -219,6 +220,51 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
+#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+
+#define ERTS_SSI_FLGS_SLEEP_TYPE \
+ (ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
+
+#define ERTS_SSI_FLGS_SLEEP \
+ (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLGS_SLEEP_TYPE)
+
+#define ERTS_SSI_FLGS_ALL \
+ (ERTS_SSI_FLGS_SLEEP \
+ | ERTS_SSI_FLG_WAITING \
+ | ERTS_SSI_FLG_SUSPENDED)
+
+
+#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
+#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+#endif
+
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+
+#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
+ (0)
+
+typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+
+struct ErtsSchedulerSleepInfo_ {
+ ErtsSchedulerSleepInfo *next;
+ ErtsSchedulerSleepInfo *prev;
+ erts_smp_atomic_t flags;
+ erts_tse_t *event;
+ erts_smp_atomic_t aux_work;
+};
+
/* times to reschedule low prio process before running */
#define RESCHEDULE_LOW 8
@@ -271,8 +317,9 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- erts_smp_atomic_t spin_waiter;
- erts_smp_atomic_t spin_wake;
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -353,6 +400,7 @@ struct ErtsSchedulerData_ {
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ ErtsSchedulerSleepInfo *ssi;
Process *free_process;
#endif
#if !HEAP_ON_C_STACK
@@ -374,11 +422,6 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children; /* run queue mutex */
- int blocked_check_children; /* schdlr_sspnd mutex */
-#endif
- erts_smp_atomic_t suspended; /* Only used when common run queue */
erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -494,6 +537,7 @@ struct ErtsPendingSuspend_ {
# define MIN_VHEAP_SIZE(p) (p)->min_vheap_size
# define BIN_VHEAP_SZ(p) (p)->bin_vheap_sz
+# define BIN_VHEAP_MATURE(p) (p)->bin_vheap_mature
# define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
@@ -611,9 +655,10 @@ struct process {
Uint mbuf_sz; /* Size of all message buffers */
ErtsPSD *psd; /* Rarely used process specific data */
- Uint bin_vheap_sz; /* Virtual heap block size for binaries */
- Uint bin_old_vheap_sz; /* Virtual old heap block size for binaries */
- Uint bin_old_vheap; /* Virtual old heap size for binaries */
+ Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */
+ Uint64 bin_vheap_mature; /* Virtual heap block size for binaries */
+ Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
+ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
union {
#ifdef ERTS_SMP
@@ -828,7 +873,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */
#define F_TIMO (1 << 2) /* Set if timeout */
#define F_HEAP_GROW (1 << 3)
-#define F_NEED_FULLSWEEP (1 << 4) /* If process has old binaries & funs. */
+#define F_NEED_FULLSWEEP (1 << 4)
#define F_USING_DB (1 << 5) /* If have created tables */
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
@@ -982,6 +1027,7 @@ int erts_init_scheduler_bind_type(char *how);
#define ERTS_INIT_CPU_TOPOLOGY_MISSING 9
int erts_init_cpu_topology(char *topology_str);
+int erts_update_cpu_info(void);
void erts_pre_init_process(void);
void erts_late_init_process(void);
@@ -1085,6 +1131,9 @@ void erts_handle_pending_exit(Process *, ErtsProcLocks);
void erts_deep_process_dump(int, void *);
+Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
+
Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
@@ -1509,29 +1558,30 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_MIN_PROCESSES 16
#endif
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_SMP
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (runq->waiting)
- erts_smp_notify_inc_runq__(runq);
-#endif
+ long flags = erts_smp_atomic_read(&ssi->flags);
+ ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
+ || (flags & ERTS_SSI_FLG_WAITING));
+ if (flags & ERTS_SSI_FLG_SLEEPING) {
+ flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ erts_sched_finish_poke(ssi, flags);
+ }
}
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #ifdef ERTS_SMP */
+
#include "erl_process_lock.h"
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 52440fb635..a4d12139e9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -71,9 +71,12 @@ const Process erts_proc_lock_busy;
#ifdef ERTS_SMP
-/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
-#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
+#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+#define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
+
+#define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
#ifdef ERTS_PROC_LOCK_DEBUG
#define ERTS_PROC_LOCK_HARD_DEBUG
@@ -83,32 +86,19 @@ const Process erts_proc_lock_busy;
static void check_queue(erts_proc_lock_t *lck);
#endif
-
-typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
-struct erts_proc_lock_waiter_t_ {
- erts_proc_lock_waiter_t *next;
- erts_proc_lock_waiter_t *prev;
- ErtsProcLocks wait_locks;
- erts_smp_gate_t gate;
- erts_proc_lock_queues_t *queues;
-};
+#if SIZEOF_INT < 4
+#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
+#endif
struct erts_proc_lock_queues_t_ {
erts_proc_lock_queues_t *next;
- erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-struct erts_proc_lock_thr_spec_data_t_ {
- erts_proc_lock_queues_t *qs;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
};
static erts_proc_lock_queues_t zeroqs = {0};
-static erts_smp_spinlock_t wtr_lock;
-static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_smp_spinlock_t qs_lock;
static erts_proc_lock_queues_t *queue_free_list;
-static erts_tsd_key_t waiter_key;
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
@@ -122,35 +112,26 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
static int proc_lock_spin_count;
-static int proc_lock_trans_spin_cost;
+static int aux_thr_proc_lock_spin_count;
-static void cleanup_waiter(void);
+static void cleanup_tse(void);
void
erts_init_proc_lock(void)
{
int i;
int cpus;
- erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
-#else
- erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
-#else
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
-#endif
}
- waiter_free_list = NULL;
queue_free_list = NULL;
- erts_tsd_key_create(&waiter_key);
- erts_thr_install_exit_handler(cleanup_waiter);
+ erts_thr_install_exit_handler(cleanup_tse);
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
@@ -158,86 +139,106 @@ erts_init_proc_lock(void)
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
cpus = erts_get_cpu_configured(erts_cpuinfo);
- if (cpus > 1)
- proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
- * ((int) erts_no_schedulers));
- else if (cpus == 1)
- proc_lock_spin_count = 0;
- else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
- if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
- proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
- proc_lock_trans_spin_cost = proc_lock_spin_count/20;
-}
-
-static ERTS_INLINE erts_proc_lock_waiter_t *
-alloc_wtr(void)
-{
- erts_proc_lock_waiter_t *wtr;
- erts_smp_spin_lock(&wtr_lock);
- wtr = waiter_free_list;
- if (wtr) {
- waiter_free_list = wtr->next;
- ERTS_LC_ASSERT(queue_free_list);
- wtr->queues = queue_free_list;
- queue_free_list = wtr->queues->next;
- erts_smp_spin_unlock(&wtr_lock);
+ proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
+ * ((int) erts_no_schedulers));
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
}
- else {
- erts_smp_spin_unlock(&wtr_lock);
- wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
- sizeof(erts_proc_lock_waiter_t));
- erts_smp_gate_init(&wtr->gate);
- wtr->wait_locks = (ErtsProcLocks) 0;
- wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) wtr->queues,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
+ else if (cpus == 1) {
+ proc_lock_spin_count = 0;
+ aux_thr_proc_lock_spin_count = 0;
}
- return wtr;
+ else { /* No of cpus unknown. Assume multi proc, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
+ }
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
}
#ifdef ERTS_ENABLE_LOCK_CHECK
static void
-check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+check_unused_tse(erts_tse_t *wtr)
{
int i;
- ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ erts_proc_lock_queues_t *queues = wtr->udata;
+ ERTS_LC_ASSERT(wtr->uflgs == 0);
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+ ERTS_LC_ASSERT(!queues->queue[i]);
}
-#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
#else
-#define CHECK_UNUSED_WAITER(W)
+#define CHECK_UNUSED_TSE(W)
#endif
+static ERTS_INLINE erts_tse_t *
+tse_fetch(erts_pix_lock_t *pix_lock)
+{
+ erts_tse_t *tse = erts_tse_fetch();
+ if (!tse->udata) {
+ erts_proc_lock_queues_t *qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_spin_lock(&qs_lock);
+ qs = queue_free_list;
+ if (qs) {
+ queue_free_list = queue_free_list->next;
+ erts_smp_spin_unlock(&qs_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&qs_lock);
+ qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) qs,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ tse->udata = qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_lock(pix_lock);
+#endif
+ }
+ tse->uflgs = 0;
+ return tse;
+}
static ERTS_INLINE void
-free_wtr(erts_proc_lock_waiter_t *wtr)
+tse_return(erts_tse_t *tse, int force_free_q)
{
- CHECK_UNUSED_WAITER(wtr);
- erts_smp_spin_lock(&wtr_lock);
- wtr->next = waiter_free_list;
- waiter_free_list = wtr;
- wtr->queues->next = queue_free_list;
- queue_free_list = wtr->queues;
- erts_smp_spin_unlock(&wtr_lock);
+ CHECK_UNUSED_TSE(tse);
+ if (force_free_q || erts_tse_is_tmp(tse)) {
+ erts_proc_lock_queues_t *qs = tse->udata;
+ ASSERT(qs);
+ erts_smp_spin_lock(&qs_lock);
+ qs->next = queue_free_list;
+ queue_free_list = qs;
+ erts_smp_spin_unlock(&qs_lock);
+ tse->udata = NULL;
+ }
+ erts_tse_return(tse);
}
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
- erts_tsd_set(waiter_key, (void *) alloc_wtr());
+ tse_return(tse_fetch(NULL), 0);
}
static void
-cleanup_waiter(void)
+cleanup_tse(void)
{
- erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
- if (wtr)
- free_wtr(wtr);
+ erts_tse_t *tse = erts_tse_fetch();
+ if (tse) {
+ if (tse->udata)
+ tse_return(tse, 1);
+ else
+ erts_tse_return(tse);
+ }
}
@@ -250,7 +251,7 @@ cleanup_waiter(void)
static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_queues_t *qs,
int ix,
- erts_proc_lock_waiter_t *wtr)
+ erts_tse_t *wtr)
{
if (!qs->queue[ix]) {
qs->queue[ix] = wtr;
@@ -266,10 +267,10 @@ enqueue_waiter(erts_proc_lock_queues_t *qs,
}
}
-static erts_proc_lock_waiter_t *
+static erts_tse_t *
dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
{
- erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ erts_tse_t *wtr = qs->queue[ix];
ERTS_LC_ASSERT(qs->queue[ix]);
if (wtr->next == wtr) {
ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
@@ -295,10 +296,10 @@ dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
* lock.
*/
static ERTS_INLINE void
-try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
ErtsProcLocks got_locks = (ErtsProcLocks) 0;
- ErtsProcLocks locks = wtr->wait_locks;
+ ErtsProcLocks locks = wtr->uflgs;
int lock_no;
ERTS_LC_ASSERT(lck->queues);
@@ -334,7 +335,7 @@ try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
}
}
- wtr->wait_locks &= ~got_locks;
+ wtr->uflgs &= ~got_locks;
}
/*
@@ -350,8 +351,8 @@ transfer_locks(Process *p,
int unlock)
{
int transferred = 0;
- erts_proc_lock_waiter_t *wake = NULL;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wake = NULL;
+ erts_tse_t *wtr;
ErtsProcLocks unset_waiter = 0;
ErtsProcLocks tlocks = trnsfr_lcks;
int lock_no;
@@ -377,11 +378,11 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(wtr);
if (!qs->queue[lock_no])
unset_waiter |= lock;
- ERTS_LC_ASSERT(wtr->wait_locks & lock);
- wtr->wait_locks &= ~lock;
- if (wtr->wait_locks)
+ ERTS_LC_ASSERT(wtr->uflgs & lock);
+ wtr->uflgs &= ~lock;
+ if (wtr->uflgs)
try_aquire(&p->lock, wtr);
- if (!wtr->wait_locks) {
+ if (!wtr->uflgs) {
/*
* The other thread got all locks it needs;
* need to wake it up.
@@ -412,9 +413,10 @@ transfer_locks(Process *p,
erts_pix_unlock(pix_lock);
do {
- erts_proc_lock_waiter_t *tmp = wake;
+ erts_tse_t *tmp = wake;
wake = wake->next;
- erts_smp_gate_let_through(&tmp->gate, 1);
+ erts_atomic_set(&tmp->uaflgs, 0);
+ erts_tse_set(tmp);
} while (wake);
if (!unlock)
@@ -462,26 +464,16 @@ wait_for_locks(Process *p,
ErtsProcLocks olflgs)
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
- int tsd;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
+ erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
- wtr = erts_tsd_get(waiter_key);
- if (wtr)
- tsd = 1;
- else {
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_unlock(pix_lock);
-#endif
- wtr = alloc_wtr();
- tsd = 0;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_lock(pix_lock);
-#endif
- }
+ wtr = tse_fetch(pix_lock);
/* Record which locks this waiter needs. */
- wtr->wait_locks = need_locks;
+ wtr->uflgs = need_locks;
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -489,14 +481,16 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+ qs = wtr->udata;
+ ASSERT(qs);
/* Provide the process with waiter queues, if it doesn't have one. */
if (!p->lock.queues) {
- wtr->queues->next = NULL;
- p->lock.queues = wtr->queues;
+ qs->next = NULL;
+ p->lock.queues = qs;
}
else {
- wtr->queues->next = p->lock.queues->next;
- p->lock.queues->next = wtr->queues;
+ qs->next = p->lock.queues->next;
+ p->lock.queues->next = qs;
}
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
@@ -506,46 +500,59 @@ wait_for_locks(Process *p,
/* Try to aquire locks one at a time in lock order and set wait flag */
try_aquire(&p->lock, wtr);
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
- if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
- /* Got to wait for locks... */
+ if (wtr->uflgs) {
+ /* We didn't get them all; need to wait... */
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ erts_atomic_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
- /*
- * Wait for needed locks. When we return all needed locks have
- * have been acquired by other threads and transfered to us.
- */
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
-#else
- erts_smp_gate_wait(&wtr->gate);
-#endif
+ while (1) {
+ int res;
+ erts_tse_reset(wtr);
+
+ if (erts_atomic_read(&wtr->uaflgs) == 0)
+ break;
+
+ /*
+ * Wait for needed locks. When we are woken, all needed locks
+ * have been acquired by other threads and transferred to us.
+ * However, we need to be prepared for spurious wakeups.
+ */
+ do {
+ res = erts_tse_wait(wtr); /* might return EINTR */
+ } while (res != 0);
+ }
erts_pix_lock(pix_lock);
+
+ ASSERT(wtr->uflgs == 0);
}
/* Recover some queues to store in the waiter. */
ERTS_LC_ASSERT(p->lock.queues);
if (p->lock.queues->next) {
- wtr->queues = p->lock.queues->next;
- p->lock.queues->next = wtr->queues->next;
+ qs = p->lock.queues->next;
+ p->lock.queues->next = qs->next;
}
else {
- wtr->queues = p->lock.queues;
+ qs = p->lock.queues;
p->lock.queues = NULL;
}
+ wtr->udata = qs;
erts_pix_unlock(pix_lock);
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- if (tsd)
- CHECK_UNUSED_WAITER(wtr);
- else
- free_wtr(wtr);
+ tse_return(wtr, 0);
}
/*
@@ -563,52 +570,57 @@ erts_proc_lock_failed(Process *p,
ErtsProcLocks locks,
ErtsProcLocks old_lflgs)
{
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- int spin_count = 0;
-#else
- int spin_count = proc_lock_spin_count;
-#endif
-
+ int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ int thr_spin_count;
+ int spin_count;
ErtsProcLocks need_locks = locks;
ErtsProcLocks olflgs = old_lflgs;
- while (need_locks != 0)
- {
- ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+ if (erts_thr_get_main_status())
+ thr_spin_count = proc_lock_spin_count;
+ else
+ thr_spin_count = aux_thr_proc_lock_spin_count;
+
+ spin_count = thr_spin_count;
+
+ while (need_locks != 0) {
+ ErtsProcLocks can_grab;
+
+ can_grab = in_order_locks(olflgs, need_locks);
- if (can_grab == 0)
- {
+ if (can_grab == 0) {
/* Someone already has the lowest-numbered lock we want. */
- if (spin_count-- <= 0)
- {
+ if (spin_count-- <= 0) {
/* Too many retries, give up and sleep for the lock. */
wait_for_locks(p, pixlck, locks, need_locks, olflgs);
return;
}
+ ERTS_SPIN_BODY;
+
+ if (--until_yield == 0) {
+ until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+
olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
}
- else
- {
+ else {
/* Try to grab all of the grabbable locks at once with cmpxchg. */
ErtsProcLocks grabbed = olflgs | can_grab;
ErtsProcLocks nflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);
- if (nflgs == olflgs)
- {
+ if (nflgs == olflgs) {
/* Success! We grabbed the 'can_grab' locks. */
olflgs = grabbed;
need_locks &= ~can_grab;
-#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
/* Since we made progress, reset the spin count. */
- spin_count = proc_lock_spin_count;
-#endif
+ spin_count = thr_spin_count;
}
- else
- {
+ else {
/* Compare-and-exchange failed, try again. */
olflgs = nflgs;
}
@@ -1407,7 +1419,7 @@ check_queue(erts_proc_lock_t *lck)
wtr = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
if (lflgs & wtr) {
int n;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
wtr = lck->queues->queue[lock_no];
n = 0;
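
The rewritten wait_for_locks()/transfer_locks() pair above drops the old gate and instead pairs a thread-specific event with an atomic "still waiting" flag (uaflgs). A minimal standalone sketch of that handshake is shown below; it is not ERTS code, and the event_t type, the pthread-based event and the function names are illustrative stand-ins for the ethr primitives:

/* Illustrative sketch only -- not the ERTS implementation. */
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t cnd;
    int signalled;
} event_t;

static void event_reset(event_t *e)
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 0;
    pthread_mutex_unlock(&e->mtx);
}

static void event_set(event_t *e)
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 1;
    pthread_cond_signal(&e->cnd);
    pthread_mutex_unlock(&e->mtx);
}

static void event_wait(event_t *e)
{
    pthread_mutex_lock(&e->mtx);
    while (!e->signalled)
        pthread_cond_wait(&e->cnd, &e->mtx);
    pthread_mutex_unlock(&e->mtx);
}

/* Waiter side: mirrors the reset/recheck/wait loop in wait_for_locks().
 * Rechecking 'waiting' after every reset makes spurious wakeups harmless. */
static void wait_until_released(atomic_int *waiting, event_t *ev)
{
    atomic_store(waiting, 1);
    for (;;) {
        event_reset(ev);
        if (atomic_load(waiting) == 0)
            break;
        event_wait(ev);
    }
}

/* Waker side: mirrors transfer_locks(); clear the flag, then set the event. */
static void release_waiter(atomic_int *waiting, event_t *ev)
{
    atomic_store(waiting, 0);
    event_set(ev);
}
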
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index d71e5a0a6e..7cfc9893fa 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,11 +255,7 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_t mtx;
-#else
erts_smp_spinlock_t spnlck;
-#endif
char buf[64]; /* Try to get locks in different cache lines */
} u;
} erts_pix_lock_t;
@@ -277,9 +273,12 @@ typedef struct {
((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
@@ -289,6 +288,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
ErtsProcLocks);
ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -322,7 +324,9 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
@@ -348,9 +352,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
- erts_pix_lock_t *,
- ErtsProcLocks,
- char *file, unsigned int line);
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
erts_pix_lock_t *,
@@ -372,30 +376,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_lock(&pixlck->u.mtx);
-#else
erts_smp_spin_lock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_unlock(&pixlck->u.mtx);
-#else
erts_smp_spin_unlock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
-#else
return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
-#endif
}
/*
@@ -417,9 +409,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
ErtsProcLocks expct_lflgs = 0;
while (1) {
- ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
- expct_lflgs | locks,
- expct_lflgs);
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
if (ERTS_LIKELY(lflgs == expct_lflgs)) {
/* We successfully grabbed all locks. */
return 0;
@@ -535,7 +527,7 @@ erts_smp_proc_unlock__(Process *p,
if (want_lflgs != old_lflgs) {
ErtsProcLocks new_lflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(&p->lock, want_lflgs, old_lflgs);
if (new_lflgs != old_lflgs) {
/* cmpxchg failed, try again. */
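
The ACQB/RELB split introduced above gives the lock path acquire semantics and the unlock path release semantics instead of one fully ordered cmpxchg. A small sketch of the same pairing in plain C11 atomics follows; the flag layout and names are illustrative, not the ERTS macros, and the unlock is simplified to a single fetch-and:

#include <stdatomic.h>

typedef unsigned long lock_flags_t;

/* Try to take every lock bit in 'locks' with one acquire cmpxchg;
 * returns nonzero on success (all bits were previously free). */
static int try_lock_all(_Atomic lock_flags_t *flags, lock_flags_t locks)
{
    lock_flags_t expected = 0;
    return atomic_compare_exchange_strong_explicit(
        flags, &expected, locks,
        memory_order_acquire,      /* pairs with the release below */
        memory_order_relaxed);
}

/* Release the same bits with release ordering so everything written while
 * holding the locks is visible to the next thread that acquires them. */
static void unlock_all(_Atomic lock_flags_t *flags, lock_flags_t locks)
{
    atomic_fetch_and_explicit(flags, ~locks, memory_order_release);
}
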
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 03d2a586e3..b41fa70476 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -43,9 +43,17 @@ typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
typedef erts_tid_t erts_smp_tid_t;
typedef erts_mtx_t erts_smp_mtx_t;
typedef erts_cnd_t erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
+#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
+#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
+typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_gate_t erts_smp_gate_t;
typedef ethr_atomic_t erts_smp_atomic_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
@@ -54,15 +62,27 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
typedef int erts_smp_thr_opts_t;
typedef int erts_smp_thr_init_data_t;
typedef int erts_smp_tid_t;
typedef int erts_smp_mtx_t;
typedef int erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_LONG_LIVED 0
+#define ERTS_SMP_RWMTX_SHORT_LIVED 0
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef int erts_smp_gate_t;
typedef long erts_smp_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
@@ -103,8 +123,6 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
@@ -119,9 +137,17 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
@@ -155,6 +181,16 @@ ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -190,12 +226,6 @@ ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
@@ -331,22 +361,6 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_set_forksafe(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unset_forksafe(mtx);
-#endif
-}
-
ERTS_GLB_INLINE int
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
@@ -433,6 +447,25 @@ erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_set_reader_group(int no)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_set_reader_group(no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -441,6 +474,16 @@ erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt(rwmtx, opt, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
{
#ifdef ERTS_SMP
@@ -709,6 +752,73 @@ erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
#endif
}
+ERTS_GLB_INLINE long
+erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
@@ -919,54 +1029,6 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_gate_init(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_init((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_destroy(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_destroy((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_close(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_close((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
-{
-#ifdef ERTS_SMP
- erts_gate_let_through((erts_gate_t *) gp, no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_wait(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_wait((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
-{
-#ifdef ERTS_SMP
- erts_gate_swait((erts_gate_t *) gp, spincount);
-#endif
-}
-
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 3a8c30fe6a..b8e4473141 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -821,10 +821,10 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A A A A A A A A A A A A A A A A A A A A A A A A A A|t t t t|0 0| Thing
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E| ErlNode
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X| Data 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . . .
@@ -835,7 +835,7 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* t : External pid thing tag (1100)
* t : External port thing tag (1101)
* t : External ref thing tag (1110)
- * N : Next (external thing) pointer
+ * N : Next (off_heap) pointer
* E : ErlNode pointer
* X : Type specific data
*
@@ -852,13 +852,16 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
/* XXX:PaN - this structure is not perfect for halfword heap, it takes
a lot of memory due to padding, and the array will not begin at the end of the
structure, as otherwise expected. Be sure to access data.ui32 array and not try
- to do pointer manipulation on an Eterm * to reach the actual data... */
+ to do pointer manipulation on an Eterm * to reach the actual data...
+ XXX:Sverk - Problem made worse by "one off-heap list" when 'next' pointer
+ must align with 'next' in ProcBin, erl_fun_thing and erl_off_heap_header.
+*/
typedef struct external_thing_ {
/* ----+ */
Eterm header; /* | */
- struct external_thing_ *next; /* > External thing head */
- struct erl_node_ *node; /* | */
+ struct erl_node_* node; /* > External thing head */
+ struct erl_off_heap_header* next; /* | */
/* ----+ */
union {
Uint32 ui32[1];
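
The field reordering above exists so that every off-heap object keeps its 'next' pointer at the same offset and can sit on one mixed list, as the XXX:Sverk note says. A standalone sketch of that layout convention follows; the struct names and fields are illustrative, not the ERTS definitions, and portable C would express the shared prefix with a union:

#include <stdio.h>

/* Generic view of an off-heap object: only the header word and the
 * 'next' link matter to code that walks the list. */
struct oh_header {
    unsigned long thing_word;
    void *type_specific;              /* node pointer, size, ... */
    struct oh_header *next;
};

/* Two concrete off-heap types sharing the same three-word prefix. */
struct ext_thing_like {
    unsigned long thing_word;
    void *node;
    struct oh_header *next;
    unsigned int data[1];
};

struct proc_bin_like {
    unsigned long thing_word;
    void *val;
    struct oh_header *next;
    unsigned char *bytes;
};

/* One sweep can visit both kinds because 'next' lines up in all of them. */
static void sweep_off_heap(struct oh_header *first)
{
    struct oh_header *oh;
    for (oh = first; oh; oh = oh->next)
        printf("thing word %#lx\n", oh->thing_word);
}
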
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 35b338c6eb..0b7269262e 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -25,6 +25,11 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+#define ERTS_SPIN_BODY ETHR_SPIN_BODY
+
+#define ERTS_MAX_READER_GROUPS 8
+extern int erts_reader_groups;
+
#include "sys.h"
#ifdef USE_THREADS
@@ -48,6 +53,7 @@
#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
typedef ethr_thr_opts erts_thr_opts_t;
typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_late_init_data erts_thr_late_init_data_t;
typedef ethr_tid erts_tid_t;
/* mutex */
@@ -73,8 +79,19 @@ typedef struct {
erts_lcnt_lock_t lcnt;
#endif
} erts_rwmtx_t;
+
+#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
+#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
+#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_RWMTX_LONG_LIVED ETHR_RWMUTEX_LONG_LIVED
+#define ERTS_RWMTX_SHORT_LIVED ETHR_RWMUTEX_SHORT_LIVED
+#define ERTS_RWMTX_UNKNOWN_LIVED ETHR_RWMUTEX_UNKNOWN_LIVED
+typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
+
typedef ethr_tsd_key erts_tsd_key_t;
-typedef ethr_gate erts_gate_t;
+typedef ethr_ts_event erts_tse_t;
typedef ethr_atomic_t erts_atomic_t;
/* spinlock */
@@ -103,25 +120,14 @@ typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_REC_MTX_INITER \
- {ETHR_REC_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
-#define ERTS_MTX_INITER \
- {ETHR_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
-#else
-#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
-#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
-#endif
-#define ERTS_CND_INITER ETHR_COND_INITER
#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+#define ERTS_THR_LATE_INIT_DATA_DEF_INITER \
+ ETHR_LATE_INIT_DATA_DEFAULT_INITER
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-
#else /* #ifdef USE_THREADS */
#define ERTS_THR_MEMORY_BARRIER
@@ -129,12 +135,26 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
+typedef int erts_thr_late_init_data_t;
typedef int erts_tid_t;
typedef int erts_mtx_t;
typedef int erts_cnd_t;
+#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_RWMTX_TYPE_NORMAL 0
+#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_RWMTX_LONG_LIVED 0
+#define ERTS_RWMTX_SHORT_LIVED 0
+#define ERTS_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_rwmtx_opt_t;
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
-typedef int erts_gate_t;
+typedef int erts_tse_t;
typedef long erts_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -148,7 +168,6 @@ typedef struct {
long tv_nsec;
} erts_thr_timeval_t;
-#define ERTS_REC_MTX_INITER 0
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -158,6 +177,7 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
void *arg, erts_thr_opts_t *opts);
ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
@@ -166,9 +186,6 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
-#endif
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
@@ -177,8 +194,6 @@ ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
@@ -192,9 +207,17 @@ ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
@@ -228,6 +251,20 @@ ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra,
+ Uint16 opt);
ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
char *name,
Eterm extra);
@@ -263,12 +300,16 @@ ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
-ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
+ERTS_GLB_INLINE int erts_thr_get_main_status(void);
+ERTS_GLB_INLINE void erts_thr_yield(void);
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
@@ -290,6 +331,16 @@ erts_thr_init(erts_thr_init_data_t *id)
}
ERTS_GLB_INLINE void
+erts_thr_late_init(erts_thr_late_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_late_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "complete initialization of thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
@@ -362,20 +413,6 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
#endif
}
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_rec_mtx_init(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_rec_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize recursive mutex");
-#endif
-}
-#endif
-
-
ERTS_GLB_INLINE void
erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
@@ -422,9 +459,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -463,9 +498,7 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -492,26 +525,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_mtx_set_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_set_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "set mutex forksafe");
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_unset_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_unset_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "unset mutex forksafe");
-#endif
-}
-
ERTS_GLB_INLINE int
erts_mtx_trylock(erts_mtx_t *mtx)
{
@@ -531,11 +544,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
-#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try lock mutex");
-
+#endif
return res;
#else
return 0;
@@ -551,19 +560,16 @@ erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
#endif
}
@@ -571,16 +577,13 @@ ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&mtx->lcnt);
#endif
- res = ethr_mutex_unlock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "unlock mutex");
+ ethr_mutex_unlock(&mtx->mtx);
#endif
}
@@ -648,9 +651,7 @@ ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_signal(cnd);
- if (res)
- erts_thr_fatal_error(res, "signal on condition variable");
+ ethr_cond_signal(cnd);
#endif
}
@@ -659,19 +660,34 @@ ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_broadcast(cnd);
- if (res)
- erts_thr_fatal_error(res, "broadcast on condition variable");
+ ethr_cond_broadcast(cnd);
#endif
}
/* rwmutex */
ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_rwmtx_set_reader_group(int no)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+ res = ethr_rwmutex_set_reader_group(no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "set reader group");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -684,10 +700,20 @@ erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra)
+{
+ erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -700,6 +726,12 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
}
ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+ erts_rwmtx_init_opt(rwmtx, NULL, name);
+}
+
+ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
@@ -736,9 +768,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try read lock rwmutex");
return res;
#else
@@ -754,19 +783,16 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "read lock rwmutex");
#endif
}
@@ -774,16 +800,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "read unlock rwmutex");
+ ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
}
@@ -808,9 +831,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try write lock rwmutex");
return res;
#else
@@ -826,19 +846,16 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "write lock rwmutex");
#endif
}
@@ -846,16 +863,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "write unlock rwmutex");
+ ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
}
@@ -917,9 +931,7 @@ ERTS_GLB_INLINE void
erts_atomic_init(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_init(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic init");
+ ethr_atomic_init(var, i);
#else
*var = i;
#endif
@@ -929,9 +941,7 @@ ERTS_GLB_INLINE void
erts_atomic_set(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_set(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic set");
+ ethr_atomic_set(var, i);
#else
*var = i;
#endif
@@ -941,11 +951,7 @@ ERTS_GLB_INLINE long
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
- long i;
- int res = ethr_atomic_read(var, &i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic read");
- return i;
+ return ethr_atomic_read(var);
#else
return *var;
#endif
@@ -955,11 +961,7 @@ ERTS_GLB_INLINE long
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_inctest(incp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment and test");
- return test;
+ return ethr_atomic_inc_read(incp);
#else
return ++(*incp);
#endif
@@ -969,11 +971,7 @@ ERTS_GLB_INLINE long
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_dectest(decp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement and test");
- return test;
+ return ethr_atomic_dec_read(decp);
#else
return --(*decp);
#endif
@@ -983,9 +981,7 @@ ERTS_GLB_INLINE void
erts_atomic_inc(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_inc(incp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment");
+ ethr_atomic_inc(incp);
#else
++(*incp);
#endif
@@ -995,9 +991,7 @@ ERTS_GLB_INLINE void
erts_atomic_dec(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_dec(decp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement");
+ ethr_atomic_dec(decp);
#else
--(*decp);
#endif
@@ -1007,11 +1001,7 @@ ERTS_GLB_INLINE long
erts_atomic_addtest(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_addtest(addp, i, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition and test");
- return test;
+ return ethr_atomic_add_read(addp, i);
#else
return *addp += i;
#endif
@@ -1021,9 +1011,7 @@ ERTS_GLB_INLINE void
erts_atomic_add(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_add(addp, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition");
+ ethr_atomic_add(addp, i);
#else
*addp += i;
#endif
@@ -1034,9 +1022,7 @@ erts_atomic_xchg(erts_atomic_t *xchgp, long new)
{
long old;
#ifdef USE_THREADS
- int res = ethr_atomic_xchg(xchgp, new, &old);
- if (res)
- erts_thr_fatal_error(res, "perform atomic exchange");
+ return ethr_atomic_xchg(xchgp, new);
#else
old = *xchgp;
*xchgp = new;
@@ -1048,11 +1034,7 @@ ERTS_GLB_INLINE long
erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
{
#ifdef USE_THREADS
- long old;
- int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
- if (ERTS_UNLIKELY(res != 0))
- erts_thr_fatal_error(res, "perform atomic exchange");
- return old;
+ return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
long old = *xchgp;
if (old == expected)
@@ -1064,31 +1046,95 @@ erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
ERTS_GLB_INLINE long
erts_atomic_bor(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_or_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise or");
+ return ethr_atomic_read_bor(var, mask);
#else
+ long old;
old = *var;
*var |= mask;
-#endif
return old;
+#endif
}
ERTS_GLB_INLINE long
erts_atomic_band(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_and_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise and");
+ return ethr_atomic_read_band(var, mask);
#else
+ long old;
old = *var;
*var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read_acqb(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set_relb(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ ethr_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_dec_read_relb(decp);
+#else
+ return --(*decp);
#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
return old;
+#endif
}
/* spinlock */
@@ -1112,6 +1158,26 @@ erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
+ Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name)
{
#ifdef USE_THREADS
@@ -1152,16 +1218,13 @@ ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&lock->lcnt);
#endif
- res = ethr_spin_unlock(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "release spin lock");
+ ethr_spin_unlock(&lock->slck);
#else
(void)lock;
#endif
@@ -1175,19 +1238,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
- res = ethr_spin_lock(&lock->slck);
+ ethr_spin_lock(&lock->slck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take spin lock");
#else
(void)lock;
#endif
@@ -1268,16 +1328,13 @@ ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release read lock");
+ ethr_read_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1291,19 +1348,16 @@ erts_read_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_lock(&lock->rwlck);
+ ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take read lock");
#else
(void)lock;
#endif
@@ -1313,16 +1367,13 @@ ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release write lock");
+ ethr_write_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1336,19 +1387,16 @@ erts_write_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_lock(&lock->rwlck);
+ ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take write lock");
#else
(void)lock;
#endif
@@ -1432,66 +1480,95 @@ erts_tsd_get(erts_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_init(erts_gate_t *gp)
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_init((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize gate");
+ return (erts_tse_t *) ethr_get_ts_event();
+#else
+ return (erts_tse_t *) NULL;
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_destroy(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_destroy((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "destroy gate");
+ ethr_leave_ts_event(ep);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_close(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_close((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "close gate");
+ ethr_event_set(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_let_through(erts_gate_t *gp, unsigned no)
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_let_through((ethr_gate *) gp, no);
- if (res != 0)
- erts_thr_fatal_error(res, "let through gate");
+ ethr_event_reset(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_wait(erts_gate_t *gp)
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
+{
+#ifdef USE_THREADS
+ return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
+{
+#ifdef USE_THREADS
+ return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_wait((ethr_gate *) gp);
+ return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
+{
+#ifdef USE_THREADS
+ int res = ethr_set_main_thr_status(on, no);
if (res != 0)
- erts_thr_fatal_error(res, "wait on gate");
+ erts_thr_fatal_error(res, "set thread main status");
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_swait(erts_gate_t *gp, int spincount)
+ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ int main_status;
+ int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
- erts_thr_fatal_error(res, "swait on gate");
+ erts_thr_fatal_error(res, "get thread main status");
+ return main_status;
+#else
+ return 1;
#endif
}
+ERTS_GLB_INLINE void erts_thr_yield(void)
+{
+#ifdef USE_THREADS
+ int res = ETHR_YIELD();
+ if (res != 0)
+ erts_thr_fatal_error(res, "yield");
+#endif
+}
+
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
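
ERTS_SPIN_BODY and erts_thr_yield() added above feed the new spin policy in erts_proc_lock_failed(): spin a bounded number of rounds, yield every ERTS_PROC_LOCK_SPIN_UNTIL_YIELD iterations, and only then sleep. A generic sketch of that progression on a plain C11 lock word is shown below; SPIN_MAX, YIELD_EVERY and block_on_lock() are made-up stand-ins, not ERTS identifiers:

#include <stdatomic.h>
#include <sched.h>

#define SPIN_MAX    2000
#define YIELD_EVERY 25

/* Hypothetical blocking fallback: enqueue the thread and sleep until woken. */
extern void block_on_lock(atomic_int *lock);

static void acquire(atomic_int *lock)
{
    int spins = SPIN_MAX;
    int until_yield = YIELD_EVERY;

    for (;;) {
        int expected = 0;
        /* Acquire-ordered cmpxchg, as in the ACQB lock path. */
        if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return;
        if (spins-- <= 0) {
            block_on_lock(lock);     /* too many retries: sleep for it */
            return;
        }
        if (--until_yield == 0) {
            until_yield = YIELD_EVERY;
            sched_yield();           /* give other threads the core */
        }
    }
}
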
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index c15f85f8f1..7b8706ea13 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -650,6 +650,22 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
t.tm_sec = *second;
t.tm_isdst = isdst;
the_clock = mktime(&t);
+ if (the_clock == -1) {
+ if (isdst) {
+ /* If this is a timezone without DST and the OS (correctly)
+ refuses to give us a DST time, we simulate the Linux/Solaris
+ behaviour of giving the same result as if is_dst had not been set. */
+ t.tm_isdst = 0;
+ the_clock = mktime(&t);
+ if (the_clock == -1) {
+ /* Failed anyway, something else is bad - will be a badarg */
+ return 0;
+ }
+ } else {
+ /* Something else is the matter, badarg. */
+ return 0;
+ }
+ }
#ifdef HAVE_GMTIME_R
gmtime_r(&the_clock, (tm = &tmbuf));
#else
@@ -663,6 +679,10 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
*second = tm->tm_sec;
return 1;
}
+#if defined(HAVE_POSIX2TIME) && defined(HAVE_DECL_POSIX2TIME) && \
+ !HAVE_DECL_POSIX2TIME
+extern time_t posix2time(time_t);
+#endif
int
univ_to_local(Sint *year, Sint *month, Sint *day,
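
The erl_time_sup.c hunk above makes local_to_univ() tolerant of time zones that have no DST: when mktime() rejects a tm_isdst = 1 request, the call is retried with tm_isdst = 0, and only a second failure becomes a badarg. A minimal standalone sketch of that retry pattern, using plain time.h types and a hypothetical helper name rather than the ERTS wrappers:

    #include <time.h>

    /* Returns (time_t) -1 only if the calendar time is invalid even after
     * dropping the DST request, mirroring the fallback in the hunk above. */
    static time_t mktime_with_dst_fallback(struct tm *t, int isdst)
    {
        time_t the_clock;

        t->tm_isdst = isdst;
        the_clock = mktime(t);
        if (the_clock == (time_t) -1 && isdst) {
            /* Zone without DST: retry as if DST had not been requested. */
            t->tm_isdst = 0;
            the_clock = mktime(t);
        }
        return the_clock;
    }
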
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 66b05c0e9d..5e81a2d624 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -43,8 +43,6 @@ static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table.
#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
-#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
- "export_tab")
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
@@ -111,8 +109,12 @@ void
init_export_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
- export_init_lock();
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index f41b61d73d..d7c8aa84e9 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -65,11 +65,9 @@
# endif
#endif
-/*
- * For backward compatibility reasons, only encode integers that
- * fit in 28 bits (signed) using INTEGER_EXT.
+/* Does Sint fit in Sint32?
*/
-#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+#define IS_SSMALL32(x) (((Uint) (((x) >> (32-1)) + 1)) < 2)
/*
* Valid creations for nodes are 1, 2, or 3. 0 can also be sent
@@ -504,7 +502,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
ASSERT(edep->ext_endp >= edep->extp);
ext_sz = edep->ext_endp - edep->extp;
- align_sz = ERTS_WORD_ALIGN_PAD_SZ(dist_ext_sz + ext_sz);
+ align_sz = ERTS_EXTRA_DATA_ALIGN_SZ(dist_ext_sz + ext_sz);
new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
dist_ext_sz + ext_sz + align_sz + xsize);
@@ -1470,11 +1468,11 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
*hpp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_pid_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = data;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*) etp;
*objp = make_external_pid(etp);
}
return ep;
@@ -1571,13 +1569,15 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
case SMALL_DEF:
{
+ /* From R14B we no longer restrict INTEGER_EXT to 28 bits,
+ * as done earlier for backward compatibility reasons. */
Sint val = signed_val(obj);
if ((Uint)val < 256) {
*ep++ = SMALL_INTEGER_EXT;
put_int8(val, ep);
ep++;
- } else if (sizeof(Sint) == 4 || IS_SSMALL28(val)) {
+ } else if (sizeof(Sint) == 4 || IS_SSMALL32(val)) {
*ep++ = INTEGER_EXT;
put_int32(val, ep);
ep += 4;
@@ -1599,18 +1599,32 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
break;
case BIG_DEF:
- if ((n = big_bytes(obj)) < 256) {
- *ep++ = SMALL_BIG_EXT;
- put_int8(n, ep);
- ep += 1;
- }
- else {
- *ep++ = LARGE_BIG_EXT;
- put_int32(n, ep);
- ep += 4;
+ {
+ int sign = big_sign(obj);
+ n = big_bytes(obj);
+ if (sizeof(Sint)==4 && n<=4) {
+ Uint dig = big_digit(obj,0);
+ Sint val = sign ? -dig : dig;
+ if ((val<0) == sign) {
+ *ep++ = INTEGER_EXT;
+ put_int32(val, ep);
+ ep += 4;
+ break;
+ }
+ }
+ if (n < 256) {
+ *ep++ = SMALL_BIG_EXT;
+ put_int8(n, ep);
+ ep += 1;
+ }
+ else {
+ *ep++ = LARGE_BIG_EXT;
+ put_int32(n, ep);
+ ep += 4;
+ }
+ *ep++ = sign;
+ ep = big_to_bytes(obj, ep);
}
- *ep++ = big_sign(obj);
- ep = big_to_bytes(obj, ep);
break;
case PID_DEF:
@@ -1896,69 +1910,30 @@ is_external_string(Eterm list, int* p_is_string)
return len;
}
-/* Assumes that the ones to undo are preluding the lists. */
+/* Assumes that the ones to undo are preluding the list. */
static void
undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
{
const Uint area_sz = (end - start) * sizeof(Eterm);
- struct proc_bin* mso;
- struct proc_bin** mso_nextp = NULL;
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs;
- struct erl_fun_thing** funs_nextp = NULL;
-#endif
- struct external_thing_* ext;
- struct external_thing_** ext_nextp = NULL;
-
- for (mso = off_heap->mso; ; mso=mso->next) {
- if (!in_area(mso, start, area_sz)) {
- if (mso_nextp != NULL) {
- *mso_nextp = NULL;
- erts_cleanup_mso(off_heap->mso);
- off_heap->mso = mso;
+ struct erl_off_heap_header* hdr;
+ struct erl_off_heap_header** hdr_nextp = NULL;
+
+ for (hdr = off_heap->first; ; hdr=hdr->next) {
+ if (!in_area(hdr, start, area_sz)) {
+ if (hdr_nextp != NULL) {
+ *hdr_nextp = NULL;
+ erts_cleanup_offheap(off_heap);
+ off_heap->first = hdr;
}
break;
}
- mso_nextp = &mso->next;
+ hdr_nextp = &hdr->next;
}
-#ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; ; funs=funs->next) {
- if (!in_area(funs, start, area_sz)) {
- if (funs_nextp != NULL) {
- *funs_nextp = NULL;
- erts_cleanup_funs(off_heap->funs);
- off_heap->funs = funs;
- }
- break;
- }
- funs_nextp = &funs->next;
- }
-#endif
- for (ext = off_heap->externals; ; ext=ext->next) {
- if (!in_area(ext, start, area_sz)) {
- if (ext_nextp != NULL) {
- *ext_nextp = NULL;
- erts_cleanup_externals(off_heap->externals);
- off_heap->externals = ext;
- }
- break;
- }
- ext_nextp = &ext->next;
- }
-
- /* Assert that the ones to undo were indeed preluding the lists. */
+ /* Assert that the ones to undo were indeed preluding the list. */
#ifdef DEBUG
- for (mso = off_heap->mso; mso != NULL; mso=mso->next) {
- ASSERT(!in_area(mso, start, area_sz));
- }
-# ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; funs != NULL; funs=funs->next) {
- ASSERT(!in_area(funs, start, area_sz));
- }
-# endif
- for (ext = off_heap->externals; ext != NULL; ext=ext->next) {
- ASSERT(!in_area(ext, start, area_sz));
+ for (hdr = off_heap->first; hdr != NULL; hdr = hdr->next) {
+ ASSERT(!in_area(hdr, start, area_sz));
}
#endif /* DEBUG */
}
@@ -2202,11 +2177,11 @@ dec_term_atom_common:
hp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_port_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = num;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_port(etp);
}
@@ -2284,10 +2259,10 @@ dec_term_atom_common:
#else
etp->header = make_external_ref_header(ref_words);
#endif
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
}
@@ -2330,8 +2305,8 @@ dec_term_atom_common:
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2367,8 +2342,8 @@ dec_term_atom_common:
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2488,8 +2463,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
@@ -2566,8 +2541,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
old_uniq = unsigned_val(temp);
@@ -2687,7 +2662,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
if ((Uint)val < 256)
result += 1 + 1; /* SMALL_INTEGER_EXT */
- else if (sizeof(Sint) == 4 || IS_SSMALL28(val))
+ else if (sizeof(Sint) == 4 || IS_SSMALL32(val))
result += 1 + 4; /* INTEGER_EXT */
else {
DeclareTmpHeapNoproc(tmp_big,2);
@@ -2699,7 +2674,10 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
break;
case BIG_DEF:
- if ((i = big_bytes(obj)) < 256)
+ i = big_bytes(obj);
+ if (sizeof(Sint)==4 && i <= 4 && (big_digit(obj,0)-big_sign(obj)) < (1<<31))
+ result += 1 + 4; /* INTEGER_EXT */
+ else if (i < 256)
result += 1 + 1 + 1 + i; /* tag,size,sign,digits */
else
result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
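
With IS_SSMALL28 widened to IS_SSMALL32, the rewritten BIG_DEF cases in enc_term() and encode_size_struct2() let a one-digit bignum on a 32-bit emulator be sent as INTEGER_EXT (a signed 32-bit payload) instead of SMALL_BIG_EXT whenever its value actually fits. The (val < 0) == sign test is the fits-check: negating the magnitude must not change the expected sign. A self-contained sketch of that check with fixed-width types (hypothetical helper name, not the real big_*() accessors):

    #include <stdint.h>

    /* Can a one-digit bignum with magnitude 'dig', negative iff 'sign',
     * be encoded as INTEGER_EXT, i.e. does it fit in signed 32 bits? */
    static int fits_in_integer_ext(uint32_t dig, int sign)
    {
        /* Negate in unsigned arithmetic, then reinterpret, relying on
         * two's-complement wrap-around much as the emulator code does. */
        int32_t val = (int32_t) (sign ? 0u - dig : dig);
        return (val < 0) == (sign != 0);
    }

    /* fits_in_integer_ext(0x7fffffffu, 0) -> 1   ( 2147483647 fits)
     * fits_in_integer_ext(0x80000000u, 1) -> 1   (-2147483648 fits)
     * fits_in_integer_ext(0x80000000u, 0) -> 0   ( 2147483648 does not) */
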
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index db86b4d796..cee48bbeb0 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -211,7 +211,7 @@ ERTS_GLB_INLINE void *
erts_dist_ext_trailer(ErtsDistExternal *edep)
{
void *res = (void *) (edep->ext_endp
- + ERTS_WORD_ALIGN_PAD_SZ(edep->ext_endp));
+ + ERTS_EXTRA_DATA_ALIGN_SZ(edep->ext_endp));
ASSERT((((UWord) res) % sizeof(Uint)) == 0);
return res;
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b4a7a22082..280421952e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -464,7 +464,10 @@ typedef union {
typedef struct proc_bin {
Eterm thing_word; /* Subtag REFC_BINARY_SUBTAG. */
Uint size; /* Binary size in bytes. */
- struct proc_bin *next; /* Pointer to next ProcBin. */
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
+#endif
+ struct erl_off_heap_header *next;
Binary *val; /* Pointer to Binary structure. */
byte *bytes; /* Pointer to the actual data bytes. */
Uint flags; /* Flag word. */
@@ -494,8 +497,8 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
pb->val = mbp;
pb->bytes = (byte *) mbp->orig_bytes;
pb->flags = 0;
@@ -512,6 +515,15 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
&& (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
&& (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
+
+union erl_off_heap_ptr {
+ struct erl_off_heap_header* hdr;
+ ProcBin *pb;
+ struct erl_fun_thing* fun;
+ struct external_thing_* ext;
+ Eterm* ep;
+};
+
/* arrays that get malloced at startup */
extern Port* erts_port;
extern erts_smp_atomic_t erts_ports_alive;
@@ -823,7 +835,6 @@ Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
Eterm erts_new_mso_binary(Process*, byte*, int);
Eterm new_binary(Process*, byte*, int);
Eterm erts_realloc_binary(Eterm bin, size_t size);
-void erts_cleanup_mso(ProcBin* pb);
/* erl_bif_info.c */
@@ -1015,7 +1026,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
-void init_emulator(_VOID_);
+void init_emulator(void);
void process_main(void);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1499,7 +1510,6 @@ void p_slpq(_VOID_);
void erts_silence_warn_unused_result(long unused);
void erts_cleanup_offheap(ErlOffHeap *offheap);
-void erts_cleanup_externals(ExternalThing *);
Uint erts_fit_in_bits(Uint);
int list_length(Eterm);
@@ -1532,7 +1542,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
Eterm atoms[], Uint uints1[], Uint uints2[]);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
-Eterm store_external_or_ref_(Uint **, ExternalThing **, Eterm);
+Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
#define NC_HEAP_SIZE(NC) \
(ASSERT_EXPR(is_node_container((NC))), \
@@ -1676,7 +1686,7 @@ int io_list_to_buf(Eterm, char*, int);
int io_list_to_buf2(Eterm, char*, int);
int io_list_len(Eterm);
int is_string(Eterm);
-void erl_at_exit(FUNCTION(void,(*),(void*)), void*);
+void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
void dump_memory_to_fd(int);
int dump_memory_data(const char *);
@@ -1957,5 +1967,5 @@ erts_alloc_message_heap(Uint size,
# define UnUseTmpHeap(Size,Proc) /* Nothing */
# define UseTmpHeapNoproc(Size) /* Nothing */
# define UnUseTmpHeapNoproc(Size) /* Nothing */
-#endif
-#endif
+#endif /* HEAP_ON_C_STACK */
+#endif /* !__GLOBAL_H__ */
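
The global.h changes belong to a restructuring visible throughout this diff: the separate off-heap lists (mso, funs, externals) collapse into a single intrusive list rooted at off_heap.first, every element starts with a struct erl_off_heap_header, and union erl_off_heap_ptr gives cleanup code one pointer type through which to view the mixed list. A simplified, self-contained model of that design (illustrative names only, not the real ERTS types or subtag macros):

    #include <stdio.h>

    enum oh_kind { OH_BINARY, OH_FUN, OH_EXTERNAL };

    struct oh_header {              /* stands in for erl_off_heap_header  */
        enum oh_kind kind;          /* the emulator reads the thing_word  */
        struct oh_header *next;     /* one 'next' chain for every kind    */
    };

    /* One walk over one list replaces the separate walks that
     * erts_cleanup_mso(), erts_cleanup_funs() and erts_cleanup_externals()
     * used to perform over their own lists. */
    static void cleanup_offheap_model(struct oh_header *first)
    {
        struct oh_header *hdr;
        for (hdr = first; hdr != NULL; hdr = hdr->next) {
            switch (hdr->kind) {
            case OH_BINARY:   puts("release binary");         break;
            case OH_FUN:      puts("release fun entry");      break;
            case OH_EXTERNAL: puts("release node reference"); break;
            }
        }
    }
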
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 68625801cf..79022d5dd7 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1575,14 +1575,14 @@ static void deliver_read_message(Port* prt, Eterm to,
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
hp += PROC_BIN_SIZE;
- ohp->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
listp = make_binary(pb);
}
@@ -1725,14 +1725,14 @@ deliver_vec_message(Port* prt, /* Port */
}
pb->thing_word = HEADER_PROC_BIN;
pb->size = iov->iov_len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = base;
pb->flags = 0;
hp += PROC_BIN_SIZE;
- ohp->overhead += iov->iov_len / sizeof(Eterm);
+ OH_OVERHEAD(ohp, iov->iov_len / sizeof(Eterm));
if (listp == NIL) { /* compatible with deliver_bin_message */
listp = make_binary(pb);
@@ -2259,12 +2259,12 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = dbin->orig_size;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(dbin);
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
- MSO(p).overhead += dbin->orig_size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), dbin->orig_size / sizeof(Eterm));
return make_binary(pb);
}
port_resp = dbin->orig_bytes;
@@ -3033,14 +3033,14 @@ driver_deliver_term(ErlDrvPort port,
driver_binary_inc_refc(b); /* caller will free binary */
pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = ((byte*) b->orig_bytes) + offset;
pb->flags = 0;
mess = make_binary(pb);
hp += PROC_BIN_SIZE;
- ohp->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
}
ptr += 3;
break;
@@ -3072,12 +3072,12 @@ driver_deliver_term(ErlDrvPort port,
hp += PROC_BIN_SIZE;
pbp->thing_word = HEADER_PROC_BIN;
pbp->size = size;
- pbp->next = ohp->mso;
- ohp->mso = pbp;
+ pbp->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pbp;
pbp->val = bp;
pbp->bytes = (byte*) bp->orig_bytes;
pbp->flags = 0;
- ohp->overhead += (pbp->size / sizeof(Eterm));
+ OH_OVERHEAD(ohp, pbp->size / sizeof(Eterm));
mess = make_binary(pbp);
}
ptr += 2;
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index 8c8029d450..5bcd567b5f 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -679,7 +679,7 @@ int packet_parse_http(const char* buf, int len, int* statep,
while (n && SP(ptr)) {
ptr++; n--;
}
- if (ptr==p0) return -1;
+ if (ptr==p0 && n>0) return -1;
/* NOTE: the syntax allows empty reason phrases */
(*statep) = !0;
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 900ebcbbf7..c9bb7bbe91 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -39,8 +39,6 @@ static Hash process_reg;
static erts_smp_rwmtx_t regtab_rwmtx;
-#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
- "reg_tab")
#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
@@ -147,8 +145,11 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- reg_lock_init();
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index ca87d3d70f..0031568af6 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -25,14 +25,6 @@
# define NO_FPE_SIGNALS
#endif
-/* Never use elib-malloc when purify-memory-tracing */
-#if defined(PURIFY)
-#undef ENABLE_ELIB_MALLOC
-#undef ELIB_HEAP_SBRK
-#undef ELIB_ALLOC_IS_CLIB
-#endif
-
-
/* xxxP __VXWORKS__ */
#ifdef VXWORKS
#include <vxWorks.h>
@@ -171,23 +163,6 @@ void erl_assert_error(char* expr, char* file, int line);
#include <stdarg.h>
-#if defined(__STDC__) || defined(_MSC_VER)
-# define EXTERN_FUNCTION(t, f, x) extern t f x
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#elif defined(__cplusplus)
-# define EXTERN_FUNCTION(f, x) extern "C" { f x }
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#else
-# define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-# define FUNCTION(t, f, x) t f (/*x*/)
-# define _DOTS_
-# define _VOID_
-#endif
-
/* This isn't sys-dependent, but putting it here benefits sys.c and drivers
- allow use of 'const' regardless of compiler */
@@ -197,7 +172,7 @@ void erl_assert_error(char* expr, char* file, int line);
#ifdef VXWORKS
/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
+int real_printf(const char *fmt, ...);
# define printf real_printf
#endif
@@ -359,15 +334,8 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
-#if defined(ARCH_64)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
-#elif defined(ARCH_32)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
- (((size_t) 4) - (((size_t) (X)) & ((size_t) 3)))
-#else
-#error "Not supported..."
-#endif
#include "erl_lock_check.h"
#include "erl_smp.h"
@@ -667,12 +635,12 @@ extern void erl_sys_args(int *argc, char **argv);
extern void erl_sys_schedule(int);
void sys_tty_reset(int);
-EXTERN_FUNCTION(int, sys_max_files, (_VOID_));
+int sys_max_files(void);
void sys_init_io(void);
Preload* sys_preloaded(void);
-EXTERN_FUNCTION(unsigned char*, sys_preload_begin, (Preload*));
-EXTERN_FUNCTION(void, sys_preload_end, (Preload*));
-EXTERN_FUNCTION(int, sys_get_key, (int));
+unsigned char* sys_preload_begin(Preload*);
+void sys_preload_end(Preload*);
+int sys_get_key(int);
void elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
unsigned long *ms_user_diff, unsigned long *ms_sys_diff);
void wall_clock_elapsed_time_both(unsigned long *ms_total,
@@ -689,7 +657,7 @@ int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
void get_sys_now(Uint*, Uint*, Uint*);
-EXTERN_FUNCTION(void, set_break_quit, (void (*)(void), void (*)(void)));
+void set_break_quit(void (*)(void), void (*)(void));
void os_flavor(char*, unsigned);
void os_version(int*, int*, int*);
@@ -729,7 +697,7 @@ int erts_write_env(char *key, char *value);
#define ERTS_DEFAULT_MMAP_THRESHOLD (128 * 1024)
#define ERTS_DEFAULT_MMAP_MAX 64
-EXTERN_FUNCTION(int, sys_alloc_opt, (int, int));
+int sys_alloc_opt(int, int);
typedef struct {
Sint trim_threshold;
@@ -738,7 +706,7 @@ typedef struct {
Sint mmap_max;
} SysAllocStat;
-EXTERN_FUNCTION(void, sys_alloc_stat, (SysAllocStat *));
+void sys_alloc_stat(SysAllocStat *);
/* Block the whole system... */
@@ -1127,13 +1095,10 @@ erts_refc_read(erts_refc_t *refcp, long min_val)
extern int erts_use_kernel_poll;
#endif
-void elib_ensure_initialized(void);
-
-
#if defined(VXWORKS)
/* NOTE! sys_calloc2 does not exist on other
platforms than VxWorks and OSE */
-EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
+void* sys_calloc2(Uint, Uint);
#endif /* VXWORKS || OSE */
@@ -1225,8 +1190,8 @@ EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
*/
#ifdef DEBUG
-EXTERN_FUNCTION(void, erl_debug, (char* format, ...));
-EXTERN_FUNCTION(void, erl_bin_write, (unsigned char *, int, int));
+void erl_debug(char* format, ...);
+void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x) erl_debug x
#else
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index da6f9ed12f..ab5e8b5d4a 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -48,11 +48,11 @@
#undef M_MMAP_THRESHOLD
#undef M_MMAP_MAX
-#if !defined(ELIB_ALLOC_IS_CLIB) && defined(__GLIBC__) && defined(HAVE_MALLOC_H)
+#if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#endif
-#if defined(ELIB_ALLOC_IS_CLIB) || !defined(HAVE_MALLOPT)
+#if !defined(HAVE_MALLOPT)
#undef HAVE_MALLOPT
#define HAVE_MALLOPT 0
#endif
@@ -162,13 +162,8 @@ erts_heap_alloc(Process* p, Uint need)
bp->alloc_size = n;
bp->used_size = n;
MBUF_SIZE(p) += n;
- bp->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
-
return bp->mem;
}
@@ -409,7 +404,7 @@ erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
}
else {
if (szp)
- *szp = ERTS_UINT64_HEAP_SIZE(ui64);
+ *szp += ERTS_UINT64_HEAP_SIZE(ui64);
if (hpp)
res = erts_uint64_to_big(ui64, hpp);
}
@@ -426,7 +421,7 @@ erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
}
else {
if (szp)
- *szp = ERTS_SINT64_HEAP_SIZE(si64);
+ *szp += ERTS_SINT64_HEAP_SIZE(si64);
if (hpp)
res = erts_sint64_to_big(si64, hpp);
}
@@ -2729,21 +2724,8 @@ not_equal:
}
-void
-erts_cleanup_externals(ExternalThing *etp)
-{
- ExternalThing *tetp;
-
- tetp = etp;
-
- while(tetp) {
- erts_deref_node_entry(tetp->node);
- tetp = tetp->next;
- }
-}
-
Eterm
-store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
+store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
Uint i;
Uint size;
@@ -2762,8 +2744,8 @@ store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
- ((ExternalThing *) to_hp)->next = *etpp;
- *etpp = (ExternalThing *) to_hp;
+ ((struct erl_off_heap_header*) to_hp)->next = oh->first;
+ oh->first = (struct erl_off_heap_header*) to_hp;
return make_external(to_hp);
}
@@ -2792,7 +2774,7 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
sz = NC_HEAP_SIZE(ns);
ASSERT(sz > 0);
hp = HAlloc(proc, sz);
- return store_external_or_ref_(&hp, &MSO(proc).externals, ns);
+ return store_external_or_ref_(&hp, &MSO(proc), ns);
}
void bin_write(int to, void *to_arg, byte* buf, int sz)
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 60ae4cb108..c450f10f48 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -104,7 +104,7 @@
#include <ctype.h>
#include <sys/types.h>
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static ErlDrvSysInfo sys_info;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 87691fc1bc..059288d1cb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -1045,7 +1045,7 @@ struct erl_drv_entry inet_driver_entry =
};
/* XXX: is this a driver interface function ??? */
-extern void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
/*
* Malloc wrapper,
@@ -9333,11 +9333,13 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
if (err != ERRNO_BLOCK) {
if (!desc->active) {
#ifdef HAVE_SCTP
- if (short_recv)
+ if (short_recv) {
async_error_am(desc, am_short_recv);
- else
-#else
+ } else {
async_error(desc, err);
+ }
+#else
+ async_error(desc, err);
#endif
driver_cancel_timer(desc->port);
sock_select(desc,FD_READ,0);
diff --git a/erts/emulator/drivers/unix/mem_drv.c b/erts/emulator/drivers/unix/mem_drv.c
deleted file mode 100644
index 1417ca1121..0000000000
--- a/erts/emulator/drivers/unix/mem_drv.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/* Purpose: Access to elib memory statistics */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData, char*, int);
-
-const struct driver_entry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm historgram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 0052ac0739..b19f632f52 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -98,7 +98,7 @@ extern STATUS copy(char *, char *);
#define EF_SAFE_REALLOC(P, S) ef_safe_realloc((P), (S))
#define EF_FREE(P) do { if((P)) driver_free((P)); } while(0)
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static void *ef_safe_alloc(Uint s)
{
@@ -127,7 +127,7 @@ static void *ef_safe_realloc(void *op, Uint s)
(s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0')))
#ifdef VXWORKS
-static FUNCTION(int, vxworks_to_posix, (int vx_errno));
+static int vxworks_to_posix(int vx_errno);
#endif
/*
@@ -146,7 +146,7 @@ static FUNCTION(int, vxworks_to_posix, (int vx_errno));
#define CHECK_PATHLEN(X,Y) /* Nothing */
#endif
-static FUNCTION(int, check_error, (int result, Efile_error* errInfo));
+static int check_error(int result, Efile_error* errInfo);
static int
check_error(int result, Efile_error *errInfo)
diff --git a/erts/emulator/drivers/win32/mem_drv.c b/erts/emulator/drivers/win32/mem_drv.c
deleted file mode 100644
index fa7c46eca8..0000000000
--- a/erts/emulator/drivers/win32/mem_drv.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/* Purpose: Access to elib memory statistics */
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort, char*);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData);
-
-ErlDrvEntry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm histogram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
-
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index b0abfd2310..2a877d8ace 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -440,9 +440,12 @@ BIF_RETTYPE hipe_bifs_alloc_data_2(BIF_ALIST_2)
align != sizeof(long) && align != sizeof(double)))
BIF_ERROR(BIF_P, BADARG);
nrbytes = unsigned_val(BIF_ARG_2);
+ if (nrbytes == 0)
+ BIF_RET(make_small(0));
block = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
if ((unsigned long)block & (align-1))
- fprintf(stderr, "Yikes! erts_alloc() returned misaligned address %p\r\n", block);
+ fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte aligned\r\n",
+ __FUNCTION__, (unsigned long)nrbytes, block, (unsigned long)align);
BIF_RET(address_to_term(block, BIF_P));
}
diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c
index 5188950e17..8f43811537 100644
--- a/erts/emulator/hipe/hipe_bif1.c
+++ b/erts/emulator/hipe/hipe_bif1.c
@@ -876,22 +876,44 @@ BIF_RETTYPE hipe_bifs_misc_timer_clear_0(BIF_ALIST_0)
* + The fallback, which is the same as {X,_} = runtime(statistics).
*/
+static double fallback_get_hrvtime(void)
+{
+ unsigned long ms_user;
+
+ elapsed_time_both(&ms_user, NULL, NULL, NULL);
+ return (double)ms_user;
+}
+
#if USE_PERFCTR
#include "hipe_perfctr.h"
-static int hrvtime_is_open;
-#define hrvtime_is_started() hrvtime_is_open
+static int hrvtime_started; /* 0: closed, +1: perfctr, -1: fallback */
+#define hrvtime_is_started() (hrvtime_started != 0)
static void start_hrvtime(void)
{
if (hipe_perfctr_hrvtime_open() >= 0)
- hrvtime_is_open = 1;
+ hrvtime_started = 1;
+ else
+ hrvtime_started = -1;
}
-#define get_hrvtime() hipe_perfctr_hrvtime_get()
-#define stop_hrvtime() hipe_perfctr_hrvtime_close()
+static void stop_hrvtime(void)
+{
+ if (hrvtime_started > 0)
+ hipe_perfctr_hrvtime_close();
+ hrvtime_started = 0;
+}
-#else
+static double get_hrvtime(void)
+{
+ if (hrvtime_started > 0)
+ return hipe_perfctr_hrvtime_get();
+ else
+ return fallback_get_hrvtime();
+}
+
+#else /* !USE_PERFCTR */
/*
* Fallback, if nothing better exists.
@@ -902,15 +924,9 @@ static void start_hrvtime(void)
#define hrvtime_is_started() 1
#define start_hrvtime() do{}while(0)
#define stop_hrvtime() do{}while(0)
+#define get_hrvtime() fallback_get_hrvtime()
-static double get_hrvtime(void)
-{
- unsigned long ms_user;
- elapsed_time_both(&ms_user, NULL, NULL, NULL);
- return (double)ms_user;
-}
-
-#endif /* hrvtime support */
+#endif /* !USE_PERFCTR */
BIF_RETTYPE hipe_bifs_get_hrvtime_0(BIF_ALIST_0)
{
@@ -918,11 +934,8 @@ BIF_RETTYPE hipe_bifs_get_hrvtime_0(BIF_ALIST_0)
Eterm res;
FloatDef f;
- if (!hrvtime_is_started()) {
+ if (!hrvtime_is_started())
start_hrvtime();
- if (!hrvtime_is_started())
- BIF_RET(NIL); /* arity 0 BIFs may not fail */
- }
f.fd = get_hrvtime();
hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
res = make_float(hp);
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index a77aec7919..900dfc5a8a 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -261,7 +261,7 @@ static const struct literal {
/* Field offsets in a process struct */
{ "P_HP", offsetof(struct process, htop) },
{ "P_HP_LIMIT", offsetof(struct process, stop) },
- { "P_OFF_HEAP_MSO", offsetof(struct process, off_heap.mso) },
+ { "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) },
{ "P_MBUF", offsetof(struct process, mbuf) },
{ "P_ID", offsetof(struct process, id) },
{ "P_FLAGS", offsetof(struct process, flags) },
@@ -456,7 +456,7 @@ static const struct rts_param {
} rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
#if !defined(HYBRID)
- 1, offsetof(struct process, off_heap.funs)
+ 1, offsetof(struct process, off_heap.first)
#endif
},
diff --git a/erts/emulator/obsolete/driver.h b/erts/emulator/obsolete/driver.h
deleted file mode 100644
index 708fe68e1a..0000000000
--- a/erts/emulator/obsolete/driver.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * OLD, OBSOLETE include file for erlang driver writers.
- * New drivers should use erl_driver.h instead.
- */
-
-#ifndef __DRIVER_H__
-#define __DRIVER_H__
-
-#include <stdlib.h>
-#include "driver_int.h"
-
-#undef _ANSI_ARGS_
-#undef CONST
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-#ifdef __cplusplus
-# define EXTERN extern "C"
-#else
-# define EXTERN extern
-#endif
-
-/* Values for mode arg to driver_select() */
-
-#define DO_READ (1 << 0)
-#define DO_WRITE (1 << 1)
-
-/* Flags for set_port_control_flags() */
-#define PORT_CONTROL_FLAG_BINARY 1
-#define PORT_CONTROL_FLAG_HEAVY 2
-
-/* This macro is used to name a dynamic driver's init function in */
-/* a way that doesn't lead to conflicts. This is crucial when using */
-/* operating systems that has one namespace for all symbols */
-/* (e.g. VxWorks). Example: if you have an dynamic driver C source */
-/* file named echo_drv.c, you use the macro like this: */
-/* int DRIVER_INIT(echo_drv)(void *handle) */
-#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) DRIVER_NAME ## _init
-#elif defined(__WIN32__)
-# define DRIVER_INIT(DRIVER_NAME) __declspec(dllexport) driver_init
-#else
-# define DRIVER_INIT(DRIVER_NAME) driver_init
-#endif
-
-typedef int (*F_PTR)(); /* a function pointer */
-typedef long (*L_PTR)(); /* pointer to a function returning long */
-
-extern int null_func();
-
-/* This structure MUST match Binary in global.h exactly!!! */
-typedef struct driver_binary {
- int orig_size; /* total length of binary */
- char orig_bytes[1]; /* the data (char instead of byte!) */
-} DriverBinary;
-
-typedef struct {
- int vsize; /* length of vectors */
- int size; /* total size in bytes */
- SysIOVec* iov;
- DriverBinary** binv;
-} ErlIOVec;
-
-/*
- * OLD, OBSOLETE driver entry structure.
- */
-
-typedef struct driver_entry {
- F_PTR init; /* called at system start up (no args) */
- L_PTR start; /* called when some one does an open_port
- args: port, command (nul-terminated),
- additional/alternate args for fd/vanilla/spawn driver.
- return value -1 means failure, other
- is saved and passed to the other funcs */
- F_PTR stop; /* called when port is closed, and when the
- emulator is halted - arg: start_return */
- F_PTR output; /* called when we have output from erlang to the port
- args: start_return, buf, buflen */
- F_PTR ready_input; /* called when we have input from one of the driver's
- file descriptors - args: start_return, fd */
- F_PTR ready_output; /* called when output is possible to one of the driver's
- file descriptors - args: start_return, fd */
- char *driver_name; /* name supplied as {driver,Name,Args} to open_port */
-
- F_PTR finish; /* called before unloading (DYNAMIC DRIVERS ONLY) */
- void *handle; /* file handle (DYNAMIC DRIVERS ONLY) */
- F_PTR control; /* "ioctl" for drivers (invoked by port_command/3) */
- F_PTR timeout; /* Reserved */
- F_PTR outputv; /* Reserved */
- F_PTR ready_async; /* Completion routine for driver_async */
- F_PTR padding1[3]; /* pad to match size of modern driver struct */
- int padding2[4]; /* more pad */
- F_PTR padding3[3]; /* even more padding */
-} DriverEntry;
-
-
-/* These are the kernel functions available for driver writers */
-
-EXTERN int driver_select _ANSI_ARGS_((int,int,int,int));
-
-EXTERN int driver_output _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_output2 _ANSI_ARGS_((int, char*, int, char*, int));
-EXTERN int driver_output_binary _ANSI_ARGS_((int, char*, int,
- DriverBinary*, int, int));
-EXTERN int driver_outputv _ANSI_ARGS_((int, char*,int,ErlIOVec*,int));
-
-EXTERN int driver_vec_to_buf _ANSI_ARGS_((ErlIOVec*, char*, int));
-
-EXTERN int driver_set_timer _ANSI_ARGS_((int, unsigned long));
-EXTERN int driver_cancel_timer _ANSI_ARGS_((int));
-
-/*
- * The following functions are used to initiate a close of a port
- * from a driver.
- */
-EXTERN int driver_failure_eof _ANSI_ARGS_((int));
-EXTERN int driver_failure_atom _ANSI_ARGS_((int, char *));
-EXTERN int driver_failure_posix _ANSI_ARGS_((int, int));
-EXTERN int driver_failure _ANSI_ARGS_((int, int));
-EXTERN int driver_exit _ANSI_ARGS_ ((int, int));
-
-EXTERN char* erl_errno_id _ANSI_ARGS_((int error));
-EXTERN void set_busy_port _ANSI_ARGS_((int, int));
-EXTERN void add_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN int remove_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN void set_port_control_flags _ANSI_ARGS_((int, int));
-
-/* Binary interface */
-/* NOTE: DO NOT overwrite a binary with new data (if the data is delivered);
-** since the binary is a shared object it MUST be written once.
-*/
-
-EXTERN DriverBinary* driver_alloc_binary _ANSI_ARGS_((int));
-EXTERN DriverBinary* driver_realloc_binary _ANSI_ARGS_((DriverBinary*, int));
-EXTERN void driver_free_binary _ANSI_ARGS_((DriverBinary*));
-
-
-/* Queue interface */
-EXTERN int driver_enqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_pushqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_deq _ANSI_ARGS_((int, int));
-EXTERN SysIOVec* driver_peekq _ANSI_ARGS_((int, int*));
-EXTERN int driver_sizeq _ANSI_ARGS_((int));
-EXTERN int driver_enq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_enq _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_pushq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_pushq _ANSI_ARGS_((int, char*, int));
-
-/* Memory management */
-EXTERN void *driver_alloc _ANSI_ARGS_((size_t));
-EXTERN void *driver_realloc _ANSI_ARGS_((void*, size_t));
-EXTERN void driver_free _ANSI_ARGS_((void*));
-
-/* Shared / dynamic link libraries */
-EXTERN void *driver_dl_open _ANSI_ARGS_((char *));
-EXTERN void *driver_dl_sym _ANSI_ARGS_((void *, char *));
-EXTERN int driver_dl_close _ANSI_ARGS_((void *));
-EXTERN char *driver_dl_error _ANSI_ARGS_((void));
-
-/* Async IO functions */
-EXTERN long driver_async _ANSI_ARGS_((int,
- unsigned int*,
- void (*)(void*),
- void *,
- void (*)(void*)));
-EXTERN int driver_async_cancel _ANSI_ARGS_((long));
-
-EXTERN int driver_lock_driver _ANSI_ARGS_((int));
-
-/* Threads */
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-
-typedef unsigned long DriverTermData;
-
-#define TERM_DATA(x) ((DriverTermData) (x))
-
-/* Possible types to send from driver Argument type */
-#define ERL_DRV_NIL ((DriverTermData) 1) /* None */
-#define ERL_DRV_ATOM ((DriverTermData) 2) /* driver_mk_atom(string) */
-#define ERL_DRV_INT ((DriverTermData) 3) /* int */
-#define ERL_DRV_PORT ((DriverTermData) 4) /* driver_mk_port(ix) */
-#define ERL_DRV_BINARY ((DriverTermData) 5) /* ErlDriverBinary*, int */
-#define ERL_DRV_STRING ((DriverTermData) 6) /* char*, int */
-#define ERL_DRV_TUPLE ((DriverTermData) 7) /* int */
-#define ERL_DRV_LIST ((DriverTermData) 8) /* int */
-#define ERL_DRV_STRING_CONS ((DriverTermData) 9) /* char*, int */
-#define ERL_DRV_PID ((DriverTermData) 10) /* driver_connected,... */
-
-/* DriverTermData is the type to use for casts when building
- * terms that should be sent to connected process,
- * for instance a tuple on the form {tcp, Port, [Tag|Binary]}
- *
- * DriverTermData spec[] = {
- * ERL_DRV_ATOM, driver_mk_atom("tcp"),
- * ERL_DRV_PORT, driver_mk_port(drv->ix),
- * ERL_DRV_INT, REPLY_TAG,
- * ERL_DRV_BIN, 50, TERM_DATA(buffer),
- * ERL_DRV_LIST, 2,
- * ERL_DRV_TUPLE, 3,
- * }
- *
- */
-
-EXTERN DriverTermData driver_mk_atom _ANSI_ARGS_ ((char*));
-EXTERN DriverTermData driver_mk_port _ANSI_ARGS_ ((int));
-EXTERN DriverTermData driver_connected _ANSI_ARGS_((int));
-EXTERN DriverTermData driver_caller _ANSI_ARGS_((int));
-
-EXTERN int driver_output_term _ANSI_ARGS_((int, DriverTermData *, int));
-EXTERN int driver_send_term _ANSI_ARGS_((int, DriverTermData, DriverTermData *, int));
-
-#endif
-
-
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 09fb6337f7..c17806d96c 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -286,7 +286,7 @@ struct ErtsPollSet_ {
ErtsPollSet next;
int internal_fd_limit;
ErtsFdStatus *fds_status;
- int no_of_user_fds;
+ erts_smp_atomic_t no_of_user_fds;
int fds_status_len;
#if ERTS_POLL_USE_KERNEL_POLL
int kp_fd;
@@ -852,7 +852,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
ASSERT(ps->fds_status[fd].used_events);
ps->fds_status[fd].used_events = 0;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
update_fallback_pollset(ps, fd);
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
break;
@@ -902,11 +902,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
if (!events) {
buf[buf_len].events = POLLREMOVE;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
buf[buf_len].events = events;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
@@ -996,12 +996,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
}
if (used_events) {
if (!events) {
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
}
else {
if (events)
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1075,7 +1075,7 @@ update_pollset(ErtsPollSet ps, int fd)
epe.data.fd = epe_templ.data.fd;
res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
} while (res != 0 && errno == EINTR);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].used_events = 0;
}
@@ -1083,11 +1083,11 @@ update_pollset(ErtsPollSet ps, int fd)
/* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
need a non-NULL event pointer even though it is ignored... */
op = EPOLL_CTL_DEL;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
op = EPOLL_CTL_ADD;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
op = EPOLL_CTL_MOD;
@@ -1137,7 +1137,7 @@ update_pollset(ErtsPollSet ps, int fd)
/* Fall through ... */
case EPOLL_CTL_ADD: {
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (!*update_fallback) {
*update_fallback = 1;
@@ -1225,7 +1225,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
last_pix = --ps->no_poll_fds;
if (pix != last_pix) {
/* Move last pix to this pix */
@@ -1252,7 +1252,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
|| fd == ps->kp_fd);
#endif
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
if (pix >= ps->poll_fds_len)
grow_poll_fds(ps, pix);
@@ -1303,7 +1303,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
if (!ps->fds_status[fd].used_events) {
ASSERT(events);
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds++;
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1311,7 +1311,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
}
else if (!events) {
ASSERT(ps->fds_status[fd].used_events);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds--;
@@ -1912,7 +1912,8 @@ static ERTS_INLINE int
check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
{
ASSERT(!*ps_locked);
- if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) {
+ if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+ && tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
return 0;
}
@@ -1950,7 +1951,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* the maximum number of file descriptors in the poll set.
*/
struct dvpoll poll_res;
- int nfds = ps->no_of_user_fds;
+ int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
nfds++; /* Wakeup pipe */
#endif
@@ -2228,7 +2229,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = 0;
ps->fds_status = NULL;
ps->fds_status_len = 0;
- ps->no_of_user_fds = 0;
+ erts_smp_atomic_init(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2326,7 +2327,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
- ps->no_of_user_fds = 0; /* Don't count wakeup pipe and fallback fd */
+ erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
erts_smp_spin_lock(&pollsets_lock);
ps->next = pollsets;
@@ -2472,7 +2473,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
- pip->poll_set_size = ps->no_of_user_fds;
+ pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
pip->poll_set_size++; /* Wakeup pipe */
#endif
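
In the erl_poll.c hunks above, the per-pollset counter no_of_user_fds changes from a plain int to an erts_smp_atomic_t, so the zero-timeout check in check_fd_events() and the size reported by erts_poll_info() can read a consistent value while other threads keep updating it. A rough C11 equivalent of that counter pattern (stdatomic stand-ins, not the ERTS atomics API):

    #include <stdatomic.h>

    typedef struct {
        atomic_long no_of_user_fds;   /* readable without the pollset lock */
    } pollset_counts;

    static void fd_added(pollset_counts *ps)   { atomic_fetch_add(&ps->no_of_user_fds, 1); }
    static void fd_removed(pollset_counts *ps) { atomic_fetch_sub(&ps->no_of_user_fds, 1); }
    static long fds_in_use(pollset_counts *ps) { return atomic_load(&ps->no_of_user_fds); }
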
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 737ffd9f94..af4ab693dc 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -123,8 +123,6 @@ static ErtsSysReportExit *report_exit_transit_list;
extern int check_async_ready(void);
extern int driver_interrupt(int, int);
-/*EXTERN_FUNCTION(void, increment_time, (int));*/
-/*EXTERN_FUNCTION(int, next_time, (_VOID_));*/
extern void do_break(void);
extern void erl_sys_args(int*, char**);
@@ -221,10 +219,10 @@ static struct fd_data {
} *fd_data; /* indexed by fd */
/* static FUNCTION(int, write_fill, (int, char*, int)); unused? */
-static FUNCTION(void, note_child_death, (int, int));
+static void note_child_death(int, int);
#if CHLDWTHR
-static FUNCTION(void *, child_waiter, (void *));
+static void* child_waiter(void *);
#endif
/********************* General functions ****************************/
@@ -384,18 +382,6 @@ MALLOC_USE_HASH(1);
#endif
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
@@ -488,9 +474,6 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -538,13 +521,14 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
}
void
erl_sys_init(void)
{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
+ {
int res;
char bindir[MAXPATHLEN];
size_t bindirsz = sizeof(bindir);
@@ -574,6 +558,7 @@ erl_sys_init(void)
bindir,
DIR_SEPARATOR_CHAR,
CHILD_SETUP_PROG_NAME);
+ }
#endif
#ifdef USE_SETLINEBUF
@@ -2575,7 +2560,6 @@ extern Preload pre_loaded[];
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index c59c99f65e..5a682ab045 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -799,8 +799,17 @@ sys_chars_to_double(char* buf, double* fp)
}
#ifdef NO_FPE_SIGNALS
- if (errno == ERANGE && (*fp == 0.0 || *fp == HUGE_VAL || *fp == -HUGE_VAL)) {
- return -1;
+ if (errno == ERANGE) {
+ if (*fp == HUGE_VAL || *fp == -HUGE_VAL) {
+ /* overflow, should give error */
+ return -1;
+ } else if (t == s && *fp == 0.0) {
+ /* This should give 0.0 - OTP-7178 */
+ errno = 0;
+
+ } else if (*fp == 0.0) {
+ return -1;
+ }
}
#endif
return 0;
@@ -810,7 +819,9 @@ int
matherr(struct exception *exc)
{
#if !defined(NO_FPE_SIGNALS)
- set_current_fp_exception((unsigned long)__builtin_return_address(0));
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ if (fpexnp != NULL)
+ *fpexnp = (unsigned long)__builtin_return_address(0);
#endif
return 1;
}
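
The sys_float.c change above refines the NO_FPE_SIGNALS handling of ERANGE from strtod()-style parsing: overflow (±HUGE_VAL) stays an error, while an underflowing literal such as "1.0e-325" now yields 0.0 (OTP-7178), as the float_SUITE addition further down exercises. A self-contained sketch of that policy around plain strtod() (hypothetical helper; the real code applies an extra t == s condition that is omitted here):

    #include <errno.h>
    #include <math.h>
    #include <stdlib.h>

    /* Returns -1 on parse error or overflow; accepts underflow as 0.0. */
    static int parse_double(const char *buf, double *fp)
    {
        char *end;

        errno = 0;
        *fp = strtod(buf, &end);
        if (end == buf || *end != '\0')
            return -1;                    /* not a complete number      */
        if (errno == ERANGE) {
            if (*fp == HUGE_VAL || *fp == -HUGE_VAL)
                return -1;                /* overflow: report an error  */
            if (*fp == 0.0)
                errno = 0;                /* underflow: accept as 0.0   */
        }
        return 0;
    }
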
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index bd02cf85eb..15d4cd7361 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -37,7 +37,7 @@
void erts_sys_init_float(void);
void erl_start(int, char**);
-void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
void erl_error(char*, va_list);
void erl_crash_dump(char*, int, char*, ...);
@@ -67,10 +67,10 @@ static void async_read_file(struct async_io* aio, LPVOID buf, DWORD numToRead);
static int async_write_file(struct async_io* aio, LPVOID buf, DWORD numToWrite);
static int get_overlapped_result(struct async_io* aio,
LPDWORD pBytesRead, BOOL wait);
-static FUNCTION(BOOL, CreateChildProcess, (char *, HANDLE, HANDLE,
- HANDLE, LPHANDLE, BOOL,
- LPVOID, LPTSTR, unsigned,
- char **, int *));
+static BOOL CreateChildProcess(char *, HANDLE, HANDLE,
+ HANDLE, LPHANDLE, BOOL,
+ LPVOID, LPTSTR, unsigned,
+ char **, int *);
static int create_pipe(LPHANDLE, LPHANDLE, BOOL, BOOL);
static int ApplicationType(const char* originalName, char fullPath[MAX_PATH],
BOOL search_in_path, BOOL handle_quotes,
@@ -93,7 +93,7 @@ static erts_smp_mtx_t sys_driver_data_lock;
#define APPL_WIN3X 2
#define APPL_WIN32 3
-static FUNCTION(int, driver_write, (long, HANDLE, byte*, int));
+static int driver_write(long, HANDLE, byte*, int);
static void common_stop(int);
static int create_file_thread(struct async_io* aio, int mode);
#ifdef ERTS_SMP
@@ -2627,7 +2627,6 @@ erts_sys_main_thread(void)
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
@@ -2974,19 +2973,6 @@ check_supported_os_version(void)
}
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
-
#ifdef ERTS_ENABLE_LOCK_COUNT
static void
thr_create_prepare_child(void *vtcdp)
@@ -3005,14 +2991,9 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
-
#ifdef ERTS_ENABLE_LOCK_COUNT
eid.thread_create_child_func = thr_create_prepare_child;
#endif
-
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init();
@@ -3020,7 +3001,6 @@ erts_sys_pre_init(void)
}
#endif
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_sys_env_init();
}
void noinherit_std_handle(DWORD type)
@@ -3036,6 +3016,8 @@ void erl_sys_init(void)
{
HANDLE handle;
+ erts_sys_env_init();
+
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 5ec17a5e2a..a4c02da626 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -75,7 +75,6 @@ MODULES= \
node_container_SUITE \
nofrag_SUITE \
num_bif_SUITE \
- obsolete_SUITE \
op_SUITE \
port_SUITE \
port_bif_SUITE \
diff --git a/erts/emulator/test/decode_packet_SUITE.erl b/erts/emulator/test/decode_packet_SUITE.erl
index 6cde286871..d9e961be2f 100644
--- a/erts/emulator/test/decode_packet_SUITE.erl
+++ b/erts/emulator/test/decode_packet_SUITE.erl
@@ -304,6 +304,10 @@ http(Config) when is_list(Config) ->
{ok, {http_request, 'GET', ResB, {1,1}}, Rest} = decode_pkt(http_bin,Bin)
end,
lists:foreach(UriF, http_uri_variants()),
+
+ %% Response with empty phrase
+ ?line {ok,{http_response,{1,1},200,[]},<<>>} = decode_pkt(http, <<"HTTP/1.1 200\r\n">>, []),
+ ?line {ok,{http_response,{1,1},200,<<>>},<<>>} = decode_pkt(http_bin, <<"HTTP/1.1 200\r\n">>, []),
ok.
http_with_bin(http) ->
diff --git a/erts/emulator/test/float_SUITE.erl b/erts/emulator/test/float_SUITE.erl
index 102e472ea6..99e9457985 100644
--- a/erts/emulator/test/float_SUITE.erl
+++ b/erts/emulator/test/float_SUITE.erl
@@ -22,7 +22,10 @@
-include("test_server.hrl").
-export([all/1,init_per_testcase/2,fin_per_testcase/2,
- fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,bad_float_unpack/1]).
+ fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,
+ bad_float_unpack/1]).
+-export([otp_7178/1]).
+
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog = ?t:timetrap(?t:minutes(3)),
@@ -33,7 +36,29 @@ fin_per_testcase(_Func, Config) ->
?t:timetrap_cancel(Dog).
all(suite) ->
- [fpe,fp_drv,fp_drv_thread,denormalized,match,bad_float_unpack].
+ [fpe,
+ fp_drv,
+ fp_drv_thread,
+ otp_7178,
+ denormalized,
+ match,
+ bad_float_unpack].
+
+%%
+%% OTP-7178, list_to_float on very small numbers should give 0.0
+%% instead of exception, i.e. ignore underflow.
+%%
+otp_7178(suite) ->
+ [];
+otp_7178(doc) ->
+ ["test that list_to_float on very small numbers give 0.0"];
+otp_7178(Config) when is_list(Config) ->
+ ?line X = list_to_float("1.0e-325"),
+ ?line true = (X < 0.00000001) and (X > -0.00000001),
+ ?line Y = list_to_float("1.0e-325325325"),
+ ?line true = (Y < 0.00000001) and (Y > -0.00000001),
+ ?line {'EXIT', {badarg,_}} = (catch list_to_float("1.0e83291083210")),
+ ok.
%% Forces floating point exceptions and tests that subsequent, legal,
%% operations are calculated correctly. Original version by Sebastian
diff --git a/erts/emulator/test/hash_SUITE.erl b/erts/emulator/test/hash_SUITE.erl
index 85bdb8bff8..f5d1871bfb 100644
--- a/erts/emulator/test/hash_SUITE.erl
+++ b/erts/emulator/test/hash_SUITE.erl
@@ -480,14 +480,14 @@ otp_5292_test() ->
S2 = md5([md5(hash_int(S, E, PH)) || {Start, N, Sz} <- d(),
{S, E} <- int(Start, N, Sz)]),
?line Comment = case S1 of
- <<43,186,76,102,87,4,110,245,203,177,206,6,130,69,43,99>> ->
+ <<4,248,208,156,200,131,7,1,173,13,239,173,112,81,16,174>> ->
?line big = erlang:system_info(endian),
"Big endian machine";
- <<21,206,139,15,149,28,167,81,98,225,132,254,49,125,174,195>> ->
+ <<180,28,33,231,239,184,71,125,76,47,227,241,78,184,176,233>> ->
?line little = erlang:system_info(endian),
"Little endian machine"
end,
- ?line <<140,37,79,80,26,242,130,22,20,229,123,240,223,244,43,99>> = S2,
+ ?line <<124,81,198,121,174,233,19,137,10,83,33,80,226,111,238,99>> = S2,
?line 2 = erlang:hash(1, (1 bsl 27) -1),
?line {'EXIT', _} = (catch erlang:hash(1, (1 bsl 27))),
{comment, Comment}.
@@ -507,7 +507,7 @@ hash_int(Start, End, F) ->
{Start, End, md5(HL)}.
md5(T) ->
- erlang:md5(term_to_binary(T)).
+ erlang:md5(term_to_binary(T)).
bit_level_binaries() ->
?line [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] =
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 888bf582d8..f45cfa3e4a 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -30,7 +30,7 @@
fin_per_testcase/2, basic/1, reload/1, upgrade/1, heap_frag/1,
types/1, many_args/1, binaries/1, get_string/1, get_atom/1, api_macros/1,
from_array/1, iolist_as_binary/1, resource/1, resource_binary/1, resource_takeover/1,
- threading/1, send/1, send2/1, send_threaded/1, neg/1, is_checks/1,
+ threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1, is_checks/1,
get_length/1, make_atom/1, make_string/1]).
-export([many_args_100/100]).
@@ -51,7 +51,7 @@
all(suite) ->
[basic, reload, upgrade, heap_frag, types, many_args, binaries, get_string,
get_atom, api_macros, from_array, iolist_as_binary, resource, resource_binary,
- resource_takeover, threading, send, send2, send_threaded, neg, is_checks,
+ resource_takeover, threading, send, send2, send3, send_threaded, neg, is_checks,
get_length, make_atom, make_string].
%%init_per_testcase(_Case, Config) ->
@@ -916,7 +916,164 @@ forwarder(To, N) ->
other_term() ->
{fun(X,Y) -> X*Y end, make_ref()}.
-
+
+send3(doc) -> ["Message sending stress test"];
+send3(Config) when is_list(Config) ->
+ %% Let a number of processes send random message blobs between each other
+ %% using enif_send. Kill and spawn new ones randomly to keep a ~constant
+ %% number of workers running.
+ Seed = now(),
+ io:format("seed: ~p\n",[Seed]),
+ random:seed(Seed),
+ ets:new(nif_SUITE,[named_table,public]),
+ ?line true = ets:insert(nif_SUITE,{send3,0,0,0,0}),
+ timer:send_after(10000, timeout), % Run for 10 seconds
+ SpawnCnt = send3_controller(0, [], [], 20),
+ ?line [{_,Rcv,SndOk,SndFail,Balance}] = ets:lookup(nif_SUITE,send3),
+ io:format("spawns=~p received=~p, sent=~p send-failure=~p balance=~p\n",
+ [SpawnCnt,Rcv,SndOk,SndFail,Balance]),
+ ets:delete(nif_SUITE).
+
+send3_controller(SpawnCnt, [], _, infinity) ->
+ SpawnCnt;
+send3_controller(SpawnCnt0, Mons0, Pids0, Tick) ->
+ receive
+ timeout ->
+ io:format("Timeout. Sending 'halt' to ~p\n",[Pids0]),
+ lists:foreach(fun(P) -> P ! {halt,self()} end, Pids0),
+ lists:foreach(fun(P) -> receive {halted,P} -> ok end end, Pids0),
+ QTot = lists:foldl(fun(P,QSum) ->
+ {message_queue_len,QLen} =
+ erlang:process_info(P,message_queue_len),
+ QSum + QLen
+ end, 0, Pids0),
+ io:format("Total queue length ~p\n",[QTot]),
+ lists:foreach(fun(P) -> P ! die end, Pids0),
+ send3_controller(SpawnCnt0, Mons0, [], infinity);
+ {'DOWN', MonRef, process, _Pid, _} ->
+ Mons1 = lists:delete(MonRef, Mons0),
+ %%io:format("Got DOWN from ~p. Monitors left: ~p\n",[Pid,Mons1]),
+ send3_controller(SpawnCnt0, Mons1, Pids0, Tick)
+ after Tick ->
+ Max = 20,
+ N = length(Pids0),
+ PidN = random:uniform(Max),
+ %%io:format("N=~p PidN=~p Pids0=~p\n", [N,PidN,Pids0]),
+ case PidN > N of
+ true ->
+ {NewPid,Mon} = spawn_opt(fun send3_proc/0, [link,monitor]),
+ lists:foreach(fun(P) -> P ! {is_born,NewPid} end, Pids0),
+ ?line Balance = ets:lookup_element(nif_SUITE,send3,5),
+ Inject = (Balance =< 0),
+ case Inject of
+ true -> ok;
+ false -> ets:update_element(nif_SUITE,send3,{5,-1})
+ end,
+ NewPid ! {pids,Pids0,Inject},
+ send3_controller(SpawnCnt0+1, [Mon|Mons0], [NewPid|Pids0], Tick);
+ false ->
+ KillPid = lists:nth(PidN,Pids0),
+ KillPid ! die,
+ Pids1 = lists:delete(KillPid, Pids0),
+ lists:foreach(fun(P) -> P ! {is_dead,KillPid} end, Pids1),
+ send3_controller(SpawnCnt0, Mons0, Pids1, Tick)
+ end
+ end.
+
+send3_proc() ->
+ %%io:format("Process ~p spawned\n",[self()]),
+ send3_proc([self()], {0,0,0}, {1,2,3,4,5}).
+send3_proc(Pids0, Counters={Rcv,SndOk,SndFail}, State0) ->
+ %%io:format("~p: Pids0=~p", [self(), Pids0]),
+ %%timer:sleep(10),
+ receive
+ {pids, Pids1, Inject} ->
+ %%io:format("~p: got ~p Inject=~p\n", [self(), Pids1, Inject]),
+ ?line Pids0 = [self()],
+ Pids2 = [self() | Pids1],
+ case Inject of
+ true -> send3_proc_send(Pids2, Counters, State0);
+ false -> send3_proc(Pids2, Counters, State0)
+ end;
+ {is_born, Pid} ->
+ %%io:format("~p: is_born ~p, got ~p\n", [self(), Pid, Pids0]),
+ send3_proc([Pid | Pids0], Counters, State0);
+ {is_dead, Pid} ->
+ Pids1 = lists:delete(Pid,Pids0),
+ %%io:format("~p: is_dead ~p, got ~p\n", [self(), Pid, Pids1]),
+ send3_proc(Pids1, Counters, State0);
+ {blob, Blob0} ->
+ %%io:format("~p: blob ~p\n", [self(), Blob0]),
+ State1 = send3_new_state(State0, Blob0),
+ send3_proc_send(Pids0, {Rcv+1,SndOk,SndFail}, State1);
+ die ->
+ %%io:format("Process ~p terminating, stats = ~p\n",[self(),Counters]),
+ {message_queue_len,Dropped} = erlang:process_info(self(),message_queue_len),
+ _R = ets:update_counter(nif_SUITE,send3,
+ [{2,Rcv},{3,SndOk},{4,SndFail},{5,1-Dropped}]),
+ %%io:format("~p: dies R=~p\n", [self(), R]),
+ ok;
+ {halt,Papa} ->
+ Papa ! {halted,self()},
+ io:format("~p halted\n",[self()]),
+ receive die -> ok end,
+ io:format("~p dying\n",[self()])
+ end.
+
+send3_proc_send(Pids, {Rcv,SndOk,SndFail}, State0) ->
+ To = lists:nth(random:uniform(length(Pids)),Pids),
+ Blob = send3_make_blob(),
+ State1 = send3_new_state(State0,Blob),
+ case send3_send(To, Blob) of
+ true ->
+ send3_proc(Pids, {Rcv,SndOk+1,SndFail}, State1);
+ false ->
+ send3_proc(Pids, {Rcv,SndOk,SndFail+1}, State1)
+ end.
+
+
+send3_make_blob() ->
+ case random:uniform(20)-1 of
+ 0 -> {term,[]};
+ N ->
+ MsgEnv = alloc_msgenv(),
+ repeat(N bsr 1,
+ fun(_) -> grow_blob(MsgEnv,other_term(),random:uniform(1 bsl 20))
+ end, void),
+ case (N band 1) of
+ 0 -> {term,copy_blob(MsgEnv)};
+ 1 -> {msgenv,MsgEnv}
+ end
+ end.
+
+send3_send(Pid, Msg) ->
+ %% 90% enif_send and 10% normal bang
+ case random:uniform(10) of
+ 1 -> send3_send_bang(Pid,Msg);
+ _ -> send3_send_nif(Pid,Msg)
+ end.
+send3_send_nif(Pid, {term,Blob}) ->
+ %%io:format("~p send term nif\n",[self()]),
+ send_term(Pid, {blob, Blob}) =:= 1;
+send3_send_nif(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob nif\n",[self()]),
+ send3_blob(MsgEnv, Pid, blob) =:= 1.
+
+send3_send_bang(Pid, {term,Blob}) ->
+ %%io:format("~p send term bang\n",[self()]),
+ Pid ! {blob, Blob},
+ true;
+send3_send_bang(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob bang\n",[self()]),
+ Pid ! {blob, copy_blob(MsgEnv)},
+ true.
+
+send3_new_state(State, Blob) ->
+ case random:uniform(5+2) of
+ N when N =< 5-> setelement(N, State, Blob);
+ _ -> State % Don't store blob
+ end.
+
neg(doc) -> ["Negative testing of load_nif"];
neg(Config) when is_list(Config) ->
TmpMem = tmpmem(),
@@ -1070,10 +1227,13 @@ send_new_blob(_,_) -> ?nif_stub.
alloc_msgenv() -> ?nif_stub.
clear_msgenv(_) -> ?nif_stub.
grow_blob(_,_) -> ?nif_stub.
+grow_blob(_,_,_) -> ?nif_stub.
send_blob(_,_) -> ?nif_stub.
+send3_blob(_,_,_) -> ?nif_stub.
send_blob_thread(_,_,_) -> ?nif_stub.
join_send_thread(_) -> ?nif_stub.
-
+copy_blob(_) -> ?nif_stub.
+send_term(_,_) -> ?nif_stub.
nif_stub_error(Line) ->
exit({nif_not_loaded,module,?MODULE,line,Line}).
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index c8cd323b7e..5384a32f21 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -296,6 +296,30 @@ static int test_ulong(ErlNifEnv* env, unsigned long i1)
return 1;
}
+static int test_int64(ErlNifEnv* env, ErlNifSInt64 i1)
+{
+ ErlNifSInt64 i2 = 0;
+ ERL_NIF_TERM int_term = enif_make_int64(env, i1);
+ if (!enif_get_int64(env,int_term, &i2) || i1 != i2) {
+ fprintf(stderr, "test_int64(%ld) ...FAILED i2=%ld\r\n",
+ (long)i1, (long)i2);
+ return 0;
+ }
+ return 1;
+}
+
+static int test_uint64(ErlNifEnv* env, ErlNifUInt64 i1)
+{
+ ErlNifUInt64 i2 = 0;
+ ERL_NIF_TERM int_term = enif_make_uint64(env, i1);
+ if (!enif_get_uint64(env,int_term, &i2) || i1 != i2) {
+ fprintf(stderr, "test_ulong(%lu) ...FAILED i2=%lu\r\n",
+ (unsigned long)i1, (unsigned long)i2);
+ return 0;
+ }
+ return 1;
+}
+
static int test_double(ErlNifEnv* env, double d1)
{
double d2 = 0;
@@ -319,6 +343,8 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
unsigned uint;
long slong;
unsigned long ulong;
+ ErlNifSInt64 sint64;
+ ErlNifUInt64 uint64;
double d;
ERL_NIF_TERM atom, ref1, ref2;
@@ -352,11 +378,25 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
slong -= slong / 3 + 1;
} while (slong >= 0);
+ sint64 = ((ErlNifSInt64)1 << 63); /* INT64_MIN */
+ do {
+ if (!test_int64(env,sint64)) {
+ goto error;
+ }
+ sint64 += ~sint64 / 3 + 1;
+ } while (sint64 < 0);
+ sint64 = ((ErlNifUInt64)1 << 63) - 1; /* INT64_MAX */
+ do {
+ if (!test_int64(env,sint64)) {
+ goto error;
+ }
+ sint64 -= sint64 / 3 + 1;
+ } while (sint64 >= 0);
uint = UINT_MAX;
for (;;) {
if (!test_uint(env,uint)) {
-
+ goto error;
}
if (uint == 0) break;
uint -= uint / 3 + 1;
@@ -364,11 +404,19 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
ulong = ULONG_MAX;
for (;;) {
if (!test_ulong(env,ulong)) {
-
+ goto error;
}
if (ulong == 0) break;
ulong -= ulong / 3 + 1;
}
+ uint64 = (ErlNifUInt64)-1; /* UINT64_MAX */
+ for (;;) {
+ if (!test_uint64(env,uint64)) {
+ goto error;
+ }
+ if (uint64 == 0) break;
+ uint64 -= uint64 / 3 + 1;
+ }
if (MAX_SMALL < INT_MAX) { /* 32-bit */
for (i=-10 ; i <= 10; i++) {
@@ -391,11 +439,18 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
for (i=-10 ; i < 10; i++) {
if (!test_long(env,MAX_SMALL+i) || !test_ulong(env,MAX_SMALL+i) ||
- !test_long(env,MIN_SMALL+i)) {
+ !test_long(env,MIN_SMALL+i) ||
+ !test_int64(env,MAX_SMALL+i) || !test_uint64(env,MAX_SMALL+i) ||
+ !test_int64(env,MIN_SMALL+i)) {
goto error;
}
+ if (MAX_SMALL < INT_MAX) {
+ if (!test_int(env,MAX_SMALL+i) || !test_uint(env,MAX_SMALL+i) ||
+ !test_int(env,MIN_SMALL+i)) {
+ goto error;
+ }
+ }
}
-
for (d=3.141592e-100 ; d < 1e100 ; d *= 9.97) {
if (!test_double(env,d) || !test_double(env,-d)) {
goto error;
@@ -1038,36 +1093,41 @@ static ERL_NIF_TERM make_term_copy(struct make_term_info* mti, int n)
{
return enif_make_copy(mti->dst_env, mti->other_term);
}
+
+typedef ERL_NIF_TERM Make_term_Func(struct make_term_info*, int);
+static Make_term_Func* make_funcs[] = {
+ make_term_binary,
+ make_term_int,
+ make_term_ulong,
+ make_term_double,
+ make_term_atom,
+ make_term_existing_atom,
+ make_term_string,
+ //make_term_ref,
+ make_term_sub_binary,
+ make_term_uint,
+ make_term_long,
+ make_term_tuple0,
+ make_term_list0,
+ make_term_resource,
+ make_term_new_binary,
+ make_term_caller_pid,
+ make_term_tuple,
+ make_term_list,
+ make_term_list_cell,
+ make_term_tuple_from_array,
+ make_term_list_from_array,
+ make_term_garbage,
+ make_term_copy
+};
+static unsigned num_of_make_funcs()
+{
+ return sizeof(make_funcs)/sizeof(*make_funcs);
+}
static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res)
{
- typedef ERL_NIF_TERM Make_term_Func(struct make_term_info*, int);
- static Make_term_Func* funcs[] = {
- make_term_binary,
- make_term_int,
- make_term_ulong,
- make_term_double,
- make_term_atom,
- make_term_existing_atom,
- make_term_string,
- //make_term_ref,
- make_term_sub_binary,
- make_term_uint,
- make_term_long,
- make_term_tuple0,
- make_term_list0,
- make_term_resource,
- make_term_new_binary,
- make_term_caller_pid,
- make_term_tuple,
- make_term_list,
- make_term_list_cell,
- make_term_tuple_from_array,
- make_term_list_from_array,
- make_term_garbage,
- make_term_copy
- };
- if (n < sizeof(funcs)/sizeof(*funcs)) {
- *res = funcs[n](mti, n);
+ if (n < num_of_make_funcs()) {
+ *res = make_funcs[n](mti, n);
push_term(mti, *res);
return 1;
}
@@ -1167,14 +1227,14 @@ static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
{
union { void* vp; struct make_term_info* p; }mti;
ERL_NIF_TERM term;
- if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || (argc>2 && !enif_get_uint(env,argv[2], &mti.p->n))) {
return enif_make_badarg(env);
}
mti.p->caller_env = env;
mti.p->other_term = argv[1];
- while (!make_term_n(mti.p, mti.p->n++, &term)) {
- mti.p->n = 0;
- }
+ mti.p->n %= num_of_make_funcs();
+ make_term_n(mti.p, mti.p->n++, &term);
mti.p->blob = enif_make_list_cell(mti.p->dst_env, term, mti.p->blob);
return atom_ok;
}
@@ -1194,6 +1254,23 @@ static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
}
+static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ErlNifPid to;
+ ERL_NIF_TERM copy;
+ int res;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || !enif_get_local_pid(env, argv[1], &to)) {
+ return enif_make_badarg(env);
+ }
+ mti.p->blob = enif_make_tuple2(mti.p->dst_env,
+ enif_make_copy(mti.p->dst_env, argv[2]),
+ mti.p->blob);
+ res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ return enif_make_int(env,res);
+}
+
void* threaded_sender(void *arg)
{
@@ -1253,6 +1330,28 @@ static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
return enif_make_tuple2(env, atom_ok, enif_make_int(env, mti.p->send_res));
}
+static ERL_NIF_TERM copy_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ return enif_make_badarg(env);
+ }
+ return enif_make_copy(env, mti.p->blob);
+}
+
+static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifEnv* menv;
+ ErlNifPid pid;
+ int ret;
+ if (!enif_get_local_pid(env, argv[0], &pid)) {
+ return enif_make_badarg(env);
+ }
+ menv = enif_alloc_env();
+ ret = enif_send(env, &pid, menv, enif_make_copy(menv, argv[1]));
+ enif_free_env(menv);
+ return enif_make_int(env, ret);
+}
static ErlNifFunc nif_funcs[] =
{
@@ -1291,9 +1390,13 @@ static ErlNifFunc nif_funcs[] =
{"alloc_msgenv", 0, alloc_msgenv},
{"clear_msgenv", 1, clear_msgenv},
{"grow_blob", 2, grow_blob},
+ {"grow_blob", 3, grow_blob},
{"send_blob", 2, send_blob},
+ {"send3_blob", 3, send3_blob},
{"send_blob_thread", 3, send_blob_thread},
- {"join_send_thread", 1, join_send_thread}
+ {"join_send_thread", 1, join_send_thread},
+ {"copy_blob", 1, copy_blob},
+ {"send_term", 2, send_term}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
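
send_term() and send3_blob() above both follow the same enif_send() pattern: build the message in a process-independent environment, send it, then release the environment. A minimal self-contained sketch of that pattern is shown below; the function and variable names are illustrative only and are not taken from the patch.

    #include "erl_nif.h"

    /* Sketch only: send argv[1] to the local pid in argv[0] and return
     * enif_send()'s integer result (1 = delivered, 0 = not delivered). */
    static ERL_NIF_TERM send_copy_sketch(ErlNifEnv* env, int argc,
                                         const ERL_NIF_TERM argv[])
    {
        ErlNifPid to;
        ErlNifEnv* msg_env;
        int res;
        if (argc != 2 || !enif_get_local_pid(env, argv[0], &to))
            return enif_make_badarg(env);
        msg_env = enif_alloc_env();            /* process independent environment */
        res = enif_send(env, &to, msg_env,
                        enif_make_copy(msg_env, argv[1]));
        enif_free_env(msg_env);                /* message terms were copied on send */
        return enif_make_int(env, res);
    }
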
diff --git a/erts/emulator/test/obsolete_SUITE.erl b/erts/emulator/test/obsolete_SUITE.erl
deleted file mode 100644
index b191f84ee0..0000000000
--- a/erts/emulator/test/obsolete_SUITE.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-
--module(obsolete_SUITE).
--author('[email protected]').
--compile(nowarn_obsolete_guard).
-
--export([all/1]).
-
--export([erl_threads/1]).
-
--include("test_server.hrl").
-
--define(DEFAULT_TIMETRAP_SECS, 240).
-
-all(doc) -> [];
-all(suite) ->
- case catch erlang:system_info({wordsize,external}) of
- 4 -> [erl_threads];
- _ -> {skip, "Only expected to work on true 32-bit architectures"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Testcases %%
-%% %%
-
-erl_threads(suite) -> [];
-erl_threads(doc) -> [];
-erl_threads(Cfg) ->
- ?line case erlang:system_info(threads) of
- true ->
- ?line drv_case(Cfg, erl_threads);
- false ->
- ?line {skip, "Emulator not compiled with threads support"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Internal functions %%
-%% %%
-
-drv_case(Config, CaseName) ->
- drv_case(Config, CaseName, "").
-
-drv_case(Config, CaseName, TimeTrap) when integer(TimeTrap) ->
- drv_case(Config, CaseName, "", TimeTrap);
-drv_case(Config, CaseName, Command) when list(Command) ->
- drv_case(Config, CaseName, Command, ?DEFAULT_TIMETRAP_SECS).
-
-drv_case(Config, CaseName, TimeTrap, Command) when list(Command),
- integer(TimeTrap) ->
- drv_case(Config, CaseName, Command, TimeTrap);
-drv_case(Config, CaseName, Command, TimeTrap) when list(Config),
- atom(CaseName),
- list(Command),
- integer(TimeTrap) ->
- case ?t:os_type() of
- {Family, _} when Family == unix; Family == win32 ->
- ?line run_drv_case(Config, CaseName, Command, TimeTrap);
- SkipOs ->
- ?line {skipped,
- lists:flatten(["Not run on "
- | io_lib:format("~p",[SkipOs])])}
- end.
-
-run_drv_case(Config, CaseName, Command, TimeTrap) ->
- ?line Dog = test_server:timetrap(test_server:seconds(TimeTrap)),
- ?line DataDir = ?config(data_dir,Config),
- case erl_ddll:load_driver(DataDir, CaseName) of
- ok -> ok;
- {error, Error} ->
- io:format("~s\n", [erl_ddll:format_error(Error)]),
- ?line ?t:fail()
- end,
- ?line Port = open_port({spawn, atom_to_list(CaseName)}, []),
- ?line true = is_port(Port),
- ?line Port ! {self(), {command, Command}},
- ?line Result = receive_drv_result(Port, CaseName),
- ?line Port ! {self(), close},
- ?line receive
- {Port, closed} ->
- ok
- end,
- ?line ok = erl_ddll:unload_driver(CaseName),
- ?line test_server:timetrap_cancel(Dog),
- ?line Result.
-
-receive_drv_result(Port, CaseName) ->
- ?line receive
- {print, Port, CaseName, Str} ->
- ?line ?t:format("~s", [Str]),
- ?line receive_drv_result(Port, CaseName);
- {'EXIT', Port, Error} ->
- ?line ?t:fail(Error);
- {'EXIT', error, Error} ->
- ?line ?t:fail(Error);
- {failed, Port, CaseName, Comment} ->
- ?line ?t:fail(Comment);
- {skipped, Port, CaseName, Comment} ->
- ?line {skipped, Comment};
- {succeeded, Port, CaseName, ""} ->
- ?line succeeded;
- {succeeded, Port, CaseName, Comment} ->
- ?line {comment, Comment}
- end.
diff --git a/erts/emulator/test/obsolete_SUITE_data/Makefile.src b/erts/emulator/test/obsolete_SUITE_data/Makefile.src
deleted file mode 100644
index d8e2b861c0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/Makefile.src
+++ /dev/null
@@ -1,33 +0,0 @@
-# ``The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved via the world wide web at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-# Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-# AB. All Rights Reserved.''
-#
-# $Id$
-#
-
-TEST_DRVS = erl_threads@dll@
-CC = @CC@
-LD = @LD@
-CFLAGS = @SHLIB_CFLAGS@ -I@erl_include@ @DEFS@
-SHLIB_EXTRA_LDLIBS = testcase_driver@obj@
-
-all: $(TEST_DRVS)
-
-@SHLIB_RULES@
-
-testcase_driver@obj@: testcase_driver.c testcase_driver.h
-$(TEST_DRVS): testcase_driver@obj@
-
-
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c b/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
deleted file mode 100644
index 27a5163121..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-
-#ifndef __WIN32__
-
-#define NO_OF_THREADS 2
-
-#include <unistd.h>
-#include <errno.h>
-
-static int die;
-static int cw_passed;
-static int res_tf0;
-static int res_tf1;
-static erl_mutex_t mtx;
-static erl_cond_t cnd;
-static erl_thread_t tid[NO_OF_THREADS];
-static int need_join[NO_OF_THREADS];
-
-typedef struct {
- int n;
-} thr_arg_t;
-
-
-static void *tf0(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 0)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf0 = 0;
-
- return (void *) &res_tf0;
-
- fail:
- return NULL;
-}
-
-
-static void *tf1(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 1)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf1 = 1;
-
- erts_thread_exit((void *) &res_tf1);
-
- res_tf1 = 4711;
-
- fail:
- return NULL;
-}
-
-#endif /* #ifndef __WIN32__ */
-
-void
-testcase_run(TestCaseState_t *tcs)
-{
-#ifdef __WIN32__
- testcase_skipped(tcs, "Nothing to test; not supported on windows.");
-#else
- int i, r;
- void *tres[NO_OF_THREADS];
- thr_arg_t ta[NO_OF_THREADS];
- erl_thread_t t1;
-
- die = 0;
- cw_passed = 0;
-
- for (i = 0; i < NO_OF_THREADS; i++)
- need_join[i] = 0;
-
- res_tf0 = 17;
- res_tf1 = 17;
-
- cnd = mtx = NULL;
-
- /* Create mutex and cond */
- mtx = erts_mutex_create();
- ASSERT(tcs, mtx);
- cnd = erts_cond_create();
- ASSERT(tcs, cnd);
-
- /* Create the threads */
- ta[0].n = 0;
- r = erts_thread_create(&tid[0], tf0, (void *) &ta[0], 0);
- ASSERT(tcs, r == 0);
- need_join[0] = 1;
-
- ta[1].n = 1;
- r = erts_thread_create(&tid[1], tf1, (void *) &ta[1], 0);
- ASSERT(tcs, r == 0);
- need_join[1] = 1;
-
- /* Make sure the threads waits on cond wait */
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 0, (void) erts_mutex_unlock(mtx));
-
-
- /* Let one thread pass one cond wait */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 1, (void) erts_mutex_unlock(mtx));
-
-
- /* Let both threads pass one cond wait */
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 3, (void) erts_mutex_unlock(mtx));
-
-
- /* Let the thread that only have passed one cond wait pass the other one */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 4, (void) erts_mutex_unlock(mtx));
-
- /* Both threads should have passed both cond waits and exited;
- join them and check returned values */
-
- r = erts_thread_join(tid[0], &tres[0]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[0] = 0;
-
- ASSERT_CLNUP(tcs, tres[0] == &res_tf0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf0 == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_thread_join(tid[1], &tres[1]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[1] = 0;
-
- ASSERT_CLNUP(tcs, tres[1] == &res_tf1, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf1 == 1, (void) erts_mutex_unlock(mtx));
-
- /* Test signaling when noone waits */
-
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- /* Test broadcasting when noone waits */
-
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- /* erts_cond_timedwait() not supported anymore */
- r = erts_cond_timedwait(cnd, mtx, 1000);
- ASSERT_CLNUP(tcs, r != 0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs,
- strcmp(erl_errno_id(r), "enotsup") == 0,
- (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- r = erts_mutex_destroy(mtx);
- ASSERT(tcs, r == 0);
- mtx = NULL;
-
- r = erts_cond_destroy(cnd);
- ASSERT(tcs, r == 0);
- cnd = NULL;
-
- /* ... */
- t1 = erts_thread_self();
-
- if (cw_passed == 4711) {
- /* We don't want to execute this just check that the
- symbol/symbols is/are defined */
- erts_thread_kill(t1);
- }
-
-#endif /* #ifndef __WIN32__ */
-}
-
-char *
-testcase_name(void)
-{
- return "erl_threads";
-}
-
-void
-testcase_cleanup(TestCaseState_t *tcs)
-{
- int i;
- for (i = 0; i < NO_OF_THREADS; i++) {
- if (need_join[i]) {
- erts_mutex_lock(mtx);
- die = 1;
- erts_cond_broadcast(cnd);
- erts_mutex_unlock(mtx);
- erts_thread_join(tid[1], NULL);
- }
- }
- if (mtx)
- erts_mutex_destroy(mtx);
- if (cnd)
- erts_cond_destroy(cnd);
-}
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
deleted file mode 100644
index 99d5adb041..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <setjmp.h>
-#include <string.h>
-
-#ifdef __WIN32__
-#undef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 1
-#define vsnprintf _vsnprintf
-#endif
-
-#ifndef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 0
-#endif
-
-#define COMMENT_BUF_SZ 4096
-
-#define TESTCASE_FAILED 0
-#define TESTCASE_SKIPPED 1
-#define TESTCASE_SUCCEEDED 2
-
-typedef struct {
- TestCaseState_t visible;
- int port;
- int result;
- jmp_buf done_jmp_buf;
- char *comment;
- char comment_buf[COMMENT_BUF_SZ];
-} InternalTestCaseState_t;
-
-long testcase_drv_start(int port, char *command);
-int testcase_drv_stop(long drv_data);
-int testcase_drv_run(long drv_data, char *buf, int len);
-
-static DriverEntry testcase_drv_entry = {
- NULL,
- testcase_drv_start,
- testcase_drv_stop,
- testcase_drv_run
-};
-
-
-int DRIVER_INIT(testcase_drv)(void *arg)
-{
- testcase_drv_entry.driver_name = testcase_name();
- return (int) &testcase_drv_entry;
-}
-
-long
-testcase_drv_start(int port, char *command)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *)
- driver_alloc(sizeof(InternalTestCaseState_t));
- if (!itcs) {
- return -1;
- }
-
- itcs->visible.testcase_name = testcase_name();
- itcs->visible.extra = NULL;
- itcs->port = port;
- itcs->result = TESTCASE_FAILED;
- itcs->comment = "";
-
- return (long) itcs;
-}
-
-int
-testcase_drv_stop(long drv_data)
-{
- testcase_cleanup((TestCaseState_t *) drv_data);
- driver_free((void *) drv_data);
- return 0;
-}
-
-int
-testcase_drv_run(long drv_data, char *buf, int len)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) drv_data;
- DriverTermData result_atom;
- DriverTermData msg[12];
-
- itcs->visible.command = buf;
- itcs->visible.command_len = len;
-
- if (setjmp(itcs->done_jmp_buf) == 0) {
- testcase_run((TestCaseState_t *) itcs);
- itcs->result = TESTCASE_SUCCEEDED;
- }
-
- switch (itcs->result) {
- case TESTCASE_SUCCEEDED:
- result_atom = driver_mk_atom("succeeded");
- break;
- case TESTCASE_SKIPPED:
- result_atom = driver_mk_atom("skipped");
- break;
- case TESTCASE_FAILED:
- default:
- result_atom = driver_mk_atom("failed");
- break;
- }
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) result_atom;
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment;
- msg[8] = (DriverTermData) strlen(itcs->comment);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
- return 0;
-}
-
-int
-testcase_assertion_failed(TestCaseState_t *tcs,
- char *file, int line, char *assertion)
-{
- testcase_failed(tcs, "%s:%d: Assertion failed: \"%s\"",
- file, line, assertion);
- return 0;
-}
-
-void
-testcase_printf(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- DriverTermData msg[12];
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) driver_mk_atom("print");
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment_buf;
- msg[8] = (DriverTermData) strlen(itcs->comment_buf);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
-}
-
-
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SUCCEEDED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SKIPPED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- char buf[10];
- size_t bufsz = sizeof(buf);
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_FAILED;
- itcs->comment = itcs->comment_buf;
-
- if (erl_drv_getenv("ERL_ABORT_ON_FAILURE", buf, &bufsz) == 0
- && strcmp("true", buf) == 0) {
- fprintf(stderr, "Testcase \"%s\" failed: %s\n",
- itcs->visible.testcase_name, itcs->comment);
- abort();
- }
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void *testcase_alloc(size_t size)
-{
- return driver_alloc(size);
-}
-
-void *testcase_realloc(void *ptr, size_t size)
-{
- return driver_realloc(ptr, size);
-}
-
-void testcase_free(void *ptr)
-{
- driver_free(ptr);
-}
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
deleted file mode 100644
index 3d85ca6df0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#ifndef TESTCASE_DRIVER_H__
-#define TESTCASE_DRIVER_H__
-
-#include "obsolete/driver.h"
-#include <stdlib.h>
-
-typedef struct {
- char *testcase_name;
- char *command;
- int command_len;
- void *extra;
-} TestCaseState_t;
-
-#define ASSERT_CLNUP(TCS, B, CLN) \
-do { \
- if (!(B)) { \
- CLN; \
- testcase_assertion_failed((TCS), __FILE__, __LINE__, #B); \
- } \
-} while (0)
-
-#define ASSERT(TCS, B) ASSERT_CLNUP(TCS, B, (void) 0)
-
-void testcase_printf(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...);
-int testcase_assertion_failed(TestCaseState_t *tcs, char *file, int line,
- char *assertion);
-void *testcase_alloc(size_t size);
-void *testcase_realloc(void *ptr, size_t size);
-void testcase_free(void *ptr);
-
-
-char *testcase_name(void);
-void testcase_run(TestCaseState_t *tcs);
-void testcase_cleanup(TestCaseState_t *tcs);
-
-#endif
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index ab727b7cb1..4f4458802c 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -33,7 +33,7 @@
-include("test_server.hrl").
%-compile(export_all).
--export([all/1, init_per_testcase/2, fin_per_testcase/2]).
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, end_per_suite/1]).
-export([equal/1,
few_low/1,
@@ -49,7 +49,8 @@
cpu_topology/1,
sct_cmd/1,
sbt_cmd/1,
- scheduler_suspend/1]).
+ scheduler_suspend/1,
+ reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(10)).
@@ -67,7 +68,8 @@ all(suite) ->
equal_with_high_max,
bound_process,
scheduler_bind,
- scheduler_suspend].
+ scheduler_suspend,
+ reader_groups].
init_per_testcase(Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -81,6 +83,10 @@ fin_per_testcase(_Case, Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
ok.
+end_per_suite(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
+ Config.
+
-define(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED, (2000*2000)).
-define(DEFAULT_TEST_REDS_PER_SCHED, 200000000).
@@ -965,12 +971,361 @@ sst3_loop(S, N) ->
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
sst3_loop(S, N-1).
+
+reader_groups(Config) when is_list(Config) ->
+ %% White box testing. These results are correct, but other results
+ %% could be too...
+
+ %% The actual tilepro64 topology
+ CPUT0 = [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}],
+
+ ?line [{0,1},{1,1},{2,1},{3,3},{4,3},{5,3},{6,3},{7,3},{8,1},{9,1},{10,1},
+ {11,1},{12,3},{13,3},{14,4},{15,4},{16,2},{17,2},{18,2},{19,2},
+ {20,4},{21,4},{22,4},{23,4},{24,2},{25,2},{26,7},{27,2},{28,4},
+ {29,2},{30,4},{31,5},{32,7},{33,7},{34,7},{35,7},{36,5},{37,5},
+ {38,5},{39,7},{40,7},{41,8},{42,8},{43,8},{44,5},{45,5},{46,5},
+ {47,6},{48,8},{49,8},{50,8},{51,6},{52,6},{53,6},{54,6},{55,6},
+ {58,8},{60,6},{61,6}]
+ = reader_groups_map(CPUT0, 8),
+
+ CPUT1 = [n([p([c([t(l(0)),t(l(1)),t(l(2)),t(l(3))]),
+ c([t(l(4)),t(l(5)),t(l(6)),t(l(7))]),
+ c([t(l(8)),t(l(9)),t(l(10)),t(l(11))]),
+ c([t(l(12)),t(l(13)),t(l(14)),t(l(15))])]),
+ p([c([t(l(16)),t(l(17)),t(l(18)),t(l(19))]),
+ c([t(l(20)),t(l(21)),t(l(22)),t(l(23))]),
+ c([t(l(24)),t(l(25)),t(l(26)),t(l(27))]),
+ c([t(l(28)),t(l(29)),t(l(30)),t(l(31))])])]),
+ n([p([c([t(l(32)),t(l(33)),t(l(34)),t(l(35))]),
+ c([t(l(36)),t(l(37)),t(l(38)),t(l(39))]),
+ c([t(l(40)),t(l(41)),t(l(42)),t(l(43))]),
+ c([t(l(44)),t(l(45)),t(l(46)),t(l(47))])]),
+ p([c([t(l(48)),t(l(49)),t(l(50)),t(l(51))]),
+ c([t(l(52)),t(l(53)),t(l(54)),t(l(55))]),
+ c([t(l(56)),t(l(57)),t(l(58)),t(l(59))]),
+ c([t(l(60)),t(l(61)),t(l(62)),t(l(63))])])]),
+ n([p([c([t(l(64)),t(l(65)),t(l(66)),t(l(67))]),
+ c([t(l(68)),t(l(69)),t(l(70)),t(l(71))]),
+ c([t(l(72)),t(l(73)),t(l(74)),t(l(75))]),
+ c([t(l(76)),t(l(77)),t(l(78)),t(l(79))])]),
+ p([c([t(l(80)),t(l(81)),t(l(82)),t(l(83))]),
+ c([t(l(84)),t(l(85)),t(l(86)),t(l(87))]),
+ c([t(l(88)),t(l(89)),t(l(90)),t(l(91))]),
+ c([t(l(92)),t(l(93)),t(l(94)),t(l(95))])])]),
+ n([p([c([t(l(96)),t(l(97)),t(l(98)),t(l(99))]),
+ c([t(l(100)),t(l(101)),t(l(102)),t(l(103))]),
+ c([t(l(104)),t(l(105)),t(l(106)),t(l(107))]),
+ c([t(l(108)),t(l(109)),t(l(110)),t(l(111))])]),
+ p([c([t(l(112)),t(l(113)),t(l(114)),t(l(115))]),
+ c([t(l(116)),t(l(117)),t(l(118)),t(l(119))]),
+ c([t(l(120)),t(l(121)),t(l(122)),t(l(123))]),
+ c([t(l(124)),t(l(125)),t(l(126)),t(l(127))])])])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},
+ {10,3},{11,3},{12,4},{13,4},{14,4},{15,4},{16,5},{17,5},{18,5},
+ {19,5},{20,6},{21,6},{22,6},{23,6},{24,7},{25,7},{26,7},{27,7},
+ {28,8},{29,8},{30,8},{31,8},{32,9},{33,9},{34,9},{35,9},{36,10},
+ {37,10},{38,10},{39,10},{40,11},{41,11},{42,11},{43,11},{44,12},
+ {45,12},{46,12},{47,12},{48,13},{49,13},{50,13},{51,13},{52,14},
+ {53,14},{54,14},{55,14},{56,15},{57,15},{58,15},{59,15},{60,16},
+ {61,16},{62,16},{63,16},{64,17},{65,17},{66,17},{67,17},{68,18},
+ {69,18},{70,18},{71,18},{72,19},{73,19},{74,19},{75,19},{76,20},
+ {77,20},{78,20},{79,20},{80,21},{81,21},{82,21},{83,21},{84,22},
+ {85,22},{86,22},{87,22},{88,23},{89,23},{90,23},{91,23},{92,24},
+ {93,24},{94,24},{95,24},{96,25},{97,25},{98,25},{99,25},{100,26},
+ {101,26},{102,26},{103,26},{104,27},{105,27},{106,27},{107,27},
+ {108,28},{109,28},{110,28},{111,28},{112,29},{113,29},{114,29},
+ {115,29},{116,30},{117,30},{118,30},{119,30},{120,31},{121,31},
+ {122,31},{123,31},{124,32},{125,32},{126,32},{127,32}]
+ = reader_groups_map(CPUT1, 128),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,1},{17,1},{18,1},{19,1},
+ {20,1},{21,1},{22,1},{23,1},{24,1},{25,1},{26,1},{27,1},{28,1},
+ {29,1},{30,1},{31,1},{32,1},{33,1},{34,1},{35,1},{36,1},{37,1},
+ {38,1},{39,1},{40,1},{41,1},{42,1},{43,1},{44,1},{45,1},{46,1},
+ {47,1},{48,1},{49,1},{50,1},{51,1},{52,1},{53,1},{54,1},{55,1},
+ {56,1},{57,1},{58,1},{59,1},{60,1},{61,1},{62,1},{63,1},{64,2},
+ {65,2},{66,2},{67,2},{68,2},{69,2},{70,2},{71,2},{72,2},{73,2},
+ {74,2},{75,2},{76,2},{77,2},{78,2},{79,2},{80,2},{81,2},{82,2},
+ {83,2},{84,2},{85,2},{86,2},{87,2},{88,2},{89,2},{90,2},{91,2},
+ {92,2},{93,2},{94,2},{95,2},{96,2},{97,2},{98,2},{99,2},{100,2},
+ {101,2},{102,2},{103,2},{104,2},{105,2},{106,2},{107,2},{108,2},
+ {109,2},{110,2},{111,2},{112,2},{113,2},{114,2},{115,2},{116,2},
+ {117,2},{118,2},{119,2},{120,2},{121,2},{122,2},{123,2},{124,2},
+ {125,2},{126,2},{127,2}]
+ = reader_groups_map(CPUT1, 2),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},{10,3},
+ {11,3},{12,3},{13,3},{14,3},{15,3},{16,4},{17,4},{18,4},{19,4},
+ {20,4},{21,4},{22,4},{23,4},{24,5},{25,5},{26,5},{27,5},{28,5},
+ {29,5},{30,5},{31,5},{32,6},{33,6},{34,6},{35,6},{36,6},{37,6},
+ {38,6},{39,6},{40,7},{41,7},{42,7},{43,7},{44,7},{45,7},{46,7},
+ {47,7},{48,8},{49,8},{50,8},{51,8},{52,8},{53,8},{54,8},{55,8},
+ {56,9},{57,9},{58,9},{59,9},{60,9},{61,9},{62,9},{63,9},{64,10},
+ {65,10},{66,10},{67,10},{68,10},{69,10},{70,10},{71,10},{72,11},
+ {73,11},{74,11},{75,11},{76,11},{77,11},{78,11},{79,11},{80,12},
+ {81,12},{82,12},{83,12},{84,12},{85,12},{86,12},{87,12},{88,13},
+ {89,13},{90,13},{91,13},{92,13},{93,13},{94,13},{95,13},{96,14},
+ {97,14},{98,14},{99,14},{100,14},{101,14},{102,14},{103,14},
+ {104,15},{105,15},{106,15},{107,15},{108,15},{109,15},{110,15},
+ {111,15},{112,16},{113,16},{114,16},{115,16},{116,16},{117,16},
+ {118,16},{119,16},{120,17},{121,17},{122,17},{123,17},{124,17},
+ {125,17},{126,17},{127,17}]
+ = reader_groups_map(CPUT1, 17),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,2},{17,2},{18,2},{19,2},
+ {20,2},{21,2},{22,2},{23,2},{24,2},{25,2},{26,2},{27,2},{28,2},
+ {29,2},{30,2},{31,2},{32,3},{33,3},{34,3},{35,3},{36,3},{37,3},
+ {38,3},{39,3},{40,3},{41,3},{42,3},{43,3},{44,3},{45,3},{46,3},
+ {47,3},{48,4},{49,4},{50,4},{51,4},{52,4},{53,4},{54,4},{55,4},
+ {56,4},{57,4},{58,4},{59,4},{60,4},{61,4},{62,4},{63,4},{64,5},
+ {65,5},{66,5},{67,5},{68,5},{69,5},{70,5},{71,5},{72,5},{73,5},
+ {74,5},{75,5},{76,5},{77,5},{78,5},{79,5},{80,6},{81,6},{82,6},
+ {83,6},{84,6},{85,6},{86,6},{87,6},{88,6},{89,6},{90,6},{91,6},
+ {92,6},{93,6},{94,6},{95,6},{96,7},{97,7},{98,7},{99,7},{100,7},
+ {101,7},{102,7},{103,7},{104,7},{105,7},{106,7},{107,7},{108,7},
+ {109,7},{110,7},{111,7},{112,7},{113,7},{114,7},{115,7},{116,7},
+ {117,7},{118,7},{119,7},{120,7},{121,7},{122,7},{123,7},{124,7},
+ {125,7},{126,7},{127,7}]
+ = reader_groups_map(CPUT1, 7),
+
+ ?line CPUT2 = [p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))]),
+ p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},{6,2},{7,2},{8,2},{9,2},
+ {10,3},
+ {11,4},{12,4},{13,4},
+ {14,5},{15,5}] = reader_groups_map(CPUT2, 5),
+
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,5},{13,5},
+ {14,6},{15,6}] = reader_groups_map(CPUT2, 6),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,7}] = reader_groups_map(CPUT2, 7),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,8}] = reader_groups_map(CPUT2, 8),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,7},
+ {14,8},{15,9}] = reader_groups_map(CPUT2, 9),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,8},
+ {14,9},{15,10}] = reader_groups_map(CPUT2, 10),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,4},
+ {5,5},{6,5},{7,5},{8,5},{9,5},
+ {10,6},
+ {11,7},{12,8},{13,9},
+ {14,10},{15,11}] = reader_groups_map(CPUT2, 11),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,5},
+ {5,6},{6,6},{7,6},{8,6},{9,6},
+ {10,7},
+ {11,8},{12,9},{13,10},
+ {14,11},{15,12}] = reader_groups_map(CPUT2, 100),
+
+ CPUT3 = [p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))]),
+ p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))])],
+
+ ?line [{0,5},{1,5},{2,6},{3,6},{4,6},
+ {5,1},{6,1},{7,1},{8,1},{9,1},
+ {10,2},{11,3},{12,3},{13,3},
+ {14,4},{15,4}] = reader_groups_map(CPUT3, 6),
+
+ CPUT4 = [p([t(l(0)),t(l(1)),t(l(2)),t(l(3)),t(l(4))]),
+ p([t(l(5))]),
+ p([c(l(6)),c(l(7)),c(l(8))]),
+ p([c(l(9)),c(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13)),c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,3},{8,3},
+ {9,4},{10,4},
+ {11,5},{12,5},{13,6},{14,6},{15,6}] = reader_groups_map(CPUT4, 6),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,4},{8,4},
+ {9,5},{10,5},
+ {11,6},{12,6},{13,7},{14,7},{15,7}] = reader_groups_map(CPUT4, 7),
+
+ ?line [{0,1},{65535,2}] = reader_groups_map([c(l(0)),c(l(65535))], 10),
+
+ ?line ok.
+reader_groups_map(CPUT, Groups) ->
+ Old = erlang:system_info({cpu_topology, defined}),
+ erlang:system_flag(cpu_topology, CPUT),
+ enable_internal_state(),
+ Res = erts_debug:get_internal_state({reader_groups_map, Groups}),
+ erlang:system_flag(cpu_topology, Old),
+ lists:sort(Res).
+
%%
%% Utils
%%
+tilera_cpu_topology() ->
+ [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}].
+
+l(Id) ->
+ {logical, Id}.
+
+t(X) ->
+ {thread, X}.
+
+c(X) ->
+ {core, X}.
+
+p(X) ->
+ {processor, X}.
+
+n(X) ->
+ {node, X}.
+
mcall(Node, Funs) ->
Parent = self(),
Refs = lists:map(fun (Fun) ->
diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl
index 489adbd660..5fd01a9ac5 100644
--- a/erts/emulator/test/send_term_SUITE.erl
+++ b/erts/emulator/test/send_term_SUITE.erl
@@ -76,40 +76,43 @@ basic(Config) when is_list(Config) ->
?line ExpectedBinTup = term(P, 7),
%% single terms
- ?line [] = term(P, 8), % ERL_DRV_NIL
- ?line '' = term(P, 9), % ERL_DRV_ATOM
- ?line an_atom = term(P, 10), % ERL_DRV_ATOM
- ?line -4711 = term(P, 11), % ERL_DRV_INT
- ?line 4711 = term(P, 12), % ERL_DRV_UINT
- ?line P = term(P, 13), % ERL_DRV_PORT
- ?line <<>> = term(P, 14), % ERL_DRV_BINARY
- ?line <<"hejsan">> = term(P, 15), % ERL_DRV_BINARY
- ?line <<>> = term(P, 16), % ERL_DRV_BUF2BINARY
- ?line <<>> = term(P, 17), % ERL_DRV_BUF2BINARY
- ?line <<"hoppsan">> = term(P, 18), % ERL_DRV_BUF2BINARY
- ?line "" = term(P, 19), % ERL_DRV_STRING
- ?line "" = term(P, 20), % ERL_DRV_STRING
- ?line "hippsan" = term(P, 21), % ERL_DRV_STRING
- ?line {} = term(P, 22), % ERL_DRV_TUPLE
- ?line [] = term(P, 23), % ERL_DRV_LIST
- ?line Self = term(P, 24), % ERL_DRV_PID
- ?line [] = term(P, 25), % ERL_DRV_STRING_CONS
- ?line AFloat = term(P, 26), % ERL_DRV_FLOAT
+ Singles = [{[], 8}, % ERL_DRV_NIL
+ {'', 9}, % ERL_DRV_ATOM
+ {an_atom, 10}, % ERL_DRV_ATOM
+ {-4711, 11}, % ERL_DRV_INT
+ {4711, 12}, % ERL_DRV_UINT
+ {P, 13}, % ERL_DRV_PORT
+ {<<>>, 14}, % ERL_DRV_BINARY
+ {<<"hejsan">>, 15}, % ERL_DRV_BINARY
+ {<<>>, 16}, % ERL_DRV_BUF2BINARY
+ {<<>>, 17}, % ERL_DRV_BUF2BINARY
+ {<<"hoppsan">>, 18}, % ERL_DRV_BUF2BINARY
+ {"", 19}, % ERL_DRV_STRING
+ {"", 20}, % ERL_DRV_STRING
+ {"hippsan", 21}, % ERL_DRV_STRING
+ {{}, 22}, % ERL_DRV_TUPLE
+ {[], 23}, % ERL_DRV_LIST
+ {Self, 24}, % ERL_DRV_PID
+ {[], 25}, % ERL_DRV_STRING_CONS
+ {[], 27}, % ERL_DRV_EXT2TERM
+ {18446744073709551615, 28}, % ERL_DRV_UINT64
+ {20233590931456, 29}, % ERL_DRV_UINT64
+ {4711, 30}, % ERL_DRV_UINT64
+ {0, 31}, % ERL_DRV_UINT64
+ {9223372036854775807, 32}, % ERL_DRV_INT64
+ {20233590931456, 33}, % ERL_DRV_INT64
+ {4711, 34}, % ERL_DRV_INT64
+ {0, 35}, % ERL_DRV_INT64
+ {-1, 36}, % ERL_DRV_INT64
+ {-4711, 37}, % ERL_DRV_INT64
+ {-20233590931456, 38}, % ERL_DRV_INT64
+ {-9223372036854775808, 39}], % ERL_DRV_INT64
+ ?line {Terms, Ops} = lists:unzip(Singles),
+ ?line Terms = term(P,Ops),
+
+ AFloat = term(P, 26), % ERL_DRV_FLOAT
?line true = AFloat < 0.001,
?line true = AFloat > -0.001,
- ?line [] = term(P, 27), % ERL_DRV_EXT2TERM
- ?line 18446744073709551615 = term(P, 28), % ERL_DRV_UINT64
- ?line 20233590931456 = term(P, 29), % ERL_DRV_UINT64
- ?line 4711 = term(P, 30), % ERL_DRV_UINT64
- ?line 0 = term(P, 31), % ERL_DRV_UINT64
- ?line 9223372036854775807 = term(P, 32), % ERL_DRV_INT64
- ?line 20233590931456 = term(P, 33), % ERL_DRV_INT64
- ?line 4711 = term(P, 34), % ERL_DRV_INT64
- ?line 0 = term(P, 35), % ERL_DRV_INT64
- ?line -1 = term(P, 36), % ERL_DRV_INT64
- ?line -4711 = term(P, 37), % ERL_DRV_INT64
- ?line -20233590931456 = term(P, 38), % ERL_DRV_INT64
- ?line -9223372036854775808 = term(P, 39), % ERL_DRV_INT64
%% Failure cases.
?line [] = term(P, 127),
diff --git a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
index 6638de0560..165cce2e9d 100644
--- a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
+++ b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
@@ -17,6 +17,7 @@
*/
#include "erl_driver.h"
+#include <stdio.h>
#include <errno.h>
#include <string.h>
@@ -65,12 +66,21 @@ static void fail_term(ErlDrvTermData* msg, int len, int line);
static void send_term_drv_run(ErlDrvData port, char *buf, int count)
{
- ErlDrvTermData msg[1024];
-
- switch (*buf) {
+ char buf7[1024];
+ ErlDrvTermData spec[1024];
+ ErlDrvTermData* msg = spec;
+ ErlDrvBinary* bins[15];
+ int bin_ix = 0;
+ ErlDrvSInt64 s64[15];
+ int s64_ix = 0;
+ ErlDrvUInt64 u64[15];
+ int u64_ix = 0;
+ int i = 0;
+
+ for (i=0; i<count; i++) switch (buf[i]) {
case 0:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 1: /* Most term types inside a tuple. */
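Note: with the request loop introduced above, the driver no longer replies once per request; each opcode byte in `buf` appends to a single spec and the reply is sent once at the end, so everything a spec entry points at (allocated binaries, 64-bit integers, string buffers) must stay valid until that final output call. The `bins`, `s64` and `u64` arrays exist for that bookkeeping, and the binaries are released only after the output. A minimal sketch of the underlying lifetime rule, assuming `port` is the driver's ErlDrvPort; `send_binary_once` is hypothetical and not part of the patch:

    #include <string.h>
    #include "erl_driver.h"

    static void send_binary_once(ErlDrvPort port)
    {
        ErlDrvBinary *bin = driver_alloc_binary(3);
        ErlDrvTermData spec[4];
        memcpy(bin->orig_bytes, "abc", 3);
        spec[0] = ERL_DRV_BINARY;
        spec[1] = (ErlDrvTermData) bin;
        spec[2] = (ErlDrvTermData) 3;      /* size in bytes */
        spec[3] = (ErlDrvTermData) 0;      /* offset into the binary */
        driver_output_term(port, spec, 4);
        driver_free_binary(bin);           /* release only after the output call */
    }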
@@ -102,7 +112,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[22] = driver_connected(erlang_port);
msg[23] = ERL_DRV_TUPLE;
msg[24] = (ErlDrvTermData) 7;
- output_term(msg, 25);
+ msg += 25;
}
break;
@@ -117,7 +127,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[i] = ERL_DRV_NIL;
msg[i+1] = ERL_DRV_LIST;
msg[i+2] = (ErlDrvTermData) 201;
- output_term(msg, i+3);
+ msg += i+3;
}
break;
@@ -126,7 +136,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
ErlDrvBinary* bin;
int i;
- bin = driver_alloc_binary(256);
+ bin = bins[bin_ix++] = driver_alloc_binary(256);
for (i = 0; i < 256; i++) {
bin->orig_bytes[i] = i;
}
@@ -140,8 +150,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[7] = (ErlDrvTermData) 23;
msg[8] = ERL_DRV_TUPLE;
msg[9] = (ErlDrvTermData) 2;
- output_term(msg, 10);
- driver_free_binary(bin);
+ msg += 10;
}
break;
@@ -152,11 +161,11 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = driver_caller(erlang_port);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 5:
- output_term(msg, make_ext_term_list(msg, 0));
+ msg += make_ext_term_list(msg, 0);
break;
case 6:
@@ -166,94 +175,91 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = ~((ErlDrvTermData) 0);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 7: {
int len = 0;
- char buf[1024];
- memset(buf, 17, sizeof(buf));
+ memset(buf7, 17, sizeof(buf7));
/* empty heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
msg[len++] = (ErlDrvTermData) NULL; /* NULL is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* empty heap binary again */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0]; /* ptr is ok if size == 0 */
+ msg[len++] = (ErlDrvTermData) buf7; /* ptr is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
+ msg[len++] = (ErlDrvTermData) buf7;
msg[len++] = (ErlDrvTermData) 17;
/* off heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
- msg[len++] = (ErlDrvTermData) sizeof(buf);
+ msg[len++] = (ErlDrvTermData) buf7;
+ msg[len++] = (ErlDrvTermData) sizeof(buf7);
msg[len++] = ERL_DRV_TUPLE;
msg[len++] = (ErlDrvTermData) 4;
- output_term(msg, len);
+ msg += len;
break;
}
case 8:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 9:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("");
- output_term(msg, 2);
+ msg += 2;
break;
case 10:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("an_atom");
- output_term(msg, 2);
+ msg += 2;
break;
case 11:
msg[0] = ERL_DRV_INT;
msg[1] = (ErlDrvTermData) -4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 12:
msg[0] = ERL_DRV_UINT;
msg[1] = (ErlDrvTermData) 4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 13:
msg[0] = ERL_DRV_PORT;
msg[1] = driver_mk_port(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 14: {
- ErlDrvBinary *dbin = driver_alloc_binary(0);
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(0);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) 0;
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
case 15: {
- char buf[] = "hejsan";
- ErlDrvBinary *dbin = driver_alloc_binary(sizeof(buf)-1);
+ static const char buf[] = "hejsan";
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(sizeof(buf)-1);
if (dbin)
memcpy((void *) dbin->orig_bytes, (void *) buf, sizeof(buf)-1);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) (dbin ? sizeof(buf)-1 : 0);
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
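Note: the buffers referenced by ERL_DRV_BUF2BINARY and ERL_DRV_STRING entries are read when the spec is consumed, not when the entry is written. Since the output is now deferred past the end of each case block, the block-local char arrays are promoted to `static const` (and the 1 KB scratch buffer to the function-scope `buf7`) so the pointers stored in the spec do not dangle. A hypothetical helper illustrating the pattern; `append_buf2binary` is not part of the patch and assumes the caller keeps `buf` alive until the spec is output:

    #include <stddef.h>
    #include "erl_driver.h"

    /* Append a heap-binary entry to a deferred spec; the data pointed at
     * must outlive the spec, hence the static buffers in the driver. */
    static ErlDrvTermData *append_buf2binary(ErlDrvTermData *msg,
                                             const char *buf, size_t len)
    {
        msg[0] = ERL_DRV_BUF2BINARY;
        msg[1] = (ErlDrvTermData) buf;
        msg[2] = (ErlDrvTermData) len;
        return msg + 3;
    }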
@@ -261,24 +267,24 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) NULL;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 17: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 18: {
- char buf[] = "hoppsan";
+ static const char buf[] = "hoppsan";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
@@ -286,44 +292,44 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 20: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 21: {
- char buf[] = "hippsan";
+ static const char buf[] = "hippsan";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 22:
msg[0] = ERL_DRV_TUPLE;
msg[1] = (ErlDrvTermData) 0;
- output_term(msg, 2);
+ msg += 2;
break;
case 23:
msg[0] = ERL_DRV_NIL;
msg[1] = ERL_DRV_LIST;
msg[2] = (ErlDrvTermData) 1;
- output_term(msg, 3);
+ msg += 3;
break;
case 24:
msg[0] = ERL_DRV_PID;
msg[1] = driver_connected(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 25:
@@ -331,132 +337,131 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[1] = ERL_DRV_STRING_CONS;
msg[2] = (ErlDrvTermData) "";
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
+ msg += 4;
break;
case 26: {
- double my_float = 0.0;
+ static double my_float = 0.0;
msg[0] = ERL_DRV_FLOAT;
msg[1] = (ErlDrvTermData) &my_float;
- output_term(msg, 2);
+ msg += 2;
break;
}
case 27: {
- char buf[] = {131, 106}; /* [] */
+ static char buf[] = {131, 106}; /* [] */
msg[0] = ERL_DRV_EXT2TERM;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf);
- output_term(msg, 3);
+ msg += 3;
break;
}
case 28: {
- ErlDrvUInt64 x = ~((ErlDrvUInt64) 0);
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ~((ErlDrvUInt64) 0);
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 29: {
- ErlDrvUInt64 x = ((ErlDrvUInt64) 4711) << 32;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ((ErlDrvUInt64) 4711) << 32;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 30: {
- ErlDrvUInt64 x = 4711;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 31: {
- ErlDrvUInt64 x = 0;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 32: {
- ErlDrvSInt64 x = ((((ErlDrvUInt64) 0x7fffffff) << 32)
- | ((ErlDrvUInt64) 0xffffffff));
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((((ErlDrvUInt64) 0x7fffffff) << 32) | ((ErlDrvUInt64) 0xffffffff));
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 33: {
- ErlDrvSInt64 x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 34: {
- ErlDrvSInt64 x = 4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 35: {
- ErlDrvSInt64 x = 0;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 36: {
- ErlDrvSInt64 x = -1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 37: {
- ErlDrvSInt64 x = -4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 38: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 39: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) 1) << 63;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) 1) << 63;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
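Note: ERL_DRV_INT64 and ERL_DRV_UINT64 take a pointer to the value rather than the value itself, so with the output deferred the former block-local `x` variables would be out of scope by the time the spec is consumed; the values are therefore parked in the `s64`/`u64` arrays. The same reasoning makes the ERL_DRV_FLOAT double static in case 26. A hypothetical helper showing the rule; `append_int64` is not part of the patch:

    #include "erl_driver.h"

    /* Store the value in caller-provided stable storage and reference it
     * from the spec; the slot must stay valid until the spec is output. */
    static ErlDrvTermData *append_int64(ErlDrvTermData *msg,
                                        ErlDrvSInt64 *slot, ErlDrvSInt64 val)
    {
        *slot = val;
        msg[0] = ERL_DRV_INT64;
        msg[1] = (ErlDrvTermData) slot;
        return msg + 2;
    }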
@@ -464,7 +469,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
case 127: /* Error cases */
{
long refc;
- ErlDrvBinary* bin = driver_alloc_binary(256);
+ ErlDrvBinary* bin = bins[bin_ix++] = driver_alloc_binary(256);
FAIL_TERM(msg, 0);
@@ -537,7 +542,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
refc = driver_binary_get_refc(bin);
if (refc > 3) {
char sbuf[128];
- sprintf(sbuf, "bad_refc:%d", refc);
+ sprintf(sbuf, "bad_refc:%ld", refc);
driver_failure_atom(erlang_port, sbuf);
}
driver_free_binary(bin);
@@ -644,6 +649,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
/* Signal end of test case */
msg[0] = ERL_DRV_NIL;
driver_output_term(erlang_port, msg, 1);
+ return;
}
break;
@@ -651,6 +657,16 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
driver_failure_atom(erlang_port, "bad_request");
break;
}
+ if (count > 1) {
+ *msg++ = ERL_DRV_NIL;
+ *msg++ = ERL_DRV_LIST;
+ *msg++ = count + 1;
+ }
+ output_term(spec, msg-spec);
+ if ((bin_ix|s64_ix|u64_ix) > 15) abort();
+ while (bin_ix) {
+ driver_free_binary(bins[--bin_ix]);
+ }
}
static void output_term(ErlDrvTermData* msg, int len)
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index 2ad1f0d201..095e9dd1af 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -34,6 +34,8 @@
consistency/1,
now/1, now_unique/1, now_update/1, timestamp/1]).
+-export([local_to_univ_utc/1]).
+
-include("test_server.hrl").
-export([linear_time/1]).
@@ -53,7 +55,40 @@
-define(dst_timezone, 2).
all(suite) -> [univ_to_local, local_to_univ,
- bad_univ_to_local, bad_local_to_univ, consistency, now, timestamp].
+ local_to_univ_utc,
+ bad_univ_to_local, bad_local_to_univ,
+ consistency, now, timestamp].
+
+local_to_univ_utc(suite) ->
+ [];
+local_to_univ_utc(doc) ->
+ ["Test that DST = true on timezones without DST is ignored"];
+local_to_univ_utc(Config) when is_list(Config) ->
+ case os:type() of
+ {unix,_} ->
+ %% TZ variable has a meaning
+ ?line {ok, Node} =
+ test_server:start_node(local_univ_utc,peer,
+ [{args, "-env TZ UTC"}]),
+ ?line {{2008,8,1},{0,0,0}} =
+ rpc:call(Node,
+ erlang,localtime_to_universaltime,
+ [{{2008, 8, 1}, {0, 0, 0}},
+ false]),
+ ?line {{2008,8,1},{0,0,0}} =
+ rpc:call(Node,
+ erlang,localtime_to_universaltime,
+ [{{2008, 8, 1}, {0, 0, 0}},
+ true]),
+ ?line [{{2008,8,1},{0,0,0}}] =
+ rpc:call(Node,
+ calendar,local_time_to_universal_time_dst,
+ [{{2008, 8, 1}, {0, 0, 0}}]),
+ ?line test_server:stop_node(Node),
+ ok;
+ _ ->
+ {skip,"Only valid on Unix"}
+ end.
%% Tests conversion from universal to local time.