path: root/erts/emulator
Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 58
-rw-r--r--  erts/emulator/beam/atom.names | 1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 2
-rw-r--r--  erts/emulator/beam/beam_bp.c | 3
-rw-r--r--  erts/emulator/beam/beam_emu.c | 14
-rw-r--r--  erts/emulator/beam/beam_load.c | 2
-rw-r--r--  erts/emulator/beam/bif.c | 41
-rw-r--r--  erts/emulator/beam/code_ix.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 15
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 4
-rw-r--r--  erts/emulator/beam/erl_async.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 5
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 2
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 3
-rw-r--r--  erts/emulator/beam/erl_driver.h | 11
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 7
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 5
-rw-r--r--  erts/emulator/beam/erl_init.c | 29
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 76
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 26
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 2
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 6
-rw-r--r--  erts/emulator/beam/erl_nif.c | 26
-rw-r--r--  erts/emulator/beam/erl_nif.h | 4
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 3
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 8
-rw-r--r--  erts/emulator/beam/erl_process.c | 1469
-rw-r--r--  erts/emulator/beam/erl_process.h | 63
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 2
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 64
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 25
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 3
-rw-r--r--  erts/emulator/beam/erl_smp.h | 85
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 3
-rw-r--r--  erts/emulator/beam/erl_threads.h | 128
-rw-r--r--  erts/emulator/beam/erl_trace.c | 10
-rw-r--r--  erts/emulator/beam/erl_utils.h | 2
-rw-r--r--  erts/emulator/beam/io.c | 23
-rw-r--r--  erts/emulator/beam/ops.tab | 10
-rw-r--r--  erts/emulator/beam/sys.h | 2
-rw-r--r--  erts/emulator/beam/utils.c | 2
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 17
-rw-r--r--  erts/emulator/drivers/common/erl_efile.h | 2
-rw-r--r--  erts/emulator/drivers/common/gzio.c | 10
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 110
-rw-r--r--  erts/emulator/drivers/ose/ose_efile.c | 1124
-rw-r--r--  erts/emulator/drivers/ose/ose_signal_drv.c | 896
-rw-r--r--  erts/emulator/drivers/ose/ttsl_drv.c | 68
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 8
-rw-r--r--  erts/emulator/drivers/win32/win_efile.c | 5
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 3
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 3
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 40
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 25
-rw-r--r--  erts/emulator/sys/ose/default.lmconf | 25
-rw-r--r--  erts/emulator/sys/ose/driver_int.h | 41
-rw-r--r--  erts/emulator/sys/ose/erl_main.c | 51
-rw-r--r--  erts/emulator/sys/ose/erl_ose_sys.h | 353
-rw-r--r--  erts/emulator/sys/ose/erl_ose_sys_ddll.c | 126
-rw-r--r--  erts/emulator/sys/ose/erl_poll.c | 774
-rw-r--r--  erts/emulator/sys/ose/erts.sig | 17
-rw-r--r--  erts/emulator/sys/ose/gcc_4.4.3_lm_ppc.lcf | 182
-rw-r--r--  erts/emulator/sys/ose/gcc_4.6.3_lm_ppc.lcf | 242
-rw-r--r--  erts/emulator/sys/ose/sys.c | 1755
-rw-r--r--  erts/emulator/sys/ose/sys_float.c | 844
-rw-r--r--  erts/emulator/sys/ose/sys_time.c | 56
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 5
-rw-r--r--  erts/emulator/sys/unix/sys_float.c | 2
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 4
-rwxr-xr-x  erts/emulator/sys/win32/sys.c | 2
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 95
75 files changed, 8724 insertions, 439 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 5e5b98bbd6..523130d01a 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -22,6 +22,9 @@ include ../vsn.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-include $(TARGET)/gen_git_version.mk
+ifeq ($(findstring ose,$(TARGET)),ose)
+include $(ERL_TOP)/make/$(TARGET)/ose_lm.mk
+endif
ENABLE_ALLOC_TYPE_VARS = @ENABLE_ALLOC_TYPE_VARS@
HIPE_ENABLED=@HIPE_ENABLED@
@@ -234,7 +237,9 @@ HCC = @HCC@
LD = @LD@
DEXPORT = @DEXPORT@
RANLIB = @RANLIB@
+ifneq ($(findstring ose,$(TARGET)),ose)
STRIP = strip
+endif
PERL = @PERL@
RM = @RM@
MKDIR = @MKDIR@
@@ -684,6 +689,14 @@ $(OBJDIR)/%.o: $(TTF_DIR)/%.c
$(OBJDIR)/%.o: sys/$(ERLANG_OSTYPE)/%.c
$(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+ifeq ($(findstring ose,$(TARGET)),ose)
+$(OBJDIR)/ose_confd.o: $(OSE_CONFD)
+ $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+$(OBJDIR)/crt0_lm.o: $(CRT0_LM)
+ $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+endif
+
$(OBJDIR)/%.o: sys/common/%.c
$(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
@@ -786,7 +799,8 @@ DRV_OBJS = \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
$(OBJDIR)/zlib_drv.o \
- $(OBJDIR)/ram_file_drv.o
+ $(OBJDIR)/ram_file_drv.o \
+ $(OBJDIR)/ttsl_drv.o
OS_OBJS = \
$(OBJDIR)/win_efile.o \
$(OBJDIR)/win_con.o \
@@ -798,6 +812,30 @@ OS_OBJS = \
$(OBJDIR)/sys_interrupt.o \
$(OBJDIR)/sys_env.o \
$(OBJDIR)/dosmap.o
+
+else
+ifeq ($(findstring ose,$(TARGET)),ose)
+OS_OBJS = \
+ $(OBJDIR)/sys.o \
+ $(OBJDIR)/driver_tab.o \
+ $(OBJDIR)/ose_efile.o \
+ $(OBJDIR)/gzio.o \
+ $(OBJDIR)/elib_memmove.o
+
+OS_OBJS += $(OBJDIR)/ose_confd.o \
+ $(OBJDIR)/crt0_lm.o
+
+OS_OBJS += $(OBJDIR)/sys_float.o \
+ $(OBJDIR)/sys_time.o
+
+DRV_OBJS = \
+ $(OBJDIR)/efile_drv.o \
+ $(OBJDIR)/ose_signal_drv.o \
+ $(OBJDIR)/inet_drv.o \
+ $(OBJDIR)/zlib_drv.o \
+ $(OBJDIR)/ram_file_drv.o \
+ $(OBJDIR)/ttsl_drv.o
+
else
OS_OBJS = \
$(OBJDIR)/sys.o \
@@ -812,10 +850,10 @@ DRV_OBJS = \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
$(OBJDIR)/zlib_drv.o \
- $(OBJDIR)/ram_file_drv.o
+ $(OBJDIR)/ram_file_drv.o \
+ $(OBJDIR)/ttsl_drv.o
+endif
endif
-
- DRV_OBJS += $(OBJDIR)/ttsl_drv.o
ifneq ($(STATIC_NIFS),no)
STATIC_NIF_LIBS = $(STATIC_NIFS)
@@ -836,7 +874,9 @@ endif
ifeq ($(COMPILE_STATIC_LIBS),yes)
$(STATIC_NIF_LIBS) $(STATIC_DRIVER_LIBS):
- $(MAKE) BUILD_STATIC_LIBS=1 TYPE=$(TYPE) -C $(ERL_TOP)/lib/ static_lib
+ echo "=== Entering lib to make static libs"
+ (cd $(ERL_TOP)/lib/ && $(MAKE) BUILD_STATIC_LIBS=1 TYPE=$(TYPE) static_lib)
+ echo "=== Leaving lib after making static libs"
endif
ifeq ($(ERTS_ENABLE_KERNEL_POLL),yes)
@@ -986,6 +1026,13 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
$(ld_verbose)$(PURIFY) $(LD) -dll -def:sys/$(ERLANG_OSTYPE)/erl.def -implib:$(BINDIR)/erl_dll.lib -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \
$(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(STATIC_NIF_LIBS) \
$(STATIC_DRIVER_LIBS) $(LIBS)
+
+else
+ifeq ($(findstring ose,$(TARGET)),ose)
+$(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) $(LCF)
+ $(call build-ose-load-module, $@, $(INIT_OBJS) $(OBJS), $(STATIC_NIF_LIBS) \
+ $(STATIC_DRIVER_LIBS) $(LIBS), $(LMCONF))
+
else
$(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
echo $(DEPLIBS)
@@ -994,6 +1041,7 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
$(STATIC_NIF_LIBS) $(STATIC_DRIVER_LIBS) $(LIBS)
endif
+endif
# ----------------------------------------------------------------------
# Dependencies
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 96547ba743..d28e519ae1 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -179,6 +179,7 @@ atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_cpu_schedulers_online
atom disable_trace
atom disabled
atom display_items
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 3f92c5b025..df1983a83d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -201,7 +201,7 @@ finish_loading_1(BIF_ALIST_1)
* to keep the elements in.
*/
- n = list_length(BIF_ARG_1);
+ n = erts_list_length(BIF_ARG_1);
if (n == -1) {
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
goto done;
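Throughout this commit the internal helper list_length() is renamed to erts_list_length(). A minimal caller-side sketch of the idiom implied by the call sites in this diff (a negative return flags an improper list); the snippet is illustrative, not part of the change:

    /* Hedged sketch of the erts_list_length() calling convention. */
    int n = erts_list_length(list);
    if (n < 0) {
        /* improper list: reject the argument in a BIF context */
        BIF_ERROR(BIF_P, BADARG);
    }
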
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 68907a771a..49a34ab4ad 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -46,7 +46,8 @@
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 6f295530b9..0cec9ea3ec 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -71,7 +71,8 @@ do { \
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
# else
@@ -1190,11 +1191,16 @@ void process_main(void)
* c_p->arg_reg before calling the scheduler.
*/
if (!init_done) {
+ /* This should only be reached during the init phase when only the main
+ * process is running. I.e. there is no race for init_done.
+ */
init_done = 1;
goto init_emulator;
}
+
c_p = NULL;
reds_used = 0;
+
goto do_schedule1;
do_schedule:
@@ -1203,7 +1209,11 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
- if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
+#ifdef ERTS_DIRTY_SCHEDULERS
+ && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#endif
+ ) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
BeamInstr *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 53df7876d8..e96177cfd9 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -5870,7 +5870,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
Funcs = tp[1];
Patchlist = tp[2];
- if ((n = list_length(Funcs)) < 0) {
+ if ((n = erts_list_length(Funcs)) < 0) {
goto error;
}
if ((bytes = erts_get_aligned_binary_bytes(Beam, &temp_alloc)) == NULL) {
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 9c4801041f..06a1230ca0 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -2675,7 +2675,7 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i > MAX_ATOM_CHARACTERS) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
@@ -2953,7 +2953,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
char *buf = NULL;
int base;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3292,7 +3292,7 @@ BIF_RETTYPE list_to_float_1(BIF_ALIST_1)
Eterm res;
char *buf = NULL;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3407,7 +3407,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* hp;
int len;
- if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
+ if ((len = erts_list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -4333,7 +4333,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
- &old_no)) {
+ &old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , 0
+#endif
+ )) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -4465,6 +4469,33 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
ref,
old ? am_true : am_false);
}
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
+ Sint old_no;
+ if (!is_small(BIF_ARG_2))
+ goto error;
+ switch (erts_set_schedulers_online(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ signed_val(BIF_ARG_2),
+ &old_no,
+ 1)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(old_no));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
+ am_dirty_cpu_schedulers_online);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
+#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index c66d5a2f05..4344558348 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -58,7 +58,8 @@ void erts_code_ix_init(void)
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_key_create(&has_code_write_permission);
+ erts_tsd_key_create(&has_code_write_permission,
+ "erts_has_code_write_permission");
#endif
CIX_TRACE("init");
}
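Note that erts_tsd_key_create() (and the ethr_/erts_smp_ variants touched elsewhere in this commit) now takes a descriptive name for the key. A minimal sketch of the new calling convention, assuming the name is only used for diagnostics; the key and the functions around it are illustrative:

    /* Hedged sketch: a named thread-specific-data key. */
    static erts_tsd_key_t my_subsys_key;

    void my_subsys_init(void)
    {
        erts_tsd_key_create(&my_subsys_key, "my_subsys_key");
    }

    void my_subsys_remember(void *state)
    {
        erts_tsd_set(my_subsys_key, state);   /* per-thread store */
    }

    void *my_subsys_recall(void)
    {
        return erts_tsd_get(my_subsys_key);   /* per-thread fetch */
    }
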
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 64b3e01ed4..05ac24e04d 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -585,7 +585,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
if (ncpu < 1)
ncpu = 1;
- erts_tsd_key_create(&erts_allctr_prelock_tsd_key);
+ erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
+ "erts_allctr_prelock_tsd_key");
erts_sys_alloc_init();
erts_init_utils_mem();
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index f83f6b39cf..942eaa47d0 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -209,8 +209,8 @@ int erts_is_allctr_wrapper_prelocked(void);
void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
#ifndef ERTS_CACHE_LINE_SIZE
-/* Assume a cache line size of 64 bytes */
-# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
+/* Assumed cache line size */
+# define ERTS_CACHE_LINE_SIZE ((UWord) ASSUMED_CACHE_LINE_SIZE)
# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
#endif
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index b4e52770e3..17ac6316b7 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -414,6 +414,21 @@ type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+endif
++if ose
+
+type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
+type FD_TAB LONG_LIVED SYSTEM fd_tab
+type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
+type FD_SIG_LIST SHORT_LIVED SYSTEM fd_sig_list
+type DRV_EV STANDARD SYSTEM driver_event
+type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
+type ENVIRONMENT TEMPORARY SYSTEM environment
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+
++endif
+
+
+if win32
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index c6cea0185f..45f0cc4312 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -5561,11 +5561,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
erts_mtx_init_x_opt(&allctr->mutex,
"alcu_allocator",
make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
+ ERTS_LCNT_LT_ALLOC,1);
#else
erts_mtx_init_x(&allctr->mutex,
"alcu_allocator",
- make_small(allctr->alloc_no));
+ make_small(allctr->alloc_no),1);
#endif /*ERTS_ENABLE_LOCK_COUNT*/
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index f0cec1c53c..b3dc327704 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -166,6 +166,7 @@ async_ready_q(Uint sched_id)
#endif
+
void
erts_init_async(void)
{
@@ -226,11 +227,23 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
+#endif
+
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(thr_opts.name, "async_%d", i+1);
+#endif
+
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(thr_opts.name);
+#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 1805366cfe..820ed2385d 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -43,7 +43,7 @@ static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
Eterm* hp;
int i;
- if ((i = list_length(A)) < 0) {
+ if ((i = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
if (i == 0) {
@@ -102,10 +102,10 @@ static Eterm subtract(Process* p, Eterm A, Eterm B)
int n;
int m;
- if ((n = list_length(A)) < 0) {
+ if ((n = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
- if ((m = list_length(B)) < 0) {
+ if ((m = erts_list_length(B)) < 0) {
BIF_ERROR(p, BADARG);
}
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index f298422267..77627a6897 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -941,7 +941,8 @@ static char **convert_args(Eterm l)
if (is_not_list(l) && is_not_nil(l)) {
return NULL;
}
- n = list_length(l);
+
+ n = erts_list_length(l);
/* We require at least one element in argv[0] + NULL at end */
pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **));
pp[i++] = erts_default_arg0;
@@ -986,7 +987,7 @@ static byte* convert_environment(Process* p, Eterm env)
byte* bytes;
int encoding = erts_get_native_filename_encoding();
- if ((n = list_length(env)) < 0) {
+ if ((n = erts_list_length(env)) < 0) {
return NULL;
}
heap_size = 2*(5*n+1);
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 88c6c34881..f594cb9392 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1699,7 +1699,7 @@ erts_early_init_cpu_topology(int no_schedulers,
}
max_main_threads = erts_get_cpu_configured(cpuinfo);
- if (max_main_threads > no_schedulers)
+ if (max_main_threads > no_schedulers || max_main_threads < 0)
max_main_threads = no_schedulers;
*max_main_threads_p = max_main_threads;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index d17bd9f693..908cec11d4 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -33,7 +33,12 @@ typedef struct hash_db_term {
DbTerm dbterm; /* The actual term */
} HashDbTerm;
+#ifdef ERTS_DB_HASH_LOCK_CNT
+#define DB_HASH_LOCK_CNT ERTS_DB_HASH_LOCK_CNT
+#else
#define DB_HASH_LOCK_CNT 64
+#endif
+
typedef struct db_table_hash_fine_locks {
union {
erts_smp_rwmtx_t lck;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 3986ccd4d3..3927615e04 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -483,7 +483,8 @@ void
match_pseudo_process_init(void)
{
#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&match_pseudo_process_key);
+ erts_smp_tsd_key_create(&match_pseudo_process_key,
+ "erts_match_pseudo_process_key");
erts_smp_install_exit_handler(destroy_match_pseudo_process);
#else
match_pseudo_process = create_match_pseudo_process();
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index ab9ee63104..5517c26ba4 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -271,7 +271,6 @@ typedef struct ErlDrvCond_ ErlDrvCond;
typedef struct ErlDrvRWLock_ ErlDrvRWLock;
typedef int ErlDrvTSDKey;
-
/*
*
*/
@@ -680,6 +679,16 @@ EXTERN char *driver_dl_error(void);
EXTERN int erl_drv_putenv(char *key, char *value);
EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
+#ifdef __OSE__
+typedef ErlDrvUInt ErlDrvOseEventId;
+EXTERN union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent ev);
+EXTERN ErlDrvEvent erl_drv_ose_event_alloc(SIGSELECT sig, ErlDrvOseEventId handle,
+ ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra);
+EXTERN void erl_drv_ose_event_free(ErlDrvEvent ev);
+EXTERN void erl_drv_ose_event_fetch(ErlDrvEvent ev, SIGSELECT *sig,
+ ErlDrvOseEventId *handle, void **extra);
+#endif
+
#endif /* !ERL_DRIVER_TYPES_ONLY */
#ifdef WIN32_DYNAMIC_ERL_DRIVER
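The block above introduces an OSE-only driver API that wraps OSE signals in ErlDrvEvent handles so they can be passed to driver_select(). A hedged sketch of how a driver might use it; MY_SIG_NO, the signal layout, and the callback names are hypothetical, only the erl_drv_ose_* calls come from the declarations above:

    #ifdef __OSE__
    /* Hedged sketch: wiring an OSE signal number into driver_select(). */
    static ErlDrvOseEventId resolve_sig(union SIGNAL *sig)
    {
        return (ErlDrvOseEventId) sig->sig_no;     /* hypothetical mapping */
    }

    static void subscribe_signal(ErlDrvPort port)
    {
        ErlDrvEvent ev = erl_drv_ose_event_alloc(MY_SIG_NO, 0, resolve_sig, NULL);
        driver_select(port, ev, ERL_DRV_READ | ERL_DRV_USE, 1);
    }

    static void my_ready_input(ErlDrvData data, ErlDrvEvent ev)
    {
        union SIGNAL *sig = erl_drv_ose_get_signal(ev);
        /* ... handle the signal, then release it per the OSE conventions ... */
    }
    #endif
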
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index ea013a49a3..3f829ea7ea 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -41,6 +41,13 @@ typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
+#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+typedef enum {
+ ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DRV_DIRTY_JOB_IO_BOUND = 2
+} ErlDrvDirtyJobFlags;
+#endif
+
#endif /* __ERL_DRV_NIF_H__ */
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 4f1bba8657..147249f751 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -78,8 +78,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-static ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
-
#else /* USE_THREADS */
static Uint tsd_len;
static void **tsd;
@@ -123,7 +121,7 @@ void erl_drv_thr_init(void)
{
int i;
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(&tid_key);
+ int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
@@ -605,6 +603,7 @@ erl_drv_thread_create(char *name,
struct ErlDrvTid_ *dtid;
ethr_thr_opts ethr_opts;
ethr_thr_opts *use_opts;
+ ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
if (!opts)
use_opts = NULL;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index c17256f466..d54658f1ea 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -178,6 +178,11 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static int no_dirty_cpu_schedulers;
+static int no_dirty_cpu_schedulers_online;
+static int no_dirty_io_schedulers;
+#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -304,7 +309,13 @@ erl_init(int ncpu,
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online);
+ no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , no_dirty_cpu_schedulers,
+ no_dirty_cpu_schedulers_online,
+ no_dirty_io_schedulers
+#endif
+ );
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_init_gc(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -709,7 +720,7 @@ early_init(int *argc, char **argv) /*
#endif
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
- erts_tsd_key_create(&erts_is_crash_dumping_key);
+ erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
#else
erts_writing_erl_crash_dump = 0;
#endif
@@ -780,7 +791,7 @@ early_init(int *argc, char **argv) /*
case 'A': {
/* set number of threads in thread pool */
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ if (((erts_async_max_threads = atoi(arg)) < ERTS_MIN_NO_OF_ASYNC_THREADS) ||
(erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
erts_fprintf(stderr,
"bad number of async threads %s\n",
@@ -835,7 +846,7 @@ early_init(int *argc, char **argv) /*
else if (argv[i][2] == 'D') {
char *arg;
char *type = argv[i]+3;
- if (strcmp(type, "Pcpu") == 0) {
+ if (strncmp(type, "Pcpu", 4) == 0) {
int ptot, ponln;
arg = get_arg(argv[i]+7, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
@@ -872,7 +883,7 @@ early_init(int *argc, char **argv) /*
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler percentages\n",
dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg));
- } else if (strcmp(type, "cpu") == 0) {
+ } else if (strncmp(type, "cpu", 3) == 0) {
int tot, onln;
arg = get_arg(argv[i]+6, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &tot, &onln)) {
@@ -923,7 +934,7 @@ early_init(int *argc, char **argv) /*
}
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler(s)\n", tot, onln));
- } else if (strcmp(type, "io") == 0) {
+ } else if (strncmp(type, "io", 2) == 0) {
arg = get_arg(argv[i]+5, argv[i+1], &i);
dirty_io_scheds = atoi(arg);
if (dirty_io_scheds < 0 ||
@@ -1055,9 +1066,9 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_no_dirty_cpu_schedulers = dirty_cpu_scheds;
- erts_no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
- erts_no_dirty_io_schedulers = dirty_io_scheds;
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
+ no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
#endif
erts_early_init_scheduling(no_schedulers);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index a8ff94ac89..7e3a90779d 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -241,6 +241,8 @@ struct erts_lc_locked_lock_t_ {
erts_lc_locked_lock_t *prev;
UWord extra;
Sint16 id;
+ char *file;
+ unsigned int line;
Uint16 flags;
};
@@ -430,47 +432,51 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
+new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
l_lck->next = NULL;
l_lck->prev = NULL;
l_lck->id = lck->id;
l_lck->extra = lck->extra;
+ l_lck->file = file;
+ l_lck->line = line;
l_lck->flags = lck->flags | op_flags;
return l_lck;
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+ char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
? erts_lock_order[id].name
: "unknown");
+ erts_fprintf(stderr,"%s'%s:",prefix,lname);
+
if (is_not_immed(extra))
- erts_fprintf(stderr,
- "%s'%s:%p%s'%s%s",
- prefix,
- lname,
- _unchecked_boxed_val(extra),
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
- erts_fprintf(stderr,
- "%s'%s:%T%s'%s%s",
- prefix,
- lname,
- extra,
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%T",extra);
+ erts_fprintf(stderr,"%s",lock_type(flags));
+
+ if (file)
+ erts_fprintf(stderr,"(%s:%d)",file,line);
+
+ erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+}
+
+static void
+print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+{
+ raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
static void
print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
{
- print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
+ raw_print_lock(prefix, lck->id, lck->extra, lck->flags, NULL, 0, suffix);
}
static void
@@ -486,7 +492,8 @@ print_curr_locks(erts_lc_locked_locks_t *l_lcks)
"Currently these locks are locked by the %s thread:\n",
l_lcks->thread_name);
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
- print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
+ raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags,
+ l_lck->file, l_lck->line, "\n");
}
}
@@ -1002,7 +1009,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1014,7 +1022,7 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;
+ l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1055,13 +1063,14 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1129,7 +1138,8 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1141,7 +1151,7 @@ void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1232,15 +1242,15 @@ erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
}
void
-erts_lc_trylock(int locked, erts_lc_lock_t *lck)
+erts_lc_trylock_x(int locked, erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_trylock_flg(locked, lck, 0);
+ erts_lc_trylock_flg_x(locked, lck, 0, file, line);
}
void
-erts_lc_lock(erts_lc_lock_t *lck)
+erts_lc_lock_x(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_lock_flg(lck, 0);
+ erts_lc_lock_flg_x(lck, 0, file, line);
}
void
@@ -1254,9 +1264,9 @@ void erts_lc_might_unlock(erts_lc_lock_t *lck)
erts_lc_might_unlock_flg(lck, 0);
}
-void erts_lc_require_lock(erts_lc_lock_t *lck)
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_require_lock_flg(lck, 0);
+ erts_lc_require_lock_flg(lck, 0, file, line);
}
void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
@@ -1322,7 +1332,7 @@ erts_lc_init(void)
if (ethr_spinlock_init(&free_blocks_lock) != 0)
lc_abort();
- erts_tsd_key_create(&locks_key);
+ erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 068340abe7..3f7f417e61 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -35,6 +35,11 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
typedef struct {
int inited;
Sint16 id;
@@ -79,13 +84,16 @@ void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
-void erts_lc_trylock(int locked, erts_lc_lock_t *lck);
-void erts_lc_lock(erts_lc_lock_t *lck);
+void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
+ char* file, unsigned int line);
+void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
@@ -96,10 +104,11 @@ int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_require_lock(erts_lc_lock_t *lck);
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
@@ -116,4 +125,9 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+#define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__)
+#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__)
+
#endif /* #ifndef ERTS_LOCK_CHECK_H__ */
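With the wrapper macros above, existing erts_lc_lock()/erts_lc_trylock() call sites record their source location without being edited. A hedged illustration of the expansion (the lock variable is hypothetical):

    /* Under ERTS_ENABLE_LOCK_CHECK, a call site such as */
    erts_lc_lock(&my_lc_lock);
    /* now expands to */
    erts_lc_lock_x(&my_lc_lock, __FILE__, __LINE__);
    /* so print_curr_locks() can show the file and line that took each lock. */
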
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 5f75b0ac0b..6f44bf097b 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -236,7 +236,7 @@ void erts_lcnt_init() {
/* init tsd */
lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key);
+ ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
lcnt_lock();
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index a4fc91b510..75f7cd028b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -61,8 +61,14 @@
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
#include "ethread.h"
+
#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4126df6f34..40860e141c 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -876,7 +876,7 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
{
if (is_not_list(term) && is_not_nil(term)) return 0;
- *len = list_length(term);
+ *len = erts_list_length(term);
return 1;
}
@@ -1570,6 +1570,13 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
n = state = a;
+ /*
+ * clear any current dirty flags and dirty queue indicators,
+ * in case the application is shifting a job from one type
+ * of dirty scheduler to the other
+ */
+ n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
@@ -1600,22 +1607,15 @@ enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
{
#ifdef USE_THREADS
- erts_aint32_t state, n, a;
Process* proc = env->proc;
Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
Export* ep;
- a = erts_smp_atomic32_read_acqb(&proc->state);
- while (1) {
- n = state = a;
- if (!(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)))
- break;
- n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
- if (a == state)
- break;
- }
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
alloc_proc_psd(proc, &ep);
ERTS_VBUMP_ALL_REDS(proc);
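For context, a hedged sketch of the experimental dirty-NIF flow these functions belong to; the trailing arguments of enif_schedule_dirty_nif and the built-in enif_dirty_nif_finalizer are assumed from the experimental API of this era and may differ:

    /* Hedged sketch; do_heavy_computation() is hypothetical. */
    static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        ERL_NIF_TERM result = do_heavy_computation(env, argv[0]);
        /* hand the result back to a normal scheduler */
        return enif_schedule_dirty_nif_finalizer(env, result,
                                                 enif_dirty_nif_finalizer);
    }

    static ERL_NIF_TERM my_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* reschedule the job onto a dirty CPU scheduler */
        return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                       heavy_work, argc, argv);
    }
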
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 7613446f64..c12ba4d554 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -172,8 +172,8 @@ typedef ErlDrvThreadOpts ErlNifThreadOpts;
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
typedef enum
{
- ERL_NIF_DIRTY_JOB_CPU_BOUND = 1,
- ERL_NIF_DIRTY_JOB_IO_BOUND = 2
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
}ErlNifDirtyTaskFlags;
#endif
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index d4108067d0..fb6048b41f 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1681,7 +1681,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
reds = ERTS_PORT_REDS_INPUT;
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
- /* NOTE some windows drivers use ->ready_input for input and output */
+ /* NOTE some windows/ose drivers use ->ready_input
+ for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
io_tasks_executed++;
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 123253a057..1d30465ec9 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -185,11 +185,13 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 1
#endif
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
+ );
#endif
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 937881212a..37e1d07107 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -44,6 +44,7 @@
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
+
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -53,7 +54,11 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
+#ifndef ERTS_SCHED_MIN_SPIN
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+#else
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
+#endif
#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -148,7 +153,6 @@ int erts_sched_balance_util = 0;
Uint erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
Uint erts_no_dirty_cpu_schedulers;
-Uint erts_no_dirty_cpu_schedulers_online;
Uint erts_no_dirty_io_schedulers;
#endif
@@ -188,6 +192,13 @@ static ErtsAuxWorkData *aux_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL))
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL))
+#endif
+
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
@@ -198,6 +209,23 @@ do { \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#endif
+
#endif
@@ -207,11 +235,29 @@ static struct {
int online;
int curr_online;
int wait_curr_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_cpu_online;
+ int dirty_cpu_curr_online;
+ int dirty_cpu_wait_curr_online;
+ int dirty_io_online;
+ int dirty_io_curr_online;
+ int dirty_io_wait_curr_online;
+#endif
erts_smp_atomic32_t changing;
erts_smp_atomic32_t active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_t dirty_cpu_changing;
+ erts_smp_atomic32_t dirty_cpu_active;
+ erts_smp_atomic32_t dirty_io_changing;
+ erts_smp_atomic32_t dirty_io_active;
+#endif
struct {
int ongoing;
long wait_active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long dirty_cpu_wait_active;
+ long dirty_io_wait_active;
+#endif
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
} schdlr_sspnd;
@@ -467,7 +513,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#ifdef ERTS_SMP
static void handle_pending_exiters(ErtsProcList *);
-
+static void wake_scheduler(ErtsRunQueue *rq);
#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
@@ -507,7 +553,7 @@ void
erts_pre_init_process(void)
{
#ifdef USE_THREADS
- erts_tsd_key_create(&sched_data_key);
+ erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1306,6 +1352,9 @@ static ERTS_INLINE void
haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (current != ERTS_THR_PRGR_INVALID
&& !erts_thr_progress_equal(current, erts_thr_progress_current())) {
/*
@@ -1322,6 +1371,10 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in
{
int jix, max_jix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+
ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions)
@@ -1477,6 +1530,9 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work,
int waiting)
{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1561,6 +1617,9 @@ handle_async_ready(ErtsAuxWorkData *awdp,
int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
if (erts_check_async_ready(awdp->async_ready.queue)) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
@@ -1585,6 +1644,9 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
{
void *thr_prgr_p;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
#ifdef ERTS_SMP
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
@@ -1622,6 +1684,9 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
@@ -1655,11 +1720,7 @@ erts_alloc_ensure_handle_delayed_dealloc_call(int ix)
{
#ifdef DEBUG
ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
- return;
-#endif
- ASSERT(!esdp || ix == (int) esdp->no);
+ ASSERT(!esdp || (ERTS_SCHEDULER_IS_DIRTY(esdp) || ix == (int) esdp->no));
#endif
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
ERTS_SSI_AUX_WORK_DD);
@@ -1673,6 +1734,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
@@ -1712,6 +1776,9 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1759,6 +1826,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
if (!erts_thr_progress_has_reached_this(current, lop->later))
@@ -1917,6 +1987,14 @@ erts_smp_notify_check_children_needed(void)
for (i = 0; i < erts_no_schedulers; i++)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ for (i = 0; i < erts_no_dirty_cpu_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ for (i = 0; i < erts_no_dirty_io_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#endif
}
static ERTS_INLINE erts_aint32_t
@@ -2302,14 +2380,24 @@ try_set_sys_scheduling(void)
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(void)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp)
{
#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
&& try_set_sys_scheduling()) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
clear_sys_scheduling();
+ wake_scheduler(rq);
+ return 0;
+ }
+#endif
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
}
return 0;
#else
@@ -2615,6 +2703,8 @@ aux_thread(void *unused)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
erts_thr_progress_prepare_wait(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2682,7 +2772,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* be waiting in erl_sys_schedule()
*/
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule()) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
sched_waiting(esdp->no, rq);
@@ -2697,15 +2787,15 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
while (1) {
- aux_work = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_atomic32_read_acqb(&ssi->aux_work);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
- if (!thr_prgr_active) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && erts_thr_progress_update(esdp))
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -2720,6 +2810,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_prepare_wait(esdp);
}
+ ERTS_SCHED_FAIR_YIELD();
+
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -2775,6 +2867,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
#endif
+
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ ASSERT(esdp->no == 1);
+#endif
sched_waiting_sys(esdp->no, rq);
@@ -2795,7 +2891,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, working = 0);
ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
erl_sys_schedule(1); /* Might give us something to do */
dt = erts_do_time_read_and_reset();
@@ -2841,7 +2936,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -2863,7 +2958,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -2990,7 +3085,7 @@ wake_scheduler(ErtsRunQueue *rq)
#ifdef ERTS_DIRTY_SCHEDULERS
static void
-wake_dirty_scheduler(ErtsRunQueue *rq)
+wake_dirty_schedulers(ErtsRunQueue *rq, int one)
{
ErtsSchedulerSleepInfo *ssi;
ErtsSchedulerSleepList *sl;
@@ -3000,9 +3095,27 @@ wake_dirty_scheduler(ErtsRunQueue *rq)
sl = &rq->sleepers;
erts_smp_spin_lock(&sl->lock);
ssi = sl->list;
- if (!ssi)
+ if (!ssi) {
erts_smp_spin_unlock(&sl->lock);
- else {
+ if (one)
+ wake_scheduler(rq);
+ } else if (one) {
+ erts_aint32_t flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
+ }
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ erts_smp_spin_unlock(&sl->lock);
+
+ ERTS_THR_MEMORY_BARRIER;
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+ } else {
sl->list = NULL;
erts_smp_spin_unlock(&sl->lock);
@@ -3154,7 +3267,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
if (runq) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_scheduler(runq);
+ wake_dirty_schedulers(runq, 1);
else
#endif
wake_scheduler(runq);
@@ -3452,6 +3565,7 @@ suspend_run_queue(ErtsRunQueue *rq)
}
static void scheduler_ix_resume_wake(Uint ix);
+static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -3478,7 +3592,10 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
- scheduler_ix_resume_wake(rq->ix);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ scheduler_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -3509,20 +3626,28 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp;
+ ErtsMigrationPath *mp = NULL;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
+ }
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -3551,6 +3676,9 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (rq->ports.start) {
Port *prt;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -3586,15 +3714,26 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_aint32_t state;
Process *proc;
int notify = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int requeue;
+#endif
to_rq = NULL;
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
+ }
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 1;
+#endif
if (ERTS_PSFLG_BOUND & state) {
/* Bound processes get stuck here... */
proc->next = NULL;
@@ -3603,12 +3742,43 @@ evacuate_run_queue(ErtsRunQueue *rq,
else
sbpp->first = proc;
sbpp->last = proc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 0;
+#endif
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
+ erts_aint32_t old;
+ old = erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
+ } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
+ erts_aint32_t old;
+ old = erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
+ }
+ if (requeue) {
+#else
else {
+#endif
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
- to_rq = mp->prio[prio].runq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ /*
+ * dirty run queues evacuate only to run
+ * queue 0 during multi-scheduling blocking
+ */
+ to_rq = ERTS_RUNQ_IX(0);
+ else
+#endif
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_smp_runq_lock(to_rq);
@@ -4722,13 +4892,20 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
if (rq->wakeup_other > wakeup_other.limit) {
- int empty_rqs =
- erts_smp_atomic32_read_acqb(&no_empty_run_queues);
- if (flags & ERTS_RUNQ_FLG_PROTECTED)
- (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (empty_rqs != 0)
- wake_scheduler_on_empty_runq(rq);
- rq->wakeup_other = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting)
+ wake_dirty_schedulers(rq, 1);
+ else
+#endif
+ {
+ int empty_rqs =
+ erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (empty_rqs != 0)
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
}
}
rq->wakeup_other_reds = 0;
@@ -4889,11 +5066,17 @@ erts_early_init_scheduling(int no_schedulers)
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
#endif
+#ifndef ERTS_SCHED_MIN_SPIN
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
+#else
+ sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+#endif
}
int
@@ -5009,8 +5192,12 @@ erts_sched_set_wake_cleanup_threshold(char *str)
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp))
+ if (!esdp)
awdp->sched_id = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
+#endif
else
awdp->sched_id = (int) esdp->no;
awdp->esdp = esdp;
@@ -5076,11 +5263,11 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
- esdp->dirty_no = (Uint) num;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
}
else {
esdp->no = (Uint) num;
- esdp->dirty_no = 0;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
}
#else
esdp->no = (Uint) num;
@@ -5111,7 +5298,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
}
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online)
+erts_init_scheduling(int no_schedulers, int no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+ int no_dirty_io_schedulers
+#endif
+ )
{
int ix, n, no_ssi;
char *daww_ptr;
@@ -5133,6 +5325,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
+ ASSERT(no_dirty_cpu_schedulers >= 1);
+ ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
+ ASSERT(no_dirty_cpu_schedulers_online >= 1);
+#endif
/* Create and initialize run queues */
@@ -5169,10 +5367,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix))
erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
- rq->sleepers.list = NULL;
- }
+ rq->sleepers.list = NULL;
#endif
#endif
@@ -5236,6 +5433,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
n = (int) no_schedulers;
erts_no_schedulers = n;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
+#endif
/* Create and initialize scheduler sleep info */
#ifdef ERTS_SMP
@@ -5267,21 +5468,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
- erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
erts_smp_atomic32_init_nob(&ssi->flags, 0);
- ssi->event = NULL; /* initialized in sched_thread_func */
+ ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
aligned_dirty_io_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
- erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
erts_smp_atomic32_init_nob(&ssi->flags, 0);
- ssi->event = NULL; /* initialized in sched_thread_func */
+ ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
#endif
@@ -5314,8 +5515,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_aligned_dirty_cpu_scheduler_data =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_DATA,
- erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_CPU_RUNQ, NULL, 0);
@@ -5323,8 +5524,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_aligned_dirty_io_scheduler_data =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_DATA,
- erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_IO_RUNQ, NULL, 0);
@@ -5354,6 +5555,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
schdlr_sspnd.curr_online = no_schedulers;
schdlr_sspnd.msb.ongoing = 0;
erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0);
+ schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers);
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0);
+ schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers);
+#endif
schdlr_sspnd.msb.procs = NULL;
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
@@ -5379,6 +5590,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ }
+
+ schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#endif
erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
@@ -5497,9 +5723,16 @@ free_proxy_proc(Process *proxy)
erts_free(ERTS_ALC_T_PROC, proxy);
}
+#define ERTS_ENQUEUE_NOT 0
+#define ERTS_ENQUEUE_NORMAL_QUEUE 1
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
+#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
+#endif
static ERTS_INLINE int
-check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p,
+check_enqueue_in_prio_queue(Process *c_p,
+ erts_aint32_t *prq_prio_p,
erts_aint32_t *newp,
erts_aint32_t actual)
{
@@ -5511,56 +5744,105 @@ check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p,
*prq_prio_p = aprio;
#ifdef ERTS_DIRTY_SCHEDULERS
- if (!(actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))) {
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+ /*
+ * If we have system tasks of a priority higher
+	 * than or equal to the user priority, we enqueue
+	 * on the ordinary run queue and take care of
+ * those system tasks first.
+ */
+ if (actual & ERTS_PSFLG_ACTIVE_SYS) {
+ erts_aint32_t uprio, stprio, qmask;
+ uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+ if (aprio < uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ qmask = c_p->sys_task_qs->qmask;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ switch (qmask & -qmask) {
+ case MAX_BIT:
+ stprio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ stprio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ stprio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ stprio = PRIORITY_LOW;
+ break;
+ default:
+ stprio = PRIORITY_LOW+1;
+ break;
+ }
+ if (stprio <= uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ }
+
+ /* Enqueue in dirty run queue if not already enqueued */
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+ return ERTS_ENQUEUE_NOT; /* already in queue */
+ if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
+ *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+ }
+ *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ }
+
+ enqueue_normal_runq:
#endif
- max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
- max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
- max_qbit &= -max_qbit;
- /*
- * max_qbit now either contain bit set for highest prio queue or a bit
- * out of range (which will have a value larger than valid range).
- */
+ max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+ /*
+     * max_qbit now either contains the bit set for the highest prio queue or
+     * a bit out of range (which will have a value larger than the valid range).
+ */
- if (qbit >= max_qbit)
- return 0; /* Already queued in higher or equal prio */
+ if (qbit >= max_qbit)
+ return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
- /* Need to enqueue (if already enqueued, it is in lower prio) */
- *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+ /* Need to enqueue (if already enqueued, it is in lower prio) */
+ *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
- if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
- != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
- /*
- * Process struct already enqueued, or actual prio not
- * equal to user prio, i.e., enqueue using proxy.
- */
- return -1;
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- } else {
- if (actual & ERTS_PSFLG_DIRTY_CPU_PROC)
- *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
- else
- *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -ERTS_ENQUEUE_NORMAL_QUEUE;
}
-#endif
/*
* Enqueue using process struct.
*/
*newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
*newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
- return 1;
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
}
/*
- * scheduler_out_process() return with c_rq locked.
+ * schedule_out_process() returns with c_rq locked.
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
{
erts_aint32_t a, e, n, enq_prio = -1;
- int res = 0;
int enqueue; /* < 0 -> use proxy */
+ Process* sched_p;
+ ErtsRunQueue* runq;
+#ifdef ERTS_SMP
+ int check_emigration_need;
+#endif
a = state;
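
One detail in the hunk above that is easy to miss: qmask & -qmask isolates the lowest set bit of the system-task queue mask, and because the maximum priority uses the least significant bit this yields the highest priority that has pending system tasks. A standalone sketch of the mapping; the concrete bit values below are assumed to follow the usual ERTS layout and are not taken from this patch:

/* Lowest-set-bit trick mapping a system-task queue mask to its highest
 * pending priority; bit assignments assumed (max = least significant). */
enum { MAX_BIT = 1, HIGH_BIT = 2, NORMAL_BIT = 4, LOW_BIT = 8 };
enum { PRIO_MAX, PRIO_HIGH, PRIO_NORMAL, PRIO_LOW, PRIO_NONE };

static int
highest_sys_task_prio(unsigned qmask)
{
    switch (qmask & -qmask) {   /* isolate the lowest set bit */
    case MAX_BIT:    return PRIO_MAX;
    case HIGH_BIT:   return PRIO_HIGH;
    case NORMAL_BIT: return PRIO_NORMAL;
    case LOW_BIT:    return PRIO_LOW;
    default:         return PRIO_NONE;
    }
}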
@@ -5569,20 +5851,20 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
if (a & ERTS_PSFLG_ACTIVE_SYS
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
- enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- if (!enqueue) {
-
+ switch (enqueue) {
+ case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
@@ -5595,60 +5877,76 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
if (proxy)
free_proxy_proc(proxy);
- }
- else {
- Process *sched_p;
- ErtsRunQueue *runq;
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+ erts_smp_runq_lock(c_rq);
+ return 0;
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_PSFLG_DIRTY_CPU_PROC & a)
- runq = ERTS_DIRTY_CPU_RUNQ;
- else if (ERTS_PSFLG_DIRTY_IO_PROC & a)
- runq = ERTS_DIRTY_IO_RUNQ;
- else
+ case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ runq = ERTS_DIRTY_CPU_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
+#endif
+ break;
+
+ case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ runq = ERTS_DIRTY_IO_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
+#endif
+ break;
#endif
#endif
- runq = erts_get_runq_proc(p);
- if (enqueue < 0)
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- else {
- sched_p = p;
- if (proxy)
- free_proxy_proc(proxy);
- }
+ default:
+ ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+ runq = erts_get_runq_proc(p);
#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & n)
-#ifdef ERTS_DIRTY_SCHEDULERS
- && !(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+ check_emigration_need = !(ERTS_PSFLG_BOUND & n);
#endif
- ) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
- runq = new_runq;
- }
+ break;
+ }
+
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
+
+#ifdef ERTS_SMP
+ if (check_emigration_need) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
+ runq = new_runq;
}
+ }
#endif
- ASSERT(runq);
- res = 1;
- erts_smp_runq_lock(runq);
+ ASSERT(runq);
+
+ erts_smp_runq_lock(runq);
- /* Enqueue the process */
- enqueue_process(runq, (int) enq_prio, sched_p);
+ /* Enqueue the process */
+ enqueue_process(runq, (int) enq_prio, sched_p);
- if (runq == c_rq)
- return res;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
- }
+ if (runq == c_rq)
+ return 1;
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
erts_smp_runq_lock(c_rq);
- return res;
+ return 1;
}
static ERTS_INLINE void
@@ -5704,7 +6002,7 @@ change_proc_schedule_state(Process *p,
erts_aint32_t e;
n = e = a;
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
if (a & ERTS_PSFLG_FREE)
break; /* We don't want to schedule free processes... */
@@ -5725,13 +6023,13 @@ change_proc_schedule_state(Process *p,
* process may be in a run queue via proxy, need
* further inspection...
*/
- enqueue = check_enqueue_in_prio_queue(enq_prio_p, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
}
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (enqueue == 0 && n == a)
+ if (enqueue == ERTS_ENQUEUE_NOT && n == a)
break;
}
@@ -5765,7 +6063,7 @@ schedule_process(Process *p, erts_aint32_t in_state)
ERTS_PSFLG_ACTIVE,
&state,
&enq_prio);
- if (enqueue)
+ if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
enq_prio);
@@ -5792,14 +6090,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
if (a & ERTS_PSFLG_FREE)
return; /* We don't want to schedule free processes... */
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
- enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (a == n && !enqueue)
+ if (a == n && enqueue == ERTS_ENQUEUE_NOT)
goto cleanup;
}
@@ -5815,7 +6113,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
- if (enqueue) {
+ if (enqueue != ERTS_ENQUEUE_NOT) {
Process *sched_p;
if (enqueue > 0)
sched_p = p;
@@ -5938,6 +6236,12 @@ static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+}
+
+static void
+scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
@@ -6028,12 +6332,20 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
erts_aint32_t flgs;
erts_aint32_t changing;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ ? ERTS_DIRTY_SCHEDULER_NO(esdp)
+ : esdp->no);
+#else
long no = (long) esdp->no;
+#endif
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
@@ -6041,21 +6353,305 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_aint32_t aux_work;
int thr_prgr_active = 1;
ErtsStuckBoundProcesses sbp = {NULL, NULL};
+ int* ss_onlinep;
+ int* ss_curr_onlinep;
+ int* ss_wait_curr_onlinep;
+ long* ss_wait_activep;
+ long ss_wait_active_target;
+ erts_smp_atomic32_t* ss_changingp;
+ erts_smp_atomic32_t* ss_activep;
/*
* Schedulers may be suspended in two different ways:
* - A scheduler may be suspended since it is not online.
* All schedulers with scheduler ids greater than
- * schdlr_sspnd.online are suspended.
+ * schdlr_sspnd.online are suspended; same for dirty
+ * schedulers and schdlr_sspnd.dirty_cpu_online and
+ * schdlr_sspnd.dirty_io_online.
* - Multi scheduling is blocked. All schedulers except the
- * scheduler with scheduler id 1 are suspended.
+ * scheduler with scheduler id 1 are suspended, and all
+ * dirty CPU and dirty I/O schedulers are suspended.
*
* Regardless of why a scheduler is suspended, it ends up here.
*/
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1);
+
#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ }
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ } else
+#endif
+ evacuate_run_queue(esdp->run_queue, &sbp);
+
+ erts_smp_runq_unlock(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ erts_sched_check_cpu_bind_prep_suspend(esdp);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_inactive);
+
+ sched_wall_time_change(esdp, 0);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_cpu_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_cpu_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_cpu_active;
+ } else {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_io_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_io_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_io_active;
+ }
+ ss_wait_active_target = 0;
+ }
+ else
+#endif
+ {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ ss_onlinep = &schdlr_sspnd.online;
+ ss_curr_onlinep = &schdlr_sspnd.curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online;
+ ss_changingp = &schdlr_sspnd.changing;
+ ss_wait_activep = &schdlr_sspnd.msb.wait_active;
+ ss_activep = &schdlr_sspnd.active;
+ ss_wait_active_target = 1;
+ }
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == *ss_wait_activep)
+ wake = 1;
+ if (active_schedulers == ss_wait_active_target) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
+ }
+
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > *ss_onlinep && curr_online) {
+ (*ss_curr_onlinep)--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= *ss_onlinep && !curr_online) {
+ (*ss_curr_onlinep)++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && *ss_curr_onlinep == *ss_wait_curr_onlinep)
+ wake = 1;
+ if (*ss_onlinep == *ss_curr_onlinep) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
+ }
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
+ }
+
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ while (1) {
+ erts_aint32_t qmask;
+ erts_aint32_t flgs;
+
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work|qmask) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
+ (aux_work && erts_thr_progress_update(esdp)))
+ erts_thr_progress_leader_update(esdp);
+ if (qmask) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ } else
+#endif
+ {
+ erts_smp_runq_lock(esdp->run_queue);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ }
+ }
+ }
+
+ if (!aux_work) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
#endif
+ {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ erts_thr_progress_finalize_wait(esdp);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+ }
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ }
+
+ active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && *ss_onlinep == active_schedulers) {
+ erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ ASSERT(no <= *ss_onlinep);
+ ASSERT(!ongoing_multi_scheduling_block());
+
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ ASSERT(curr_online);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
+
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+
+ erts_smp_runq_lock(esdp->run_queue);
+ non_empty_runq(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ schedule_bound_processes(esdp->run_queue, &sbp);
+
+ erts_sched_check_cpu_bind_post_suspend(esdp);
+ }
+}
+
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
+static void
+suspend_scheduler(ErtsSchedulerData *esdp)
+{
+ erts_aint32_t flgs;
+ erts_aint32_t changing;
+ long no = (long) esdp->no;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long active_schedulers;
+ int curr_online = 1;
+ int wake = 0;
+ erts_aint32_t aux_work;
+ int thr_prgr_active = 1;
+ ErtsStuckBoundProcesses sbp = {NULL, NULL};
+
+ /*
+ * Schedulers may be suspended in two different ways:
+ * - A scheduler may be suspended since it is not online.
+ * All schedulers with scheduler ids greater than
+ * schdlr_sspnd.online are suspended.
+ * - Multi scheduling is blocked. All schedulers except the
+ * scheduler with scheduler id 1 are suspended.
+ *
+ * Regardless of why a scheduler is suspended, it ends up here.
+ */
+
ASSERT(no != 1);
evacuate_run_queue(esdp->run_queue, &sbp);
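
The rewritten suspend_scheduler() above avoids duplicating the suspend loop for normal, dirty CPU and dirty I/O schedulers: it first picks the matching set of counters through pointers (ss_onlinep, ss_activep and friends) and then runs one generic wait loop over them. A condensed sketch of that selection step, with simplified stand-in types rather than the real schdlr_sspnd layout:

/* Pick the bookkeeping set matching the scheduler type, then one generic
 * suspend/wait loop can operate on it.  Simplified stand-in types. */
typedef struct { int online, curr_online, wait_curr_online, active; } Counters;

typedef struct { Counters normal, dirty_cpu, dirty_io; } SuspendState;

enum SchedKind { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO };

static Counters *
select_counters(SuspendState *ss, enum SchedKind kind)
{
    switch (kind) {
    case SCHED_DIRTY_CPU: return &ss->dirty_cpu;
    case SCHED_DIRTY_IO:  return &ss->dirty_io;
    default:              return &ss->normal;
    }
}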
@@ -6219,6 +6815,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_sched_check_cpu_bind_post_suspend(esdp);
}
+#endif
+
ErtsSchedSuspendResult
erts_schedulers_state(Uint *total,
Uint *online,
@@ -6230,40 +6828,315 @@ erts_schedulers_state(Uint *total,
{
int res = ERTS_SCHDLR_SSPND_EINVAL;
erts_aint32_t changing;
- if (total) {
- ASSERT(online);
- ASSERT(active);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
- else {
- *active = *online = schdlr_sspnd.online;
- if (ongoing_multi_scheduling_block())
- *active = 1;
- res = ERTS_SCHDLR_SSPND_DONE;
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- *total = erts_no_schedulers;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ else {
+ if (active)
+ *active = schdlr_sspnd.online;
+ if (online)
+ *online = schdlr_sspnd.online;
+ if (ongoing_multi_scheduling_block() && active)
+ *active = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (dirty_cpu_online)
+ *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online;
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
}
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (total)
+ *total = erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
if (dirty_cpu)
*dirty_cpu = erts_no_dirty_cpu_schedulers;
- if (dirty_cpu_online)
- *dirty_cpu_online = erts_no_dirty_cpu_schedulers_online;
if (dirty_io)
*dirty_io = erts_no_dirty_io_schedulers;
-#else
- if (dirty_cpu)
- *dirty_cpu = 0;
- if (dirty_cpu_online)
- *dirty_cpu_online = 0;
- if (dirty_io)
- *dirty_io = 0;
#endif
return res;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+ErtsSchedSuspendResult
+erts_set_schedulers_online(Process *p,
+ ErtsProcLocks plocks,
+ Sint new_no,
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ )
+{
+ ErtsSchedulerData *esdp;
+ int ix, res = -1, no, have_unlocked_plocks, end_wait;
+ erts_aint32_t changing = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+ int dirty_no, change_dirty;
+#endif
+
+ if (new_no < 1)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#endif
+ else if (erts_no_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+ end_wait = 0;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ have_unlocked_plocks = 0;
+ no = (int) new_no;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers);
+ if (dirty_only) {
+ if (no > schdlr_sspnd.online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return ERTS_SCHDLR_SSPND_EINVAL;
+ }
+ dirty_no = no;
+ } else {
+ /*
+ * Adjust the number of dirty CPU schedulers online relative to the
+ * adjustment made to the number of normal schedulers online.
+ */
+ int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers;
+ int onln_pct = no*total_pct/schdlr_sspnd.online;
+ dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100;
+ if (dirty_no == 0)
+ dirty_no = 1;
+ ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers);
+ }
+#endif
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+#endif
+ if (changing) {
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ }
+ else {
+ int online = *old_no = schdlr_sspnd.online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_online = schdlr_sspnd.dirty_cpu_online;
+
+ if (dirty_only) {
+ *old_no = schdlr_sspnd.dirty_cpu_online;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ change_dirty = 1;
+ } else {
+#endif
+ if (no == schdlr_sspnd.online) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ dirty_only = 1;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online)
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = 1;
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online);
+ }
+#endif
+ if (res == -1)
+ {
+ int increase = (no > online);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.online = no;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ } else
+ increase = (dirty_no > dirty_online);
+ if (change_dirty) {
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.dirty_cpu_online = dirty_no;
+ }
+#endif
+ if (increase) {
+ int ix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ schdlr_sspnd.wait_curr_online = no;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ change_no_used_runqs(no);
+
+ for (ix = online; ix < no; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ }
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else /* if (no < online) */ {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+ if (dirty_only) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else
+#endif
+ {
+ if (p->scheduler_data->no <= no) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ schdlr_sspnd.wait_curr_online = no;
+ }
+ else {
+ /*
+ * Yield! Current process needs to migrate
+ * before bif returns.
+ */
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE;
+ schdlr_sspnd.wait_curr_online = no+1;
+ }
+
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = no; ix < online; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+
+ change_no_used_runqs(no);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
+ }
+ }
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ if (!dirty_only)
+#endif
+ {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ end_wait = 1;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ }
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online);
+ if (!dirty_only)
+#endif
+ {
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
+ if (have_unlocked_plocks)
+ erts_smp_proc_lock(p, plocks);
+ }
+
+ return res;
+}
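
A worked example of the proportional scaling in the new erts_set_schedulers_online() above, using assumed figures (8 normal schedulers with online going from 8 to 4, and 4 dirty CPU schedulers currently online); the numbers are illustrative, not taken from the patch:

#include <stdio.h>

int main(void)
{
    /* assumed example configuration */
    int no_schedulers = 8, online = 8, new_online = 4;
    int no_dirty_cpu = 4, dirty_cpu_online = 4;

    int total_pct = no_dirty_cpu * 100 / no_schedulers;  /* 50 */
    int onln_pct  = new_online * total_pct / online;     /* 25 */
    int dirty_no  = dirty_cpu_online * onln_pct / 100;   /* 1  */
    if (dirty_no == 0)
        dirty_no = 1;

    printf("dirty cpu schedulers online: %d -> %d\n",
           dirty_cpu_online, dirty_no);                  /* 4 -> 1 */
    return 0;
}

Integer division makes the result quite coarse for small scheduler counts, which is presumably why the code clamps the result to at least one dirty CPU scheduler online.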
+
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
@@ -6352,10 +7225,6 @@ erts_set_schedulers_online(Process *p,
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
wake_scheduler(rq);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- wake_dirty_scheduler(ERTS_DIRTY_CPU_RUNQ);
- wake_dirty_scheduler(ERTS_DIRTY_IO_RUNQ);
-#endif
}
}
@@ -6396,15 +7265,24 @@ erts_set_schedulers_online(Process *p,
return res;
}
+#endif
+
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
- int ix, res, have_unlocked_plocks = 0;
+ int ix, res, have_unlocked_plocks = 0, online;
erts_aint32_t changing;
ErtsProcList *plp;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+#endif
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -6414,10 +7292,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
p->flags |= F_HAVE_BLCKD_MSCHED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0);
+#endif
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- }
- else {
+ } else {
int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
@@ -6429,6 +7310,35 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1);
+ ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags)
+ & ERTS_SSI_FLG_SUSPENDED));
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+#endif
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -6447,6 +7357,37 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online);
+#endif
change_no_used_runqs(1);
for (ix = 1; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
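
The multi-scheduling blocking hunks above repeat one pattern for dirty CPU and dirty I/O schedulers: set the SUSPENDED flag on every sleep-info entry, wake the dirty schedulers so they observe it, then wait on the condition variable until the active count reaches the wait target. A rough sketch of that pattern, with plain pthreads standing in for the erts_smp_* wrappers:

#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;     /* plays the role of schdlr_sspnd.mtx */
    pthread_cond_t  cnd;
    int active;              /* dirty schedulers still running */
    int wait_active;         /* 0 while multi-scheduling is blocked */
} BlockState;

/* Caller holds bs->mtx; suspend_and_wake_all() marks every dirty
 * scheduler suspended and wakes it, mirroring the loops in the patch. */
static void
block_dirty_schedulers(BlockState *bs, void (*suspend_and_wake_all)(void))
{
    bs->wait_active = 0;
    suspend_and_wake_all();
    while (bs->active != bs->wait_active)
        pthread_cond_wait(&bs->cnd, &bs->mtx);
}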
@@ -6487,6 +7428,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
& erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
@@ -6527,12 +7469,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
p->flags &= ~F_HAVE_BLCKD_MSCHED;
schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
- /* No schedulers to resume */
+ /* No normal schedulers to resume */
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else {
- int online = schdlr_sspnd.online;
+ online = schdlr_sspnd.online;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
@@ -6547,6 +7489,27 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online;
+ for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers;
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+#endif
res = ERTS_SCHDLR_SSPND_DONE;
}
}
@@ -6673,7 +7636,11 @@ sched_thread_func(void *vesdp)
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (no != 1)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+#else
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+#endif
}
if (no == 1) {
@@ -6710,7 +7677,9 @@ sched_dirty_cpu_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = esdp->dirty_no;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ ASSERT(no != 0);
ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
callbacks.wakeup = thr_prgr_wakeup;
@@ -6738,6 +7707,24 @@ sched_dirty_cpu_thread_func(void *vesdp)
#endif
erts_thread_init_float();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
process_main();
/* No schedulers should *ever* terminate */
erl_exit(ERTS_ABORT_EXIT,
@@ -6751,7 +7738,9 @@ sched_dirty_io_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = esdp->dirty_no;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ ASSERT(no != 0);
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
callbacks.wakeup = thr_prgr_wakeup;
@@ -6779,6 +7768,24 @@ sched_dirty_io_thread_func(void *vesdp)
#endif
erts_thread_init_float();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
process_main();
/* No schedulers should *ever* terminate */
erl_exit(ERTS_ABORT_EXIT,
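
Both dirty scheduler thread functions above end their start-up with the same online hand-shake: each thread decrements its *_curr_online counter under the mutex, the thread that reaches the wait target broadcasts, and dirty scheduler number 1 waits until everyone has checked in before clearing the WAITER flag. A simplified sketch of that hand-shake, with plain pthreads standing in for the erts_smp_* primitives:

#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t  cnd;
    int curr_online;         /* starts above the target (boot strapping) */
    int wait_curr_online;    /* the configured online count */
} OnlineSync;

static void
dirty_sched_checkin(OnlineSync *s, unsigned no)
{
    pthread_mutex_lock(&s->mtx);
    if (--s->curr_online == s->wait_curr_online && no != 1)
        pthread_cond_broadcast(&s->cnd);
    if (no == 1) {
        while (s->curr_online != s->wait_curr_online)
            pthread_cond_wait(&s->cnd, &s->mtx);
    }
    pthread_mutex_unlock(&s->mtx);
}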
@@ -6795,16 +7802,23 @@ void
erts_start_schedulers(void)
{
int res = 0;
- Uint actual = 0;
+ Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
opts.detached = 1;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ opts.name = malloc(80);
+#endif
+
#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "runq_supervisor");
+#endif
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -6826,17 +7840,27 @@ erts_start_schedulers(void)
res = ENOTSUP;
}
- while (actual < wanted) {
+ for (actual = 0; actual < wanted; actual++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
- actual++;
- ASSERT(actual == esdp->no);
- res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+
+ ASSERT(actual == esdp->no - 1);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "scheduler_%d", actual + 1);
+#endif
+
+#ifdef __OSE__
+ /* This should be done in the bind strategy */
+ opts.coreNo = (actual+1) % ose_num_cpus();
+#endif
+
+ res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
+
if (res != 0) {
- actual--;
- break;
+ break;
}
}
-
+
erts_no_schedulers = actual;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -6845,12 +7869,18 @@ erts_start_schedulers(void)
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
+#endif
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
+#endif
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -6861,6 +7891,14 @@ erts_start_schedulers(void)
ERTS_THR_MEMORY_BARRIER;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "aux");
+#endif
+
+#ifdef __OSE__
+ opts.coreNo = 0;
+#endif /* __OSE__ */
+
res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
if (res != 0)
erl_exit(1, "Failed to create aux thread\n");
@@ -6880,6 +7918,10 @@ erts_start_schedulers(void)
actual, actual == 1 ? " was" : "s were");
erts_send_error_to_logger_nogl(dsbufp);
}
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(opts.name);
+#endif
}
#endif /* ERTS_SMP */
@@ -7995,7 +9037,6 @@ Process *schedule(Process *p, int calls)
|| flags & ERTS_RUNQ_FLG_NONEMPTY);
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
-
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
@@ -8006,21 +9047,31 @@ Process *schedule(Process *p, int calls)
erts_sched_check_cpu_bind(esdp);
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED))
+ suspend_scheduler(esdp);
+#endif
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ {
erts_aint32_t aux_work;
- int leader_update = erts_thr_progress_update(esdp);
+ int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
+ : erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work | leader_update) {
+ if (aux_work | leader_update | ERTS_SCHED_FAIR) {
erts_smp_runq_unlock(rq);
if (leader_update)
erts_thr_progress_leader_update(esdp);
+ else if (ERTS_SCHED_FAIR)
+ ERTS_SCHED_FAIR_YIELD();
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_smp_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !erts_thr_progress_is_blocking());
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -8035,6 +9086,17 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
+ /*
+ * TODO: if halt in progress, need to put the dirty scheduler
+ * to sleep somewhere around here to prevent it from picking up
+ * new work
+ */
+ }
+ else
+#endif
+
if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
|| (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
/* Prepare for scheduler wait */
@@ -8077,7 +9139,8 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (fcalls > input_reductions && prepare_for_sys_schedule())) {
+ (fcalls > input_reductions &&
+ prepare_for_sys_schedule(esdp))) {
/*
* Schedule system-level activities.
*/
@@ -8182,11 +9245,15 @@ Process *schedule(Process *p, int calls)
+ ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
#ifdef ERTS_DIRTY_SCHEDULERS
- /* if a non-dirty scheduler picks up a process marked as already being
- in a dirty run queue, just drop it and go get another process */
- if (state & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) &&
- !ERTS_SCHEDULER_IS_DIRTY(esdp))
- goto pick_next_process;
+ ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) !=
+ (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC));
+ if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+ ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) ||
+ (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS))
+ goto pick_next_process;
+ state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
+ }
#endif
if (!(state & ERTS_PSFLG_PROXY))
@@ -8322,7 +9389,11 @@ Process *schedule(Process *p, int calls)
if (state & ERTS_PSFLG_RUNNING_SYS) {
reds -= execute_sys_tasks(p, &state, reds);
- if (reds <= 0) {
+ if (reds <= 0
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))
+#endif
+ ) {
p->fcalls = reds;
goto sched_out_proc;
}
@@ -9379,7 +10450,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Check for errors.
*/
- if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) {
+ if (is_not_atom(mod) || is_not_atom(func) || ((arity = erts_list_length(args)) < 0)) {
so->error_code = BADARG;
goto error;
}
@@ -10087,7 +11158,7 @@ save_pending_exiter(Process *p)
erts_smp_runq_unlock(rq);
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- wake_dirty_scheduler(rq);
+ wake_dirty_schedulers(rq, 0);
else
#endif
wake_scheduler(rq);
@@ -11148,6 +12219,10 @@ void erl_halt(int code)
if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
+ ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
+#endif
erts_halt_code = code;
notify_reap_ports_relb();
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index dcb9251d0d..ed6dadbffa 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -109,7 +109,6 @@ extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
extern Uint erts_no_dirty_cpu_schedulers;
-extern Uint erts_no_dirty_cpu_schedulers_online;
extern Uint erts_no_dirty_io_schedulers;
#endif
extern Uint erts_no_run_queues;
@@ -544,6 +543,21 @@ typedef struct {
#endif
} ErtsAuxWorkData;
+#ifdef ERTS_DIRTY_SCHEDULERS
+typedef enum {
+ ERTS_DIRTY_CPU_SCHEDULER,
+ ERTS_DIRTY_IO_SCHEDULER
+} ErtsDirtySchedulerType;
+
+typedef union {
+ struct {
+ ErtsDirtySchedulerType type: 1;
+ unsigned num: 31;
+ } s;
+ Uint no;
+} ErtsDirtySchedId;
+#endif
+
struct ErtsSchedulerData_ {
/*
* Keep X registers first (so we get as many low
@@ -570,7 +584,7 @@ struct ErtsSchedulerData_ {
Process *current_process;
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
- Uint dirty_no; /* Scheduler number for dirty schedulers */
+ ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
#endif
Port *current_port;
ErtsRunQueue *run_queue;
@@ -607,6 +621,13 @@ extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
extern ErtsSchedulerData *erts_scheduler_data;
#endif
+#ifdef ERTS_SCHED_FAIR
+#define ERTS_SCHED_FAIR_YIELD() ETHR_YIELD()
+#else
+#define ERTS_SCHED_FAIR 0
+#define ERTS_SCHED_FAIR_YIELD()
+#endif
+
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#endif
@@ -1292,6 +1313,8 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
&erts_aligned_run_queues[(IX)].runq)
#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
#else
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#endif
@@ -1305,21 +1328,37 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
&erts_aligned_dirty_io_scheduler_data[(IX)].esd)
+#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
+ ((ESDP)->dirty_no.s.num)
+#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
+ ((ESDP)->dirty_no.s.type)
#ifdef ERTS_SMP
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
- ((ESDP)->dirty_no != 0)
+ ((ESDP)->dirty_no.s.num != 0)
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
+ ((ESDP)->dirty_no.s.type == 0)
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
+ ((ESDP)->dirty_no.s.type == 1)
#else
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
#endif
#else
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
#endif
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int);
+void erts_init_scheduling(int, int
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int, int, int
+#endif
+ );
int erts_set_gc_state(Process *c_p, int enable);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
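
The ErtsDirtySchedId union and the ERTS_DIRTY_SCHEDULER_NO/TYPE and ERTS_SCHEDULER_IS_DIRTY* macros introduced in this header pack the dirty scheduler type into one bit and its number into the remaining 31, with num == 0 marking a normal scheduler and type 0/1 distinguishing dirty CPU from dirty I/O. A small standalone illustration of the same packing; the plain unsigned bit-fields below are a simplification of the real typedef:

#include <stdio.h>

/* Simplified version of ErtsDirtySchedId: one type bit, 31 number bits. */
typedef union {
    struct {
        unsigned type : 1;   /* 0 = dirty CPU, 1 = dirty I/O */
        unsigned num  : 31;  /* 0 means "not a dirty scheduler" */
    } s;
    unsigned no;
} DirtySchedId;

int main(void)
{
    DirtySchedId id = { .s = { .type = 1, .num = 3 } };
    printf("is dirty: %d, is dirty io: %d, number: %u\n",
           id.s.num != 0, id.s.type == 1, (unsigned) id.s.num);
    return 0;
}

Keeping num == 0 for normal schedulers is what lets ERTS_SCHEDULER_IS_DIRTY() remain a single field test.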
@@ -1526,7 +1565,11 @@ ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
Sint new_no,
- Sint *old_no);
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ );
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
int erts_is_multi_scheduling_blocked(void);
@@ -2009,12 +2052,6 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
#endif
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
-
-#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
-
-#else
-
ERTS_GLB_INLINE void
erts_smp_runq_lock(ErtsRunQueue *rq)
{
@@ -2023,6 +2060,10 @@ erts_smp_runq_lock(ErtsRunQueue *rq)
#endif
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
+
#endif
ERTS_GLB_INLINE int
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index a611b52af2..23e5bf737f 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -659,7 +659,7 @@ static void shrink(Process *p, Eterm* ret)
} else {
int needed = 4;
if (is_list(hi) && is_list(lo)) {
- needed = 2*list_length(hi);
+ needed = 2*erts_list_length(hi);
}
if (HeapWordsLeft(p) < needed) {
BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 2db5df06b4..82cc68222d 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -117,7 +117,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i));
+ "pix_lock", make_small(i), 1);
#else
erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
@@ -901,7 +901,7 @@ erts_pid2proc_opt(Process *c_p,
busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_proc_lc_trylock(proc, need_locks, !busy);
+ erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
if (!busy)
@@ -1001,8 +1001,8 @@ erts_pid2proc_opt(Process *c_p,
void
erts_proc_lock_init(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
int i;
+#if ERTS_PROC_LOCK_OWN_IMPL
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_smp_atomic32_init_nob(&p->lock.flags,
@@ -1013,25 +1013,33 @@ erts_proc_lock_init(Process *p)
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
p->lock.queue[i] = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
+ erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1,__FILE__,__LINE__);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ int do_lock_count = 1;
+#else
+ int do_lock_count = 0;
+#endif
+
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
+ do_lock_count);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
@@ -1210,50 +1218,51 @@ void erts_lcnt_enable_proc_lock_count(int enable)
#if ERTS_PROC_LOCK_OWN_IMPL
void
-erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
}
void
-erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
+erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char* file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
}
@@ -1319,7 +1328,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
}
void
-erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
+ unsigned int line)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
@@ -1327,29 +1337,29 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
- erts_lc_require_lock(&p->lock.main.lc);
+ erts_lc_require_lock(&p->lock.main.lc, file, line);
if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_require_lock(&p->lock.link.lc);
+ erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
- erts_lc_require_lock(&p->lock.msgq.lc);
+ erts_lc_require_lock(&p->lock.msgq.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
- erts_lc_require_lock(&p->lock.status.lc);
+ erts_lc_require_lock(&p->lock.status.lc, file, line);
#endif
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 9dd503f3cb..052d992d3f 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -215,7 +215,7 @@ typedef struct erts_proc_lock_t_ {
/* Lock counter implementation */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
#endif
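These erl_process_lock.h changes are part of a wider switch in this diff: the __FILE__/__LINE__ plumbing is now keyed on ERTS_ENABLE_LOCK_POSITION instead of ERTS_ENABLE_LOCK_COUNT. The recurring pattern is one function body compiled under either a plain signature or an *_x signature that carries the call site, with a macro keeping call sites uniform. A hedged, self-contained sketch of that pattern (names invented, not ERTS code):

    #include <stdio.h>

    #define ENABLE_LOCK_POSITION 1      /* toggle analogous to the ERTS define */

    typedef struct { int held; } my_lock_t;

    /* One body, two possible headers: with position tracking the function
     * receives the caller's file and line, otherwise it does not. */
    #ifdef ENABLE_LOCK_POSITION
    static int my_trylock_x(my_lock_t *l, const char *file, unsigned int line)
    #else
    static int my_trylock(my_lock_t *l)
    #endif
    {
    #ifdef ENABLE_LOCK_POSITION
        printf("trylock at %s:%u\n", file, line);
    #endif
        if (l->held)
            return 1;                   /* busy */
        l->held = 1;
        return 0;                       /* acquired */
    }

    #ifdef ENABLE_LOCK_POSITION
    /* Callers keep writing my_trylock(&l); the macro appends the position. */
    #define my_trylock(L) my_trylock_x((L), __FILE__, __LINE__)
    #endif

    int main(void)
    {
        my_lock_t lock = { 0 };
        return my_trylock(&lock);       /* 0 on success */
    }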
@@ -243,8 +243,10 @@ void erts_lcnt_enable_proc_lock_count(int enable);
erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
erts_proc_lc_chk_only_proc_main((P))
-void erts_proc_lc_lock(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked);
+void erts_proc_lc_lock(Process *p, ErtsProcLocks locks,
+ char *file, unsigned int line);
+void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char *file, unsigned int line);
void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
@@ -253,7 +255,8 @@ void erts_proc_lc_chk_only_proc_main(Process *p);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks,
+ char* file, unsigned int line);
void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#else
#define ERTS_SMP_CHK_NO_PROC_LOCKS
@@ -372,7 +375,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
@@ -482,7 +485,7 @@ busy_main:
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks,
@@ -528,7 +531,7 @@ erts_smp_proc_lock__(Process *p,
erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_lock(p, locks);
+ erts_proc_lc_lock(p, locks, file, line);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -695,7 +698,7 @@ erts_smp_proc_trylock__(Process *p,
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, locks, res == 0);
+ erts_proc_lc_trylock(p, locks, res == 0, __FILE__, __LINE__);
#endif
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -741,7 +744,7 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* ERTS_SMP */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
@@ -756,13 +759,13 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
#else
erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_smp_proc_lock_x__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index fa5482b841..eabf016081 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -756,7 +756,8 @@ erts_ptab_delete_element(ErtsPTab *ptab,
pix = erts_ptab_id2pix(ptab, ptab_el->id);
- ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_ptab_rlock(ptab);
maybe_save = ptab->list.data.deleted.end != NULL;
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index ecb5525022..c38ef47d87 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -26,10 +26,13 @@
#define ERL_SMP_H
#include "erl_threads.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
@@ -131,10 +134,11 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
@@ -159,16 +163,18 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
@@ -179,7 +185,7 @@ ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
@@ -192,7 +198,7 @@ ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -202,7 +208,8 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp,
+ char *keyname);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
@@ -835,7 +842,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra);
+ erts_mtx_init_x(mtx, name, extra, 1);
#endif
}
@@ -843,7 +850,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra);
+ erts_mtx_init_locked_x(mtx, name, extra, 1);
#endif
}
@@ -872,9 +879,15 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_mtx_trylock_x(mtx,file,line);
+#elif defined(ERTS_SMP)
return erts_mtx_trylock(mtx);
#else
return 0;
@@ -884,13 +897,13 @@ erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line)
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
#else
erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_mtx_lock_x(mtx, file, line);
#elif defined(ERTS_SMP)
erts_mtx_lock(mtx);
@@ -1020,9 +1033,15 @@ erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrlock(rwmtx);
#else
return 0;
@@ -1030,13 +1049,13 @@ erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rlock(rwmtx);
@@ -1053,9 +1072,15 @@ erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrwlock(rwmtx);
#else
return 0;
@@ -1063,13 +1088,13 @@ erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rwlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rwlock(rwmtx);
@@ -1171,13 +1196,13 @@ erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
#else
erts_smp_spin_lock(erts_smp_spinlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_spin_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_spin_lock(lock);
@@ -1237,13 +1262,13 @@ erts_smp_read_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_read_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
erts_read_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_read_lock(lock);
@@ -1263,13 +1288,13 @@ erts_smp_write_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_write_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_write_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_write_lock(lock);
@@ -1299,10 +1324,10 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
+erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname)
{
#ifdef ERTS_SMP
- erts_tsd_key_create(keyp);
+ erts_tsd_key_create(keyp,keyname);
#endif
}
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index cf5e3dc012..545a0343d0 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -417,7 +417,8 @@ void
erts_thr_progress_pre_init(void)
{
intrnl = NULL;
- erts_tsd_key_create(&erts_thr_prgr_data_key__);
+ erts_tsd_key_create(&erts_thr_prgr_data_key__,
+ "erts_thr_prgr_data_key");
init_nob(&erts_thr_prgr__.current, ERTS_THR_PRGR_VAL_FIRST);
}
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 759c8f4c33..80026104db 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -281,10 +281,13 @@
#define ERTS_THR_READ_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER ETHR_READ_DEPEND_MEMORY_BARRIER
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_mtx_trylock(L) erts_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrlock(L) erts_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rlock(L) erts_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrwlock(L) erts_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rwlock(L) erts_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_read_lock(L) erts_read_lock_x(L, __FILE__, __LINE__)
#define erts_write_lock(L) erts_write_lock_x(L, __FILE__, __LINE__)
@@ -461,18 +464,24 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
+ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
+ int enable_lcnt);
+ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
+ Uint16 opt, int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
char *name,
- Eterm extra);
+ Eterm extra,
+ int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
+ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
#else
+ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_mtx_lock(erts_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx);
@@ -496,16 +505,18 @@ ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
@@ -571,7 +582,7 @@ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
@@ -584,7 +595,7 @@ ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -594,7 +605,7 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
@@ -1549,7 +1560,7 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1559,13 +1570,17 @@ erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
#endif
}
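erts_mtx_init_x() and its siblings gain an enable_lcnt flag: when it is zero the lock is registered with a NULL name, which keeps it out of lock-count statistics (compare the port_init_instr() hunk in io.c below, where the old NULL-name trick at the call site becomes a flag argument). A rough illustration of the idea outside ERTS, with invented names:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the lock-count registry: a NULL name means
     * "create the lock but do not include it in the statistics". */
    static void lcnt_register(const char *name)
    {
        if (name)
            printf("counting lock \"%s\"\n", name);
        else
            printf("lock excluded from counting\n");
    }

    /* Mirrors the shape of the patched init function: the caller decides at
     * run time whether this particular lock shows up in the statistics. */
    static void my_mtx_init(const char *name, int enable_lcnt)
    {
        lcnt_register(enable_lcnt ? name : NULL);
    }

    int main(void)
    {
        int portlock_opt = 0;           /* e.g. a runtime option bit */
        my_mtx_init("port_lock", portlock_opt);
        my_mtx_init("proc_main", 1);
        return 0;
    }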
ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
+erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
+ int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1575,14 +1590,17 @@ erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1592,7 +1610,10 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1670,7 +1691,11 @@ erts_mtx_destroy(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_mtx_trylock(erts_mtx_t *mtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1684,8 +1709,12 @@ erts_mtx_trylock(erts_mtx_t *mtx)
res = ethr_mutex_trylock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_x(res == 0, &mtx->lc,file,line);
+#else
erts_lc_trylock(res == 0, &mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
@@ -1697,7 +1726,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
#else
erts_mtx_lock(erts_mtx_t *mtx)
@@ -1705,8 +1734,12 @@ erts_mtx_lock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&mtx->lc, file, line);
+#else
erts_lc_lock(&mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
@@ -1857,7 +1890,10 @@ erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
+ if (name && name[0] == '\0')
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
#endif
#endif
}
@@ -1921,7 +1957,11 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1935,8 +1975,12 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
@@ -1948,7 +1992,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
@@ -1956,8 +2000,12 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -1984,7 +2032,11 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1998,8 +2050,12 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2011,7 +2067,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
@@ -2019,8 +2075,12 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2426,7 +2486,7 @@ erts_spin_unlock(erts_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
#else
erts_spin_lock(erts_spinlock_t *lock)
@@ -2434,8 +2494,12 @@ erts_spin_lock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&lock->lc,file,line);
+#else
erts_lc_lock(&lock->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
@@ -2545,7 +2609,7 @@ erts_read_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_read_lock(erts_rwlock_t *lock)
@@ -2553,8 +2617,12 @@ erts_read_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -2584,7 +2652,7 @@ erts_write_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_write_lock(erts_rwlock_t *lock)
@@ -2592,8 +2660,12 @@ erts_write_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2635,10 +2707,10 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_tsd_key_create(erts_tsd_key_t *keyp)
+erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(keyp);
+ int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
#endif
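erts_tsd_key_create() (and the smp wrapper in erl_smp.h above) now takes a descriptive key name that is handed down to ethr_tsd_key_create(); the erl_thr_progress.c and io.c hunks show callers being updated accordingly. For flavour, a comparable named wrapper over plain POSIX thread-specific data might look like this (hypothetical code, not the ethread implementation):

    #include <pthread.h>
    #include <stdio.h>

    /* A tiny wrapper in the spirit of the patched erts_tsd_key_create(): the
     * key gets a human-readable name that debug or instrumentation code can
     * log when the key is created. */
    static int tsd_key_create_named(pthread_key_t *keyp, const char *keyname)
    {
        int res = pthread_key_create(keyp, NULL);
        if (res == 0)
            fprintf(stderr, "created tsd key \"%s\"\n", keyname);
        return res;
    }

    int main(void)
    {
        pthread_key_t key;
        if (tsd_key_create_named(&key, "example_data_key") != 0)
            return 1;
        pthread_setspecific(key, "hello");
        printf("%s\n", (const char *)pthread_getspecific(key));
        pthread_key_delete(key);
        return 0;
    }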
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ff7fdfcfca..6978a5f11a 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -3309,6 +3309,8 @@ sys_msg_dispatcher_func(void *unused)
if (erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
@@ -3467,12 +3469,20 @@ static void
init_sys_msg_dispatcher(void)
{
erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+#ifdef __OSE__
+ thr_opts.coreNo = 0;
+#endif
thr_opts.detached = 1;
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = "sys_msg_dispatcher";
+#endif
+
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 43a19e97f1..0807649ea1 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -150,7 +150,7 @@ void erts_silence_warn_unused_result(long unused);
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
-int list_length(Eterm);
+int erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 3b16cdeb4a..cd5060ebb3 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -49,7 +49,9 @@
#include "erl_map.h"
extern ErlDrvEntry fd_driver_entry;
+#ifndef __OSE__
extern ErlDrvEntry vanilla_driver_entry;
+#endif
extern ErlDrvEntry spawn_driver_entry;
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
@@ -245,11 +247,13 @@ static ERTS_INLINE void port_init_instr(Port *prt
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
char *lock_str = "port_lock";
+ erts_mtx_init_locked_x(prt->lock, lock_str, id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 0
#endif
- erts_mtx_init_locked_x(prt->lock, lock_str, id);
+ );
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -2743,8 +2747,10 @@ void erts_init_io(int port_tab_size,
&drv_list_rwmtx_opts,
"driver_list");
driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key);
- erts_smp_tsd_key_create(&driver_list_last_error_key);
+ erts_smp_tsd_key_create(&driver_list_lock_status_key,
+ "erts_driver_list_lock_status_key");
+ erts_smp_tsd_key_create(&driver_list_last_error_key,
+ "erts_driver_list_last_error_key");
erts_ptab_init_table(&erts_port,
ERTS_ALC_T_PORT_TABLE,
@@ -2764,7 +2770,9 @@ void erts_init_io(int port_tab_size,
erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
+#ifndef __OSE__
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
+#endif
init_driver(&spawn_driver, &spawn_driver_entry, NULL);
erts_init_static_drivers();
for (dp = driver_tab; *dp != NULL; dp++)
@@ -7306,10 +7314,11 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
erts_atom_put((byte *) drv->name,
sys_strlen(drv->name),
ERTS_ATOM_ENC_LATIN1,
- 1)
+ 1),
#else
- NIL
+ NIL,
#endif
+ 1
);
}
#endif
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 8406f5344a..73630fda8e 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1470,11 +1470,15 @@ apply_last I P
#
put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_assoc F Src Dst Live Size Rest=* => \
+put_map_assoc F Src=s Dst Live Size Rest=* => \
update_map_assoc F Src Dst Live Size Rest
-put_map_exact F Src Dst Live Size Rest=* => \
+put_map_assoc F Src Dst Live Size Rest=* => \
+ move Src x | update_map_assoc F x Dst Live Size Rest
+put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
+put_map_exact F Src=s Dst Live Size Rest=* => \
update_map_exact F Src Dst Live Size Rest
+put_map_exact F Src Dst Live Size Rest=* => \
+ move Src x | update_map_exact F x Dst Live Size Rest
new_map j d I I
update_map_assoc j s d I I
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 189d9ebac8..e273056a2b 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -38,6 +38,8 @@
#if defined (__WIN32__)
# include "erl_win_sys.h"
+#elif defined (__OSE__)
+# include "erl_ose_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 7da555b18d..5b43d25e3c 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -258,7 +258,7 @@ erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
* Returns -1 if not a proper list (i.e. not terminated with NIL)
*/
int
-list_length(Eterm list)
+erts_list_length(Eterm list)
{
int i = 0;
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index e040864d24..b62e9a0306 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -99,7 +99,16 @@
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
+
+#ifndef __OSE__
+#include <ctype.h>
+#include <sys/types.h>
#include <stdlib.h>
+#else
+#include "ctype.h"
+#include "sys/types.h"
+#include "stdlib.h"
+#endif
/* Need (NON)BLOCKING macros for sendfile */
#ifndef WANT_NONBLOCKING
@@ -113,8 +122,7 @@
#include "erl_threads.h"
#include "gzio.h"
#include "dtrace-wrapper.h"
-#include <ctype.h>
-#include <sys/types.h>
+
void erl_exit(int n, char *fmt, ...);
@@ -765,6 +773,9 @@ file_init(void)
: 0);
driver_system_info(&sys_info, sizeof(ErlDrvSysInfo));
+ /* Run initialization of the efile driver, if needed. */
+ efile_init();
+
#ifdef USE_VM_PROBES
erts_mtx_init(&dt_driver_mutex, "efile_drv dtrace mutex");
pthread_key_create(&dt_driver_key, NULL);
@@ -911,6 +922,7 @@ static void reply_Uint_posix_error(file_descriptor *desc, Uint num,
driver_output2(desc->port, response, t-response, NULL, 0);
}
+#ifdef HAVE_SENDFILE
static void reply_string_error(file_descriptor *desc, char* str) {
char response[256]; /* Response buffer. */
char* s;
@@ -921,6 +933,7 @@ static void reply_string_error(file_descriptor *desc, char* str) {
*t = tolower(*s);
driver_output2(desc->port, response, t-response, NULL, 0);
}
+#endif
static int reply_error(file_descriptor *desc,
Efile_error *errInfo) /* The error codes. */
diff --git a/erts/emulator/drivers/common/erl_efile.h b/erts/emulator/drivers/common/erl_efile.h
index 95c036db8f..5a8e3bc5db 100644
--- a/erts/emulator/drivers/common/erl_efile.h
+++ b/erts/emulator/drivers/common/erl_efile.h
@@ -127,7 +127,7 @@ struct t_sendfile_hdtl {
/*
* Functions.
*/
-
+int efile_init(void);
int efile_mkdir(Efile_error* errInfo, char* name);
int efile_rmdir(Efile_error* errInfo, char* name);
int efile_delete_file(Efile_error* errInfo, char* name);
diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c
index 8ec2c3f762..ef539f8f9b 100644
--- a/erts/emulator/drivers/common/gzio.c
+++ b/erts/emulator/drivers/common/gzio.c
@@ -20,6 +20,7 @@
#endif
#include <ctype.h>
#include "erl_driver.h"
+#include "erl_efile.h"
#include "sys.h"
#ifdef __WIN32__
@@ -597,6 +598,15 @@ erts_gzseek(ErtsGzFile file, int offset, int whence)
int pos;
gz_stream* s = (gz_stream *) file;
+ switch (whence) {
+ case EFILE_SEEK_SET: whence = SEEK_SET; break;
+ case EFILE_SEEK_CUR: whence = SEEK_CUR; break;
+ case EFILE_SEEK_END: whence = SEEK_END; break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
if (s == NULL) {
errno = EINVAL;
return -1;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 4a861b121c..357a4b7bcb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -121,6 +121,10 @@ typedef unsigned long long llu_t;
#undef WANT_NONBLOCKING
#include "sys.h"
+#ifdef __OSE__
+#include "inet.h"
+#endif
+
#undef EWOULDBLOCK
#undef ETIMEDOUT
@@ -289,7 +293,111 @@ static BOOL (WINAPI *fpSetHandleInformation)(HANDLE,DWORD,DWORD);
static unsigned long zero_value = 0;
static unsigned long one_value = 1;
-#else /* #ifdef __WIN32__ */
+#elif defined (__OSE__)
+#include "sys/socket.h"
+#include "sys/uio.h"
+#include "sfk/sys/sfk_uio.h"
+#include "netinet/in.h"
+#include "netinet/tcp.h"
+#include "netdb.h"
+
+ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
+{
+ return 0;
+}
+
+#define INVALID_SOCKET -1
+#define INVALID_EVENT -1
+#define SOCKET_ERROR -1
+
+#define SOCKET int
+#define HANDLE long int
+#define FD_READ ERL_DRV_READ
+#define FD_WRITE ERL_DRV_WRITE
+#define FD_CLOSE 0
+#define FD_CONNECT ERL_DRV_WRITE
+#define FD_ACCEPT ERL_DRV_READ
+
+#define sock_connect(s, addr, len) connect((s), (addr), (len))
+#define sock_listen(s, b) listen((s), (b))
+#define sock_bind(s, addr, len) bind((s), (addr), (len))
+#define sock_getopt(s,t,n,v,l) getsockopt((s),(t),(n),(v),(l))
+#define sock_setopt(s,t,n,v,l) setsockopt((s),(t),(n),(v),(l))
+#define sock_name(s, addr, len) getsockname((s), (addr), (len))
+#define sock_peer(s, addr, len) getpeername((s), (addr), (len))
+#define sock_ntohs(x) ntohs((x))
+#define sock_ntohl(x) ntohl((x))
+#define sock_htons(x) htons((x))
+#define sock_htonl(x) htonl((x))
+
+#define sock_accept(s, addr, len) accept((s), (addr), (len))
+#define sock_send(s,buf,len,flag) inet_send((s),(buf),(len),(flag))
+#define sock_sendto(s,buf,blen,flag,addr,alen) \
+ sendto((s),(buf),(blen),(flag),(addr),(alen))
+#define sock_sendv(s, vec, size, np, flag) \
+ (*(np) = writev((s), (struct iovec*)(vec), (size)))
+#define sock_sendmsg(s,msghdr,flag) sendmsg((s),(msghdr),(flag))
+
+#define sock_open(af, type, proto) socket((af), (type), (proto))
+#define sock_close(s) close((s))
+#define sock_shutdown(s, how) shutdown((s), (how))
+
+#define sock_hostname(buf, len) gethostname((buf), (len))
+#define sock_getservbyname(name,proto) getservbyname((name), (proto))
+#define sock_getservbyport(port,proto) getservbyport((port), (proto))
+
+#define sock_recv(s,buf,len,flag) recv((s),(buf),(len),(flag))
+#define sock_recvfrom(s,buf,blen,flag,addr,alen) \
+ recvfrom((s),(buf),(blen),(flag),(addr),(alen))
+#define sock_recvmsg(s,msghdr,flag) recvmsg((s),(msghdr),(flag))
+
+#define sock_errno() errno
+#define sock_create_event(d) ((d)->s) /* return file descriptor */
+#define sock_close_event(e) /* do nothing */
+
+#define inet_driver_select(port, e, mode, on) \
+ driver_select(port, e, mode | (on?ERL_DRV_USE:0), on)
+
+#define sock_select(d, flags, onoff) do { \
+ ASSERT(!(d)->is_ignored); \
+ (d)->event_mask = (onoff) ? \
+ ((d)->event_mask | (flags)) : \
+ ((d)->event_mask & ~(flags)); \
+ DEBUGF(("sock_select(%ld): flags=%02X, onoff=%d, event_mask=%02lX\r\n", \
+ (long) (d)->port, (flags), (onoff), (unsigned long) (d)->event_mask)); \
+ inet_driver_select((d)->port, (ErlDrvEvent)(long)(d)->event, (flags), (onoff)); \
+ } while(0)
+
+#ifndef WANT_NONBLOCKING
+#define WANT_NONBLOCKING
+#endif
+#include "sys.h"
+
+typedef unsigned long u_long;
+#define IN_CLASSA(a) ((((in_addr_t)(a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((in_addr_t)(a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((in_addr_t)(a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+
+#define IN_EXPERIMENTAL(a) ((((in_addr_t)(a)) & 0xe0000000) == 0xe0000000)
+#define IN_BADCLASS(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xf0000000)
+
+#else /* !__OSE__ && !__WIN32__ */
#include <sys/time.h>
#ifdef NETDB_H_NEEDS_IN_H
diff --git a/erts/emulator/drivers/ose/ose_efile.c b/erts/emulator/drivers/ose/ose_efile.c
new file mode 100644
index 0000000000..035ff81a9b
--- /dev/null
+++ b/erts/emulator/drivers/ose/ose_efile.c
@@ -0,0 +1,1124 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Purpose: Provides file and directory operations for OSE.
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#if defined(HAVE_POSIX_FALLOCATE) && !defined(__sun) && !defined(__sun__)
+#define _XOPEN_SOURCE 600
+#endif
+#if !defined(_GNU_SOURCE) && defined(HAVE_LINUX_FALLOC_H)
+#define _GNU_SOURCE
+#endif
+#include "sys.h"
+#include "erl_driver.h"
+#include "erl_efile.h"
+#if defined(DARWIN) || defined(HAVE_LINUX_FALLOC_H) || defined(HAVE_POSIX_FALLOCATE)
+#include "fcntl.h"
+#endif
+#include "ose.h"
+#include "unistd.h"
+#include "sys/stat.h"
+#include "dirent.h"
+#include "sys/time.h"
+#include "time.h"
+#include "assert.h"
+
+/* Find a definition of MAXIOV, which is used later in the code. */
+#if defined IOV_MAX
+#define MAXIOV IOV_MAX
+#elif defined UIO_MAXIOV
+#define MAXIOV UIO_MAXIOV
+#else
+#define MAXIOV 16
+#endif
+
+/*
+ * Macros for testing file types.
+ */
+
+#define ISDIR(st) (((st).st_mode & S_IFMT) == S_IFDIR)
+#define ISREG(st) (((st).st_mode & S_IFMT) == S_IFREG)
+#define ISDEV(st) \
+ (((st).st_mode&S_IFMT) == S_IFCHR || ((st).st_mode&S_IFMT) == S_IFBLK)
+#define ISLNK(st) (((st).st_mode & S_IFLNK) == S_IFLNK)
+#ifdef NO_UMASK
+#define FILE_MODE 0644
+#define DIR_MODE 0755
+#else
+#define FILE_MODE 0666
+#define DIR_MODE 0777
+#endif
+
+#define IS_DOT_OR_DOTDOT(s) \
+ (s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0')))
+
+/*
+ * Macros for handling local file descriptors
+ * and mutexes.
+ *
+ * Handling of files like this is necessary because OSE
+ * does not allow seeking after the end of a file. So
+ * what we do is emulate this by keeping track of the size
+ * of the file and where the file's position is. If a
+ * write happens after eof then we pad the gap first.
+ *
+ * Given time this should be rewritten to get rid of the
+ * mutex and use the port lock to protect the data. This
+ * could be done by adapting the efile API for some
+ * calls to allow some meta-data to be associated with the
+ * open file.
+ */
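As a rough, portable illustration of the padding scheme the comment above describes (not the driver code itself), writing zeroes until the tracked size catches up with the requested position might look like:

    #include <string.h>
    #include <unistd.h>

    #define PAD_CHUNK 255

    /* If the tracked file size is behind the position the caller wants to
     * write at, append zero bytes until the gap is closed; the real data
     * can then be written at the requested offset. */
    static int pad_to(int fd, off_t size, off_t wanted_pos)
    {
        char zeros[PAD_CHUNK];
        memset(zeros, 0, sizeof zeros);
        while (size < wanted_pos) {
            size_t chunk = (size_t)(wanted_pos - size);
            if (chunk > sizeof zeros)
                chunk = sizeof zeros;
            ssize_t written = write(fd, zeros, chunk);
            if (written <= 0)
                return -1;      /* caller turns errno into an Efile_error */
            size += written;
        }
        return 0;
    }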
+
+#define L_FD_IS_VALID(fd_data) ((fd_data)->beyond_eof > 0)
+#define L_FD_INVALIDATE(fd_data) (fd_data)->beyond_eof = 0
+#define L_FD_CUR(fd_data) (fd_data)->pos
+#define L_FD_OFFS_BEYOND_EOF(fd_data, offs) \
+ (((fd_data)->size > offs) ? 0 : 1)
+
+#define L_FD_FAIL -1
+#define L_FD_SUCCESS 1
+#define L_FD_PAD_SIZE 255
+
+struct fd_meta {
+ ErlDrvMutex *meta_mtx;
+ struct fd_data *fd_data_list;
+};
+
+struct fd_data {
+ int fd;
+ struct fd_data *next;
+ struct fd_data *prev;
+ int pos;
+ int beyond_eof;
+ size_t size;
+#ifdef DEBUG
+ PROCESS owner;
+#endif
+};
+
+static int l_invalidate_local_fd(int fd);
+static int l_pad_file(struct fd_data *fd_data, off_t offset);
+static int check_error(int result, Efile_error* errInfo);
+static struct fd_data* l_new_fd(void);
+static int l_remove_local_fd(int fd);
+static struct fd_data* l_find_local_fd(int fd);
+static int l_update_local_fd(int fd, int pos, int size);
+
+static struct fd_meta* fdm = NULL;
+
+
+/***************************************************************************/
+
+static int
+l_remove_local_fd(int fd)
+{
+ struct fd_data *fd_data;
+ fd_data = l_find_local_fd(fd);
+
+ if (fd_data == NULL) {
+ return L_FD_FAIL;
+ }
+#ifdef DEBUG
+ assert(fd_data->owner == current_process());
+#endif
+ erl_drv_mutex_lock(fdm->meta_mtx);
+ /* head ? */
+ if (fd_data == fdm->fd_data_list) {
+ if (fd_data->next != NULL) {
+ /* remove link to head */
+ fd_data->next->prev = NULL;
+ /* set new head */
+ fdm->fd_data_list = fd_data->next;
+ }
+ else {
+ /* head is lonely */
+ fdm->fd_data_list = NULL;
+ }
+ }
+ else { /* not head */
+ if (fd_data->prev == NULL) {
+ erl_drv_mutex_unlock(fdm->meta_mtx);
+ return L_FD_FAIL;
+ }
+ else {
+ if (fd_data->next != NULL) {
+ fd_data->next->prev = fd_data->prev;
+ fd_data->prev->next = fd_data->next;
+ }
+ else {
+ fd_data->prev->next = NULL;
+ }
+ }
+ }
+
+ /* scramble values */
+ fd_data->beyond_eof = -1;
+ fd_data->next = NULL;
+ fd_data->prev = NULL;
+ fd_data->fd = -1;
+
+ /* unlock and clean */
+ driver_free(fd_data);
+ erl_drv_mutex_unlock(fdm->meta_mtx);
+
+ return L_FD_SUCCESS;
+}
+
+/***************************************************************************/
+
+static int
+l_invalidate_local_fd(int fd) {
+ struct fd_data *fd_data;
+
+ if ((fd_data = l_find_local_fd(fd)) == NULL) {
+ return L_FD_FAIL;
+ }
+
+ fd_data->beyond_eof = 0;
+ return L_FD_SUCCESS;
+}
+
+/****************************************************************************/
+
+static struct fd_data*
+l_find_local_fd(int fd) {
+ struct fd_data *fd_data;
+
+ fd_data = NULL;
+ erl_drv_mutex_lock(fdm->meta_mtx);
+ for (fd_data = fdm->fd_data_list; fd_data != NULL; ) {
+ if (fd_data->fd == fd) {
+#ifdef DEBUG
+ assert(fd_data->owner == current_process());
+#endif
+ break;
+ }
+ fd_data = fd_data->next;
+ }
+ erl_drv_mutex_unlock(fdm->meta_mtx);
+ return fd_data;
+}
+
+/***************************************************************************/
+
+static struct fd_data*
+l_new_fd(void) {
+ struct fd_data *fd_data;
+
+ fd_data = driver_alloc(sizeof(struct fd_data));
+ if (fd_data == NULL) {
+ return NULL;
+ }
+ erl_drv_mutex_lock(fdm->meta_mtx);
+ if (fdm->fd_data_list == NULL) {
+ fdm->fd_data_list = fd_data;
+ fdm->fd_data_list->prev = NULL;
+ fdm->fd_data_list->next = NULL;
+ }
+ else {
+ fd_data->next = fdm->fd_data_list;
+ fdm->fd_data_list = fd_data;
+ fdm->fd_data_list->prev = NULL;
+ }
+#ifdef DEBUG
+ fd_data->owner = current_process();
+#endif
+ erl_drv_mutex_unlock(fdm->meta_mtx);
+ return fd_data;
+}
+
+/***************************************************************************/
+
+static int
+l_update_local_fd(int fd, int pos, int size) {
+ struct fd_data *fd_data = NULL;
+
+ fd_data = l_find_local_fd(fd);
+ /* new fd to handle? */
+ if (fd_data == NULL) {
+ fd_data = l_new_fd();
+ if (fd_data == NULL) {
+ /* out of memory */
+ return L_FD_FAIL;
+ }
+ }
+ fd_data->size = size;
+ fd_data->pos = pos;
+ fd_data->fd = fd;
+ fd_data->beyond_eof = 1;
+
+ return L_FD_SUCCESS;
+}
+
+/***************************************************************************/
+
+static int
+l_pad_file(struct fd_data *fd_data, off_t offset) {
+ int size_dif;
+ int written = 0;
+ int ret_val = L_FD_SUCCESS;
+ char padding[L_FD_PAD_SIZE];
+
+ size_dif = (offset - fd_data->size);
+ memset(&padding, '\0', L_FD_PAD_SIZE);
+
+ while (size_dif > 0) {
+ written = write(fd_data->fd, padding,
+ (size_dif < L_FD_PAD_SIZE) ?
+ size_dif : L_FD_PAD_SIZE);
+ if (written < 0 && errno != EINTR && errno != EAGAIN) {
+ ret_val = -1;
+ break;
+ }
+ size_dif -= written;
+ }
+ L_FD_INVALIDATE(fd_data);
+ return ret_val;
+}
+
+/***************************************************************************/
+
+static int
+check_error(int result, Efile_error *errInfo) {
+ if (result < 0) {
+ errInfo->posix_errno = errInfo->os_errno = errno;
+ return 0;
+ }
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_init() {
+ fdm = driver_alloc(sizeof(struct fd_meta));
+ if (fdm == NULL) {
+ return L_FD_FAIL;
+ }
+ fdm->meta_mtx = erl_drv_mutex_create("ose_efile local fd mutex\n");
+ erl_drv_mutex_lock(fdm->meta_mtx);
+ fdm->fd_data_list = NULL;
+ erl_drv_mutex_unlock(fdm->meta_mtx);
+ return L_FD_SUCCESS;
+}
+
+/***************************************************************************/
+
+int
+efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */
+ char* name) /* Name of directory to create. */
+{
+#ifdef NO_MKDIR_MODE
+ return check_error(mkdir(name), errInfo);
+#else
+ int res = mkdir(name, DIR_MODE);
+ if (res < 0 && errno == EINVAL) {
+ errno = ENOENT;
+ }
+ return check_error(res, errInfo);
+#endif
+}
+
+/***************************************************************************/
+
+int
+efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
+ char* name) /* Name of directory to delete. */
+{
+ if (rmdir(name) == 0) {
+ return 1;
+ }
+ if (errno == ENOTEMPTY) {
+ errno = EEXIST;
+ }
+ if (errno == EEXIST || errno == EINVAL) {
+ int saved_errno = errno;
+ struct stat file_stat;
+ struct stat cwd_stat;
+
+ if(stat(name, &file_stat) != 0) {
+ errno = ENOENT;
+ return check_error(-1, errInfo);
+ }
+ /*
+ * The error code might be wrong if this is the current directory.
+ */
+ if (stat(name, &file_stat) == 0 && stat(".", &cwd_stat) == 0 &&
+ file_stat.st_ino == cwd_stat.st_ino &&
+ file_stat.st_dev == cwd_stat.st_dev) {
+ saved_errno = EACCES;
+ }
+ errno = saved_errno;
+ }
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
+ char* name) /* Name of file to delete. */
+{
+ struct stat statbuf;
+
+ if (stat(name, &statbuf) >= 0) {
+ /* Do not let unlink() remove directories */
+ if (ISDIR(statbuf)) {
+ errno = EPERM;
+ return check_error(-1, errInfo);
+ }
+
+ if (unlink(name) == 0) {
+ return 1;
+ }
+
+ if (errno == EISDIR) {
+ errno = EPERM;
+ return check_error(-1, errInfo);
+ }
+ }
+ else {
+ if (errno == EINVAL) {
+ errno = ENOENT;
+ return check_error(-1, errInfo);
+ }
+ }
+ return check_error(-1, errInfo);
+}
+
+/*
+ *---------------------------------------------------------------------------
+ *
+ * Changes the name of an existing file or directory, from src to dst.
+ * If src and dst refer to the same file or directory, does nothing
+ * and returns success. Otherwise if dst already exists, it will be
+ * deleted and replaced by src subject to the following conditions:
+ * If src is a directory, dst may be an empty directory.
+ * If src is a file, dst may be a file.
+ * In any other situation where dst already exists, the rename will
+ * fail.
+ *
+ * Results:
+ *	If the rename was successful, returns 1.
+ * Otherwise the return value is 0 and errno is set to
+ * indicate the error. Some possible values for errno are:
+ *
+ * EACCES: src or dst parent directory can't be read and/or written.
+ * EEXIST: dst is a non-empty directory.
+ * EINVAL: src is a root directory or dst is a subdirectory of src.
+ * EISDIR: dst is a directory, but src is not.
+ * ENOENT: src doesn't exist, or src or dst is "".
+ * ENOTDIR: src is a directory, but dst is not.
+ * EXDEV: src and dst are on different filesystems.
+ *
+ * Side effects:
+ * The implementation of rename may allow cross-filesystem renames,
+ * but the caller should be prepared to emulate it with copy and
+ * delete if errno is EXDEV.
+ *
+ *---------------------------------------------------------------------------
+ */
+
+int
+efile_rename(Efile_error* errInfo, /* Where to return error codes. */
+ char* src, /* Original name. */
+ char* dst) /* New name. */
+{
+
+  /* Temporary fix: AFM does not recognize "./<file name>" as a
+   * destination. Remove this workaround once the AFM fix is adopted.
+   */
+
+ char *dot_str;
+ if (dst != NULL) {
+ dot_str = strchr(dst, '.');
+ if (dot_str && dot_str == dst && dot_str[1] == '/') {
+ dst = dst+2;
+ }
+ }
+
+ if (rename(src, dst) == 0) {
+ return 1;
+ }
+ if (errno == ENOTEMPTY) {
+ errno = EEXIST;
+ }
+ if (errno == EINVAL) {
+ struct stat file_stat;
+
+ if (stat(dst, &file_stat)== 0) {
+ if (ISDIR(file_stat)) {
+ errno = EISDIR;
+ }
+ else if (ISREG(file_stat)) {
+ errno = ENOTDIR;
+ }
+ else {
+ errno = EINVAL;
+ }
+ }
+ else {
+ errno = EINVAL;
+ }
+ }
+
+ if (strcmp(src, "/") == 0) {
+ errno = EINVAL;
+ }
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_chdir(Efile_error* errInfo, /* Where to return error codes. */
+ char* name) /* Name of directory to make current. */
+{
+ return check_error(chdir(name), errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_getdcwd(Efile_error* errInfo, /* Where to return error codes. */
+ int drive, /* 0 - current, 1 - A, 2 - B etc. */
+ char* buffer, /* Where to return the current
+ directory. */
+ size_t size) /* Size of buffer. */
+{
+ if (drive == 0) {
+ if (getcwd(buffer, size) == NULL)
+ return check_error(-1, errInfo);
+
+ return 1;
+ }
+
+ /*
+     * Drives other than 0 are not supported on Unix.
+ */
+
+ errno = ENOTSUP;
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
+ char* name, /* Name of directory to open. */
+ EFILE_DIR_HANDLE* p_dir_handle, /* Pointer to directory
+ handle of
+ open directory.*/
+ char* buffer, /* Pointer to buffer for
+ one filename. */
+ size_t *size) /* in-out Size of buffer, length
+ of name. */
+{
+ DIR *dp; /* Pointer to directory structure. */
+ struct dirent* dirp; /* Pointer to directory entry. */
+
+ /*
+ * If this is the first call, we must open the directory.
+ */
+
+ if (*p_dir_handle == NULL) {
+ dp = opendir(name);
+ if (dp == NULL)
+ return check_error(-1, errInfo);
+ *p_dir_handle = (EFILE_DIR_HANDLE) dp;
+ }
+
+ /*
+ * Retrieve the name of the next file using the directory handle.
+ */
+
+ dp = *((DIR **)((void *)p_dir_handle));
+ for (;;) {
+ dirp = readdir(dp);
+ if (dirp == NULL) {
+ closedir(dp);
+ return 0;
+ }
+ if (IS_DOT_OR_DOTDOT(dirp->d_name))
+ continue;
+ buffer[0] = '\0';
+ strncat(buffer, dirp->d_name, (*size)-1);
+ *size = strlen(dirp->d_name);
+ return 1;
+ }
+}
+
+/***************************************************************************/
+
+int
+efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
+	       char* name,		/* Name of file to open. */
+	       int flags,		/* Flags to use for opening. */
+ int* pfd, /* Where to store the file
+ descriptor. */
+ Sint64 *pSize) /* Where to store the size of the
+ file. */
+{
+ struct stat statbuf;
+ int fd;
+ int mode; /* Open mode. */
+
+ if (stat(name, &statbuf) >= 0 && !ISREG(statbuf)) {
+ errno = EISDIR;
+ return check_error(-1, errInfo);
+ }
+
+ switch (flags & (EFILE_MODE_READ|EFILE_MODE_WRITE)) {
+ case EFILE_MODE_READ:
+ mode = O_RDONLY;
+ break;
+ case EFILE_MODE_WRITE:
+ if (flags & EFILE_NO_TRUNCATE)
+ mode = O_WRONLY | O_CREAT;
+ else
+ mode = O_WRONLY | O_CREAT | O_TRUNC;
+ break;
+ case EFILE_MODE_READ_WRITE:
+ mode = O_RDWR | O_CREAT;
+ break;
+ default:
+ errno = EINVAL;
+ return check_error(-1, errInfo);
+ }
+
+
+ if (flags & EFILE_MODE_APPEND) {
+ mode &= ~O_TRUNC;
+ mode |= O_APPEND;
+ }
+
+ if (flags & EFILE_MODE_EXCL) {
+ mode |= O_EXCL;
+ }
+
+ fd = open(name, mode, FILE_MODE);
+
+ if (!check_error(fd, errInfo))
+ return 0;
+
+ *pfd = fd;
+ if (pSize) {
+ *pSize = statbuf.st_size;
+ }
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_may_openfile(Efile_error* errInfo, char *name) {
+ struct stat statbuf; /* Information about the file */
+ int result;
+
+ result = stat(name, &statbuf);
+ if (!check_error(result, errInfo))
+ return 0;
+ if (!ISREG(statbuf)) {
+ errno = EISDIR;
+ return check_error(-1, errInfo);
+ }
+ return 1;
+}
+
+/***************************************************************************/
+
+void
+efile_closefile(int fd)
+{
+ if (l_find_local_fd(fd) != NULL) {
+ l_remove_local_fd(fd);
+ }
+ close(fd);
+}
+
+/***************************************************************************/
+
+int
+efile_fdatasync(Efile_error *errInfo, /* Where to return error codes. */
+ int fd) /* File descriptor for file to sync data. */
+{
+ return efile_fsync(errInfo, fd);
+}
+
+/***************************************************************************/
+
+int
+efile_fsync(Efile_error *errInfo, /* Where to return error codes. */
+ int fd) /* File descriptor for file to sync. */
+{
+ return check_error(fsync(fd), errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
+ char* name, int info_for_link)
+{
+ struct stat statbuf; /* Information about the file */
+ int result;
+
+ result = stat(name, &statbuf);
+ if (!check_error(result, errInfo)) {
+ return 0;
+ }
+
+#if SIZEOF_OFF_T == 4
+ pInfo->size_high = 0;
+#else
+ pInfo->size_high = (Uint32)(statbuf.st_size >> 32);
+#endif
+ pInfo->size_low = (Uint32)statbuf.st_size;
+
+#ifdef NO_ACCESS
+ /* Just look at read/write access for owner. */
+
+ pInfo->access = ((statbuf.st_mode >> 6) & 07) >> 1;
+
+#else
+ pInfo->access = FA_NONE;
+ if (access(name, R_OK) == 0)
+ pInfo->access |= FA_READ;
+ if (access(name, W_OK) == 0)
+ pInfo->access |= FA_WRITE;
+
+#endif
+
+ if (ISDEV(statbuf))
+ pInfo->type = FT_DEVICE;
+ else if (ISDIR(statbuf))
+ pInfo->type = FT_DIRECTORY;
+ else if (ISREG(statbuf))
+ pInfo->type = FT_REGULAR;
+ else if (ISLNK(statbuf))
+ pInfo->type = FT_SYMLINK;
+ else
+ pInfo->type = FT_OTHER;
+
+ pInfo->accessTime = statbuf.st_atime;
+ pInfo->modifyTime = statbuf.st_mtime;
+ pInfo->cTime = statbuf.st_ctime;
+
+ pInfo->mode = statbuf.st_mode;
+ pInfo->links = statbuf.st_nlink;
+ pInfo->major_device = statbuf.st_dev;
+ pInfo->inode = statbuf.st_ino;
+ pInfo->uid = statbuf.st_uid;
+ pInfo->gid = statbuf.st_gid;
+
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
+{
+ /*
+     * On some systems chown will always fail for a non-root user when
+     * POSIX_CHOWN_RESTRICTED is set. Others will succeed as long as
+     * you don't try to chown a file to someone besides yourself.
+ */
+ if (pInfo->mode != -1) {
+ mode_t newMode = pInfo->mode & (S_ISUID | S_ISGID |
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ if (chmod(name, newMode)) {
+ newMode &= ~(S_ISUID | S_ISGID);
+ if (chmod(name, newMode)) {
+ return check_error(-1, errInfo);
+ }
+ }
+ }
+
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_write(Efile_error* errInfo, /* Where to return error codes. */
+ int flags, /* Flags given when file was
+ opened. */
+ int fd, /* File descriptor to write to. */
+ char* buf, /* Buffer to write. */
+ size_t count) /* Number of bytes to write. */
+{
+ ssize_t written; /* Bytes written in last operation. */
+ struct fd_data *fd_data;
+
+ if ((fd_data = l_find_local_fd(fd)) != NULL) {
+ if (L_FD_IS_VALID(fd_data)) {
+ /* we are beyond eof and need to pad*/
+ if (l_pad_file(fd_data, L_FD_CUR(fd_data)) < 0) {
+ return check_error(-1, errInfo);
+ }
+ }
+ }
+
+ while (count > 0) {
+ if ((written = write(fd, buf, count)) < 0) {
+ if (errno != EINTR) {
+ return check_error(-1, errInfo);
+ }
+ else {
+ written = 0;
+ }
+ }
+ ASSERT(written <= count);
+ buf += written;
+ count -= written;
+ }
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_writev(Efile_error* errInfo, /* Where to return error codes */
+ int flags, /* Flags given when file was
+ * opened */
+ int fd, /* File descriptor to write to */
+ SysIOVec* iov, /* Vector of buffer structs.
+ * The structs may be changed i.e.
+ * due to incomplete writes */
+ int iovcnt) /* Number of structs in vector */
+{
+ struct fd_data *fd_data;
+ int cnt = 0; /* Buffers so far written */
+
+ ASSERT(iovcnt >= 0);
+ if ((fd_data = l_find_local_fd(fd)) != NULL) {
+ if (L_FD_IS_VALID(fd_data)) {
+ /* we are beyond eof and need to pad*/
+ if (l_pad_file(fd_data, L_FD_CUR(fd_data)) < 0) {
+ return check_error(-1, errInfo);
+ }
+ }
+ }
+ while (cnt < iovcnt) {
+ if ((! iov[cnt].iov_base) || (iov[cnt].iov_len <= 0)) {
+ /* Empty buffer - skip */
+ cnt++;
+ }
+ else { /* Non-empty buffer */
+ ssize_t w; /* Bytes written in this call */
+ do {
+ w = write(fd, iov[cnt].iov_base, iov[cnt].iov_len);
+ } while (w < 0 && errno == EINTR);
+
+ ASSERT(w <= iov[cnt].iov_len || w == -1);
+
+ if (w < 0) {
+ return check_error(-1, errInfo);
+ }
+ /* Move forward to next buffer to write */
+ for (; cnt < iovcnt && w > 0; cnt++) {
+ if (iov[cnt].iov_base && iov[cnt].iov_len > 0) {
+ if (w < iov[cnt].iov_len) {
+ /* Adjust the buffer for next write */
+ iov[cnt].iov_len -= w;
+ iov[cnt].iov_base += w;
+ w = 0;
+ break;
+ }
+ else {
+ w -= iov[cnt].iov_len;
+ }
+ }
+ }
+ ASSERT(w == 0);
+ } /* else Non-empty buffer */
+ } /* while (cnt< iovcnt) */
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_read(Efile_error* errInfo, /* Where to return error codes. */
+ int flags, /* Flags given when file was opened. */
+ int fd, /* File descriptor to read from. */
+ char* buf, /* Buffer to read into. */
+ size_t count, /* Number of bytes to read. */
+ size_t *pBytesRead) /* Where to return number of
+ bytes read. */
+{
+ ssize_t n;
+ struct fd_data *fd_data;
+
+ if ((fd_data = l_find_local_fd(fd)) != NULL) {
+ if (L_FD_IS_VALID(fd_data)) {
+ *pBytesRead = 0;
+ return 1;
+ }
+ }
+ for (;;) {
+ if ((n = read(fd, buf, count)) >= 0) {
+ break;
+ }
+ else if (errno != EINTR) {
+ return check_error(-1, errInfo);
+ }
+ }
+ if (fd_data != NULL && L_FD_IS_VALID(fd_data)) {
+ L_FD_INVALIDATE(fd_data);
+ }
+ *pBytesRead = (size_t) n;
+ return 1;
+}
+
+/* pread() and pwrite()                                                   */
+/* Some Unix systems, notably Solaris, have these syscalls.               */
+/* It is especially nice for e.g. the dets module to have support for     */
+/* them. Even if the underlying OS doesn't provide them, they are         */
+/* reasonably easy to emulate by first calling seek() and then            */
+/* calling read() or write().                                             */
+/* This latter strategy however changes the file pointer, which pread()   */
+/* does not. We choose to ignore this and say that the location of the    */
+/* file pointer is undefined after a call to any of the p functions.      */
+
+
+int
+efile_pread(Efile_error* errInfo, /* Where to return error codes. */
+ int fd, /* File descriptor to read from. */
+ Sint64 offset, /* Offset in bytes from BOF. */
+ char* buf, /* Buffer to read into. */
+ size_t count, /* Number of bytes to read. */
+ size_t *pBytesRead) /* Where to return
+ number of bytes read. */
+{
+ int res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
+ if (res) {
+ return efile_read(errInfo, 0, fd, buf, count, pBytesRead);
+ } else {
+ return res;
+ }
+}
+
+
+/***************************************************************************/
+
+int
+efile_pwrite(Efile_error* errInfo, /* Where to return error codes. */
+ int fd, /* File descriptor to write to. */
+ char* buf, /* Buffer to write. */
+ size_t count, /* Number of bytes to write. */
+ Sint64 offset) /* where to write it */
+{
+ int res = efile_seek(errInfo, fd, offset, EFILE_SEEK_SET, NULL);
+
+ if (res) {
+ return efile_write(errInfo, 0, fd, buf, count);
+ } else {
+ return res;
+ }
+}
+
+/***************************************************************************/
+
+int
+efile_seek(Efile_error* errInfo, /* Where to return error codes. */
+ int fd, /* File descriptor to do the seek on. */
+ Sint64 offset, /* Offset in bytes from the given
+ origin. */
+ int origin, /* Origin of seek (SEEK_SET, SEEK_CUR,
+ SEEK_END). */
+ Sint64 *new_location) /* Resulting new location in file. */
+{
+ off_t off, result;
+ off = (off_t) offset;
+
+ switch (origin) {
+ case EFILE_SEEK_SET:
+ origin = SEEK_SET;
+ break;
+ case EFILE_SEEK_CUR:
+ origin = SEEK_CUR;
+ break;
+ case EFILE_SEEK_END:
+ origin = SEEK_END;
+ break;
+ default:
+ errno = EINVAL;
+ return check_error(-1, errInfo);
+ }
+
+ if (off != offset) {
+ errno = EINVAL;
+ return check_error(-1, errInfo);
+ }
+
+ errno = 0;
+ result = lseek(fd, off, origin);
+
+ if (result >= 0) {
+ l_invalidate_local_fd(fd);
+ }
+
+ if (result < 0)
+ {
+ if (errno == ENOSYS) {
+ int size, cur_pos;
+
+ if (off < 0) {
+ errno = EINVAL;
+ return check_error(-1, errInfo);
+ }
+
+ cur_pos = lseek(fd, 0, SEEK_CUR);
+ size = lseek(fd, 0, SEEK_END);
+
+ if (origin == SEEK_SET) {
+ result = offset;
+ }
+ else if (origin == SEEK_CUR) {
+ result = offset + cur_pos;
+ }
+ else if (origin == SEEK_END) {
+ result = size + offset;
+ }
+
+ /* sanity check our result */
+ if (size > result) {
+ return check_error(-1, errInfo);
+ }
+
+      /* store the data locally */
+ l_update_local_fd(fd, result, size);
+
+ /* reset the original file position */
+ if (origin != SEEK_END) {
+ lseek(fd, cur_pos, SEEK_SET);
+ }
+ }
+ else if (errno == 0) {
+ errno = EINVAL;
+ }
+ }
+
+ if (new_location) {
+ *new_location = result;
+ }
+
+ return 1;
+}
+
+/***************************************************************************/
+
+int
+efile_truncate_file(Efile_error* errInfo, int *fd, int flags)
+{
+ off_t offset;
+ struct fd_data *fd_data;
+
+ if ((fd_data = l_find_local_fd(*fd)) != NULL && L_FD_IS_VALID(fd_data)) {
+ offset = L_FD_CUR(fd_data);
+ }
+ else {
+ offset = lseek(*fd, 0, SEEK_CUR);
+ }
+
+ return check_error(((offset >= 0) &&
+ (ftruncate(*fd, offset) == 0)) ? 1 : -1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
+{
+ errno = ENOTSUP;
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_altname(Efile_error* errInfo, char* name, char* buffer, size_t size)
+{
+ errno = ENOTSUP;
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_link(Efile_error* errInfo, char* old, char* new)
+{
+ errno = ENOTSUP;
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_symlink(Efile_error* errInfo, char* old, char* new)
+{
+ errno = ENOTSUP;
+ return check_error(-1, errInfo);
+}
+
+/***************************************************************************/
+
+int
+efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
+ Sint64 length, int advise)
+{
+ return check_error(posix_fadvise(fd, offset, length, advise), errInfo);
+}
+
+/***************************************************************************/
+
+static int
+call_posix_fallocate(int fd, Sint64 offset, Sint64 length)
+{
+ int ret;
+
+ /*
+ * On Linux and Solaris for example, posix_fallocate() returns
+ * a positive error number on error and it does not set errno.
+ * On FreeBSD however (9.0 at least), it returns -1 on error
+ * and it sets errno.
+ */
+ do {
+ ret = posix_fallocate(fd, (off_t) offset, (off_t) length);
+ if (ret > 0) {
+ errno = ret;
+ ret = -1;
+ }
+ } while (ret != 0 && errno == EINTR);
+
+ return ret;
+}
+
+/***************************************************************************/
+
+int
+efile_fallocate(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length)
+{
+ return check_error(call_posix_fallocate(fd, offset, length), errInfo);
+}
diff --git a/erts/emulator/drivers/ose/ose_signal_drv.c b/erts/emulator/drivers/ose/ose_signal_drv.c
new file mode 100644
index 0000000000..4929b53856
--- /dev/null
+++ b/erts/emulator/drivers/ose/ose_signal_drv.c
@@ -0,0 +1,896 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2013-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "errno.h"
+#include "stdio.h"
+#include "string.h"
+#include "stddef.h"
+
+#include "sys.h"
+#include "erl_driver.h"
+#include "ose.h"
+
+
+#ifdef HAVE_OSE_SPI_H
+#include "ose_spi/ose_spi.h"
+#endif
+
+#define DEBUG_ATTACH 0
+#define DEBUG_HUNT 0
+#define DEBUG_SEND 0
+#define DEBUG_LISTEN 0
+
+#if 0
+#define DEBUGP(FMT,...) printf(FMT, __VA_ARGS__)
+#else
+#define DEBUGP(FMT,...)
+#endif
+
+#if DEBUG_ATTACH
+#define DEBUGP_ATTACH(...) DEBUGP( __VA_ARGS__)
+#else
+#define DEBUGP_ATTACH(...)
+#endif
+
+#if DEBUG_HUNT
+#define DEBUGP_HUNT(...) DEBUGP( __VA_ARGS__)
+#else
+#define DEBUGP_HUNT(...)
+#endif
+
+#if DEBUG_LISTEN
+#define DEBUGP_LISTEN(...) DEBUGP( __VA_ARGS__)
+#else
+#define DEBUGP_LISTEN(...)
+#endif
+
+#if DEBUG_SEND
+#define DEBUGP_SEND(...) DEBUGP( __VA_ARGS__)
+#else
+#define DEBUGP_SEND(...)
+#endif
+
+
+#define DRIVER_NAME "ose_signal_drv"
+#define GET_SPID 1
+#define GET_NAME 2
+#define HUNT 100
+#define DEHUNT 101
+#define ATTACH 102
+#define DETACH 103
+#define SEND 104
+#define SEND_W_S 105
+#define LISTEN 106
+#define OPEN 200
+
+#define REF_SEGMENT_SIZE 8
+
+struct async {
+ SIGSELECT signo;
+ ErlDrvTermData port;
+ ErlDrvTermData proc;
+ PROCESS spid;
+ PROCESS target;
+ Uint32 ref;
+};
+
+/**
+ * OSE signals
+ **/
+union SIGNAL {
+ SIGSELECT signo;
+ struct async async;
+};
+
+/**
+ * The driver's context
+ **/
+typedef struct _driver_context {
+ ErlDrvPort port;
+ PROCESS spid;
+ ErlDrvEvent perm_events[2];
+ ErlDrvEvent *events;
+ Uint32 event_cnt;
+ Uint32 ref;
+ Uint32 *outstanding_refs;
+ Uint32 outstanding_refs_max;
+ Uint32 outstanding_refs_cnt;
+} driver_context_t;
+
+/**
+ * Global variables
+ **/
+static ErlDrvTermData a_ok;
+static ErlDrvTermData a_error;
+static ErlDrvTermData a_enomem;
+static ErlDrvTermData a_enoent;
+static ErlDrvTermData a_badarg;
+static ErlDrvTermData a_mailbox_up;
+static ErlDrvTermData a_mailbox_down;
+static ErlDrvTermData a_ose_drv_reply;
+static ErlDrvTermData a_message;
+static PROCESS proxy_proc;
+
+
+/**
+ * Serialize/unserialize unsigned 32-bit values
+ **/
+static char *put_u32(unsigned int value, char *ptr) {
+ *ptr++ = (value & 0xff000000) >> 24;
+ *ptr++ = (value & 0x00ff0000) >> 16;
+ *ptr++ = (value & 0x0000ff00) >> 8;
+ *ptr++ = (value & 0xff);
+
+ return ptr;
+}
+
+static unsigned int get_u32(char *ptr) {
+ unsigned int result = 0;
+ result += (ptr[0] & 0xff) << 24;
+ result += (ptr[1] & 0xff) << 16;
+ result += (ptr[2] & 0xff) << 8;
+ result += (ptr[3] & 0xff);
+
+ return result;
+}
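+
+/*
+ * Usage sketch (illustrative): the control interface further down relies
+ * on the big-endian byte order produced by put_u32 and read back by
+ * get_u32, so a round-trip is the identity:
+ *
+ *   char buf[4];
+ *   put_u32(0x12345678u, buf);
+ *   ASSERT(get_u32(buf) == 0x12345678u);
+ */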
+
+
+/* Stolen from efile_drv.c */
+
+/* char EV_CHAR_P(ErlIOVec *ev, int p, int q) */
+#define EV_CHAR_P(ev, p, q) \
+ (((char *)(ev)->iov[(q)].iov_base) + (p))
+
+/* int EV_GET_CHAR(ErlIOVec *ev, char *p, int *pp, int *qp) */
+#define EV_GET_CHAR(ev, p, pp, qp) ev_get_char(ev, p ,pp, qp)
+static int
+ev_get_char(ErlIOVec *ev, char *p, int *pp, int *qp) {
+ if (*(pp)+1 <= (ev)->iov[*(qp)].iov_len) {
+ *(p) = *EV_CHAR_P(ev, *(pp), *(qp));
+ if (*(pp)+1 < (ev)->iov[*(qp)].iov_len)
+ *(pp) = *(pp)+1;
+ else {
+ (*(qp))++;
+ *pp = 0;
+ }
+ return !0;
+ }
+ return 0;
+}
+
+/* Uint32 EV_UINT32(ErlIOVec *ev, int p, int q)*/
+#define EV_UINT32(ev, p, q) \
+ ((Uint32) *(((unsigned char *)(ev)->iov[(q)].iov_base) + (p)))
+
+/* int EV_GET_UINT32(ErlIOVec *ev, Uint32 *p, int *pp, int *qp) */
+#define EV_GET_UINT32(ev, p, pp, qp) ev_get_uint32(ev,(Uint32*)(p),pp,qp)
+static int
+ev_get_uint32(ErlIOVec *ev, Uint32 *p, int *pp, int *qp) {
+ if (*(pp)+4 <= (ev)->iov[*(qp)].iov_len) {
+ *(p) = (EV_UINT32(ev, *(pp), *(qp)) << 24)
+ | (EV_UINT32(ev, *(pp)+1, *(qp)) << 16)
+ | (EV_UINT32(ev, *(pp)+2, *(qp)) << 8)
+ | (EV_UINT32(ev, *(pp)+3, *(qp)));
+ if (*(pp)+4 < (ev)->iov[*(qp)].iov_len)
+ *(pp) = *(pp)+4;
+ else {
+ (*(qp))++;
+ *pp = 0;
+ }
+ return !0;
+ }
+ return 0;
+}
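+
+/*
+ * Decoding sketch (illustrative): outputv() below walks an ErlIOVec with a
+ * byte offset p inside vector element q. A DEHUNT request, for example, is
+ * a command byte followed by a big-endian 32-bit reference:
+ *
+ *   int p = 0, q = 1;
+ *   char cmd;
+ *   Uint32 ref;
+ *   EV_GET_CHAR(ev, &cmd, &p, &q);
+ *   EV_GET_UINT32(ev, &ref, &p, &q);
+ */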
+
+/**
+ * Convenience macros
+ **/
+#define send_response(port,output) erl_drv_send_term(driver_mk_port(port),\
+ driver_caller(port), output, sizeof(output) / sizeof(output[0]));
+
+void iov_memcpy(void *dest,ErlIOVec *ev,int ind,int off);
+void iov_memcpy(void *dest,ErlIOVec *ev,int ind,int off) {
+  /* Flatten the iovec into dest, starting at offset off within element ind.
+     Advance the destination for each element so later elements do not
+     overwrite earlier ones. */
+  char *p = dest;
+  int i;
+  memcpy(p,ev->iov[ind].iov_base+off,ev->iov[ind].iov_len-off);
+  p += ev->iov[ind].iov_len-off;
+  for (i = ind+1; i < ev->vsize; i++) {
+    memcpy(p,ev->iov[i].iov_base,ev->iov[i].iov_len);
+    p += ev->iov[i].iov_len;
+  }
+}
+
+/**
+ * Reference handling
+ **/
+
+static int add_reference(driver_context_t *ctxt, Uint32 ref) {
+
+ /*
+ * Premature optimizations may be evil, but they sure are fun.
+ */
+
+ if (ctxt->outstanding_refs == NULL) {
+    /* First ref: allocate the initial array */
+ ctxt->outstanding_refs = driver_alloc(REF_SEGMENT_SIZE*sizeof(Uint32));
+ if (!ctxt->outstanding_refs)
+ return 1;
+
+ memset(ctxt->outstanding_refs,0,REF_SEGMENT_SIZE*sizeof(Uint32));
+ ctxt->outstanding_refs_max += REF_SEGMENT_SIZE;
+ ctxt->outstanding_refs[ctxt->outstanding_refs_cnt++] = ref;
+ } else if (ctxt->outstanding_refs_cnt == ctxt->outstanding_refs_max) {
+ /* Expand ref array */
+ Uint32 *new_array;
+ ctxt->outstanding_refs_max += REF_SEGMENT_SIZE;
+ new_array = driver_realloc(ctxt->outstanding_refs,
+ ctxt->outstanding_refs_max*sizeof(Uint32));
+
+ if (!new_array) {
+ ctxt->outstanding_refs_max -= REF_SEGMENT_SIZE;
+ return 1;
+ }
+
+ ctxt->outstanding_refs = new_array;
+
+ memset(ctxt->outstanding_refs+ctxt->outstanding_refs_cnt,0,
+ REF_SEGMENT_SIZE*sizeof(Uint32));
+ ctxt->outstanding_refs[ctxt->outstanding_refs_cnt++] = ref;
+
+ } else {
+ /* Find an empty slot:
+ * First we try current index,
+ * then we scan for a slot.
+ */
+ if (!ctxt->outstanding_refs[ctxt->outstanding_refs_cnt]) {
+ ctxt->outstanding_refs[ctxt->outstanding_refs_cnt++] = ref;
+ } else {
+ int i;
+ ASSERT(ctxt->outstanding_refs_cnt < ctxt->outstanding_refs_max);
+ for (i = 0; i < ctxt->outstanding_refs_max; i++)
+ if (!ctxt->outstanding_refs[i])
+ break;
+ ASSERT(ctxt->outstanding_refs[i] == 0);
+ ctxt->outstanding_refs[i] = ref;
+ ctxt->outstanding_refs_cnt++;
+ }
+ }
+ return 0;
+}
+
+/* Return 0 if removed, 1 if it does not exist, 2 if the array could not be shrunk */
+static int remove_reference(driver_context_t *ctxt, Uint32 ref) {
+ int i,j;
+
+ if (ctxt->outstanding_refs_max == 0 && ctxt->outstanding_refs_cnt == 0) {
+ ASSERT(ctxt->outstanding_refs == NULL);
+ return 1;
+ }
+
+ for (i = 0; i < ctxt->outstanding_refs_max; i++) {
+ if (ctxt->outstanding_refs[i] == ref) {
+ ctxt->outstanding_refs[i] = 0;
+ ctxt->outstanding_refs_cnt--;
+ i = -1;
+ break;
+ }
+ }
+
+ if (i != -1)
+ return 1;
+
+ if (ctxt->outstanding_refs_cnt == 0) {
+ driver_free(ctxt->outstanding_refs);
+ ctxt->outstanding_refs = NULL;
+ ctxt->outstanding_refs_max = 0;
+ } else if (ctxt->outstanding_refs_cnt == (ctxt->outstanding_refs_max - REF_SEGMENT_SIZE)) {
+ Uint32 *new_array;
+ for (i = 0, j = 0; i < ctxt->outstanding_refs_cnt; i++) {
+ if (ctxt->outstanding_refs[i] == 0) {
+ for (j = i+1; j < ctxt->outstanding_refs_max; j++)
+ if (ctxt->outstanding_refs[j]) {
+ ctxt->outstanding_refs[i] = ctxt->outstanding_refs[j];
+ ctxt->outstanding_refs[j] = 0;
+ break;
+ }
+ }
+ }
+ ctxt->outstanding_refs_max -= REF_SEGMENT_SIZE;
+ new_array = driver_realloc(ctxt->outstanding_refs,
+ ctxt->outstanding_refs_max*sizeof(Uint32));
+ if (!new_array) {
+ ctxt->outstanding_refs_max += REF_SEGMENT_SIZE;
+ return 2;
+ }
+
+ ctxt->outstanding_refs = new_array;
+
+ }
+
+ return 0;
+}
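+
+/*
+ * Usage sketch (mirrors outputv() and ready_input() below): every
+ * outstanding asynchronous request records its reference before the
+ * signal is sent, and the reply or cancel path removes it again:
+ *
+ *   sig->async.ref = ++ctxt->ref;
+ *   if (add_reference(ctxt, ctxt->ref)) { ... reply enomem ... }
+ *   ...
+ *   if (remove_reference(ctxt, ref)) { ... request was already cancelled ... }
+ *
+ * The array grows and shrinks in steps of REF_SEGMENT_SIZE entries.
+ */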
+
+/**
+ * The OSE proxy process. This only handles ERTS_SIGNAL_OSE_DRV_ATTACH.
+ * The process is needed because signals triggered by attach ignore
+ * redir tables.
+ *
+ * We have one global proxy process to save memory. An attempt to make each
+ * port's phantom process into a proxy was made, but that used way too
+ * much memory.
+ */
+static OS_PROCESS(driver_proxy_process) {
+ SIGSELECT sigs[] = {1,ERTS_SIGNAL_OSE_DRV_ATTACH};
+ PROCESS master = 0;
+
+ while (1) {
+ union SIGNAL *sig = receive(sigs);
+
+ if (sig->signo == ERTS_SIGNAL_OSE_DRV_ATTACH) {
+
+ /* The first message is used to determine who to send messages to. */
+ if (master == 0)
+ master = sender(&sig);
+
+ if (sig->async.target == 0) {
+ PROCESS from = sender(&sig);
+ restore(sig);
+ DEBUGP_ATTACH("0x%x: got attach 0x%x, sending to 0x%x\n",
+ current_process(),from,master);
+ sig->async.target = from;
+ send(&sig,master);
+ } else {
+ PROCESS target = sig->async.target;
+ restore(sig);
+ sig->async.target = 0;
+ DEBUGP_ATTACH("0x%x: doing attach on 0x%x\n",current_process(),target);
+ attach(&sig,target);
+ }
+ }
+ }
+}
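+
+/*
+ * Flow sketch (illustrative) of an attach routed through the proxy above:
+ *
+ *   emulator --ATTACH{target=T}--> proxy     proxy clears target and
+ *                                            calls attach(&sig, T)
+ *   ...mailbox T terminates...               OSE returns the signal to
+ *                                            the proxy (target == 0)
+ *   proxy --ATTACH{target=T}--> master       ready_input() reports a
+ *                                            mailbox_down for T
+ *
+ * "master" is the emulator process that sent the first attach request.
+ */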
+
+
+/**
+ * Init routine for the driver
+ **/
+static int drv_init(void) {
+
+ a_ok = driver_mk_atom("ok");
+ a_error = driver_mk_atom("error");
+ a_enomem = driver_mk_atom("enomem");
+ a_enoent = driver_mk_atom("enoent");
+ a_badarg = driver_mk_atom("badarg");
+ a_mailbox_up = driver_mk_atom("mailbox_up");
+ a_mailbox_down = driver_mk_atom("mailbox_down");
+ a_ose_drv_reply = driver_mk_atom("ose_drv_reply");
+ a_message = driver_mk_atom("message");
+
+ proxy_proc = create_process(get_ptype(current_process()),
+ "ose_signal_driver_proxy",
+ driver_proxy_process, 10000,
+ get_pri(current_process()),
+ 0, 0, NULL, 0, 0);
+
+#ifdef DEBUG
+ efs_clone(proxy_proc);
+#endif
+ start(proxy_proc);
+
+ return 0;
+}
+
+/* Signal resolution callback */
+static ErlDrvOseEventId resolve_signal(union SIGNAL* osig) {
+ union SIGNAL *sig = osig;
+ if (sig->signo == ERTS_SIGNAL_OSE_DRV_HUNT ||
+ sig->signo == ERTS_SIGNAL_OSE_DRV_ATTACH) {
+ return sig->async.spid;
+ }
+ DEBUGP("%p: Got signal %d sent to %p from 0x%p\n",
+ current_process(),sig->signo,addressee(&sig),sender(&sig));
+ return addressee(&sig);
+}
+
+
+/**
+ * Start routine for the driver
+ **/
+static ErlDrvData drv_start(ErlDrvPort port, char *command)
+{
+ driver_context_t *ctxt = driver_alloc(sizeof(driver_context_t));
+
+ ctxt->perm_events[0] = NULL;
+ ctxt->perm_events[1] = NULL;
+
+ ctxt->spid = 0;
+ ctxt->port = port;
+ ctxt->event_cnt = 0;
+ ctxt->events = NULL;
+ ctxt->ref = 0;
+ ctxt->outstanding_refs = NULL;
+ ctxt->outstanding_refs_max = 0;
+ ctxt->outstanding_refs_cnt = 0;
+
+
+ /* Set the communication protocol to Erlang to be binary */
+ set_port_control_flags(port, PORT_CONTROL_FLAG_BINARY);
+
+ /* Everything ok */
+ return (ErlDrvData)ctxt;
+}
+
+/**
+ * Stop routine for the driver
+ **/
+static void drv_stop(ErlDrvData driver_data)
+{
+ driver_context_t *ctxt = (driver_context_t *)driver_data;
+ int i;
+
+ /* HUNT + ATTACH */
+ if (ctxt->perm_events[0])
+ driver_select(ctxt->port, ctxt->perm_events[0],
+ ERL_DRV_USE|ERL_DRV_READ, 0);
+ if (ctxt->perm_events[1])
+ driver_select(ctxt->port, ctxt->perm_events[1],
+ ERL_DRV_USE|ERL_DRV_READ, 0);
+
+ for (i = 0; i < ctxt->event_cnt; i++) {
+ driver_select(ctxt->port, ctxt->events[i], ERL_DRV_USE|ERL_DRV_READ, 0);
+ }
+
+ if (ctxt->spid != 0)
+ kill_proc(ctxt->spid);
+ DEBUGP("0x%x: stopped\n",ctxt->spid);
+ if (ctxt->events)
+ driver_free(ctxt->events);
+ if (ctxt->outstanding_refs)
+ driver_free(ctxt->outstanding_refs);
+
+ driver_free(ctxt);
+}
+
+/**
+ * Output from Erlang
+ **/
+static void outputv(ErlDrvData driver_data, ErlIOVec *ev)
+{
+ driver_context_t *ctxt = (driver_context_t *)driver_data;
+ int p = 0, q = 1;
+ char cmd;
+
+ if (! EV_GET_CHAR(ev,&cmd,&p,&q)) {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_badarg,
+ ERL_DRV_TUPLE, 3};
+ send_response(ctxt->port, output);
+ return;
+ }
+
+ /* Command is in the buffer's first byte */
+ switch(cmd) {
+
+ case OPEN: {
+ char *name = driver_alloc(ev->size - 1+1);
+ struct OS_redir_entry redir[2];
+
+ redir[0].sig = 1;
+ redir[0].pid = current_process();
+
+ iov_memcpy(name,ev,q,p);
+ name[ev->size-1] = '\0';
+
+ ctxt->spid = create_process(OS_PHANTOM, name, NULL, 0,
+ 0, 0, 0, redir, 0, 0);
+
+ DEBUGP("0x%x: open\n",ctxt->spid);
+
+ ctxt->perm_events[1] =
+ erl_drv_ose_event_alloc(ERTS_SIGNAL_OSE_DRV_ATTACH,(int)ctxt->spid,
+ resolve_signal, NULL);
+ driver_select(ctxt->port,ctxt->perm_events[1],ERL_DRV_READ|ERL_DRV_USE,1);
+
+ ctxt->perm_events[0] =
+ erl_drv_ose_event_alloc(ERTS_SIGNAL_OSE_DRV_HUNT,(int)ctxt->spid,
+ resolve_signal, NULL);
+ driver_select(ctxt->port,ctxt->perm_events[0],ERL_DRV_READ|ERL_DRV_USE,1);
+
+ start(ctxt->spid);
+
+ {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_ok,
+ ERL_DRV_TUPLE, 3};
+
+ send_response(ctxt->port, output);
+ }
+
+ break;
+
+ }
+
+ case ATTACH:
+ case HUNT:
+ {
+ union SIGNAL *sig = alloc(sizeof(union SIGNAL),
+ cmd == HUNT ? ERTS_SIGNAL_OSE_DRV_HUNT:ERTS_SIGNAL_OSE_DRV_ATTACH);
+
+ sig->async.port = driver_mk_port(ctxt->port);
+ sig->async.proc = driver_caller(ctxt->port);
+ sig->async.spid = ctxt->spid;
+ sig->async.ref = ++ctxt->ref;
+
+ if (add_reference(ctxt,ctxt->ref)) {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_enomem,
+ ERL_DRV_TUPLE, 3};
+ send_response(ctxt->port, output);
+ free_buf(&sig);
+ } else {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_INT, (ErlDrvUInt)ctxt->ref,
+ ERL_DRV_TUPLE, 2,
+ ERL_DRV_TUPLE, 3};
+ send_response(ctxt->port, output);
+
+ if (cmd == HUNT) {
+ char *huntname = driver_alloc(sizeof(char)*((ev->size-1)+1));
+
+ iov_memcpy(huntname,ev,q,p);
+ huntname[ev->size-1] = '\0';
+
+ DEBUGP_HUNT("0x%x: hunt %s -> %u (%u,%u)\n",
+ ctxt->spid,huntname,ctxt->ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+
+ hunt(huntname, 0, NULL, &sig);
+
+ driver_free(huntname);
+ } else {
+ EV_GET_UINT32(ev,&sig->async.target,&p,&q);
+ DEBUGP_ATTACH("0x%x: attach %u -> %u (%u,%u)\n",
+ ctxt->spid,sig->async.target,
+ ctxt->ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+
+ send(&sig,proxy_proc);
+ }
+
+ }
+
+ break;
+ }
+
+ case DETACH:
+ case DEHUNT:
+ {
+
+ Uint32 ref;
+
+ EV_GET_UINT32(ev,&ref,&p,&q);
+ if (cmd == DETACH) {
+ DEBUGP_ATTACH("0x%x: detach %u (%u,%u)\n",ctxt->spid,ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ } else {
+ DEBUGP_HUNT("0x%x: dehunt %u (%u,%u)\n",ctxt->spid,ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ }
+
+ if (remove_reference(ctxt,ref)) {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_error,
+ ERL_DRV_ATOM, a_enoent,
+ ERL_DRV_TUPLE, 2,
+ ERL_DRV_TUPLE, 3};
+
+ send_response(ctxt->port, output);
+ } else {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_ok,
+ ERL_DRV_TUPLE, 3};
+
+ send_response(ctxt->port, output);
+ }
+
+ break;
+ }
+
+ case SEND:
+ case SEND_W_S:
+ {
+ PROCESS spid;
+ PROCESS sender;
+ SIGSELECT signo;
+ OSBUFSIZE size = ev->size-9;
+ union SIGNAL *sig;
+
+ EV_GET_UINT32(ev,&spid,&p,&q);
+
+ if (cmd == SEND_W_S) {
+ EV_GET_UINT32(ev,&sender,&p,&q);
+ size -= 4;
+ } else {
+ sender = ctxt->spid;
+ }
+
+ EV_GET_UINT32(ev,&signo,&p,&q);
+
+ sig = alloc(size + sizeof(SIGSELECT),signo);
+
+ if (cmd == SEND_W_S) {
+ DEBUGP_SEND("0x%x: send_w_s(%u,%u,%u)\n",ctxt->spid,spid,signo,sender);
+ } else {
+ DEBUGP_SEND("0x%x: send(%u,%u)\n",ctxt->spid,spid,signo);
+ }
+
+ iov_memcpy(((char *)&sig->signo) + sizeof(SIGSELECT),ev,q,p);
+
+ send_w_s(&sig, sender, spid);
+
+ break;
+ }
+
+ case LISTEN:
+ {
+ int i,j,event_cnt = (ev->size - 1)/4;
+ ErlDrvEvent *events = NULL;
+ SIGSELECT signo,tmp_signo;
+
+ if (event_cnt == 0) {
+ for (i = 0; i < ctxt->event_cnt; i++)
+ driver_select(ctxt->port,ctxt->events[i],ERL_DRV_READ|ERL_DRV_USE,0);
+ if (ctxt->events)
+ driver_free(ctxt->events);
+ } else {
+ events = driver_alloc(sizeof(ErlDrvEvent)*event_cnt);
+ EV_GET_UINT32(ev,&signo,&p,&q);
+ for (i = 0, j = 0; i < event_cnt || j < ctxt->event_cnt; ) {
+
+ if (ctxt->events)
+ erl_drv_ose_event_fetch(ctxt->events[j],&tmp_signo,NULL,NULL);
+
+ if (signo == tmp_signo) {
+ events[i++] = ctxt->events[j++];
+ EV_GET_UINT32(ev,&signo,&p,&q);
+ } else if (signo < tmp_signo || !ctxt->events) {
+ /* New signal to select on */
+ events[i] = erl_drv_ose_event_alloc(signo,(int)ctxt->spid,
+ resolve_signal, NULL);
+ driver_select(ctxt->port,events[i++],ERL_DRV_READ|ERL_DRV_USE,1);
+ EV_GET_UINT32(ev,&signo,&p,&q);
+ } else {
+ /* Remove old signal to select on */
+ driver_select(ctxt->port,ctxt->events[j++],ERL_DRV_READ|ERL_DRV_USE,0);
+ }
+ }
+ if (ctxt->events)
+ driver_free(ctxt->events);
+ }
+ ctxt->events = events;
+ ctxt->event_cnt = event_cnt;
+
+ {
+ ErlDrvTermData output[] = {
+ ERL_DRV_ATOM, a_ose_drv_reply,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_ATOM, a_ok,
+ ERL_DRV_TUPLE, 3};
+ send_response(ctxt->port, output);
+ }
+ break;
+ }
+
+ default:
+ {
+ DEBUGP("Warning: 'ose_signal_drv' unknown command '%d'\n", cmd);
+ break;
+ }
+ }
+}
+
+/**
+ * Handler for when OSE signal arrives
+ **/
+static void ready_input(ErlDrvData driver_data, ErlDrvEvent event)
+{
+ driver_context_t *ctxt = (driver_context_t *)driver_data;
+ union SIGNAL *sig = erl_drv_ose_get_signal(event);
+
+ while (sig != NULL) {
+
+ switch(sig->signo)
+ {
+ /* Remote process is available */
+ case ERTS_SIGNAL_OSE_DRV_HUNT:
+ {
+ const PROCESS spid = sender(&sig);
+
+ if (remove_reference(ctxt,sig->async.ref)) {
+ DEBUGP_HUNT("0x%x: Got hunt from 0x%x -> %u (CANCELLED) (%u,%u)\n",
+ ctxt->spid,spid,sig->async.ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ /* Already removed by dehunt */
+ } else {
+ ErlDrvTermData reply[] = {
+ ERL_DRV_ATOM, a_mailbox_up,
+ ERL_DRV_PORT, sig->async.port,
+ ERL_DRV_PORT, sig->async.port,
+ ERL_DRV_UINT, (ErlDrvUInt)sig->async.ref,
+ ERL_DRV_TUPLE, 2,
+ ERL_DRV_UINT, (ErlDrvUInt)spid,
+ ERL_DRV_TUPLE, 4};
+ DEBUGP_HUNT("0x%x: Got hunt from 0x%x -> %u (%u,%u)\n",
+ ctxt->spid,spid,sig->async.ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ erl_drv_send_term(sig->async.port, sig->async.proc, reply,
+ sizeof(reply) / sizeof(reply[0]));
+ }
+ break;
+ }
+
+ /* Remote process is down */
+ case ERTS_SIGNAL_OSE_DRV_ATTACH:
+ {
+ PROCESS spid = sig->async.target;
+
+ if (remove_reference(ctxt,sig->async.ref)) {
+ DEBUGP_ATTACH("0x%x: Got attach from 0x%x -> %u (CANCELLED) (%u,%u)\n",
+ ctxt->spid,spid,sig->async.ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ /* Already removed by detach */
+ } else {
+ ErlDrvTermData reply[] = {
+ ERL_DRV_ATOM, a_mailbox_down,
+ ERL_DRV_PORT, sig->async.port,
+ ERL_DRV_PORT, sig->async.port,
+ ERL_DRV_UINT, sig->async.ref,
+ ERL_DRV_TUPLE, 2,
+ ERL_DRV_UINT, (ErlDrvUInt)spid,
+ ERL_DRV_TUPLE, 4};
+ DEBUGP_ATTACH("0x%x: Got attach from 0x%x -> %u (%u,%u)\n",
+ ctxt->spid,spid,sig->async.ref,
+ ctxt->outstanding_refs_cnt,
+ ctxt->outstanding_refs_max);
+ erl_drv_send_term(sig->async.port, sig->async.proc, reply,
+ sizeof(reply) / sizeof(reply[0]));
+ }
+ break;
+ }
+
+ /* Received user defined signal */
+ default:
+ {
+ const PROCESS spid = sender(&sig);
+ const OSBUFSIZE size = sigsize(&sig) - sizeof(SIGSELECT);
+ const char *sig_data = ((char *)&sig->signo) + sizeof(SIGSELECT);
+
+ ErlDrvTermData reply[] = {
+ ERL_DRV_ATOM, a_message,
+ ERL_DRV_PORT, driver_mk_port(ctxt->port),
+ ERL_DRV_UINT, (ErlDrvUInt)spid,
+ ERL_DRV_UINT, (ErlDrvUInt)ctxt->spid,
+ ERL_DRV_UINT, (ErlDrvUInt)sig->signo,
+ ERL_DRV_BUF2BINARY, (ErlDrvTermData)sig_data, (ErlDrvUInt)size,
+ ERL_DRV_TUPLE, 4,
+ ERL_DRV_TUPLE, 3};
+
+ DEBUGP_SEND("0x%x: Got 0x%u\r\n", spid, sig->signo);
+
+ erl_drv_output_term(driver_mk_port(ctxt->port), reply,
+ sizeof(reply) / sizeof(reply[0]));
+ break;
+ }
+ }
+
+ free_buf(&sig);
+ sig = erl_drv_ose_get_signal(event);
+ }
+}
+
+/**
+ * Handler for 'port_control'
+ **/
+static ErlDrvSSizeT control(ErlDrvData driver_data, unsigned int cmd,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
+{
+ driver_context_t *ctxt = (driver_context_t *)driver_data;
+
+ switch(cmd)
+ {
+ case GET_SPID:
+ {
+ const PROCESS spid = ctxt->spid;
+ put_u32(spid, *rbuf);
+ return sizeof(PROCESS);
+ }
+
+#ifdef HAVE_OSE_SPI_H
+ case GET_NAME:
+ {
+ const PROCESS spid = get_u32(buf);
+ char *name = (char*)get_pid_info(spid,OSE_PI_NAME);
+ int n;
+ if (!name) {
+ *rbuf = NULL;
+ return 0;
+ }
+
+ if (rlen < (n = strlen(name))) {
+ ErlDrvBinary *bin = driver_alloc_binary(n);
+ strncpy(bin->orig_bytes,name,n);
+ *rbuf = (char*)bin;
+ } else
+ strncpy(*rbuf,name,n);
+ free_buf((union SIGNAL**)&name);
+
+ return n;
+ }
+#endif
+ default:
+ {
+ /* Unknown command */
+ return (ErlDrvSSizeT)ERL_DRV_ERROR_GENERAL;
+ break;
+ }
+ }
+}
+
+static void stop_select(ErlDrvEvent event, void *reserved)
+{
+ erl_drv_ose_event_free(event);
+}
+
+/**
+ * Setup the driver entry for the Erlang runtime
+ **/
+ErlDrvEntry ose_signal_driver_entry = {
+ .init = drv_init,
+ .start = drv_start,
+ .stop = drv_stop,
+ .outputv = outputv,
+ .ready_input = ready_input,
+ .driver_name = DRIVER_NAME,
+ .control = control,
+ .extended_marker = ERL_DRV_EXTENDED_MARKER,
+ .major_version = ERL_DRV_EXTENDED_MAJOR_VERSION,
+ .minor_version = ERL_DRV_EXTENDED_MINOR_VERSION,
+ .driver_flags = ERL_DRV_FLAG_USE_PORT_LOCKING,
+ .stop_select = stop_select
+};
+
diff --git a/erts/emulator/drivers/ose/ttsl_drv.c b/erts/emulator/drivers/ose/ttsl_drv.c
new file mode 100644
index 0000000000..8af2ce6af3
--- /dev/null
+++ b/erts/emulator/drivers/ose/ttsl_drv.c
@@ -0,0 +1,68 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Stub tty driver because group/user depend on this.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_driver.h"
+
+static int ttysl_init(void);
+static ErlDrvData ttysl_start(ErlDrvPort, char*);
+
+/* Define the driver table entry. */
+struct erl_drv_entry ttsl_driver_entry = {
+ ttysl_init,
+ ttysl_start,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "tty_sl",
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL,
+ NULL, /* process_exit */
+ NULL
+};
+
+
+static int ttysl_init(void)
+{
+ return 0;
+}
+
+static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
+{
+ return ERL_DRV_ERROR_GENERAL;
+}
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 8ffc05da99..42f41c5f3d 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -102,6 +102,11 @@ check_error(int result, Efile_error *errInfo)
}
int
+efile_init() {
+ return 1;
+}
+
+int
efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to create. */
{
@@ -629,7 +634,8 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
do {
w = write(fd, iov[cnt].iov_base, iov[cnt].iov_len);
} while (w < 0 && errno == EINTR);
- ASSERT(w <= iov[cnt].iov_len);
+ ASSERT(w <= iov[cnt].iov_len ||
+ (w == -1 && errno != EINTR));
}
if (w < 0) return check_error(-1, errInfo);
/* Move forward to next buffer to write */
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index d693d7d593..480ba23239 100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -196,6 +196,11 @@ set_error(Efile_error* errInfo)
return 0;
}
+int
+efile_init() {
+ return 1;
+}
+
/*
* A writev with Unix semantics, but with Windows arguments
*/
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index c3687681cf..7637049bc3 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -157,7 +157,8 @@ BIF_RETTYPE hipe_bifs_modeswitch_debug_off_0(BIF_ALIST_0)
BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1);
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index adc8793469..8ebe5da670 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -37,7 +37,8 @@
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 7035dc77df..245841a768 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -34,6 +34,7 @@
#endif
#include "sys.h"
#include "global.h"
+#include "erl_port.h"
#include "erl_check_io.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
@@ -78,6 +79,8 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_INIT ERTS_POLL_EXPORT(erts_poll_init)
#define ERTS_CIO_POLL_INFO ERTS_POLL_EXPORT(erts_poll_info)
+#define GET_FD(fd) fd
+
static struct pollset_info
{
ErtsPollSet ps;
@@ -894,7 +897,7 @@ print_driver_name(erts_dsprintf_buf_t *dsbufp, Eterm id)
static void
steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
{
- erts_dsprintf(dsbufp, "stealing control of fd=%d from ", (int) state->fd);
+ erts_dsprintf(dsbufp, "stealing control of fd=%d from ", (int) GET_FD(state->fd));
switch (state->type) {
case ERTS_EV_TYPE_DRV_SEL: {
int deselect_mode = 0;
@@ -918,7 +921,7 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
if (deselect_mode)
deselect(state, deselect_mode);
else {
- erts_dsprintf(dsbufp, "no one", (int) state->fd);
+ erts_dsprintf(dsbufp, "no one", (int) GET_FD(state->fd));
ASSERT(0);
}
erts_dsprintf(dsbufp, "\n");
@@ -946,7 +949,7 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
break;
}
default:
- erts_dsprintf(dsbufp, "no one\n", (int) state->fd);
+ erts_dsprintf(dsbufp, "no one\n", (int) GET_FD(state->fd));
ASSERT(0);
}
}
@@ -957,10 +960,14 @@ print_select_op(erts_dsprintf_buf_t *dsbufp,
{
Port *pp = erts_drvport2port(ix);
erts_dsprintf(dsbufp,
+#ifdef __OSE__
+ "driver_select(%p, %d,%s%s%s%s | %d, %d) "
+#else
"driver_select(%p, %d,%s%s%s%s, %d) "
+#endif
"by ",
ix,
- (int) fd,
+ (int) GET_FD(fd),
mode & ERL_DRV_READ ? " ERL_DRV_READ" : "",
mode & ERL_DRV_WRITE ? " ERL_DRV_WRITE" : "",
mode & ERL_DRV_USE ? " ERL_DRV_USE" : "",
@@ -1010,7 +1017,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
ASSERT(state->type == ERTS_EV_TYPE_STOP_USE);
erts_dsprintf(dsbufp, "failed: fd=%d (re)selected before stop_select "
"was called for driver %s\n",
- (int) state->fd, state->driver.drv_ptr->name);
+ (int) GET_FD(state->fd), state->driver.drv_ptr->name);
erts_send_error_to_logger_nogl(dsbufp);
if (on) {
@@ -1395,6 +1402,26 @@ stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode)
}
#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+
+#ifdef __OSE__
+static SafeHashValue drv_ev_state_hash(void *des)
+{
+ ErtsSysFdType fd = ((ErtsDrvEventState *) des)->fd;
+ /* We use hash on signo ^ id in order for steal to happen when the
+ same signo + fd is selected on by two different ports */
+ SafeHashValue val = (SafeHashValue)(fd->signo ^ fd->id);
+ return val ^ (val >> 8);
+}
+
+static int drv_ev_state_cmp(void *des1, void *des2)
+{
+ ErtsSysFdType fd1 = ((ErtsDrvEventState *) des1)->fd;
+ ErtsSysFdType fd2 = ((ErtsDrvEventState *) des2)->fd;
+ if (fd1->signo == fd2->signo && fd1->id == fd2->id)
+ return 0;
+ return 1;
+}
+#else /* !__OSE__ && !ERTS_SYS_CONTINOUS_FD_NUMBERS i.e. probably windows */
static SafeHashValue drv_ev_state_hash(void *des)
{
SafeHashValue val = (SafeHashValue) ((ErtsDrvEventState *) des)->fd;
@@ -1406,6 +1433,7 @@ static int drv_ev_state_cmp(void *des1, void *des2)
return ( ((ErtsDrvEventState *) des1)->fd == ((ErtsDrvEventState *) des2)->fd
? 0 : 1);
}
+#endif
static void *drv_ev_state_alloc(void *des_tmpl)
{
@@ -1882,12 +1910,14 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
int fd, len;
#endif
IterDebugCounters counters;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
ErtsDrvEventState null_des;
null_des.driver.select = NULL;
null_des.events = 0;
null_des.remove_cnt = 0;
null_des.type = ERTS_EV_TYPE_NONE;
+#endif
erts_printf("--- fds in pollset --------------------------------------\n");
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 09ed9f41af..2f1c05f401 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -90,7 +90,7 @@
# if defined(ERTS_USE_POLL)
# undef ERTS_POLL_USE_POLL
# define ERTS_POLL_USE_POLL 1
-# elif !defined(__WIN32__)
+# elif !defined(__WIN32__) && !defined(__OSE__)
# undef ERTS_POLL_USE_SELECT
# define ERTS_POLL_USE_SELECT 1
# endif
@@ -99,13 +99,31 @@
typedef Uint32 ErtsPollEvents;
#undef ERTS_POLL_EV_E2N
-#if defined(__WIN32__) /* --- win32 ------------------------------- */
+#if defined(__WIN32__) || defined(__OSE__) /* --- win32 or ose -------- */
#define ERTS_POLL_EV_IN 1
#define ERTS_POLL_EV_OUT 2
#define ERTS_POLL_EV_ERR 4
#define ERTS_POLL_EV_NVAL 8
+#ifdef __OSE__
+
+typedef struct ErtsPollOseMsgList_ {
+ struct ErtsPollOseMsgList_ *next;
+ union SIGNAL *data;
+} ErtsPollOseMsgList;
+
+struct erts_sys_fd_type {
+ SIGSELECT signo;
+ ErlDrvOseEventId id;
+ ErtsPollOseMsgList *msgs;
+ ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig);
+ ethr_mutex mtx;
+ void *extra;
+};
+
+#endif
+
#elif ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
#include <sys/epoll.h>
@@ -228,7 +246,8 @@ ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet,
ErtsSysFdType,
ErtsPollEvents,
int on,
- int* wake_poller);
+ int* wake_poller
+ );
void ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet,
ErtsPollControlEntry [],
int on);
diff --git a/erts/emulator/sys/ose/default.lmconf b/erts/emulator/sys/ose/default.lmconf
new file mode 100644
index 0000000000..a66b0ece56
--- /dev/null
+++ b/erts/emulator/sys/ose/default.lmconf
@@ -0,0 +1,25 @@
+OSE_LM_STACK_SIZES=256,512,1024,2048,4096,8192,16384,65536
+OSE_LM_SIGNAL_SIZES=31,63,127,255,1023,4095,16383,65535
+OSE_LM_POOL_SIZE=0x200000
+OSE_LM_MAIN_NAME=main
+OSE_LM_MAIN_STACK_SIZE=0xF000
+OSE_LM_MAIN_PRIORITY=20
+OSE_LM_PROGRAM_TYPE=APP_RAM
+OSE_LM_DATA_INIT=YES
+OSE_LM_BSS_INIT=YES
+OSE_LM_EXEC_MODEL=SHARED
+HEAP_MAX_SIZE=1000000000
+HEAP_SMALL_BUF_INIT_SIZE=64000000
+HEAP_LARGE_BUF_THRESHOLD=16000000
+HEAP_LOCK_TYPE=2
+
+ERTS_DEFAULT_PRIO=24
+ERTS_SCHEDULER_PRIO=24
+ERTS_ASYNC_PRIO=22
+ERTS_AUX_PRIO=24
+ERTS_SYS_MSG_DISPATCHER_PRIO=21
+
+# Set the environment variable EFS_RESOLVE_TMO on the block to 0.
+# This eliminates delays when trying to open files on volumes that
+# are not mounted.
+EFS_RESOLVE_TMO=0
diff --git a/erts/emulator/sys/ose/driver_int.h b/erts/emulator/sys/ose/driver_int.h
new file mode 100644
index 0000000000..2c9ac955d8
--- /dev/null
+++ b/erts/emulator/sys/ose/driver_int.h
@@ -0,0 +1,41 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * System dependent driver declarations
+ */
+
+#ifndef __DRIVER_INT_H__
+#define __DRIVER_INT_H__
+
+#ifdef HAVE_SYS_UIO_H
+#include <sys/types.h>
+#include <sys/uio.h>
+
+typedef struct iovec SysIOVec;
+
+#else
+
+typedef struct {
+ char* iov_base;
+ int iov_len;
+} SysIOVec;
+
+#endif
+
+#endif
diff --git a/erts/emulator/sys/ose/erl_main.c b/erts/emulator/sys/ose/erl_main.c
new file mode 100644
index 0000000000..03119c3fec
--- /dev/null
+++ b/erts/emulator/sys/ose/erl_main.c
@@ -0,0 +1,51 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <stdlib.h>
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "ose.h"
+
+int
+main(int argc, char **argv) {
+
+  /* When started with pm_create -c ARGV="-- -root ..", argv[0] is the first
+ part of ARGV and not the name of the executable. So we shuffle some
+ pointers here to make erl_start happy. */
+ if (argv[0][0] == '-') {
+ int i;
+ char **tmp_argv = malloc(sizeof(char*)*(argc+1));
+ for (i = 0; i < argc; i++)
+ tmp_argv[i+1] = argv[i];
+ tmp_argv[0] = "beam";
+ erl_start(argc+1,tmp_argv);
+ free(tmp_argv);
+ } else {
+ erl_start(argc,argv);
+ }
+
+ stop(current_process());
+
+ return 0;
+}
diff --git a/erts/emulator/sys/ose/erl_ose_sys.h b/erts/emulator/sys/ose/erl_ose_sys.h
new file mode 100644
index 0000000000..cd66d95c26
--- /dev/null
+++ b/erts/emulator/sys/ose/erl_ose_sys.h
@@ -0,0 +1,353 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ *
+ * This file contains the OSE-specific parts of the system interface.
+ * This should be the only place with conditional compilation
+ * depending on the type of OS.
+ */
+
+#ifndef _ERL_OSE_SYS_H
+#define _ERL_OSE_SYS_H
+
+#include "ose.h"
+#undef NIL
+#include "ramlog.h"
+#include "erts.sig"
+
+#include "fcntl.h"
+#include "math.h"
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+#include "sys/param.h"
+#include "sys/time.h"
+#include "time.h"
+#include "dirent.h"
+#include "ethread.h"
+
+/* FIXME: configuration options */
+#define ERTS_SCHED_MIN_SPIN 1
+#define ERTS_SCHED_ONLY_POLL_SCHED_1 1
+#define ERTS_SCHED_FAIR 1
+#define NO_SYSCONF 1
+#define OPEN_MAX FOPEN_MAX
+
+#define MAP_ANON MAP_ANONYMOUS
+
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+
+#if HAVE_MMAP
+# include "sys/mman.h"
+#endif
+
+/*
+ * Min number of async threads
+ */
+#define ERTS_MIN_NO_OF_ASYNC_THREADS 1
+
+/*
+ * Our own type of "FD's"
+ */
+#define ERTS_SYS_FD_TYPE struct erts_sys_fd_type*
+#define NO_FSTAT_ON_SYS_FD_TYPE 1 /* They are signals, not files */
+
+#include "sys/stat.h"
+
+/* FIXME mremap is not defined in OSE - POSIX issue */
+extern void *mremap (void *__addr, size_t __old_len, size_t __new_len,
+ int __flags, ...);
+
+/* FIXME: mremap constants */
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+typedef void *GETENV_STATE;
+
+/*
+** For the erl_timer_sup module.
+*/
+#define HAVE_GETHRTIME
+
+typedef long long SysHrTime;
+extern SysHrTime sys_gethrtime(void);
+
+void sys_init_hrtime(void);
+
+typedef time_t erts_time_t;
+
+typedef struct timeval SysTimeval;
+
+#define sys_gettimeofday(Arg) ((void) gettimeofday((Arg), NULL))
+
+typedef struct {
+ clock_t tms_utime;
+ clock_t tms_stime;
+ clock_t tms_cutime;
+ clock_t tms_cstime;
+} SysTimes;
+
+extern int erts_ticks_per_sec;
+
+#define SYS_CLK_TCK (erts_ticks_per_sec)
+
+extern clock_t sys_times(SysTimes *buffer);
+
+/* No use in having other resolutions than 1 ms. */
+#define SYS_CLOCK_RESOLUTION 1
+
+#ifdef NO_FPE_SIGNALS
+
+#define erts_get_current_fp_exception() NULL
+#ifdef ERTS_SMP
+#define erts_thread_init_fp_exception() do{}while(0)
+#endif
+# define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
+# define __ERTS_FP_ERROR(fpexnp, f, Action) if (!finite(f)) { Action; } else {}
+# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
+# define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
+# define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
+
+#define erts_sys_block_fpe() 0
+#define erts_sys_unblock_fpe(x) do{}while(0)
+
+#else /* !NO_FPE_SIGNALS */
+
+extern volatile unsigned long *erts_get_current_fp_exception(void);
+#ifdef ERTS_SMP
+extern void erts_thread_init_fp_exception(void);
+#endif
+# if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("fwait" : "=m"(*(fpexnp)) : "m"(f))
+# elif (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "fm"(f))
+# elif defined(__sparc__) && defined(__linux__) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "em"(f))
+# else
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "g"(f))
+# endif
+# if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+ extern void erts_restore_fpu(void);
+# else
+# define erts_restore_fpu() /*empty*/
+# endif
+# if (!defined(__GNUC__) || \
+ (__GNUC__ < 2) || \
+ (__GNUC__ == 2 && __GNUC_MINOR < 96)) && \
+ !defined(__builtin_expect)
+# define __builtin_expect(x, expected_value) (x)
+# endif
+static __inline__ int erts_check_fpe(volatile unsigned long *fp_exception, double f)
+{
+ erts_fwait(fp_exception, f);
+ if (__builtin_expect(*fp_exception == 0, 1))
+ return 0;
+ *fp_exception = 0;
+ erts_restore_fpu();
+ return 1;
+}
+# undef erts_fwait
+# undef erts_restore_fpu
+extern void erts_fp_check_init_error(volatile unsigned long *fp_exception);
+static __inline__ void __ERTS_FP_CHECK_INIT(volatile unsigned long *fp_exception)
+{
+ if (__builtin_expect(*fp_exception == 0, 1))
+ return;
+ erts_fp_check_init_error(fp_exception);
+}
+# define __ERTS_FP_ERROR(fpexnp, f, Action) do { if (erts_check_fpe((fpexnp),(f))) { Action; } } while (0)
+# define __ERTS_SAVE_FP_EXCEPTION(fpexnp) unsigned long old_erl_fp_exception = *(fpexnp)
+# define __ERTS_RESTORE_FP_EXCEPTION(fpexnp) \
+ do { *(fpexnp) = old_erl_fp_exception; } while (0)
+ /* This is for library calls where we don't trust the external
+ code to always throw floating-point exceptions on errors. */
+static __inline__ int erts_check_fpe_thorough(volatile unsigned long *fp_exception, double f)
+{
+ return erts_check_fpe(fp_exception, f) || !finite(f);
+}
+# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) \
+ do { if (erts_check_fpe_thorough((fpexnp),(f))) { Action; } } while (0)
+
+int erts_sys_block_fpe(void);
+void erts_sys_unblock_fpe(int);
+
+#endif /* !NO_FPE_SIGNALS */
+
+#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception)
+#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A)
+#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A)
+
+/* FIXME: force HAVE_GETPAGESIZE and stub getpagesize */
+#ifndef HAVE_GETPAGESIZE
+#define HAVE_GETPAGESIZE 1
+#endif
+
+extern int getpagesize(void);
+
+#ifndef HZ
+#define HZ 60
+#endif
+
+/* OSE5 doesn't provide limits.h, so a number of macros have to be
+ * defined manually */
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#ifndef INT_MAX
+#define INT_MAX 2147483647
+#endif
+
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+
+#ifndef UINT_MAX
+# define UINT_MAX 4294967295U
+#endif
+
+/*
+static void erts_ose_sys_send(union SIGNAL **signal,PROCESS dst,
+ char* file,int line) {
+ SIGSELECT **ziggy = (SIGSELECT**)signal;
+ printf("%s:%d 0x%x Send signal 0x%x(0x%x) to 0x%x\r\n",
+ file,line,current_process(),ziggy[0][0],*ziggy,dst);
+ send(signal,dst);
+}
+#define send(signal,dst) erts_ose_sys_send(signal,dst,__FILE__,__LINE__)
+
+static void erts_ose_sys_send_w_sender(union SIGNAL **signal,
+ PROCESS sender,PROCESS dst,
+ char* file,int line) {
+ SIGSELECT **ziggy = (SIGSELECT**)signal;
+ printf("%s:%d 0x%x Send signal 0x%x(0x%x) to 0x%x as 0x%x\r\n",
+ file,line,current_process(),ziggy[0][0],*ziggy,dst,sender);
+ send_w_sender(signal,sender,dst);
+}
+#define send_w_sender(signal,sender,dst) \
+ erts_ose_sys_send_w_sender(signal,sender,dst,__FILE__,__LINE__)
+
+
+static union SIGNAL *erts_ose_sys_receive(SIGSELECT *sigsel,
+ char *file,
+ int line) {
+ SIGSELECT *sig;
+ int i;
+
+ printf("%s:%d 0x%x receive({%d,",file,line,current_process(),sigsel[0]);
+ for (i = 1; i < sigsel[0]; i++)
+ printf("0x%x, ",sigsel[i]);
+ if (sigsel[0] != 0)
+ printf("0x%x",sigsel[i]);
+ printf("})\n");
+ sig = (SIGSELECT*)receive(sigsel);
+ printf("%s:%d 0x%x got 0x%x from 0x%x\n",file,line,current_process(),
+ *sig,sender((union SIGNAL**)(&sig)));
+ return (union SIGNAL*)sig;
+}
+#define receive(SIGSEL) erts_ose_sys_receive(SIGSEL,__FILE__,__LINE__)
+
+static union SIGNAL *erts_ose_sys_receive_w_tmo(OSTIME tmo,SIGSELECT *sigsel,
+ char *file,int line) {
+ SIGSELECT *sig;
+ int i;
+ if (tmo == 0) {
+ sig = (SIGSELECT*)receive_w_tmo(tmo,sigsel);
+ if (sig != NULL) {
+ printf("%s:%d 0x%x receive_w_tmo(0,{%d,",file,line,current_process(),
+ sigsel[0]);
+ for (i = 1; i < sigsel[0]; i++)
+ printf("0x%x, ",sigsel[i]);
+ if (sigsel[0] != 0)
+ printf("0x%x",sigsel[i]);
+ printf("})\n");
+ printf("%s:%d 0x%x got 0x%x from 0x%x\n",file,line,current_process(),
+ *sig,sender((union SIGNAL**)(&sig)));
+ }
+ } else {
+ printf("%s:%d 0x%x receive_w_tmo(%u,{%d,",file,line,current_process(),tmo,
+ sigsel[0]);
+ for (i = 1; i < sigsel[0]; i++)
+ printf("0x%x, ",sigsel[i]);
+ if (sigsel[0] != 0)
+ printf("0x%x",sigsel[i]);
+ printf("})\n");
+ sig = (SIGSELECT*)receive_w_tmo(tmo,sigsel);
+ printf("%s:%d 0x%x got ",file,line,current_process());
+ if (sig == NULL)
+ printf("TIMEOUT\n");
+ else
+ printf("0x%x from 0x%x\n",*sig,sender((union SIGNAL**)(&sig)));
+ }
+
+ return (union SIGNAL*)sig;
+}
+
+#define receive_w_tmo(tmo,sigsel) erts_ose_sys_receive_w_tmo(tmo,sigsel, \
+ __FILE__,__LINE__)
+
+static union SIGNAL *erts_ose_sys_receive_fsem(OSTIME tmo,SIGSELECT *sigsel,
+ OSFSEMVAL fsem,
+ char *file,int line) {
+ SIGSELECT *sig;
+ int i;
+ if (tmo == 0) {
+ sig = (SIGSELECT*)receive_fsem(tmo,sigsel,fsem);
+ if (sig != NULL && sig != OS_RCV_FSEM) {
+ printf("%s:%d 0x%x receive_fsem(0,{%d,",file,line,current_process(),
+ sigsel[0]);
+ for (i = 1; i < sigsel[0]; i++)
+ printf("0x%x, ",sigsel[i]);
+ if (sigsel[0] != 0)
+ printf("0x%x",sigsel[i]);
+ printf("},%d)\n",fsem);
+ printf("%s:%d 0x%x got 0x%x from 0x%x\n",file,line,current_process(),
+ *sig,sender((union SIGNAL**)(&sig)));
+ }
+ } else {
+ printf("%s:%d 0x%x receive_fsem(%u,{%d,",file,line,current_process(),tmo,
+ sigsel[0]);
+ for (i = 1; i < sigsel[0]; i++)
+ printf("0x%x, ",sigsel[i]);
+ if (sigsel[0] != 0)
+ printf("0x%x",sigsel[i]);
+ printf("},%d)\n",fsem);
+ sig = (SIGSELECT*)receive_fsem(tmo,sigsel,fsem);
+ printf("%s:%d 0x%x got ",file,line,current_process());
+ if (sig == NULL)
+ printf("TIMEOUT\n");
+ else if (sig == OS_RCV_FSEM)
+ printf("FSEM\n");
+ else
+ printf("0x%x from 0x%x\n",*sig,sender((union SIGNAL**)(&sig)));
+ }
+
+ return (union SIGNAL*)sig;
+}
+
+#define receive_fsem(tmo,sigsel,fsem) \
+ erts_ose_sys_receive_fsem(tmo,sigsel,fsem,__FILE__,__LINE__)
+*/
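+
+/* The block above contains debug wrappers that trace send(), send_w_sender(),
+ * receive(), receive_w_tmo() and receive_fsem() calls via printf; remove the
+ * surrounding comment markers to enable the tracing. */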
+#endif /* _ERL_OSE_SYS_H */
diff --git a/erts/emulator/sys/ose/erl_ose_sys_ddll.c b/erts/emulator/sys/ose/erl_ose_sys_ddll.c
new file mode 100644
index 0000000000..ebd80deeaf
--- /dev/null
+++ b/erts/emulator/sys/ose/erl_ose_sys_ddll.c
@@ -0,0 +1,126 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Interface functions to the dynamic linker using dl* functions.
+ * (No support in OSE, we use static linkage instead)
+ */
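+
+/* All the open/sym/close entry points below just return
+ * ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY, so dynamically loaded drivers and NIFs
+ * are not available on OSE. */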
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+
+
+void erl_sys_ddll_init(void) {
+}
+
+/*
+ * Open a shared object
+ */
+int erts_sys_ddll_open(const char *full_name, void **handle, ErtsSysDdllError* err)
+{
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+}
+
+int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
+{
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+}
+
+/*
+ * Find a symbol in the shared object
+ */
+int erts_sys_ddll_sym2(void *handle, const char *func_name, void **function,
+ ErtsSysDdllError* err)
+{
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+}
+
+/* XXX:PaN These two will be changed with new driver interface! */
+
+/*
+ * Load the driver init function, might appear under different names depending on object arch...
+ */
+
+int erts_sys_ddll_load_driver_init(void *handle, void **function)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym2(handle, "driver_init", &fn, NULL)) != ERL_DE_NO_ERROR) {
+ res = erts_sys_ddll_sym2(handle, "_driver_init", &fn, NULL);
+ }
+ if (res == ERL_DE_NO_ERROR) {
+ *function = fn;
+ }
+ return res;
+}
+
+int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym2(handle, "nif_init", &fn, err)) != ERL_DE_NO_ERROR) {
+ res = erts_sys_ddll_sym2(handle, "_nif_init", &fn, err);
+ }
+ if (res == ERL_DE_NO_ERROR) {
+ *function = fn;
+ }
+ return res;
+}
+
+/*
+ * Call the driver_init function, whatever it is really called.
+*/
+void *erts_sys_ddll_call_init(void *function) {
+ void *(*initfn)(void) = function;
+ return (*initfn)();
+}
+void *erts_sys_ddll_call_nif_init(void *function) {
+ return erts_sys_ddll_call_init(function);
+}
+
+
+
+/*
+ * Close a shared object
+ */
+int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err)
+{
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+}
+
+
+/*
+ * Return string that describes the (current) error
+ */
+char *erts_sys_ddll_error(int code)
+{
+ return "Unspecified error";
+}
+
+void erts_sys_ddll_free_error(ErtsSysDdllError* err)
+{
+ if (err->str != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, err->str);
+ }
+}
diff --git a/erts/emulator/sys/ose/erl_poll.c b/erts/emulator/sys/ose/erl_poll.c
new file mode 100644
index 0000000000..ca1ed6e53a
--- /dev/null
+++ b/erts/emulator/sys/ose/erl_poll.c
@@ -0,0 +1,774 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Poll interface suitable for ERTS on OSE with or without
+ * SMP support.
+ *
+ * The interface is currently implemented using:
+ * - receive + receive_fsem
+ *
+ * Author: Lukas Larsson
+ */
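+
+/*
+ * Rough outline (derived from the code below):
+ *  - erts_poll_control() registers an fd (an OSE signal number plus an id)
+ *    and records which poll events it maps to.
+ *  - update_sigsel() rebuilds the SIGSELECT array that the waiting scheduler
+ *    passes to receive_fsem()/receive_w_tmo().
+ *  - erts_poll_wait() blocks in receive_fsem(); each received signal is
+ *    decoded back to an fd via the registered resolve_signal callback and
+ *    queued on that fd until the driver fetches it with
+ *    erl_drv_ose_get_signal().
+ *  - erts_poll_interrupt() wakes a sleeping scheduler through its fast
+ *    semaphore (signal_fsem()).
+ */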
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_thr_progress.h"
+#include "erl_driver.h"
+#include "erl_alloc.h"
+#include "erl_poll.h"
+
+#define NOFILE 4096
+
+/*
+ * Some debug macros
+ */
+
+/* #define HARDDEBUG
+#define HARDTRACE*/
+#ifdef HARDDEBUG
+#ifdef HARDTRACE
+#define HARDTRACEF(X, ...) { fprintf(stderr, X, __VA_ARGS__); fprintf(stderr,"\r\n"); }
+#else
+#define HARDTRACEF(...)
+#endif
+
+#else
+#define HARDTRACEF(X,...)
+#define HARDDEBUGF(...)
+#endif
+
+#if 0
+#define ERTS_POLL_DEBUG_PRINT
+#endif
+
+#if defined(DEBUG) && 0
+#define HARD_DEBUG
+#endif
+
+# define SEL_ALLOC erts_alloc
+# define SEL_REALLOC realloc_wrap
+# define SEL_FREE erts_free
+
+#ifdef ERTS_SMP
+
+#define ERTS_POLLSET_LOCK(PS) \
+ erts_smp_mtx_lock(&(PS)->mtx)
+#define ERTS_POLLSET_UNLOCK(PS) \
+ erts_smp_mtx_unlock(&(PS)->mtx)
+
+#else
+
+#define ERTS_POLLSET_LOCK(PS)
+#define ERTS_POLLSET_UNLOCK(PS)
+
+#endif
+
+/*
+ * --- Data types ------------------------------------------------------------
+ */
+
+union SIGNAL {
+ SIGSELECT sig_no;
+};
+
+typedef struct erts_sigsel_item_ ErtsSigSelItem;
+
+struct erts_sigsel_item_ {
+ ErtsSigSelItem *next;
+ ErtsSysFdType fd;
+ ErtsPollEvents events;
+};
+
+typedef struct erts_sigsel_info_ ErtsSigSelInfo;
+
+struct erts_sigsel_info_ {
+ ErtsSigSelInfo *next;
+ SIGSELECT signo;
+ ErlDrvOseEventId (*decode)(union SIGNAL* sig);
+ ErtsSigSelItem *fds;
+};
+
+struct ErtsPollSet_ {
+ SIGSELECT *sigs;
+ ErtsSigSelInfo *info;
+ Uint sig_count;
+ Uint item_count;
+ PROCESS interrupt;
+ erts_atomic32_t wakeup_state;
+ erts_smp_atomic32_t timeout;
+#ifdef ERTS_SMP
+ erts_smp_mtx_t mtx;
+#endif
+};
+
+static int max_fds = -1;
+
+#define ERTS_POLL_NOT_WOKEN ((erts_aint32_t) (1 << 0))
+#define ERTS_POLL_WOKEN_INTR ((erts_aint32_t) (1 << 1))
+#define ERTS_POLL_WOKEN_TIMEDOUT ((erts_aint32_t) (1 << 2))
+#define ERTS_POLL_WOKEN_IO_READY ((erts_aint32_t) (1 << 3))
+#define ERTS_POLL_SLEEPING ((erts_aint32_t) (1 << 4))
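+
+/* wakeup_state starts out as ERTS_POLL_NOT_WOKEN; the waiting scheduler sets
+ * ERTS_POLL_SLEEPING before blocking in receive_fsem(), wake_poller() moves
+ * it to ERTS_POLL_WOKEN_INTR, and erts_poll_wait() itself records
+ * ERTS_POLL_WOKEN_IO_READY or ERTS_POLL_WOKEN_TIMEDOUT before returning. */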
+
+/* signal list prototypes */
+static ErtsSigSelInfo *get_sigsel_info(ErtsPollSet ps, SIGSELECT signo);
+static ErtsSigSelItem *get_sigsel_item(ErtsPollSet ps, ErtsSysFdType fd);
+static ErtsSigSelInfo *add_sigsel_info(ErtsPollSet ps, ErtsSysFdType fd,
+ ErlDrvOseEventId (*decode)(union SIGNAL* sig));
+static ErtsSigSelItem *add_sigsel_item(ErtsPollSet ps, ErtsSysFdType fd,
+ ErlDrvOseEventId (*decode)(union SIGNAL* sig));
+static int del_sigsel_info(ErtsPollSet ps, ErtsSigSelInfo *info);
+static int del_sigsel_item(ErtsPollSet ps, ErtsSigSelItem *item);
+static int update_sigsel(ErtsPollSet ps);
+
+static ErtsSigSelInfo *
+get_sigsel_info(ErtsPollSet ps, SIGSELECT signo) {
+ ErtsSigSelInfo *curr = ps->info;
+ while (curr != NULL) {
+ if (curr->signo == signo)
+ return curr;
+ curr = curr->next;
+ }
+ return NULL;
+}
+
+static ErtsSigSelItem *
+get_sigsel_item(ErtsPollSet ps, ErtsSysFdType fd) {
+ ErtsSigSelInfo *info = get_sigsel_info(ps,fd->signo);
+ ErtsSigSelItem *curr;
+
+ if (info == NULL)
+ return NULL;
+
+ curr = info->fds;
+
+ while (curr != NULL) {
+ if (curr->fd->id == fd->id) {
+ ASSERT(curr->fd->signo == fd->signo);
+ return curr;
+ }
+ curr = curr->next;
+ }
+ return NULL;
+}
+
+static ErtsSigSelInfo *
+add_sigsel_info(ErtsPollSet ps, ErtsSysFdType fd,
+ ErlDrvOseEventId (*decode)(union SIGNAL* sig)) {
+ ErtsSigSelInfo *info = SEL_ALLOC(ERTS_ALC_T_POLLSET,
+ sizeof(ErtsSigSelInfo));
+ info->next = ps->info;
+ info->fds = NULL;
+ info->signo = fd->signo;
+ info->decode = decode;
+ ps->info = info;
+ ps->sig_count++;
+ return info;
+}
+
+static ErtsSigSelItem *
+add_sigsel_item(ErtsPollSet ps, ErtsSysFdType fd,
+ ErlDrvOseEventId (*decode)(union SIGNAL* sig)) {
+ ErtsSigSelInfo *info = get_sigsel_info(ps,fd->signo);
+ ErtsSigSelItem *item = SEL_ALLOC(ERTS_ALC_T_POLLSET,
+ sizeof(ErtsSigSelItem));
+ if (info == NULL)
+ info = add_sigsel_info(ps, fd, decode);
+ if (info->decode != decode) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "erts_poll_control() inconsistency: multiple resolve_signal functions for same signal (%d)\n",
+ fd->signo);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ ASSERT(info->decode == decode);
+ item->next = info->fds;
+ item->fd = fd;
+ item->events = 0;
+ info->fds = item;
+ ps->item_count++;
+ return item;
+}
+
+static int del_sigsel_info(ErtsPollSet ps, ErtsSigSelInfo *info) {
+ ErtsSigSelInfo *curr, *prev;
+
+ if (ps->info == info) {
+ ps->info = ps->info->next;
+ } else {
+ curr = ps->info->next;
+ prev = ps->info;
+
+ while (curr != info) {
+ if (curr == NULL)
+ return 1;
+ prev = curr;
+ curr = curr->next;
+ }
+ prev->next = curr->next;
+ }
+
+ ps->sig_count--;
+ SEL_FREE(ERTS_ALC_T_POLLSET, info);
+ return 0;
+}
+
+static int del_sigsel_item(ErtsPollSet ps, ErtsSigSelItem *item) {
+ ErtsSigSelInfo *info = get_sigsel_info(ps,item->fd->signo);
+ ErtsSigSelItem *curr, *prev;
+
+ ps->item_count--;
+ ASSERT(ps->item_count >= 0);
+
+ if (info->fds == item) {
+ info->fds = info->fds->next;
+ SEL_FREE(ERTS_ALC_T_POLLSET,item);
+ if (info->fds == NULL)
+ return del_sigsel_info(ps,info);
+ return 0;
+ }
+
+ curr = info->fds->next;
+ prev = info->fds;
+
+ while (curr != item) {
+ if (curr == NULL) {
+ /* We did not find an item to delete so we have to
+ * increment item count again.
+ */
+ ps->item_count++;
+ return 1;
+ }
+ prev = curr;
+ curr = curr->next;
+ }
+ prev->next = curr->next;
+ SEL_FREE(ERTS_ALC_T_POLLSET,item);
+ return 0;
+}
+
+#ifdef ERTS_SMP
+
+static void update_redir_tables(ErtsPollSet ps) {
+ struct OS_redir_entry *redir_table;
+ PROCESS sched_1 = ERTS_SCHEDULER_IX(0)->tid.id;
+ int i;
+ redir_table = SEL_ALLOC(ERTS_ALC_T_POLLSET,
+ sizeof(struct OS_redir_entry)*(ps->sig_count+1));
+
+ redir_table[0].sig = ps->sig_count+1;
+ redir_table[0].pid = 0;
+
+ for (i = 1; i < ps->sig_count+1; i++) {
+ redir_table[i].sig = ps->sigs[i];
+ redir_table[i].pid = sched_1;
+ }
+
+ for (i = 1; i < erts_no_schedulers; i++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(i);
+ set_redirection(esdp->tid.id,redir_table);
+ }
+
+ SEL_FREE(ERTS_ALC_T_POLLSET,redir_table);
+}
+
+#endif
+
+static int update_sigsel(ErtsPollSet ps) {
+ ErtsSigSelInfo *info = ps->info;
+
+ int i;
+
+ if (ps->sigs != NULL)
+ SEL_FREE(ERTS_ALC_T_POLLSET,ps->sigs);
+
+ if (ps->sig_count == 0) {
+ /* If there are no signals we place a non-valid signal to make sure that
+     * we do not trigger on any unrelated signals which are sent to the
+ * process.
+ */
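+    /* (The first element of an OSE sigselect array is the number of signal
+     * numbers that follow it; {1, ERTS_SIGNAL_INVALID} thus selects only the
+     * never-sent invalid signal.) */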
+ ps->sigs = SEL_ALLOC(ERTS_ALC_T_POLLSET,sizeof(SIGSELECT)*(2));
+ ps->sigs[0] = 1;
+ ps->sigs[1] = ERTS_SIGNAL_INVALID;
+ return 0;
+ }
+
+ ps->sigs = SEL_ALLOC(ERTS_ALC_T_POLLSET,sizeof(SIGSELECT)*(ps->sig_count+1));
+ ps->sigs[0] = ps->sig_count;
+
+ for (i = 1; info != NULL; i++, info = info->next)
+ ps->sigs[i] = info->signo;
+
+#ifdef ERTS_SMP
+ update_redir_tables(ps);
+#endif
+
+ return 0;
+}
+
+static ERTS_INLINE void
+wake_poller(ErtsPollSet ps)
+{
+ erts_aint32_t wakeup_state;
+
+ ERTS_THR_MEMORY_BARRIER;
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+ while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
+ && wakeup_state != ERTS_POLL_WOKEN_INTR) {
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_INTR,
+ wakeup_state);
+ if (act == wakeup_state) {
+ wakeup_state = act;
+ break;
+ }
+ wakeup_state = act;
+ }
+ if (wakeup_state == ERTS_POLL_SLEEPING) {
+ /*
+ * Since we don't know the internals of signal_fsem() we issue
+ * a memory barrier as a safety precaution ensuring that
+     * the store we just made to wakeup_state won't be reordered
+ * with loads in signal_fsem().
+ */
+ ERTS_THR_MEMORY_BARRIER;
+ signal_fsem(ps->interrupt);
+ }
+}
+
+static ERTS_INLINE void
+reset_interrupt(ErtsPollSet ps)
+{
+ /* We need to keep io-ready if set */
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+ while (wakeup_state != ERTS_POLL_NOT_WOKEN &&
+ wakeup_state != ERTS_POLL_SLEEPING) {
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_NOT_WOKEN,
+ wakeup_state);
+ if (wakeup_state == act)
+ break;
+ wakeup_state = act;
+ }
+ ERTS_THR_MEMORY_BARRIER;
+}
+
+static ERTS_INLINE void
+set_interrupt(ErtsPollSet ps)
+{
+ wake_poller(ps);
+}
+
+void erts_poll_interrupt(ErtsPollSet ps,int set) {
+ HARDTRACEF("erts_poll_interrupt called!\n");
+
+ if (!set)
+ reset_interrupt(ps);
+ else
+ set_interrupt(ps);
+
+}
+
+void erts_poll_interrupt_timed(ErtsPollSet ps,int set,erts_short_time_t msec) {
+ HARDTRACEF("erts_poll_interrupt_timed called!\n");
+
+ if (!set)
+ reset_interrupt(ps);
+ else if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ set_interrupt(ps);
+}
+
+ErtsPollEvents erts_poll_control(ErtsPollSet ps, ErtsSysFdType fd,
+ ErtsPollEvents pe, int on, int* do_wake) {
+ ErtsSigSelItem *curr;
+ ErtsPollEvents new_events;
+ int old_sig_count;
+
+  HARDTRACEF(
+    "%ux: In erts_poll_control, fd = %d, pe = %d, on = %d, *do_wake = %d",
+    ps, fd, pe, on, *do_wake);
+
+ ERTS_POLLSET_LOCK(ps);
+
+ if (on && (pe & ERTS_POLL_EV_IN) && (pe & ERTS_POLL_EV_OUT)) {
+ /* Check to make sure both in and out are not used at the same time */
+ new_events = ERTS_POLL_EV_NVAL;
+ goto done;
+ }
+
+ curr = get_sigsel_item(ps, fd);
+ old_sig_count = ps->sig_count;
+
+ if (curr == NULL && on) {
+ curr = add_sigsel_item(ps, fd, fd->resolve_signal);
+ } else if (curr == NULL && !on) {
+ new_events = ERTS_POLL_EV_NVAL;
+ goto done;
+ }
+
+ new_events = curr->events;
+
+ if (pe == 0) {
+ *do_wake = 0;
+ goto done;
+ }
+
+ if (on) {
+ new_events |= pe;
+ curr->events = new_events;
+ } else {
+ new_events &= ~pe;
+ curr->events = new_events;
+ if (new_events == 0 && del_sigsel_item(ps, curr)) {
+ new_events = ERTS_POLL_EV_NVAL;
+ goto done;
+ }
+ }
+
+ if (ps->sig_count != old_sig_count) {
+ if (update_sigsel(ps))
+ new_events = ERTS_POLL_EV_NVAL;
+ }
+done:
+ ERTS_POLLSET_UNLOCK(ps);
+ HARDTRACEF("%ux: Out erts_poll_control", ps);
+ return new_events;
+}
+
+int erts_poll_wait(ErtsPollSet ps,
+ ErtsPollResFd pr[],
+ int *len,
+ SysTimeval *utvp) {
+ int res = ETIMEDOUT, no_fds, currid = 0;
+ OSTIME timeout;
+ union SIGNAL *sig;
+ // HARDTRACEF("%ux: In erts_poll_wait",ps);
+ if (ps->interrupt == (PROCESS)0)
+ ps->interrupt = current_process();
+
+ ASSERT(current_process() == ps->interrupt);
+ ASSERT(get_fsem(current_process()) == 0);
+ ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) &
+ (ERTS_POLL_NOT_WOKEN | ERTS_POLL_WOKEN_INTR));
+  /* Max number of slots available in pr */
+ no_fds = *len;
+
+ *len = 0;
+
+ ASSERT(utvp);
+
+ /* erts_printf("Entering erts_poll_wait(), timeout=%d\n",
+ (int) utvp->tv_sec*1000 + utvp->tv_usec/1000); */
+
+ timeout = utvp->tv_sec*1000 + utvp->tv_usec/1000;
+
+ if (timeout > ((time_t) ERTS_AINT32_T_MAX))
+ timeout = ERTS_AINT32_T_MAX;
+ erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
+
+ while (currid < no_fds) {
+ if (timeout > 0) {
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_SLEEPING,
+ ERTS_POLL_NOT_WOKEN);
+ if (act == ERTS_POLL_NOT_WOKEN) {
+#ifdef ERTS_SMP
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ sig = receive_fsem(timeout, ps->sigs, 1);
+#ifdef ERTS_SMP
+ erts_thr_progress_finalize_wait(NULL);
+#endif
+ } else {
+ ASSERT(act == ERTS_POLL_WOKEN_INTR);
+ sig = OS_RCV_FSEM;
+ }
+ } else
+ sig = receive_w_tmo(0, ps->sigs);
+
+ if (sig == NULL) {
+ if (timeout > 0) {
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_SLEEPING);
+ if (act == ERTS_POLL_WOKEN_INTR)
+ /* Restore fsem as it was signaled but we got a timeout */
+ wait_fsem(1);
+ } else
+ erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_NOT_WOKEN);
+ break;
+ } else if (sig == OS_RCV_FSEM) {
+ ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) == ERTS_POLL_WOKEN_INTR);
+ break;
+ }
+ {
+ ErtsSigSelInfo *info = get_sigsel_info(ps, sig->sig_no);
+ struct erts_sys_fd_type fd = { sig->sig_no, info->decode(sig) };
+ ErtsSigSelItem *item = get_sigsel_item(ps, &fd);
+
+ ASSERT(sig);
+ if (currid == 0 && timeout > 0) {
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_IO_READY,
+ ERTS_POLL_SLEEPING);
+ if (act == ERTS_POLL_WOKEN_INTR) {
+ /* Restore fsem as it was signaled but we got a msg */
+ wait_fsem(1);
+ act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_IO_READY,
+ ERTS_POLL_WOKEN_INTR);
+ }
+ } else if (currid == 0) {
+ erts_atomic32_set_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_IO_READY);
+ }
+
+ if (item == NULL) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(
+ dsbufp,
+          "erts_poll_wait() failed: found unknown signal id %d (signo %u) "
+ "(curr_proc 0x%x)\n",
+ fd.id, fd.signo, current_process());
+ erts_send_error_to_logger_nogl(dsbufp);
+ timeout = 0;
+ ASSERT(0);
+ } else {
+ int i;
+ struct erts_sys_fd_type *fd = NULL;
+ ErtsPollOseMsgList *tl,*new;
+
+ /* Check if this fd has already been triggered by a previous signal */
+ for (i = 0; i < currid;i++) {
+ if (pr[i].fd == item->fd) {
+ fd = pr[i].fd;
+ pr[i].events |= item->events;
+ break;
+ }
+ }
+
+ /* First time this fd is triggered */
+ if (fd == NULL) {
+ pr[currid].fd = item->fd;
+ pr[currid].events = item->events;
+ fd = item->fd;
+ timeout = 0;
+ currid++;
+ }
+
+        /* Insert new signal in appropriate list */
+ new = erts_alloc(ERTS_ALC_T_FD_SIG_LIST,sizeof(ErtsPollOseMsgList));
+ new->next = NULL;
+ new->data = sig;
+
+ ethr_mutex_lock(&fd->mtx);
+ tl = fd->msgs;
+
+ if (tl == NULL) {
+ fd->msgs = new;
+ } else {
+ while (tl->next != NULL)
+ tl = tl->next;
+ tl->next = new;
+ }
+ ethr_mutex_unlock(&fd->mtx);
+ }
+
+ }
+ }
+
+ {
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+
+ switch (wakeup_state) {
+ case ERTS_POLL_WOKEN_IO_READY:
+ res = 0;
+ break;
+ case ERTS_POLL_WOKEN_INTR:
+ res = EINTR;
+ break;
+ case ERTS_POLL_WOKEN_TIMEDOUT:
+ res = ETIMEDOUT;
+ break;
+ case ERTS_POLL_NOT_WOKEN:
+      /* This only happens when we get an invalid signal */
+ res = EINVAL;
+ break;
+ default:
+ res = 0;
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d: Internal error: Invalid wakeup_state=%d\n",
+ __FILE__, __LINE__, (int) wakeup_state);
+ }
+ }
+
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_smp_atomic32_set_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+
+ *len = currid;
+
+ // HARDTRACEF("%ux: Out erts_poll_wait",ps);
+ return res;
+}
+
+int erts_poll_max_fds(void)
+{
+
+ HARDTRACEF("In/Out erts_poll_max_fds -> %d",max_fds);
+ return max_fds;
+}
+
+void erts_poll_info(ErtsPollSet ps,
+ ErtsPollInfo *pip)
+{
+ Uint size = 0;
+ Uint num_events = 0;
+
+ size += sizeof(struct ErtsPollSet_);
+ size += sizeof(ErtsSigSelInfo)*ps->sig_count;
+ size += sizeof(ErtsSigSelItem)*ps->item_count;
+ size += sizeof(SIGSELECT)*(ps->sig_count+1);
+
+ pip->primary = "receive_fsem";
+
+ pip->fallback = NULL;
+
+ pip->kernel_poll = NULL;
+
+ pip->memory_size = size;
+
+ pip->poll_set_size = num_events;
+
+ pip->fallback_poll_set_size = 0;
+
+ pip->lazy_updates = 0;
+
+ pip->pending_updates = 0;
+
+ pip->batch_updates = 0;
+
+ pip->concurrent_updates = 0;
+
+
+ pip->max_fds = erts_poll_max_fds();
+ HARDTRACEF("%ux: Out erts_poll_info",ps);
+
+}
+
+ErtsPollSet erts_poll_create_pollset(void)
+{
+ ErtsPollSet ps = SEL_ALLOC(ERTS_ALC_T_POLLSET,
+ sizeof(struct ErtsPollSet_));
+
+ ps->sigs = NULL;
+ ps->sig_count = 0;
+ ps->item_count = 0;
+ ps->info = NULL;
+ ps->interrupt = (PROCESS)0;
+ erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
+#ifdef ERTS_SMP
+ erts_smp_mtx_init(&ps->mtx, "pollset");
+#endif
+ update_sigsel(ps);
+ HARDTRACEF("%ux: Out erts_poll_create_pollset",ps);
+ return ps;
+}
+
+void erts_poll_destroy_pollset(ErtsPollSet ps)
+{
+ ErtsSigSelInfo *info;
+ for (info = ps->info; ps->info != NULL; info = ps->info, ps->info = ps->info->next) {
+ ErtsSigSelItem *item;
+ for (item = info->fds; info->fds != NULL; item = info->fds, info->fds = info->fds->next)
+ SEL_FREE(ERTS_ALC_T_POLLSET, item);
+ SEL_FREE(ERTS_ALC_T_POLLSET, info);
+ }
+
+ SEL_FREE(ERTS_ALC_T_POLLSET,ps->sigs);
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_destroy(&ps->mtx);
+#endif
+
+ SEL_FREE(ERTS_ALC_T_POLLSET,ps);
+}
+
+void erts_poll_init(void)
+{
+ HARDTRACEF("In %s", __FUNCTION__);
+ max_fds = 256;
+
+ HARDTRACEF("Out %s", __FUNCTION__);
+}
+
+
+/* OSE driver functions */
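+/*
+ * Driver-facing helpers: a driver allocates an event with
+ * erl_drv_ose_event_alloc() (signal number, id and a resolve_signal
+ * callback), registers it with driver_select() and then drains the signals
+ * queued on it with erl_drv_ose_get_signal().
+ */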
+
+union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent drv_ev) {
+ struct erts_sys_fd_type *ev = (struct erts_sys_fd_type *)drv_ev;
+ ethr_mutex_lock(&ev->mtx);
+ if (ev->msgs == NULL) {
+ ethr_mutex_unlock(&ev->mtx);
+ return NULL;
+ } else {
+ ErtsPollOseMsgList *msg = ev->msgs;
+ union SIGNAL *sig = (union SIGNAL*)msg->data;
+ ASSERT(msg->data);
+ ev->msgs = msg->next;
+ ethr_mutex_unlock(&ev->mtx);
+ erts_free(ERTS_ALC_T_FD_SIG_LIST,msg);
+ return sig;
+ }
+}
+
+ErlDrvEvent
+erl_drv_ose_event_alloc(SIGSELECT signo, ErlDrvOseEventId id,
+ ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra) {
+ struct erts_sys_fd_type *ev = erts_alloc(ERTS_ALC_T_DRV_EV,
+ sizeof(struct erts_sys_fd_type));
+ ev->signo = signo;
+ ev->extra = extra;
+ ev->id = id;
+ ev->msgs = NULL;
+ ev->resolve_signal = resolve_signal;
+ ethr_mutex_init(&ev->mtx);
+ return (ErlDrvEvent)ev;
+}
+
+void erl_drv_ose_event_free(ErlDrvEvent drv_ev) {
+ struct erts_sys_fd_type *ev = (struct erts_sys_fd_type *)drv_ev;
+ ASSERT(ev->msgs == NULL);
+ ethr_mutex_destroy(&ev->mtx);
+ erts_free(ERTS_ALC_T_DRV_EV,ev);
+}
+
+void erl_drv_ose_event_fetch(ErlDrvEvent drv_ev, SIGSELECT *signo,
+ ErlDrvOseEventId *id, void **extra) {
+ struct erts_sys_fd_type *ev = (struct erts_sys_fd_type *)drv_ev;
+ if (signo)
+ *signo = ev->signo;
+ if (extra)
+ *extra = ev->extra;
+ if (id)
+ *id = ev->id;
+}
diff --git a/erts/emulator/sys/ose/erts.sig b/erts/emulator/sys/ose/erts.sig
new file mode 100644
index 0000000000..78b883ee6c
--- /dev/null
+++ b/erts/emulator/sys/ose/erts.sig
@@ -0,0 +1,17 @@
+#ifndef ERTS_OSE_SIGNALS
+#define ERTS_OSE_SIGNALS
+
+#ifndef ERTS_OSE_SIGNAL_BASE
+#define ERTS_OSE_SIGNAL_BASE 0x01900280
+#endif
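+
+/* The base can be overridden at build time, e.g. if this range would collide
+ * with signal numbers used elsewhere in the system. */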
+
+#define ERTS_SIGNAL_INVALID ERTS_OSE_SIGNAL_BASE
+#define ERTS_SIGNAL_FD_DRV_CONFIG ERTS_OSE_SIGNAL_BASE+1
+#define ERTS_SIGNAL_FD_DRV_ASYNC ERTS_OSE_SIGNAL_BASE+2
+#define ERTS_SIGNAL_OSE_DRV_ATTACH ERTS_OSE_SIGNAL_BASE+3
+#define ERTS_SIGNAL_OSE_DRV_HUNT ERTS_OSE_SIGNAL_BASE+4
+
+#define ERTS_SIGNAL_RUN_ERL_SETUP ERTS_OSE_SIGNAL_BASE+100
+#define ERTS_SIGNAL_RUN_ERL_DAEMON ERTS_OSE_SIGNAL_BASE+101
+
+#endif
diff --git a/erts/emulator/sys/ose/gcc_4.4.3_lm_ppc.lcf b/erts/emulator/sys/ose/gcc_4.4.3_lm_ppc.lcf
new file mode 100644
index 0000000000..a19d23facf
--- /dev/null
+++ b/erts/emulator/sys/ose/gcc_4.4.3_lm_ppc.lcf
@@ -0,0 +1,182 @@
+/*******************************************************************************
+ * Copyright (C) 2013-2014 by Enea Software AB,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+
+OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+OUTPUT_ARCH("powerpc")
+ENTRY("crt0_lm")
+MEMORY
+{
+ rom : ORIGIN = 0x01000000, LENGTH = 0x01000000
+ ram : ORIGIN = 0x02000000, LENGTH = 0x01000000
+}
+PHDRS
+{
+ ph_conf PT_LOAD ;
+ ph_rom PT_LOAD ;
+ ph_ram PT_LOAD ;
+}
+SECTIONS
+{
+ .text :
+ {
+ *(.text_first)
+ *(.text)
+ *(.text.*)
+ *(.stub)
+ *(oscode)
+ *(.init*)
+ *(.fini*)
+ *(.gnu.warning)
+ *(.gnu.linkonce.t.*)
+ *(.glue_7t)
+ *(.glue_7)
+ } > rom :ph_rom = 0
+ .ose_sfk_biosentry :
+ {
+ *(.ose_sfk_biosentry)
+ } > rom :ph_rom
+ .ctors :
+ {
+ __CTOR_LIST__ = .;
+ *(.ctors)
+ *(SORT(.ctors.*))
+ __CTOR_END__ = .;
+ } > rom :ph_rom
+ .dtors :
+ {
+ __DTOR_LIST__ = .;
+ *(.dtors)
+ *(SORT(.dtors.*))
+ __DTOR_END__ = .;
+ } > rom :ph_rom
+ OSESYMS :
+ {
+ *(.osesyms)
+ } > rom :ph_rom
+ .rodata :
+ {
+ *(.rodata)
+ *(.rodata.*)
+ *(.gnu.linkonce.r.*)
+ } > rom :ph_rom
+ .eh_frame_hdr :
+ {
+ *(.eh_frame_hdr)
+ } > rom :ph_rom
+ .eh_frame :
+ {
+ __EH_FRAME_BEGIN__ = .;
+ *(.eh_frame)
+ LONG(0)
+ __EH_FRAME_END__ = .;
+ } > rom :ph_rom
+ .gcc_except_table :
+ {
+ *(.gcc_except_table .gcc_except_table.*)
+ } > rom :ph_rom
+ .sdata2 :
+ {
+ PROVIDE (_SDA2_BASE_ = .);
+ *(.sdata2)
+ *(.sdata2.*)
+ *(.gnu.linkonce.s2.*)
+ } > rom :ph_rom
+ .sbss2 :
+ {
+ *(.sbss2)
+ *(.sbss2.*)
+ *(.gnu.linkonce.sb2.*)
+ } > rom :ph_rom
+ LMCONF :
+ {
+ obj/?*?/ose_confd.o(.rodata)
+ *(LMCONF)
+ } > rom :ph_conf
+ .data :
+ {
+ LONG(0xDEADBABE)
+ *(.data)
+ *(.data.*)
+ *(.gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ . = ALIGN(0x10);
+ } > ram :ph_ram = 0
+ .sdata2 :
+ {
+ _SDA2_BASE_ = .;
+ *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+ }> ram :ph_ram
+ .sdata :
+ {
+ PROVIDE (_SDA_BASE_ = .);
+ *(.sdata)
+ *(.sdata.*)
+ *(.gnu.linkonce.s.*)
+ } > ram :ph_ram
+ .sbss :
+ {
+ *(.sbss)
+ *(.sbss.*)
+ *(.scommon)
+ *(.gnu.linkonce.sb.*)
+ } > ram :ph_ram
+ .bss (NOLOAD) :
+ {
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ *(.gnu.linkonce.b.*)
+ *(.osvars)
+ } > ram :ph_ram
+ .ignore (NOLOAD) :
+ {
+ *(.rel.dyn)
+ } > ram :ph_ram
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) *(.gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+}
+__OSESYMS_START = ADDR(OSESYMS);
+__OSESYMS_END = ADDR(OSESYMS) + SIZEOF(OSESYMS);
diff --git a/erts/emulator/sys/ose/gcc_4.6.3_lm_ppc.lcf b/erts/emulator/sys/ose/gcc_4.6.3_lm_ppc.lcf
new file mode 100644
index 0000000000..3440c2961b
--- /dev/null
+++ b/erts/emulator/sys/ose/gcc_4.6.3_lm_ppc.lcf
@@ -0,0 +1,242 @@
+/*******************************************************************************
+ * Copyright (C) 2013-2014 by Enea Software AB,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+
+OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+OUTPUT_ARCH("powerpc")
+
+ENTRY("crt0_lm")
+
+/* Note:
+ * You may have to increase the length of the "rom" memory region and the
+ * origin and length of the "ram" memory region below depending on the size
+ * of the code and data in your load module.
+ */
+
+MEMORY
+{
+ conf : ORIGIN = 0x00100000, LENGTH = 0x00030000
+ rom : ORIGIN = 0x01000000, LENGTH = 0x01000000
+ ram : ORIGIN = 0x03000000, LENGTH = 0x01000000
+}
+
+PHDRS
+{
+ ph_conf PT_LOAD ;
+ ph_rom PT_LOAD ;
+ ph_ram PT_LOAD ;
+}
+
+SECTIONS
+{
+/*---------------------------------------------------------------------------
+ * Load module configuration area
+ *-------------------------------------------------------------------------*/
+
+ /* Load module configuration section. */
+ LMCONF :
+ {
+ obj/?*?/ose_confd.o(.rodata)
+ *(LMCONF)
+ } > conf :ph_conf
+
+/*---------------------------------------------------------------------------
+ * Read-only area
+ *-------------------------------------------------------------------------*/
+
+ /* Code section. */
+ .text :
+ {
+ *(.text)
+ *(.text.*)
+ *(.stub)
+ *(oscode)
+ *(.init*)
+ *(.fini*)
+ *(.gnu.warning)
+ *(.gnu.linkonce.t.*)
+ } > rom :ph_rom = 0
+
+ /* OSE symbols section. */
+ OSESYMS :
+ {
+ *(.osesyms)
+ } > rom :ph_rom
+
+ /* Read-only data section. */
+ .rodata :
+ {
+ *(.rodata)
+ *(.rodata.*)
+ *(.gnu.linkonce.r.*)
+ } > rom :ph_rom
+
+ /* C++ exception handling section. */
+ .eh_frame :
+ {
+ __EH_FRAME_BEGIN__ = .;
+ *(.eh_frame)
+ LONG(0)
+ __EH_FRAME_END__ = .;
+ } > rom :ph_rom
+
+ /* C++ exception handling section. */
+ .gcc_except_table :
+ {
+ *(.gcc_except_table .gcc_except_table.*)
+ } > rom :ph_rom
+
+ /* PowerPC EABI initialized read-only data section. */
+ .sdata2 :
+ {
+ PROVIDE (_SDA2_BASE_ = .);
+ *(.sdata2)
+ *(.sdata2.*)
+ *(.gnu.linkonce.s2.*)
+ } > rom :ph_rom
+
+ /* PowerPC EABI uninitialized read-only data section. */
+ .sbss2 :
+ {
+ *(.sbss2)
+ *(.sbss2.*)
+ *(.gnu.linkonce.sb2.*)
+ } > rom :ph_rom
+
+/*---------------------------------------------------------------------------
+ * Read-write area
+ *-------------------------------------------------------------------------*/
+
+ /*-------------------------------------------------------------------
+ * Initialized data (copied by PM)
+ *-----------------------------------------------------------------*/
+
+ /* Data section. */
+ .data :
+ {
+ *(.data)
+ *(.data.*)
+ *(.gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ } > ram :ph_ram
+
+ /* C++ constructor section. */
+ .ctors :
+ {
+ __CTOR_LIST__ = .;
+ *(.ctors)
+ *(SORT(.ctors.*))
+ __CTOR_END__ = .;
+ } > ram :ph_ram
+
+ /* C++ destructor section. */
+ .dtors :
+ {
+ __DTOR_LIST__ = .;
+ *(.dtors)
+ *(SORT(.dtors.*))
+ __DTOR_END__ = .;
+ } > ram :ph_ram
+
+
+ /* Small data section. */
+ .sdata ALIGN(0x10) :
+ {
+ PROVIDE (_SDA_BASE_ = .);
+ *(.sdata)
+ *(.sdata.*)
+ *(.gnu.linkonce.s.*)
+ } > ram :ph_ram
+
+ /*-------------------------------------------------------------------
+ * Uninitialized data (cleared by PM)
+ *-----------------------------------------------------------------*/
+
+ /* Small bss section. */
+ .sbss :
+ {
+ *(.sbss)
+ *(.sbss.*)
+ *(.scommon)
+ *(.gnu.linkonce.sb.*)
+ } > ram :ph_ram
+
+ /* Bss section. */
+ .bss :
+ {
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ *(.gnu.linkonce.b.*)
+ } > ram :ph_ram
+
+/*---------------------------------------------------------------------------
+ * Debug information
+ *-------------------------------------------------------------------------*/
+
+ /*
+ * Stabs debug sections.
+ */
+
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+
+ /*
+ * DWARF debug sections.
+ */
+
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) *(.gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+}
diff --git a/erts/emulator/sys/ose/sys.c b/erts/emulator/sys/ose/sys.c
new file mode 100644
index 0000000000..c892cc69c7
--- /dev/null
+++ b/erts/emulator/sys/ose/sys.c
@@ -0,0 +1,1755 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys/time.h"
+#include "time.h"
+#include "sys/uio.h"
+#include "termios.h"
+#include "ctype.h"
+#include "termios.h"
+
+#ifdef HAVE_FCNTL_H
+#include "fcntl.h"
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include "sys/ioctl.h"
+#endif
+
+#define ERTS_WANT_BREAK_HANDLING
+#define WANT_NONBLOCKING
+#include "sys.h"
+#include "erl_thr_progress.h"
+
+#ifdef USE_THREADS
+#include "erl_threads.h"
+#endif
+
+#include "erl_mseg.h"
+
+#include "unistd.h"
+#include "efs.h"
+#include "erl_printf.h"
+#include "aio.h"
+#include "pm.h"
+#include "fcntl.h"
+
+/* Change the 0 below to 1 to get some ramlog logging */
+#if 0
+#include "ramlog.h"
+#define LOG(output) ramlog_printf output
+#else
+#define LOG(output)
+#endif
+
+extern char **environ;
+static erts_smp_rwmtx_t environ_rwmtx;
+static PROCESS sig_proxy_pid = 0;
+
+#define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
+                      * vector in sock_sendv().
+                      */
+/*
+ * Don't need global.h, but bif_table.h (included by bif.h),
+ * won't compile otherwise
+ */
+#include "global.h"
+#include "bif.h"
+
+#include "erl_sys_driver.h"
+#include "erl_check_io.h"
+#include "erl_cpu_topology.h"
+
+/* The priority for reader/writer processes */
+#define FD_PROC_PRI get_pri(current_process())
+
+typedef struct ErtsSysReportExit_ ErtsSysReportExit;
+struct ErtsSysReportExit_ {
+ ErtsSysReportExit *next;
+ Eterm port;
+ int pid;
+ int ifd;
+ int ofd;
+ ErlDrvEvent attach_event;
+ ErlDrvEvent input_event;
+ ErlDrvEvent output_event;
+};
+
+/* This data is shared by these drivers - initialized by spawn_init() */
+static struct driver_data {
+ ErlDrvPort port_num;
+ int ofd;
+ int ifd;
+ int packet_bytes;
+ ErtsSysReportExit *report_exit;
+ int pid;
+ int alive;
+ int status;
+ ErlDrvEvent input_event;
+ ErlDrvEvent output_event;
+ struct aiocb aiocb;
+ FmHandle handle;
+ char *install_handle;
+} *driver_data; /* indexed by fd */
+
+struct async {
+ SIGSELECT signo;
+ ErlDrvTermData port;
+ ErlDrvTermData proc;
+ PROCESS spid;
+ PROCESS target;
+ Uint32 ref;
+};
+
+static ErtsSysReportExit *report_exit_list;
+static ERTS_INLINE void report_exit_status(ErtsSysReportExit *rep, int status);
+
+extern int driver_interrupt(int, int);
+extern void do_break(void);
+
+extern void erl_sys_args(int*, char**);
+
+/* The following two defs should probably be moved somewhere else */
+
+extern void erts_sys_init_float(void);
+
+extern void erl_crash_dump(char* file, int line, char* fmt, ...);
+
+#define DIR_SEPARATOR_CHAR '/'
+
+#if defined(DEBUG)
+#define ERL_BUILD_TYPE_MARKER ".debug"
+#else /* opt */
+#define ERL_BUILD_TYPE_MARKER
+#endif
+
+#define CHILD_SETUP_PROG_NAME "child_setup" ERL_BUILD_TYPE_MARKER
+
+#ifdef DEBUG
+static int debug_log = 0;
+#endif
+
+#ifdef ERTS_SMP
+static erts_smp_atomic32_t have_prepared_crash_dump;
+#define ERTS_PREPARED_CRASH_DUMP \
+ ((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
+#else
+static volatile int have_prepared_crash_dump;
+#define ERTS_PREPARED_CRASH_DUMP \
+ (have_prepared_crash_dump++)
+#endif
+
+static erts_smp_atomic_t sys_misc_mem_sz;
+
+#if defined(ERTS_SMP)
+erts_mtx_t chld_stat_mtx;
+#endif
+
+#if defined(ERTS_SMP) /* ------------------------------------------------- */
+#define CHLD_STAT_LOCK erts_mtx_lock(&chld_stat_mtx)
+#define CHLD_STAT_UNLOCK erts_mtx_unlock(&chld_stat_mtx)
+
+#else /* ------------------------------------------------------------------- */
+#define CHLD_STAT_LOCK
+#define CHLD_STAT_UNLOCK
+static volatile int children_died;
+#endif
+
+#define SET_AIO(REQ,FD,SIZE,BUFF) \
+ memset(&(REQ),0,sizeof(REQ)); \
+ (REQ).aio_fildes = FD; \
+ (REQ).aio_offset = FM_POSITION_CURRENT; \
+ (REQ).aio_nbytes = SIZE; \
+ (REQ).aio_buf = BUFF; \
+ (REQ).aio_sigevent.sigev_notify = SIGEV_NONE
+
+/* The first sizeof(struct aiocb *) bytes of the write buffer
+ * contain the pointer to the aiocb struct; this needs
+ * to be freed between asynchronous writes.
+ * A write of 0 bytes is ignored. */
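+/* Layout of the allocation handed to aio_write() and later to FREE_AIO():
+ *
+ *   [ struct aiocb * | SIZE+1 bytes of data ]
+ *                    ^-- write_buff / aio_buf points here
+ */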
+#define WRITE_AIO(FD,SIZE,BUFF) do { \
+ if (SIZE > 0) { \
+ struct aiocb *write_req = driver_alloc(sizeof(struct aiocb)); \
+ char *write_buff = driver_alloc((sizeof(char)*SIZE)+1+ \
+ (sizeof(struct aiocb *))); \
+ *(struct aiocb **)write_buff = (struct aiocb *)write_req; \
+ write_buff += sizeof(struct aiocb *); \
+ memcpy(write_buff,BUFF,SIZE+1); \
+ SET_AIO(*write_req,FD,SIZE,write_buff); \
+ aio_write(write_req); \
+ } \
+} while(0)
+
+/* free the write_buffer and write_req
+ * created in the WRITE_AIO() request macro */
+#define FREE_AIO(ptr) do { \
+ struct aiocb *aiocb_ptr; \
+ char *buffer_ptr; \
+ aiocb_ptr = *(struct aiocb **)((ptr)-sizeof(struct aiocb *)); \
+ buffer_ptr = (((char*)ptr)-sizeof(struct aiocb *)); \
+ driver_free(aiocb_ptr); \
+ driver_free(buffer_ptr); \
+} while(0)
+
+/* When we have several schedulers, we need to make sure
+ * that the scheduler issuing aio_dispatch() is the owner of the signal */
+#define DISPATCH_AIO(sig) do { \
+ restore(sig); \
+ aio_dispatch(sig); \
+ } while(0)
+
+
+/* debug print macros */
+#define DEBUG_RES 0
+
+#ifdef DEBUG_RES
+#define DEBUG_CHECK_RES(actual, expected) \
+ do { \
+ if (actual != expected ) { \
+ ramlog_printf("Result check failed" \
+ " got: 0x%08x expected:0x%08x\nat: %s:%d\n", \
+ actual, expected, __FILE__, __LINE__); \
+ abort(); /* This might perhaps be too harsh? */ \
+ } \
+ } while(0)
+#else
+#define DEBUG_CHECK_RES
+#endif
+
+static struct fd_data {
+ char pbuf[4]; /* hold partial packet bytes */
+ int psz; /* size of pbuf */
+ char *buf;
+ char *cpos;
+ int sz;
+ int remain; /* for input on fd */
+} *fd_data; /* indexed by fd */
+
+/********************* General functions ****************************/
+
+/* This is used by both the drivers and general I/O, must be set early */
+static int max_files = -1;
+
+/*
+ * a few variables used by the break handler
+ */
+#ifdef ERTS_SMP
+erts_smp_atomic32_t erts_break_requested;
+#define ERTS_SET_BREAK_REQUESTED \
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
+#define ERTS_UNSET_BREAK_REQUESTED \
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
+#else
+volatile int erts_break_requested = 0;
+#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
+#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
+#endif
+/* set early so the break handler has access to initial mode */
+static struct termios initial_tty_mode;
+static int replace_intr = 0;
+/* assume yes initially, ttsl_init will clear it */
+int using_oldshell = 1;
+static PROCESS get_signal_proxy_pid(void);
+
+static void
+init_check_io(void)
+{
+ erts_init_check_io();
+ max_files = erts_check_io_max_files();
+}
+
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt()
+#else
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1)
+#endif
+#define ERTS_CHK_IO_INTR erts_check_io_interrupt
+#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
+#define ERTS_CHK_IO erts_check_io
+#define ERTS_CHK_IO_SZ erts_check_io_size
+
+
+void
+erts_sys_schedule_interrupt(int set)
+{
+ ERTS_CHK_IO_INTR(set);
+}
+
+#ifdef ERTS_SMP
+void
+erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
+{
+ ERTS_CHK_IO_INTR_TMD(set, msec);
+}
+#endif
+
+Uint
+erts_sys_misc_mem_sz(void)
+{
+ Uint res = ERTS_CHK_IO_SZ();
+ res += erts_smp_atomic_read_mb(&sys_misc_mem_sz);
+ return res;
+}
+
+/*
+ * reset the terminal to the original settings on exit
+ */
+void sys_tty_reset(int exit_code)
+{
+ if (using_oldshell && !replace_intr) {
+ SET_BLOCKING(0);
+ }
+ else if (isatty(0)) {
+ tcsetattr(0,TCSANOW,&initial_tty_mode);
+ }
+}
+
+#ifdef USE_THREADS
+
+typedef struct {
+ int sched_bind_data;
+} erts_thr_create_data_t;
+
+/*
+ * thr_create_prepare() is called in parent thread before thread creation.
+ * Returned value is passed as argument to thr_create_cleanup().
+ */
+static void *
+thr_create_prepare(void)
+{
+ erts_thr_create_data_t *tcdp;
+
+ tcdp = erts_alloc(ERTS_ALC_T_TMP, sizeof(erts_thr_create_data_t));
+
+ tcdp->sched_bind_data = erts_sched_bind_atthrcreate_prepare();
+
+ return (void *) tcdp;
+}
+
+
+/* thr_create_cleanup() is called in parent thread after thread creation. */
+static void
+thr_create_cleanup(void *vtcdp)
+{
+ erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
+
+ erts_sched_bind_atthrcreate_parent(tcdp->sched_bind_data);
+
+ erts_free(ERTS_ALC_T_TMP, tcdp);
+}
+
+static void
+thr_create_prepare_child(void *vtcdp)
+{
+ erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_thread_setup();
+#endif
+
+ erts_sched_bind_atthrcreate_child(tcdp->sched_bind_data);
+}
+
+#endif /* #ifdef USE_THREADS */
+
+void
+erts_sys_pre_init(void)
+{
+ erts_printf_add_cr_to_stdout = 1;
+ erts_printf_add_cr_to_stderr = 1;
+#ifdef USE_THREADS
+ {
+ erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+
+ eid.thread_create_child_func = thr_create_prepare_child;
+ /* Before creation in parent */
+ eid.thread_create_prepare_func = thr_create_prepare;
+ /* After creation in parent */
+	eid.thread_create_parent_func = thr_create_cleanup;
+
+ erts_thr_init(&eid);
+
+ report_exit_list = NULL;
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init();
+#endif
+
+#if defined(ERTS_SMP)
+ erts_mtx_init(&chld_stat_mtx, "child_status");
+#endif
+ }
+#ifdef ERTS_SMP
+ erts_smp_atomic32_init_nob(&erts_break_requested, 0);
+ erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
+#else
+ erts_break_requested = 0;
+ have_prepared_crash_dump = 0;
+#endif
+#if !defined(ERTS_SMP)
+ children_died = 0;
+#endif
+#endif /* USE_THREADS */
+ erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
+}
+
+void
+erl_sys_init(void)
+{
+
+#ifdef USE_SETLINEBUF
+ setlinebuf(stdout);
+#else
+ setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
+#endif
+
+ erts_sys_init_float();
+
+ /* we save this so the break handler can set and reset it properly */
+ /* also so that we can reset on exit (break handler or not) */
+ if (isatty(0)) {
+ tcgetattr(0,&initial_tty_mode);
+ }
+ tzset(); /* Required at least for NetBSD with localtime_r() */
+}
+
+static ERTS_INLINE int
+prepare_crash_dump(int secs)
+{
+#define NUFBUF (3)
+ int i, max;
+ char env[21]; /* enough to hold any 64-bit integer */
+ size_t envsz;
+ /*DeclareTmpHeapNoproc(heap,NUFBUF);*/
+ /*Eterm *hp = heap;*/
+ /*Eterm list = NIL;*/
+ int has_heart = 0;
+
+ UseTmpHeapNoproc(NUFBUF);
+
+ if (ERTS_PREPARED_CRASH_DUMP)
+ return 0; /* We have already been called */
+
+
+ /* Positive secs means an alarm must be set
+ * 0 or negative means no alarm
+ *
+ * Set alarm before we try to write to a port
+ * we don't want to hang on a port write with
+ * no alarm.
+ *
+ */
+
+#if 0 /*ose TBD!!!*/
+ if (secs >= 0) {
+ alarm((unsigned int)secs);
+ }
+#endif
+
+ /* Make sure we unregister at epmd (unknown fd) and get at least
+ one free filedescriptor (for erl_crash.dump) */
+
+ max = max_files;
+ if (max < 1024)
+ max = 1024;
+ for (i = 3; i < max; i++) {
+ close(i);
+ }
+
+ envsz = sizeof(env);
+ i = erts_sys_getenv__("ERL_CRASH_DUMP_NICE", env, &envsz);
+ if (i >= 0) {
+ int nice_val;
+ nice_val = i != 0 ? 0 : atoi(env);
+ if (nice_val > 39) {
+ nice_val = 39;
+ }
+ set_pri(nice_val);
+ }
+
+ UnUseTmpHeapNoproc(NUFBUF);
+#undef NUFBUF
+ return has_heart;
+}
+
+int erts_sys_prepare_crash_dump(int secs)
+{
+ return prepare_crash_dump(secs);
+}
+
+static ERTS_INLINE void
+break_requested(void)
+{
+ /*
+ * just set a flag - checked for and handled by
+ * scheduler threads erts_check_io() (not signal handler).
+ */
+#ifdef DEBUG
+ fprintf(stderr,"break!\n");
+#endif
+ if (ERTS_BREAK_REQUESTED)
+ erl_exit(ERTS_INTR_EXIT, "");
+
+ ERTS_SET_BREAK_REQUESTED;
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
+}
+
+/* Disable break */
+void erts_set_ignore_break(void) {
+
+}
+
+/* Don't use ctrl-c for break handler but let it be
+ used by the shell instead (see user_drv.erl) */
+void erts_replace_intr(void) {
+ struct termios mode;
+
+ if (isatty(0)) {
+ tcgetattr(0, &mode);
+
+ /* here's an example of how to replace ctrl-c with ctrl-u */
+ /* mode.c_cc[VKILL] = 0;
+ mode.c_cc[VINTR] = CKILL; */
+
+ mode.c_cc[VINTR] = 0; /* disable ctrl-c */
+ tcsetattr(0, TCSANOW, &mode);
+ replace_intr = 1;
+ }
+}
+
+void init_break_handler(void)
+{
+
+}
+
+int sys_max_files(void)
+{
+ return(max_files);
+}
+
+
+/************************** OS info *******************************/
+
+/* Used by erlang:info/1. */
+/* (This code was formerly in drv.XXX/XXX_os_drv.c) */
+
+char os_type[] = "ose";
+
+void
+os_flavor(char* namebuf, /* Where to return the name. */
+ unsigned size) /* Size of name buffer. */
+{
+#if 0
+ struct utsname uts; /* Information about the system. */
+ char* s;
+
+ (void) uname(&uts);
+ for (s = uts.sysname; *s; s++) {
+ if (isupper((int) *s)) {
+ *s = tolower((int) *s);
+ }
+ }
+ strcpy(namebuf, uts.sysname);
+#else
+ strncpy(namebuf, "release", size);
+#endif
+}
+
+void
+os_version(int* pMajor, /* Pointer to major version. */
+           int* pMinor, /* Pointer to minor version. */
+           int* pBuild) /* Pointer to build number. */
+{
+ *pMajor = 5;
+ *pMinor = 7;
+ *pBuild = 0;
+}
+
+void init_getenv_state(GETENV_STATE *state)
+{
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ *state = NULL;
+}
+
+char **environ; /*ose - needs replacement*/
+
+char *getenv_string(GETENV_STATE *state0)
+{
+ char **state = (char **) *state0;
+ char *cp;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+
+ if (state == NULL)
+ state = environ;
+
+ cp = *state++;
+ *state0 = (GETENV_STATE) state;
+
+ return cp;
+}
+
+void fini_getenv_state(GETENV_STATE *state)
+{
+ *state = NULL;
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+}
+
+
+/************************** Port I/O *******************************/
+
+/* I. Common stuff */
+
+union SIGNAL {
+ SIGSELECT sig_no;
+ struct FmReadPtr fm_read_reply;
+ struct FmWritePtr fm_write_reply;
+ struct async async;
+};
+
+/* II. The spawn/fd drivers */
+
+/*
+ * Decreasing the size of it below 16384 is not allowed.
+ */
+#define ERTS_SYS_READ_BUF_SZ (64*1024)
+
+/* Driver interfaces */
+static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
+static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
+static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT,
+ char **, ErlDrvSizeT);
+static int spawn_init(void);
+static void fd_stop(ErlDrvData);
+static void erl_stop(ErlDrvData);
+static void ready_input(ErlDrvData, ErlDrvEvent);
+static void ready_output(ErlDrvData, ErlDrvEvent);
+static void output(ErlDrvData, char*, ErlDrvSizeT);
+static void stop_select(ErlDrvEvent, void*);
+
+static PROCESS
+get_signal_proxy_pid(void) {
+ union SIGNAL *sig;
+ SIGSELECT any_sig[] = {0};
+
+ if (!sig_proxy_pid) {
+ sig = alloc(sizeof(union SIGNAL), ERTS_SIGNAL_OSE_DRV_ATTACH);
+ hunt("ose_signal_driver_proxy", 0, NULL, &sig);
+ sig = receive(any_sig);
+ sig_proxy_pid = sender(&sig);
+ free_buf(&sig);
+ }
+ ASSERT(sig_proxy_pid);
+ return sig_proxy_pid;
+}
+
+static ErlDrvOseEventId
+resolve_signal(union SIGNAL* sig) {
+ switch(sig->sig_no) {
+
+ case FM_READ_PTR_REPLY:
+ return (ErlDrvOseEventId)sig->fm_read_reply.handle;
+
+ case FM_WRITE_PTR_REPLY:
+ return (ErlDrvOseEventId)sig->fm_write_reply.handle;
+
+ case ERTS_SIGNAL_OSE_DRV_ATTACH:
+ return (ErlDrvOseEventId)sig->async.target;
+
+ default:
+ break;
+ }
+ return (ErlDrvOseEventId)-1;
+}
+
+struct erl_drv_entry spawn_driver_entry = {
+ spawn_init,
+ spawn_start,
+ erl_stop,
+ output,
+ ready_input,
+ ready_output,
+ "spawn",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL, NULL,
+ stop_select
+};
+struct erl_drv_entry fd_driver_entry = {
+ NULL,
+ fd_start,
+ fd_stop,
+ output,
+ ready_input,
+ ready_output,
+ "fd",
+ NULL,
+ NULL,
+ fd_control,
+ NULL,
+ NULL,
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+};
+
+static void
+set_spawn_fd(int local_fd, int remote_fd, PROCESS remote_pid) {
+ PROCESS vm_pid;
+ FmHandle handle;
+ char env_val[55];
+ char env_name[10];
+ EfsStatus efs_res;
+
+ /* get pid of pipevm and handle of chosen fd */
+ efs_res = efs_examine_fd(local_fd, FLIB_FD_VMPID, &vm_pid, 0);
+ DEBUG_CHECK_RES(efs_res, EFS_SUCCESS);
+
+ /* setup the file descriptor to buffer per line */
+ efs_res = efs_config_fd(local_fd, FLIB_FD_BUFMODE, FM_BUFF_LINE,
+ FLIB_FD_BUFSIZE, 80, 0);
+ DEBUG_CHECK_RES(efs_res, EFS_SUCCESS);
+
+ /* duplicate handle and set spawn pid owner */
+ efs_res = efs_dup_to(local_fd, remote_pid, &handle);
+ DEBUG_CHECK_RES(efs_res, EFS_SUCCESS);
+
+ sprintf(env_name, "FD%d", remote_fd);
+
+ /* Syntax of the environment variable:
+ * "FD#" "<pid of pipevm>,<handle>,<buffer mode>,<buff size>,<omode>" */
+ sprintf(env_val, "0x%lx,0x%lx,%lu,%lu,0x%x",
+ vm_pid, handle,
+ FM_BUFF_LINE, 80,
+ O_APPEND);
+
+ set_env(remote_pid, env_name, env_val);
+}
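+
+/* For illustration only (the values below are made up, not taken from a
+ * real system): for remote_fd 1 this could produce something like
+ *
+ *     FD1=0x1c067,0x3,2,80,0x8
+ *
+ * i.e. the pid of the pipevm, the duplicated file handle, FM_BUFF_LINE,
+ * a buffer size of 80 and O_APPEND, which the spawned program can use to
+ * reconstruct its stdin/stdout/stderr. */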
+
+static ErlDrvData
+set_driver_data(ErlDrvPort port_num,
+ int ifd,
+ int ofd,
+ int packet_bytes,
+ int read_write,
+ int exit_status,
+ PROCESS pid)
+{
+ Port *prt;
+ ErtsSysReportExit *report_exit;
+
+ prt = erts_drvport2port(port_num);
+ if (prt != ERTS_INVALID_ERL_DRV_PORT) {
+ prt->os_pid = pid;
+ }
+
+ /* READ */
+ if (read_write & DO_READ) {
+ efs_examine_fd(ifd, FLIB_FD_HANDLE, &driver_data[ifd].handle, 0);
+ driver_data[ifd].ifd = ifd;
+ driver_data[ifd].packet_bytes = packet_bytes;
+ driver_data[ifd].port_num = port_num;
+ driver_data[ifd].pid = pid;
+
+ /* async read struct */
+ memset(&driver_data[ifd].aiocb, 0, sizeof(struct aiocb));
+ driver_data[ifd].aiocb.aio_buf = driver_alloc(255);
+ driver_data[ifd].aiocb.aio_fildes = ifd;
+ driver_data[ifd].aiocb.aio_nbytes = 255;
+
+ driver_data[ifd].alive = 1;
+ driver_data[ifd].status = 0;
+ driver_data[ifd].input_event =
+ erl_drv_ose_event_alloc(FM_READ_PTR_REPLY,
+ driver_data[ifd].handle, resolve_signal,
+ &driver_data[ifd].ifd);
+
+ /* READ & WRITE */
+ if (read_write & DO_WRITE) {
+ driver_data[ifd].ofd = ofd;
+ efs_examine_fd(ofd, FLIB_FD_HANDLE, &driver_data[ofd].handle, 0);
+
+ driver_data[ifd].output_event =
+ erl_drv_ose_event_alloc(FM_WRITE_PTR_REPLY,
+ driver_data[ofd].handle, resolve_signal,
+ &driver_data[ofd].ofd);
+ driver_data[ofd].pid = pid;
+ if (ifd != ofd) {
+ driver_data[ofd] = driver_data[ifd];
+ driver_data[ofd].aiocb.aio_buf = NULL;
+ }
+ }
+ else { /* READ ONLY */
+ driver_data[ifd].ofd = -1;
+ }
+
+ /* enable input event */
+ (void) driver_select(port_num, driver_data[ifd].input_event,
+ (ERL_DRV_READ | ERL_DRV_USE), 1);
+
+ aio_read(&driver_data[ifd].aiocb);
+ }
+ else { /* WRITE ONLY */
+ efs_examine_fd(ofd, FLIB_FD_HANDLE, &driver_data[ofd].handle, 0);
+ driver_data[ofd].packet_bytes = packet_bytes;
+ driver_data[ofd].port_num = port_num;
+ driver_data[ofd].ofd = ofd;
+ driver_data[ofd].pid = pid;
+ driver_data[ofd].alive = 1;
+ driver_data[ofd].status = 0;
+ driver_data[ofd].output_event =
+ erl_drv_ose_event_alloc(FM_WRITE_PTR_REPLY, driver_data[ofd].handle,
+ resolve_signal, &driver_data[ofd].ofd);
+ driver_data[ofd].input_event = driver_data[ofd].output_event;
+ }
+
+ /* this is used for spawned load modules, and is needed
+ * to properly uninstall them */
+ if (exit_status) {
+ struct PmProgramInfo *info;
+ int install_handle_size;
+ union SIGNAL *sig;
+ PmStatus pm_status;
+ report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT,
+ sizeof(ErtsSysReportExit));
+ report_exit->next = report_exit_list;
+ report_exit->port = erts_drvport2id(port_num);
+ report_exit->pid = pid;
+ report_exit->ifd = (read_write & DO_READ) ? ifd : -1;
+ report_exit->ofd = (read_write & DO_WRITE) ? ofd : -1;
+ report_exit_list = report_exit;
+ report_exit->attach_event =
+ erl_drv_ose_event_alloc(ERTS_SIGNAL_OSE_DRV_ATTACH, pid,
+ resolve_signal, &driver_data[ifd].ifd);
+
+ /* setup ifd and ofd report exit */
+ driver_data[ifd].report_exit = report_exit;
+ driver_data[ofd].report_exit = report_exit;
+
+ pm_status = ose_pm_program_info(pid, &info);
+ DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ install_handle_size = strlen(info->install_handle)+1;
+ driver_data[ifd].install_handle = driver_alloc(install_handle_size);
+ strcpy(driver_data[ifd].install_handle,
+ info->install_handle);
+
+ free_buf((union SIGNAL **)&info);
+
+ sig = alloc(sizeof(struct async), ERTS_SIGNAL_OSE_DRV_ATTACH);
+ sig->async.target = pid;
+ send(&sig, get_signal_proxy_pid());
+
+ /* this event will trigger when we receive an attach signal
+ * from the recently dead load module */
+ (void)driver_select(port_num,report_exit->attach_event, DO_READ, 1);
+ }
+ else {
+ report_exit = NULL;
+ }
+
+ /* the return value is the pointer to the driver_data struct we created
+ * in this function, it will be used in the drivers input
+ * and output functions */
+ return (ErlDrvData)((!(read_write & DO_READ) && read_write & DO_WRITE)
+ ? &driver_data[ofd]
+ : &driver_data[ifd]);
+}
+
+static int spawn_init()
+{
+ int i;
+
+ driver_data = (struct driver_data *)
+ erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct driver_data));
+
+ for (i = 0; i < max_files; i++)
+ driver_data[i].pid = -1;
+
+ return 1;
+}
+
+static void
+init_fd_data(int fd, ErlDrvPort port_num)
+{
+ fd_data[fd].buf = NULL;
+ fd_data[fd].cpos = NULL;
+ fd_data[fd].remain = 0;
+ fd_data[fd].sz = 0;
+ fd_data[fd].psz = 0;
+}
+
+/* Spawn a program as an OSE load module. The steps below are:
+ *  1. install the ELF load module under a unique install handle,
+ *  2. create a program in a new domain and look up its main process,
+ *  3. create one pipe pair per direction (served by the pipevm) and hand
+ *     the remote ends to the new program as stdin/stdout/stderr via
+ *     environment variables (see set_spawn_fd()),
+ *  4. start the program and close the fds we do not use ourselves. */
+static ErlDrvData
+spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+ int ifd[2];
+ int ofd[2];
+ static uint32_t ticker = 0;
+ PmStatus pm_status;
+ OSDOMAIN domain = PM_NEW_DOMAIN;
+ PROCESS progpid, mainbid, mainpid;
+ char *handle = NULL;
+ struct PmProgramInfo *info;
+ char *args = NULL;
+ char *tmp_handle;
+ ErlDrvData res = (ErlDrvData)-1;
+ int handle_size;
+ char *ptr;
+
+ /* handle arguments */
+ ptr = strchr(name, ' ');
+ if (ptr != NULL) {
+ *ptr ='\0';
+ ptr++;
+ args = ptr;
+ }
+ else {
+ args = NULL;
+ }
+
+ /* create an install handle */
+ ptr = strrchr(name, '/');
+ if (ptr != NULL) {
+ ptr++;
+ tmp_handle = ptr;
+ }
+ else {
+ tmp_handle = name;
+ }
+ handle_size = strlen(tmp_handle)+1;
+ handle_size += (ticker<10)?3:((ticker<100)?4:5);
+
+ handle = driver_alloc(handle_size);
+ snprintf(handle, handle_size, "%s_%d", tmp_handle, ticker);
+
+ do {
+ snprintf(handle, handle_size, "%s_%d", tmp_handle, ticker++);
+ pm_status = ose_pm_install_load_module(0, "ELF", name, handle,
+ 0, 0, NULL);
+
+ } while (pm_status == PM_EINSTALL_HANDLE_ALREADY_INSTALLED);
+ DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ /* Create Program */
+ pm_status = ose_pm_create_program(&domain, handle, 0, 0,
+ NULL, &progpid, &mainbid);
+ DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ /* Get the mainpid from the newly created program */
+ pm_status = ose_pm_program_info(progpid, &info);
+ DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ mainpid = info->main_process;
+ free_buf ((union SIGNAL **)&info);
+
+  /* The pipevm needs to be started for this to work.
+   * pipe() returns 0 on success and -1 on failure, in which case
+   * errno is set. */
+ if (pipe(ifd) != 0 || pipe(ofd) != 0) {
+ DEBUG_CHECK_RES(0, -1);
+ ASSERT(0);
+ }
+
+ /* setup driver data */
+ res = set_driver_data(port_num, ofd[0], ifd[1], opts->packet_bytes,
+ opts->read_write, 1 /* opts->exit_status */, progpid);
+
+ /* init the fd_data array for read/write */
+ init_fd_data(ofd[0], port_num);
+ init_fd_data(ifd[1], port_num);
+
+ /* setup additional configurations
+ * for the spawned applications environment */
+ if (args != NULL) {
+ set_env(progpid, "ARGV", args);
+ }
+ set_env(mainbid, "EFS_RESOLVE_TMO", 0);
+ set_spawn_fd(ifd[0], 0, mainpid);
+ set_spawn_fd(ofd[1], 1, mainpid);
+ set_spawn_fd(ofd[1], 2, mainpid);
+
+ /* start the spawned program */
+ pm_status = ose_pm_start_program(mainbid);
+ DEBUG_CHECK_RES(pm_status, PM_SUCCESS);
+
+ /* close unused fd's */
+ close(ifd[0]);
+ close(ofd[1]);
+
+ if (handle) {
+ driver_free(handle);
+ }
+
+ return (ErlDrvData)res;
+}
+
+#define FD_DEF_HEIGHT 24
+#define FD_DEF_WIDTH 80
+/* Control op */
+#define FD_CTRL_OP_GET_WINSIZE 100
+
+static int fd_get_window_size(int fd, Uint32 *width, Uint32 *height)
+{
+#ifdef TIOCGWINSZ
+ struct winsize ws;
+ if (ioctl(fd,TIOCGWINSZ,&ws) == 0) {
+ *width = (Uint32) ws.ws_col;
+ *height = (Uint32) ws.ws_row;
+ return 0;
+ }
+#endif
+ return -1;
+}
+
+static ErlDrvSSizeT fd_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
+{
+ struct driver_data *data = (struct driver_data *)drv_data;
+ char resbuff[2*sizeof(Uint32)];
+ switch (command) {
+ case FD_CTRL_OP_GET_WINSIZE:
+ {
+ Uint32 w,h;
+ if (fd_get_window_size(data->ifd,&w,&h))
+ return 0;
+ memcpy(resbuff,&w,sizeof(Uint32));
+ memcpy(resbuff+sizeof(Uint32),&h,sizeof(Uint32));
+ }
+ break;
+ default:
+ return 0;
+ }
+ if (rlen < 2*sizeof(Uint32)) {
+ *rbuf = driver_alloc(2*sizeof(Uint32));
+ }
+ memcpy(*rbuf,resbuff,2*sizeof(Uint32));
+ return 2*sizeof(Uint32);
+}
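+
+/* The FD_CTRL_OP_GET_WINSIZE reply above is two native-endian Uint32
+ * values, width followed by height, copied straight out of resbuff; any
+ * other command, or a failed ioctl(), yields an empty (length 0) reply. */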
+
+static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
+ SysDriverOpts* opts)
+{
+ ErlDrvData res;
+
+ CHLD_STAT_LOCK;
+ if (opts->read_write & DO_READ) {
+ init_fd_data(opts->ifd, port_num);
+ }
+ if (opts->read_write & DO_WRITE) {
+ init_fd_data(opts->ofd, port_num);
+ }
+ res = set_driver_data(port_num, opts->ifd, opts->ofd,
+ opts->packet_bytes,
+ opts->read_write, 0, -1);
+ CHLD_STAT_UNLOCK;
+ return res;
+}
+
+static void clear_fd_data(int fd)
+{
+ if (fd_data[fd].sz > 0) {
+ erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= fd_data[fd].sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*fd_data[fd].sz);
+ }
+ fd_data[fd].buf = NULL;
+ fd_data[fd].sz = 0;
+ fd_data[fd].remain = 0;
+ fd_data[fd].cpos = NULL;
+ fd_data[fd].psz = 0;
+}
+
+static void nbio_stop_fd(ErlDrvPort prt, ErlDrvEvent ev)
+{
+ int *fd;
+ driver_select(prt,ev,DO_READ|DO_WRITE,0);
+ erl_drv_ose_event_fetch(ev, NULL, NULL, (void **)&fd);
+ clear_fd_data(*fd);
+ SET_BLOCKING(*fd);
+}
+
+static void fd_stop(ErlDrvData drv_data) /* Does not close the fds */
+{
+ struct driver_data *data = (struct driver_data *)drv_data;
+
+ if (data->ofd != -1) {
+ if (data->ifd != data->ofd) { /* read and write */
+ nbio_stop_fd(data->port_num, data->input_event);
+ nbio_stop_fd(data->port_num, data->output_event);
+ }
+ else { /* write only */
+ nbio_stop_fd(data->port_num, data->output_event);
+ }
+ }
+ else { /* read only */
+ nbio_stop_fd(data->port_num, data->input_event);
+ }
+}
+
+
+static void erl_stop(ErlDrvData drv_data)
+{
+ struct driver_data *data = (struct driver_data *)drv_data;
+
+ CHLD_STAT_LOCK;
+ data->pid = -1;
+ CHLD_STAT_UNLOCK;
+
+ if (data->ofd != -1) {
+ if (data->ifd != data->ofd) { /* read and write */
+ nbio_stop_fd(data->port_num, data->input_event);
+ nbio_stop_fd(data->port_num, data->output_event);
+ driver_select(data->port_num, data->input_event, ERL_DRV_USE, 0);
+ driver_select(data->port_num, data->output_event, ERL_DRV_USE, 0);
+ }
+ else { /* write only */
+ nbio_stop_fd(data->port_num, data->output_event);
+ driver_select(data->port_num, data->output_event, ERL_DRV_USE, 0);
+ }
+ }
+ else { /* read only */
+ nbio_stop_fd(data->port_num, data->input_event);
+ driver_select(data->port_num, data->input_event, ERL_DRV_USE, 0);
+ }
+ close(data->ifd);
+ close(data->ofd);
+}
+
+/* The parameter drv_data is a pointer to the driver_data structure
+ * related to the fd to be used as output */
+static void output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
+{
+ ErlDrvSizeT sz;
+ char lb[4];
+ char* lbp;
+ struct driver_data *data = (struct driver_data *)drv_data;
+
+ if (((data->packet_bytes == 2) &&
+ (len > 0xffff)) || (data->packet_bytes == 1 && len > 0xff)) {
+ driver_failure_posix(data->port_num, EINVAL);
+ return; /* -1; */
+ }
+ put_int32(len, lb);
+ lbp = lb + (4-(data->packet_bytes));
+
+ if ((sz = driver_sizeq(data->port_num)) > 0) {
+ driver_enq(data->port_num, lbp, data->packet_bytes);
+ driver_enq(data->port_num, buf, len);
+ if (sz + len + data->packet_bytes >= (1 << 13))
+ set_busy_port(data->port_num, 1);
+ }
+ else {
+	driver_enq(data->port_num, buf, len);
+
+ driver_select(data->port_num, data->output_event,
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+
+ WRITE_AIO(data->ofd, len, buf);
+ }
+ return; /* 0; */
+}
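+
+/* A worked example of the packet-byte framing above (illustrative only):
+ * with packet_bytes == 2 and len == 5, put_int32() stores {0,0,0,5} in lb
+ * and lbp then points at the last two bytes {0,5}, so driver_enq() queues
+ * the 2-byte big-endian length header followed by the 5 payload bytes. */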
+
+/* This function is run when we receive either a read of 0 bytes,
+ * or the attach signal from a dying spawned load module */
+static int port_inp_failure(ErlDrvPort port_num, ErlDrvEvent ready_fd, int res)
+ /* Result: 0 (eof) or -1 (error) */
+{
+ int *fd;
+ SIGSELECT sig_no;
+ ASSERT(res <= 0);
+
+ erl_drv_ose_event_fetch(ready_fd,&sig_no, NULL, (void **)&fd);
+
+ /* As we need to handle two signals, we do this in two steps */
+ if (driver_data[*fd].alive) {
+ report_exit_status(driver_data[*fd].report_exit, 0); /* status? */
+ }
+ else {
+ clear_fd_data(*fd);
+ driver_report_exit(driver_data[*fd].port_num, driver_data[*fd].status);
+ /* As we do not really know if the spawn has crashed or exited nicely
+ * we do not check the result status of the following call.. FIXME
+ * can we handle this in a better way? */
+ ose_pm_uninstall_load_module(driver_data[*fd].install_handle);
+ driver_free(driver_data[*fd].install_handle);
+ driver_free((void *)driver_data[*fd].aiocb.aio_buf);
+
+ close(*fd);
+ }
+
+ return 0;
+}
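+
+/* port_inp_failure() is expected to run twice for an fd (see the
+ * "two signals" note above): on the first call the fd is still marked
+ * alive, so only the exit status is recorded and the fd marked dead via
+ * report_exit_status(); the second call does the actual cleanup: clearing
+ * the fd data, reporting the exit to the port, uninstalling the spawned
+ * load module and closing the fd. */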
+
+/* The parameter drv_data is a pointer to the driver_data structure
+ * related to the fd used as input.
+ * ready_fd is the event that triggered this call to ready_input */
+static void ready_input(ErlDrvData drv_data, ErlDrvEvent ready_fd)
+{
+ int res;
+ Uint h;
+ char *buf;
+ union SIGNAL *sig;
+ struct driver_data *data = (struct driver_data *)drv_data;
+
+ sig = erl_drv_ose_get_signal(ready_fd);
+ ASSERT(sig);
+
+
+ while (sig) {
+    /* If we've received an attach signal, we need to handle
+     * it in port_inp_failure */
+ if (sig->sig_no == ERTS_SIGNAL_OSE_DRV_ATTACH) {
+ port_inp_failure(data->port_num, ready_fd, 0);
+ }
+ else {
+ res = sig->fm_read_reply.actual;
+
+ if (data->packet_bytes == 0) {
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ }
+ else if (res == 0) {
+ /* read of 0 bytes, eof, otherside of pipe is assumed dead */
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ else {
+ buf = driver_alloc(res);
+ memcpy(buf, (void *)data->aiocb.aio_buf, res);
+ driver_select(data->port_num, data->output_event,
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+ driver_output(data->port_num, (char*) buf, res);
+ driver_free(buf);
+ }
+ }
+ /* We try to read the remainder */
+ else if (fd_data[data->ifd].remain > 0) {
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ }
+ else if (res == 0) {
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ else if (res == fd_data[data->ifd].remain) { /* we're done */
+ driver_output(data->port_num,
+ fd_data[data->ifd].buf,
+ fd_data[data->ifd].sz);
+ clear_fd_data(data->ifd);
+ }
+ else { /* if (res < fd_data[fd].remain) */
+ fd_data[data->ifd].cpos += res;
+ fd_data[data->ifd].remain -= res;
+ }
+ }
+ else if (fd_data[data->ifd].remain == 0) { /* clean fd */
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ }
+ else if (res == 0) { /* eof */
+ port_inp_failure(data->port_num, ready_fd, res);
+ }
+ else if (res < data->packet_bytes - fd_data[data->ifd].psz) {
+ memcpy(fd_data[data->ifd].pbuf+fd_data[data->ifd].psz,
+ (void *)data->aiocb.aio_buf, res);
+ fd_data[data->ifd].psz += res;
+ }
+ else { /* if (res >= packet_bytes) */
+ unsigned char* cpos = (unsigned char*)data->aiocb.aio_buf;
+ int bytes_left = res;
+
+ while (1) {
+ int psz = fd_data[data->ifd].psz;
+ char* pbp = fd_data[data->ifd].pbuf + psz;
+
+ while (bytes_left && (psz < data->packet_bytes)) {
+ *pbp++ = *cpos++;
+ bytes_left--;
+ psz++;
+ }
+
+ if (psz < data->packet_bytes) {
+ fd_data[data->ifd].psz = psz;
+ break;
+ }
+ fd_data[data->ifd].psz = 0;
+
+ switch (data->packet_bytes) {
+ case 1: h = get_int8(fd_data[data->ifd].pbuf); break;
+ case 2: h = get_int16(fd_data[data->ifd].pbuf); break;
+ case 4: h = get_int32(fd_data[data->ifd].pbuf); break;
+ default: ASSERT(0); return; /* -1; */
+ }
+
+ if (h <= (bytes_left)) {
+ driver_output(data->port_num, (char*) cpos, h);
+ cpos += h;
+ bytes_left -= h;
+ continue;
+ }
+ else { /* The last message we got was split */
+ char *buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
+ if (!buf) {
+ errno = ENOMEM;
+ port_inp_failure(data->port_num, ready_fd, -1);
+ }
+ else {
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, h);
+ sys_memcpy(buf, cpos, bytes_left);
+ fd_data[data->ifd].buf = buf;
+ fd_data[data->ifd].sz = h;
+ fd_data[data->ifd].remain = h - bytes_left;
+ fd_data[data->ifd].cpos = buf + bytes_left;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ /* reset the read buffer and init next asynch read */
+ DISPATCH_AIO(sig);
+ memset((void *)data->aiocb.aio_buf, 0, 255);
+
+ if (res > 0) {
+ aio_read(&data->aiocb);
+ }
+ }
+ sig = erl_drv_ose_get_signal(ready_fd);
+ }
+}
+
+
+/* The parameter drv_data is a pointer to the driver_data structure
+ * related to the fd to be used as output.
+ * ready_fd is the event that triggered this call to ready_output */
+static void ready_output(ErlDrvData drv_data, ErlDrvEvent ready_fd)
+{
+ SysIOVec *iov;
+ int vlen;
+ int res;
+ union SIGNAL *sig;
+ struct driver_data *data = (struct driver_data *)drv_data;
+
+ sig = erl_drv_ose_get_signal(ready_fd);
+ ASSERT(sig);
+
+ while (sig != NULL) {
+ if (sig->fm_write_reply.actual <= 0) {
+ int status;
+
+ status = efs_status_to_errno(sig->fm_write_reply.status);
+ driver_select(data->port_num, ready_fd, ERL_DRV_WRITE, 0);
+ DISPATCH_AIO(sig);
+ FREE_AIO(sig->fm_write_reply.buffer);
+
+ driver_failure_posix(data->port_num, status);
+ }
+ else { /* written bytes > 0 */
+ iov = driver_peekq(data->port_num, &vlen);
+ if (vlen > 0) {
+ DISPATCH_AIO(sig);
+ FREE_AIO(sig->fm_write_reply.buffer);
+ res = driver_deq(data->port_num, iov[0].iov_len);
+ if (res > 0) {
+ iov = driver_peekq(data->port_num, &vlen);
+ WRITE_AIO(data->ofd, iov[0].iov_len, iov[0].iov_base);
+ }
+ }
+ }
+ sig = erl_drv_ose_get_signal(ready_fd);
+ }
+}
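+
+/* Each FmWritePtr reply handled above completes one asynchronous write:
+ * the signal is dispatched, the request's buffer is freed, the written
+ * bytes are dequeued from the port queue, and if anything is left in the
+ * queue the next WRITE_AIO() is issued for the first remaining iovec.
+ * A reply with actual <= 0 is treated as an error and fails the port. */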
+
+static void stop_select(ErlDrvEvent ready_fd, void* _)
+{
+ int *fd;
+ erl_drv_ose_event_fetch(ready_fd, NULL, NULL, (void **)&fd);
+ erl_drv_ose_event_free(ready_fd);
+ close(*fd);
+}
+
+
+void erts_do_break_handling(void)
+{
+ struct termios temp_mode;
+ int saved = 0;
+
+ /*
+ * Most functions that do_break() calls are intentionally not thread safe;
+ * therefore, make sure that all threads but this one are blocked before
+ * proceeding!
+ */
+ erts_smp_thr_progress_block();
+
+ /* during break we revert to initial settings */
+ /* this is done differently for oldshell */
+ if (using_oldshell && !replace_intr) {
+ SET_BLOCKING(1);
+ }
+ else if (isatty(0)) {
+ tcgetattr(0,&temp_mode);
+ tcsetattr(0,TCSANOW,&initial_tty_mode);
+ saved = 1;
+ }
+
+ /* call the break handling function, reset the flag */
+ do_break();
+
+ fflush(stdout);
+
+ /* after break we go back to saved settings */
+ if (using_oldshell && !replace_intr) {
+ SET_NONBLOCKING(1);
+ }
+ else if (saved) {
+ tcsetattr(0,TCSANOW,&temp_mode);
+ }
+
+ erts_smp_thr_progress_unblock();
+}
+
+static pid_t
+getpid(void)
+{
+ return get_bid(current_process());
+}
+
+int getpagesize(void)
+{
+ return 1024;
+}
+
+
+/* Fills in the system's representation of the jam/beam process identifier.
+** The Pid is put in STRING representation in the supplied buffer;
+** no interpretation of this should be done by the rest of the
+** emulator. The buffer should be at least 21 bytes long.
+*/
+void sys_get_pid(char *buffer, size_t buffer_size){
+ pid_t p = getpid();
+ /* Assume the pid is scalar and can rest in an unsigned long... */
+ erts_snprintf(buffer, buffer_size, "%lu",(unsigned long) p);
+}
+
+int
+erts_sys_putenv_raw(char *key, char *value) {
+ return erts_sys_putenv(key, value);
+}
+int
+erts_sys_putenv(char *key, char *value)
+{
+ int res;
+
+ erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ res = set_env(get_bid(current_process()), key,
+ value);
+ erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+ return res;
+}
+
+
+int
+erts_sys_unsetenv(char *key)
+{
+ int res;
+
+ erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ res = set_env(get_bid(current_process()),key,NULL);
+ erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+
+ return res;
+}
+
+int
+erts_sys_getenv__(char *key, char *value, size_t *size)
+{
+ int res;
+ char *orig_value = get_env(get_bid(current_process()), key);
+ if (!orig_value)
+ res = -1;
+ else {
+ size_t len = sys_strlen(orig_value);
+ if (len >= *size) {
+ *size = len + 1;
+ res = 1;
+ }
+ else {
+ *size = len;
+ sys_memcpy((void *) value, (void *) orig_value, len+1);
+ res = 0;
+ }
+ free_buf((union SIGNAL **)&orig_value);
+ }
+ return res;
+}
+
+int
+erts_sys_getenv_raw(char *key, char *value, size_t *size) {
+ return erts_sys_getenv(key, value, size);
+}
+
+/*
+ * erts_sys_getenv
+ * returns:
+ *  -1, if the environment key is not set
+ *   0, if the key is set and the value fits into the supplied buffer;
+ *      *size is set to the length of the value
+ *   1, if the key is set but the value does not fit into the buffer;
+ *      *size is set to the needed buffer size (including the NUL)
+ */
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
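+
+/* A minimal usage sketch of the protocol above (illustrative only; "PATH"
+ * and the local names are just example names, not emulator code):
+ *
+ *     char buf[16];
+ *     size_t sz = sizeof(buf);
+ *     int r = erts_sys_getenv("PATH", buf, &sz);
+ *
+ * r == -1 means the key is not set; r == 0 means the value fit and sz now
+ * holds strlen(value); r == 1 means the value did not fit and sz now holds
+ * the needed buffer size (including the NUL), so allocate sz bytes and
+ * call again. */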
+
+void
+sys_init_io(void)
+{
+ fd_data = (struct fd_data *)
+ erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct fd_data));
+}
+
+extern const char pre_loaded_code[];
+extern Preload pre_loaded[];
+
+void erts_sys_alloc_init(void)
+{
+}
+
+void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
+{
+ void *res = malloc((size_t) sz);
+#if HAVE_ERTS_MSEG
+ if (!res) {
+ erts_mseg_clear_cache();
+ return malloc((size_t) sz);
+ }
+#endif
+ return res;
+}
+
+void *erts_sys_realloc(ErtsAlcType_t t, void *x, void *p, Uint sz)
+{
+ void *res = realloc(p, (size_t) sz);
+#if HAVE_ERTS_MSEG
+ if (!res) {
+ erts_mseg_clear_cache();
+ return realloc(p, (size_t) sz);
+ }
+#endif
+ return res;
+}
+
+void erts_sys_free(ErtsAlcType_t t, void *x, void *p)
+{
+ free(p);
+}
+
+/* Return a pointer to a vector of names of preloaded modules */
+
+Preload*
+sys_preloaded(void)
+{
+ return pre_loaded;
+}
+
+/* Return a pointer to preloaded code for module "module" */
+unsigned char*
+sys_preload_begin(Preload* p)
+{
+ return p->code;
+}
+
+/* Clean up if allocated */
+void sys_preload_end(Preload* p)
+{
+ /* Nothing */
+}
+
+/* Read a key from console (?) */
+
+int sys_get_key(fd)
+int fd;
+{
+ int c;
+ unsigned char rbuf[64];
+
+ fflush(stdout); /* Flush query ??? */
+
+ if ((c = read(fd,rbuf,64)) <= 0) {
+ return c;
+ }
+
+ return rbuf[0];
+}
+
+
+#ifdef DEBUG
+
+extern int erts_initialized;
+void
+erl_assert_error(const char* expr, const char* func,
+ const char* file, int line)
+{
+ fflush(stdout);
+    fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n",
+            file, line, func, expr);
+    fflush(stderr);
+    ramlog_printf("%s:%d:%s() Assertion failed: %s\n",
+                  file, line, func, expr);
+
+ abort();
+}
+
+void
+erl_debug(char* fmt, ...)
+{
+ char sbuf[1024]; /* Temporary buffer. */
+ va_list va;
+
+ if (debug_log) {
+ va_start(va, fmt);
+	vsnprintf(sbuf, sizeof(sbuf), fmt, va);
+ va_end(va);
+ fprintf(stderr, "%s", sbuf);
+ }
+}
+
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+report_exit_status(ErtsSysReportExit *rep, int status)
+{
+ if (rep->ifd >= 0) {
+ driver_data[rep->ifd].alive = 0;
+ driver_data[rep->ifd].status = status;
+ }
+ if (rep->ofd >= 0) {
+ driver_data[rep->ofd].alive = 0;
+ driver_data[rep->ofd].status = status;
+ }
+
+ erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep);
+}
+
+#define ERTS_REPORT_EXIT_STATUS report_exit_status
+
+/*
+ * Called from schedule() when it runs out of runnable processes,
+ * or when Erlang code has performed INPUT_REDUCTIONS reduction
+ * steps. runnable == 0 iff there are no runnable Erlang processes.
+ */
+void
+erl_sys_schedule(int runnable)
+{
+ ASSERT(get_fsem(current_process()) == 0);
+#ifdef ERTS_SMP
+ ASSERT(erts_get_scheduler_data()->no == 1);
+ ERTS_CHK_IO(!runnable);
+#else
+ ERTS_CHK_IO( 1 );
+#endif
+ ASSERT(get_fsem(current_process()) == 0);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+}
+
+
+#ifdef ERTS_SMP
+
+void
+erts_sys_main_thread(void)
+{
+ erts_thread_disable_fpe();
+
+ /* Become signal receiver thread... */
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("signal_receiver");
+#endif
+
+ while (1) {
+ static const SIGSELECT sigsel[] = {0};
+ union SIGNAL *msg = receive(sigsel);
+
+ fprintf(stderr,"Main thread got message %d from 0x%x!!\r\n",
+ msg->sig_no, sender(&msg));
+ free_buf(&msg);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+void
+erl_sys_args(int* argc, char** argv)
+{
+ int i, j;
+
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+
+ init_check_io();
+
+ /* Handled arguments have been marked with NULL. Slide arguments
+ not handled towards the beginning of argv. */
+ for (i = 0, j = 0; i < *argc; i++) {
+ if (argv[i])
+ argv[j++] = argv[i];
+ }
+ *argc = j;
+
+}
diff --git a/erts/emulator/sys/ose/sys_float.c b/erts/emulator/sys/ose/sys_float.c
new file mode 100644
index 0000000000..d9d6bb7c04
--- /dev/null
+++ b/erts/emulator/sys/ose/sys_float.c
@@ -0,0 +1,844 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2013. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "erl_process.h"
+
+
+#ifdef NO_FPE_SIGNALS
+
+void
+erts_sys_init_float(void)
+{
+# ifdef SIGFPE
+ sys_sigset(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
+# endif
+}
+
+#else /* !NO_FPE_SIGNALS */
+
+#ifdef ERTS_SMP
+static erts_tsd_key_t fpe_key;
+
+/* once-only initialisation early in the main thread (via erts_sys_init_float()) */
+static void erts_init_fp_exception(void)
+{
+ /* XXX: the wrappers prevent using a pthread destructor to
+ deallocate the key's value; so when/where do we do that? */
+    erts_tsd_key_create(&fpe_key, "fp_exception");
+}
+
+void erts_thread_init_fp_exception(void)
+{
+ unsigned long *fpe = erts_alloc(ERTS_ALC_T_FP_EXCEPTION, sizeof(*fpe));
+ *fpe = 0L;
+ erts_tsd_set(fpe_key, fpe);
+}
+
+static ERTS_INLINE volatile unsigned long *erts_thread_get_fp_exception(void)
+{
+ return (volatile unsigned long*)erts_tsd_get(fpe_key);
+}
+#else /* !SMP */
+#define erts_init_fp_exception() /*empty*/
+static volatile unsigned long fp_exception;
+#define erts_thread_get_fp_exception() (&fp_exception)
+#endif /* SMP */
+
+volatile unsigned long *erts_get_current_fp_exception(void)
+{
+ Process *c_p;
+
+ c_p = erts_get_current_process();
+ if (c_p)
+ return &c_p->fp_exception;
+ return erts_thread_get_fp_exception();
+}
+
+static void set_current_fp_exception(unsigned long pc)
+{
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ ASSERT(fpexnp != NULL);
+ *fpexnp = pc;
+}
+
+void erts_fp_check_init_error(volatile unsigned long *fpexnp)
+{
+ char buf[64];
+ snprintf(buf, sizeof buf, "ERTS_FP_CHECK_INIT at %p: detected unhandled FPE at %p\r\n",
+ __builtin_return_address(0), (void*)*fpexnp);
+ if (write(2, buf, strlen(buf)) <= 0)
+ erl_exit(ERTS_ABORT_EXIT, "%s", buf);
+ *fpexnp = 0;
+#if defined(__i386__) || defined(__x86_64__)
+ erts_restore_fpu();
+#endif
+}
+
+/* Is there no standard identifier for Darwin/MacOSX ? */
+#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
+#define __DARWIN__ 1
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+
+static void unmask_x87(void)
+{
+ unsigned short cw;
+
+ __asm__ __volatile__("fstcw %0" : "=m"(cw));
+ cw &= ~(0x01|0x04|0x08); /* unmask IM, ZM, OM */
+ __asm__ __volatile__("fldcw %0" : : "m"(cw));
+}
+
+/* mask x87 FPE, return true if the previous state was unmasked */
+static int mask_x87(void)
+{
+ unsigned short cw;
+ int unmasked;
+
+ __asm__ __volatile__("fstcw %0" : "=m"(cw));
+ unmasked = (cw & (0x01|0x04|0x08)) == 0;
+ /* or just set cw = 0x37f */
+ cw |= (0x01|0x04|0x08); /* mask IM, ZM, OM */
+ __asm__ __volatile__("fldcw %0" : : "m"(cw));
+ return unmasked;
+}
+
+static void unmask_sse2(void)
+{
+ unsigned int mxcsr;
+
+ __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
+ mxcsr &= ~(0x003F|0x0680); /* clear exn flags, unmask OM, ZM, IM (not PM, UM, DM) */
+ __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
+}
+
+/* mask SSE2 FPE, return true if the previous state was unmasked */
+static int mask_sse2(void)
+{
+ unsigned int mxcsr;
+ int unmasked;
+
+ __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
+ unmasked = (mxcsr & 0x0680) == 0;
+ /* or just set mxcsr = 0x1f80 */
+ mxcsr &= ~0x003F; /* clear exn flags */
+ mxcsr |= 0x0680; /* mask OM, ZM, IM (not PM, UM, DM) */
+ __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
+ return unmasked;
+}
+
+#if defined(__x86_64__)
+
+static inline int cpu_has_sse2(void) { return 1; }
+
+#else /* !__x86_64__ */
+
+/*
+ * Check if an x86-32 processor has SSE2.
+ */
+static unsigned int xor_eflags(unsigned int mask)
+{
+ unsigned int eax, edx;
+
+ eax = mask; /* eax = mask */
+ __asm__("pushfl\n\t"
+ "popl %0\n\t" /* edx = original EFLAGS */
+ "xorl %0, %1\n\t" /* eax = mask ^ EFLAGS */
+ "pushl %1\n\t"
+ "popfl\n\t" /* new EFLAGS = mask ^ original EFLAGS */
+ "pushfl\n\t"
+ "popl %1\n\t" /* eax = new EFLAGS */
+ "xorl %0, %1\n\t" /* eax = new EFLAGS ^ old EFLAGS */
+ "pushl %0\n\t"
+ "popfl" /* restore original EFLAGS */
+ : "=d"(edx), "=a"(eax)
+ : "1"(eax));
+ return eax;
+}
+
+static __inline__ unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax, save_ebx;
+
+ /* In PIC mode i386 reserves EBX. So we must save
+ and restore it ourselves to not upset gcc. */
+ __asm__(
+ "movl %%ebx, %1\n\t"
+ "cpuid\n\t"
+ "movl %1, %%ebx"
+ : "=a"(eax), "=m"(save_ebx)
+ : "0"(op)
+ : "cx", "dx");
+ return eax;
+}
+
+static __inline__ unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx, save_ebx;
+
+ /* In PIC mode i386 reserves EBX. So we must save
+ and restore it ourselves to not upset gcc. */
+ __asm__(
+ "movl %%ebx, %2\n\t"
+ "cpuid\n\t"
+ "movl %2, %%ebx"
+ : "=a"(eax), "=d"(edx), "=m"(save_ebx)
+ : "0"(op)
+ : "cx");
+ return edx;
+}
+
+/* The AC bit, bit #18, is a new bit introduced in the EFLAGS
+ * register on the Intel486 processor to generate alignment
+ * faults. This bit cannot be set on the Intel386 processor.
+ */
+static __inline__ int is_386(void)
+{
+ return ((xor_eflags(1<<18) >> 18) & 1) == 0;
+}
+
+/* Newer x86 processors have a CPUID instruction, as indicated by
+ * the ID bit (#21) in EFLAGS being modifiable.
+ */
+static __inline__ int has_CPUID(void)
+{
+ return (xor_eflags(1<<21) >> 21) & 1;
+}
+
+static int cpu_has_sse2(void)
+{
+ unsigned int maxlev, features;
+ static int has_sse2 = -1;
+
+ if (has_sse2 >= 0)
+ return has_sse2;
+ has_sse2 = 0;
+
+ if (is_386())
+ return 0;
+ if (!has_CPUID())
+ return 0;
+ maxlev = cpuid_eax(0);
+ /* Intel A-step Pentium had a preliminary version of CPUID.
+ It also didn't have SSE2. */
+ if ((maxlev & 0xFFFFFF00) == 0x0500)
+ return 0;
+ /* If max level is zero then CPUID cannot report any features. */
+ if (maxlev == 0)
+ return 0;
+ features = cpuid_edx(1);
+ has_sse2 = (features & (1 << 26)) != 0;
+
+ return has_sse2;
+}
+#endif /* !__x86_64__ */
+
+static void unmask_fpe(void)
+{
+ __asm__ __volatile__("fnclex");
+ unmask_x87();
+ if (cpu_has_sse2())
+ unmask_sse2();
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask x86 FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ int unmasked;
+
+ unmasked = mask_x87();
+ if (cpu_has_sse2())
+ unmasked |= mask_sse2();
+ return unmasked;
+}
+
+void erts_restore_fpu(void)
+{
+ __asm__ __volatile__("fninit");
+ unmask_x87();
+ if (cpu_has_sse2())
+ unmask_sse2();
+}
+
+#elif defined(__sparc__) && defined(__linux__)
+
+#if defined(__arch64__)
+#define LDX "ldx"
+#define STX "stx"
+#else
+#define LDX "ld"
+#define STX "st"
+#endif
+
+static void unmask_fpe(void)
+{
+ unsigned long fsr;
+
+ __asm__(STX " %%fsr, %0" : "=m"(fsr));
+ fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
+ fsr |= (0x1AUL << 23); /* enable NV, OF, DZ exceptions */
+ __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask SPARC FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ unsigned long fsr;
+ int unmasked;
+
+ __asm__(STX " %%fsr, %0" : "=m"(fsr));
+ unmasked = ((fsr >> 23) & 0x1A) == 0x1A;
+ fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
+ __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
+ return unmasked;
+}
+
+#elif (defined(__powerpc__) && defined(__linux__)) || (defined(__ppc__) && defined(__DARWIN__))
+
+#if defined(__linux__)
+#include <sys/prctl.h>
+
+static void set_fpexc_precise(void)
+{
+ if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0) {
+ perror("PR_SET_FPEXC");
+ exit(1);
+ }
+}
+
+#elif defined(__DARWIN__)
+
+#include <mach/mach.h>
+#include <pthread.h>
+
+/*
+ * FE0 FE1 MSR bits
+ * 0 0 floating-point exceptions disabled
+ * 0 1 floating-point imprecise nonrecoverable
+ * 1 0 floating-point imprecise recoverable
+ * 1 1 floating-point precise mode
+ *
+ * Apparently:
+ * - Darwin 5.5 (MacOS X <= 10.1) starts with FE0 == FE1 == 0,
+ * and resets FE0 and FE1 to 0 after each SIGFPE.
+ * - Darwin 6.0 (MacOS X 10.2) starts with FE0 == FE1 == 1,
+ * and does not reset FE0 or FE1 after a SIGFPE.
+ */
+#define FE0_MASK (1<<11)
+#define FE1_MASK (1<<8)
+
+/* a thread cannot get or set its own MSR bits */
+static void *fpu_fpe_enable(void *arg)
+{
+ thread_t t = *(thread_t*)arg;
+ struct ppc_thread_state state;
+ unsigned int state_size = PPC_THREAD_STATE_COUNT;
+
+ if (thread_get_state(t, PPC_THREAD_STATE, (natural_t*)&state, &state_size) != KERN_SUCCESS) {
+ perror("thread_get_state");
+ exit(1);
+ }
+ if ((state.srr1 & (FE1_MASK|FE0_MASK)) != (FE1_MASK|FE0_MASK)) {
+#if 1
+ /* This would also have to be performed in the SIGFPE handler
+ to work around the MSR reset older Darwin releases do. */
+ state.srr1 |= (FE1_MASK|FE0_MASK);
+ thread_set_state(t, PPC_THREAD_STATE, (natural_t*)&state, state_size);
+#else
+ fprintf(stderr, "srr1 == 0x%08x, your Darwin is too old\n", state.srr1);
+ exit(1);
+#endif
+ }
+ return NULL; /* Ok, we appear to be on Darwin 6.0 or later */
+}
+
+static void set_fpexc_precise(void)
+{
+ thread_t self = mach_thread_self();
+ pthread_t enabler;
+
+ if (pthread_create(&enabler, NULL, fpu_fpe_enable, &self)) {
+ perror("pthread_create");
+ } else if (pthread_join(enabler, NULL)) {
+ perror("pthread_join");
+ }
+}
+
+#endif
+
+static void set_fpscr(unsigned int fpscr)
+{
+ union {
+ double d;
+ unsigned int fpscr[2];
+ } u;
+
+ u.fpscr[0] = 0xFFF80000;
+ u.fpscr[1] = fpscr;
+ __asm__ __volatile__("mtfsf 255,%0" : : "f"(u.d));
+}
+
+static unsigned int get_fpscr(void)
+{
+ union {
+ double d;
+ unsigned int fpscr[2];
+ } u;
+
+ __asm__("mffs %0" : "=f"(u.d));
+ return u.fpscr[1];
+}
+
+static void unmask_fpe(void)
+{
+ set_fpexc_precise();
+ set_fpscr(0x80|0x40|0x10); /* VE, OE, ZE; not UE or XE */
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask PowerPC FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ int unmasked;
+
+ unmasked = (get_fpscr() & (0x80|0x40|0x10)) == (0x80|0x40|0x10);
+ set_fpscr(0x00);
+ return unmasked;
+}
+
+#else
+
+static void unmask_fpe(void)
+{
+ fpsetmask(FP_X_INV | FP_X_OFL | FP_X_DZ);
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask IEEE FPE, return true if previous state was unmasked */
+static int mask_fpe(void)
+{
+ const fp_except unmasked_mask = FP_X_INV | FP_X_OFL | FP_X_DZ;
+ fp_except old_mask;
+
+ old_mask = fpsetmask(0);
+ return (old_mask & unmasked_mask) == unmasked_mask;
+}
+
+#endif
+
+#if (defined(__linux__) && (defined(__i386__) || defined(__x86_64__) || defined(__sparc__) || defined(__powerpc__))) || (defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__) || defined(__ppc__))) || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__))) || ((defined(__NetBSD__) || defined(__OpenBSD__)) && defined(__x86_64__)) || (defined(__sun__) && defined(__x86_64__))
+
+#if defined(__linux__) && defined(__i386__)
+#if !defined(X86_FXSR_MAGIC)
+#define X86_FXSR_MAGIC 0x0000
+#endif
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+#include <sys/types.h>
+#include <machine/fpu.h>
+#elif defined(__FreeBSD__) && defined(__i386__)
+#include <sys/types.h>
+#include <machine/npx.h>
+#elif defined(__DARWIN__)
+#include <machine/signal.h>
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+#include <sys/types.h>
+#include <machine/fpu.h>
+#endif
+#if !(defined(__OpenBSD__) && defined(__x86_64__))
+#include <ucontext.h>
+#endif
+#include <string.h>
+
+#if defined(__linux__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->gregs[REG_RIP])
+#elif defined(__linux__) && defined(__i386__)
+#define mc_pc(mc) ((mc)->gregs[REG_EIP])
+#elif defined(__DARWIN__) && defined(__i386__)
+#ifdef DARWIN_MODERN_MCONTEXT
+#define mc_pc(mc) ((mc)->__ss.__eip)
+#else
+#define mc_pc(mc) ((mc)->ss.eip)
+#endif
+#elif defined(__DARWIN__) && defined(__x86_64__)
+#ifdef DARWIN_MODERN_MCONTEXT
+#define mc_pc(mc) ((mc)->__ss.__rip)
+#else
+#define mc_pc(mc) ((mc)->ss.rip)
+#endif
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->mc_rip)
+#elif defined(__FreeBSD__) && defined(__i386__)
+#define mc_pc(mc) ((mc)->mc_eip)
+#elif defined(__NetBSD__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->__gregs[_REG_RIP])
+#elif defined(__NetBSD__) && defined(__i386__)
+#define mc_pc(mc) ((mc)->__gregs[_REG_EIP])
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->sc_rip)
+#elif defined(__sun__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->gregs[REG_RIP])
+#endif
+
+static void fpe_sig_action(int sig, siginfo_t *si, void *puc)
+{
+ ucontext_t *uc = puc;
+ unsigned long pc;
+
+#if defined(__linux__)
+#if defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ fpregset_t fpstate = mc->fpregs;
+ pc = mc_pc(mc);
+ /* A failed SSE2 instruction will restart. To avoid
+ looping we mask SSE2 exceptions now and unmask them
+ again later in erts_check_fpe()/erts_restore_fpu().
+ On RISCs we update PC to skip the failed instruction,
+ but the ever increasing complexity of the x86 instruction
+ set encoding makes that a poor solution here. */
+ fpstate->mxcsr = 0x1F80;
+ fpstate->swd &= ~0xFF;
+#elif defined(__i386__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ fpregset_t fpstate = mc->fpregs;
+ pc = mc_pc(mc);
+ if ((fpstate->status >> 16) == X86_FXSR_MAGIC)
+ ((struct _fpstate*)fpstate)->mxcsr = 0x1F80;
+ fpstate->sw &= ~0xFF;
+#elif defined(__sparc__) && defined(__arch64__)
+ /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
+ struct sigcontext *sc = (struct sigcontext*)puc;
+ pc = sc->sigc_regs.tpc;
+ sc->sigc_regs.tpc = sc->sigc_regs.tnpc;
+ sc->sigc_regs.tnpc += 4;
+#elif defined(__sparc__)
+ /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
+ struct sigcontext *sc = (struct sigcontext*)puc;
+ pc = sc->si_regs.pc;
+ sc->si_regs.pc = sc->si_regs.npc;
+ sc->si_regs.npc = (unsigned long)sc->si_regs.npc + 4;
+#elif defined(__powerpc__)
+#if defined(__powerpc64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ unsigned long *regs = &mc->gp_regs[0];
+#else
+ mcontext_t *mc = uc->uc_mcontext.uc_regs;
+ unsigned long *regs = &mc->gregs[0];
+#endif
+ pc = regs[PT_NIP];
+ regs[PT_NIP] += 4;
+ regs[PT_FPSCR] = 0x80|0x40|0x10; /* VE, OE, ZE; not UE or XE */
+#endif
+#elif defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__))
+#ifdef DARWIN_MODERN_MCONTEXT
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc_pc(mc);
+ mc->__fs.__fpu_mxcsr = 0x1F80;
+ *(unsigned short *)&mc->__fs.__fpu_fsw &= ~0xFF;
+#else
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc_pc(mc);
+ mc->fs.fpu_mxcsr = 0x1F80;
+ *(unsigned short *)&mc->fs.fpu_fsw &= ~0xFF;
+#endif /* DARWIN_MODERN_MCONTEXT */
+#elif defined(__DARWIN__) && defined(__ppc__)
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc->ss.srr0;
+ mc->ss.srr0 += 4;
+ mc->fs.fpscr = 0x80|0x40|0x10;
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ struct savefpu *savefpu = (struct savefpu*)&mc->mc_fpstate;
+ struct envxmm *envxmm = &savefpu->sv_env;
+ pc = mc_pc(mc);
+ envxmm->en_mxcsr = 0x1F80;
+ envxmm->en_sw &= ~0xFF;
+#elif defined(__FreeBSD__) && defined(__i386__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ union savefpu *savefpu = (union savefpu*)&mc->mc_fpstate;
+ pc = mc_pc(mc);
+ if (mc->mc_fpformat == _MC_FPFMT_XMM) {
+ struct envxmm *envxmm = &savefpu->sv_xmm.sv_env;
+ envxmm->en_mxcsr = 0x1F80;
+ envxmm->en_sw &= ~0xFF;
+ } else {
+ struct env87 *env87 = &savefpu->sv_87.sv_env;
+ env87->en_sw &= ~0xFF;
+ }
+#elif defined(__NetBSD__) && defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ struct fxsave64 *fxsave = (struct fxsave64 *)&mc->__fpregs;
+ pc = mc_pc(mc);
+ fxsave->fx_mxcsr = 0x1F80;
+ fxsave->fx_fsw &= ~0xFF;
+#elif defined(__NetBSD__) && defined(__i386__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ pc = mc_pc(mc);
+ if (uc->uc_flags & _UC_FXSAVE) {
+ struct envxmm *envxmm = (struct envxmm *)&mc->__fpregs;
+ envxmm->en_mxcsr = 0x1F80;
+ envxmm->en_sw &= ~0xFF;
+ } else {
+ struct env87 *env87 = (struct env87 *)&mc->__fpregs;
+ env87->en_sw &= ~0xFF;
+ }
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+ struct fxsave64 *fxsave = uc->sc_fpstate;
+ pc = mc_pc(uc);
+ fxsave->fx_mxcsr = 0x1F80;
+ fxsave->fx_fsw &= ~0xFF;
+#elif defined(__sun__) && defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ struct fpchip_state *fpstate = &mc->fpregs.fp_reg_set.fpchip_state;
+ pc = mc_pc(mc);
+ fpstate->mxcsr = 0x1F80;
+ fpstate->sw &= ~0xFF;
+#endif
+#if 0
+ {
+ char buf[64];
+ snprintf(buf, sizeof buf, "%s: FPE at %p\r\n", __FUNCTION__, (void*)pc);
+ write(2, buf, strlen(buf));
+ }
+#endif
+ set_current_fp_exception(pc);
+}
+
+static void erts_thread_catch_fp_exceptions(void)
+{
+ struct sigaction act;
+ memset(&act, 0, sizeof act);
+ act.sa_sigaction = fpe_sig_action;
+ act.sa_flags = SA_SIGINFO;
+ sigaction(SIGFPE, &act, NULL);
+ unmask_fpe();
+}
+
+#else /* !((__linux__ && (__i386__ || __x86_64__ || __powerpc__)) || (__DARWIN__ && (__i386__ || __x86_64__ || __ppc__))) */
+
+static void fpe_sig_handler(int sig)
+{
+ set_current_fp_exception(1); /* XXX: convert to sigaction so we can get the trap PC */
+}
+
+static void erts_thread_catch_fp_exceptions(void)
+{
+ sys_sigset(SIGFPE, fpe_sig_handler);
+ unmask_fpe();
+}
+
+#endif /* (__linux__ && (__i386__ || __x86_64__ || __powerpc__)) || (__DARWIN__ && (__i386__ || __x86_64__ || __ppc__))) */
+
+/* once-only initialisation early in the main thread */
+void erts_sys_init_float(void)
+{
+ erts_init_fp_exception();
+ erts_thread_catch_fp_exceptions();
+ erts_printf_block_fpe = erts_sys_block_fpe;
+ erts_printf_unblock_fpe = erts_sys_unblock_fpe;
+}
+
+#endif /* NO_FPE_SIGNALS */
+
+void erts_thread_init_float(void)
+{
+#ifdef ERTS_SMP
+ /* This allows Erlang schedulers to leave Erlang-process context
+ and still have working FP exceptions. XXX: is this needed? */
+ erts_thread_init_fp_exception();
+#endif
+
+#ifndef NO_FPE_SIGNALS
+ /* NOTE:
+ * erts_thread_disable_fpe() is called in all threads at
+ * creation. We at least need to call unmask_fpe()
+ */
+#if defined(__DARWIN__) || defined(__FreeBSD__)
+ /* Darwin (7.9.0) does not appear to propagate FP exception settings
+ to a new thread from its parent. So if we want FP exceptions, we
+ must manually re-enable them in each new thread.
+ FreeBSD 6.1 appears to suffer from a similar issue. */
+ erts_thread_catch_fp_exceptions();
+#else
+ unmask_fpe();
+#endif
+
+#endif
+}
+
+void erts_thread_disable_fpe(void)
+{
+#if !defined(NO_FPE_SIGNALS)
+ (void)mask_fpe();
+#endif
+}
+
+#if !defined(NO_FPE_SIGNALS)
+int erts_sys_block_fpe(void)
+{
+ return mask_fpe();
+}
+
+void erts_sys_unblock_fpe(int unmasked)
+{
+ unmask_fpe_conditional(unmasked);
+}
+#endif
+
+/* The following check is incorporated from the Vee machine */
+
+#define ISDIGIT(d) ((d) >= '0' && (d) <= '9')
+
+/*
+ ** Convert a double to ascii format 0.dddde[+|-]ddd
+ ** return number of characters converted or -1 if error.
+ **
+ ** These two functions should maybe use localeconv() to pick up
+ ** the current radix character, but since it is uncertain how
+ ** expensive such a system call is, and since no-one has heard
+ ** of other radix characters than '.' and ',' an ad-hoc
+ ** low execution time solution is used instead.
+ */
+
+int
+sys_double_to_chars_ext(double fp, char *buffer, size_t buffer_size, size_t decimals)
+{
+ char *s = buffer;
+
+ if (erts_snprintf(buffer, buffer_size, "%.*e", decimals, fp) >= buffer_size)
+ return -1;
+    /* Search up to the decimal point */
+ if (*s == '+' || *s == '-') s++;
+ while (ISDIGIT(*s)) s++;
+ if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */
+ /* Scan to end of string */
+ while (*s) s++;
+ return s-buffer; /* i.e strlen(buffer) */
+}
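+
+/* For example (illustrative only): sys_double_to_chars_ext(3.14159, buf,
+ * sizeof(buf), 4) formats "3.1416e+00" via "%.*e" and returns 10, the
+ * length of the string; a buffer too small for the %e output yields -1. */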
+
+/* Float conversion */
+
+int
+sys_chars_to_double(char* buf, double* fp)
+{
+#ifndef NO_FPE_SIGNALS
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+#endif
+ char *s = buf, *t, *dp;
+
+ /* Robert says that something like this is what he really wanted:
+ * (The [.,] radix test is NOT what Robert wanted - it was added later)
+ *
+ * 7 == sscanf(Tbuf, "%[+-]%[0-9][.,]%[0-9]%[eE]%[+-]%[0-9]%s", ....);
+ * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7)
+ * break;
+ */
+
+ /* Scan string to check syntax. */
+ if (*s == '+' || *s == '-') s++;
+ if (!ISDIGIT(*s)) /* Leading digits. */
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s != '.' && *s != ',') /* Decimal part. */
+ return -1;
+ dp = s++; /* Remember decimal point pos just in case */
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s == 'e' || *s == 'E') {
+ /* There is an exponent. */
+ s++;
+ if (*s == '+' || *s == '-') s++;
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ }
+ if (*s) /* That should be it */
+ return -1;
+
+#ifdef NO_FPE_SIGNALS
+ errno = 0;
+#endif
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ *fp = strtod(buf, &t);
+ __ERTS_FP_ERROR_THOROUGH(fpexnp, *fp, return -1);
+ if (t != s) { /* Whole string not scanned */
+ /* Try again with other radix char */
+ *dp = (*dp == '.') ? ',' : '.';
+ errno = 0;
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ *fp = strtod(buf, &t);
+ __ERTS_FP_ERROR_THOROUGH(fpexnp, *fp, return -1);
+ }
+
+#ifdef NO_FPE_SIGNALS
+ if (errno == ERANGE) {
+ if (*fp == HUGE_VAL || *fp == -HUGE_VAL) {
+ /* overflow, should give error */
+ return -1;
+ } else if (t == s && *fp == 0.0) {
+ /* This should give 0.0 - OTP-7178 */
+ errno = 0;
+
+ } else if (*fp == 0.0) {
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+int
+matherr(struct exception *exc)
+{
+#if !defined(NO_FPE_SIGNALS)
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ if (fpexnp != NULL)
+ *fpexnp = (unsigned long)__builtin_return_address(0);
+#endif
+ return 1;
+}
diff --git a/erts/emulator/sys/ose/sys_time.c b/erts/emulator/sys/ose/sys_time.c
new file mode 100644
index 0000000000..7e96f68424
--- /dev/null
+++ b/erts/emulator/sys/ose/sys_time.c
@@ -0,0 +1,56 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+/******************* Routines for time measurement *********************/
+
+int erts_ticks_per_sec = 0; /* Will be SYS_CLK_TCK in erl_unix_sys.h */
+
+int sys_init_time(void)
+{
+ return SYS_CLOCK_RESOLUTION;
+}
+
+clock_t sys_times(SysTimes *now) {
+ now->tms_utime = now->tms_stime = now->tms_cutime = now->tms_cstime = 0;
+ return 0;
+}
+
+static OSTICK last_tick_count = 0;
+static SysHrTime wrap = 0;
+static OSTICK us_per_tick;
+
+void sys_init_hrtime() {
+ us_per_tick = system_tick();
+}
+
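+/* sys_gethrtime() below turns the free-running 32-bit OSE tick counter
+ * into a monotonic value: whenever the raw counter is seen to go backwards
+ * it has wrapped, so 2^32 is added to the accumulated 'wrap' offset
+ * (this assumes the function is called at least once per wrap period).
+ * The sum is scaled by 1000*us_per_tick, where system_tick() is taken to
+ * return microseconds per tick, to yield nanoseconds. For example, with a
+ * 1000 us tick, tick value 5 after one wrap maps to (5 + 2^32) * 1000000
+ * ns. */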
+SysHrTime sys_gethrtime() {
+ OSTICK ticks = get_ticks();
+ if (ticks < (SysHrTime) last_tick_count) {
+ wrap += 1ULL << 32;
+ }
+ last_tick_count = ticks;
+ return ((((SysHrTime) ticks) + wrap) * 1000*us_per_tick);
+}
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 2c47aa06c2..176fc049a7 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -127,6 +127,11 @@
# endif
#endif
+/*
+ * Min number of async threads
+ */
+#define ERTS_MIN_NO_OF_ASYNC_THREADS 0
+
 /* File descriptors are numbers and consecutively allocated on Unix */
#define ERTS_SYS_CONTINOUS_FD_NUMBERS
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index 689be98969..cafeab547e 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -46,7 +46,7 @@ static void erts_init_fp_exception(void)
{
/* XXX: the wrappers prevent using a pthread destructor to
deallocate the key's value; so when/where do we do that? */
- erts_tsd_key_create(&fpe_key);
+ erts_tsd_key_create(&fpe_key,"fp_exception");
}
void erts_thread_init_fp_exception(void)
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 0deb097b1a..8015e8f378 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -103,6 +103,10 @@
#define CreateAutoEvent(state) CreateEvent(NULL, FALSE, state, NULL)
#define CreateManualEvent(state) CreateEvent(NULL, TRUE, state, NULL)
+/*
+ * Min number of async threads
+ */
+#define ERTS_MIN_NO_OF_ASYNC_THREADS 0
/*
* Our own type of "FD's"
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index a11ac73ebc..0ded6b274e 100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -3197,7 +3197,7 @@ void erl_sys_init(void)
noinherit_std_handle(STD_ERROR_HANDLE);
#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&win32_errstr_key);
+ erts_smp_tsd_key_create(&win32_errstr_key,"win32_errstr_key");
InitializeCriticalSection(&htbc_lock);
#endif
erts_smp_atomic_init_nob(&pipe_creation_counter,0);
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 6a43e2b0e7..3906471f87 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -54,6 +54,7 @@
sbt_cmd/1,
scheduler_threads/1,
scheduler_suspend/1,
+ dirty_scheduler_threads/1,
reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(15)).
@@ -68,6 +69,7 @@ all() ->
equal_and_high_with_part_time_max, equal_with_high,
equal_with_high_max, bound_process,
{group, scheduler_bind}, scheduler_threads, scheduler_suspend,
+ dirty_scheduler_threads,
reader_groups].
groups() ->
@@ -1092,6 +1094,55 @@ scheduler_threads(Config) when is_list(Config) ->
end,
ok.
+dirty_scheduler_threads(Config) when is_list(Config) ->
+ SmpSupport = erlang:system_info(smp_support),
+ try
+ erlang:system_info(dirty_cpu_schedulers),
+ dirty_scheduler_threads_test(Config, SmpSupport)
+ catch
+ error:badarg ->
+ {skipped, "No dirty scheduler support"}
+ end.
+
+dirty_scheduler_threads_test(Config, SmpSupport) ->
+ {Sched, SchedOnln, _} = get_dsstate(Config, ""),
+ {HalfSched, HalfSchedOnln} = case SmpSupport of
+ false -> {1,1};
+ true ->
+ {Sched div 2,
+ SchedOnln div 2}
+ end,
+ Cmd1 = "+SDcpu "++integer_to_list(HalfSched)++":"++
+ integer_to_list(HalfSchedOnln),
+ {HalfSched, HalfSchedOnln, _} = get_dsstate(Config, Cmd1),
+ {HalfSched, HalfSchedOnln, _} = get_dsstate(Config, "+SDPcpu 50:50"),
+ IOSched = 20,
+ {_, _, IOSched} = get_dsstate(Config, "+SDio "++integer_to_list(IOSched)),
+ {ok, Node} = start_node(Config, ""),
+ [ok] = mcall(Node, [fun() -> dirty_schedulers_online_test() end]),
+ ok.
+
+dirty_schedulers_online_test() ->
+ dirty_schedulers_online_test(erlang:system_info(smp_support)).
+dirty_schedulers_online_test(false) -> ok;
+dirty_schedulers_online_test(true) ->
+ dirty_schedulers_online_smp_test(erlang:system_info(schedulers_online)).
+dirty_schedulers_online_smp_test(SchedOnln) when SchedOnln < 4 -> ok;
+dirty_schedulers_online_smp_test(SchedOnln) ->
+ DirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+ SchedOnln = DirtyCPUSchedOnln,
+ HalfSchedOnln = SchedOnln div 2,
+ SchedOnln = erlang:system_flag(schedulers_online, HalfSchedOnln),
+ HalfDirtyCPUSchedOnln = DirtyCPUSchedOnln div 2,
+ HalfDirtyCPUSchedOnln = erlang:system_flag(schedulers_online, SchedOnln),
+ DirtyCPUSchedOnln = erlang:system_flag(dirty_cpu_schedulers_online,
+ HalfDirtyCPUSchedOnln),
+ HalfDirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+ QrtrDirtyCPUSchedOnln = HalfDirtyCPUSchedOnln div 2,
+ SchedOnln = erlang:system_flag(schedulers_online, HalfSchedOnln),
+ QrtrDirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+ ok.
+
get_sstate(Config, Cmd) ->
{ok, Node} = start_node(Config, Cmd),
[SState] = mcall(Node, [fun () ->
@@ -1100,6 +1151,20 @@ get_sstate(Config, Cmd) ->
stop_node(Node),
SState.
+get_dsstate(Config, Cmd) ->
+ {ok, Node} = start_node(Config, Cmd),
+ [DSCPU] = mcall(Node, [fun () ->
+ erlang:system_info(dirty_cpu_schedulers)
+ end]),
+ [DSCPUOnln] = mcall(Node, [fun () ->
+ erlang:system_info(dirty_cpu_schedulers_online)
+ end]),
+ [DSIO] = mcall(Node, [fun () ->
+ erlang:system_info(dirty_io_schedulers)
+ end]),
+ stop_node(Node),
+ {DSCPU, DSCPUOnln, DSIO}.
+
scheduler_suspend(Config) when is_list(Config) ->
?line Dog = ?t:timetrap(?t:minutes(5)),
?line lists:foreach(fun (S) -> scheduler_suspend_test(Config, S) end,
@@ -1171,16 +1236,40 @@ sst2_loop(N) ->
erlang:system_flag(multi_scheduling, unblock),
sst2_loop(N-1).
-sst3_loop(_S, 0) ->
- ok;
sst3_loop(S, N) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ DS ->
+ sst3_loop_with_dirty_schedulers(S, DS, N)
+ catch
+ error:badarg ->
+ sst3_loop_normal_schedulers_only(S, N)
+ end.
+
+sst3_loop_normal_schedulers_only(_S, 0) ->
+ ok;
+sst3_loop_normal_schedulers_only(S, N) ->
+ erlang:system_flag(schedulers_online, (S div 2)+1),
+ erlang:system_flag(schedulers_online, 1),
+ erlang:system_flag(schedulers_online, (S div 2)+1),
+ erlang:system_flag(schedulers_online, S),
+ erlang:system_flag(schedulers_online, 1),
+ erlang:system_flag(schedulers_online, S),
+ sst3_loop_normal_schedulers_only(S, N-1).
+
+sst3_loop_with_dirty_schedulers(_S, _DS, 0) ->
+ ok;
+sst3_loop_with_dirty_schedulers(S, DS, N) ->
erlang:system_flag(schedulers_online, (S div 2)+1),
+ erlang:system_flag(dirty_cpu_schedulers_online, (DS div 2)+1),
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, (S div 2)+1),
+ erlang:system_flag(dirty_cpu_schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
+ erlang:system_flag(dirty_cpu_schedulers_online, DS),
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
- sst3_loop(S, N-1).
+ erlang:system_flag(dirty_cpu_schedulers_online, DS),
+ sst3_loop_with_dirty_schedulers(S, DS, N-1).
reader_groups(Config) when is_list(Config) ->
%% White box testing. These results are correct, but other results