Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/beam/beam_emu.c | 8
-rw-r--r--  erts/emulator/beam/beam_load.c | 6
-rw-r--r--  erts/emulator/beam/bif.c | 17
-rw-r--r--  erts/emulator/beam/bif.tab | 4
-rw-r--r--  erts/emulator/beam/big.c | 35
-rw-r--r--  erts/emulator/beam/big.h | 2
-rw-r--r--  erts/emulator/beam/break.c | 1
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 2
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 19
-rw-r--r--  erts/emulator/beam/erl_driver.h | 2
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 1
-rw-r--r--  erts/emulator/beam/erl_gc.c | 14
-rw-r--r--  erts/emulator/beam/erl_init.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 19
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 12
-rw-r--r--  erts/emulator/beam/erl_message.c | 16
-rw-r--r--  erts/emulator/beam/erl_nif.c | 547
-rw-r--r--  erts/emulator/beam/erl_nif.h | 26
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 45
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 74
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 2
-rw-r--r--  erts/emulator/beam/erl_process.c | 137
-rw-r--r--  erts/emulator/beam/erl_process.h | 33
-rw-r--r--  erts/emulator/beam/external.c | 2
-rw-r--r--  erts/emulator/beam/io.c | 92
-rw-r--r--  erts/emulator/beam/utils.c | 3
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 2
-rw-r--r--  erts/emulator/drivers/unix/multi_drv.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 5
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 2
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/async_ports_SUITE.erl | 118
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/Makefile.src | 15
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/cport.c | 81
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 41
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c | 9
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c | 11
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c | 9
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 23
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 57
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 106
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl | 32
-rw-r--r--  erts/emulator/test/tuple_SUITE.erl | 95
46 files changed, 1378 insertions, 382 deletions
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 1026e5f649..8bfb7d2ad2 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3503,6 +3503,7 @@ get_map_elements_fail:
* I[0]: &&call_nif
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
*/
BifFunction vbf;
@@ -3523,13 +3524,6 @@ get_map_elements_fail:
reg[0] = r(0);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
erts_post_nif(&env);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (is_non_value(nif_bif_result) && c_p->freason == TRAP) {
- Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p);
- ep->code[0] = I[-3];
- ep->code[1] = I[-2];
- }
-#endif
}
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index e96177cfd9..cfc6146b0a 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -2363,7 +2363,11 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
+#ifdef ERTS_DIRTY_SCHEDULERS
+ enum { MIN_FUNC_SZ = 4 };
+#else
+ enum { MIN_FUNC_SZ = 3 };
+#endif
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index fcbeb6cf5c..a5be8e1529 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_pid_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
}
return remote_send(p, dep, to, to, msg, suspend);
@@ -1912,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1922,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
} else if (is_internal_port(to)) {
int ret_val;
@@ -2887,9 +2891,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2899,8 +2900,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
}
}
- if (is_big(res)) {
- hp += (big_arity(res)+1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
}
HRelease(p,hp_end,hp);
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 011e49f1fe..e68b8e6274 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -601,6 +601,10 @@ bif maps:values/1
bif erts_internal:cmp_term/2
#
+# New in 17.1.
+#
+bif erlang:fun_info_mfa/1
+#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 41a041eba6..a8710dd910 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -274,6 +274,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
+ /* Sometimes _s is 0, which triggers undefined behaviour for the \
+ (_a0>>(D_EXP-_s)) shift, but this is ok because the \
+ & (-_s >> (D_EXP-1)) mask will zero it out anyway. */ \
_un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
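
The new comment in the D macro above explains that when _s is 0 the (_a0>>(D_EXP-_s)) shift is formally undefined, but its value is discarded by the & (-_s >> (D_EXP-1)) mask. A minimal standalone sketch of that masking idiom, assuming 64-bit digits and an arithmetic right shift of negative values (as mainstream compilers provide); the inner shift is split into two defined steps so the sketch itself is free of undefined behaviour, and hi_of_shift is an illustrative name, not an ERTS function:

    #include <stdint.h>
    #include <assert.h>

    /* High 64 bits of the 128-bit value (a1:a0) << s, for 0 <= s < 64.
     * mask is all-ones when s > 0 and zero when s == 0, so the a0
     * contribution vanishes for s == 0 without a branch (same idea as
     * big.c's "& (-_s >> (D_EXP-1))").  (a0 >> 1) >> (63 - s) equals
     * a0 >> (64 - s) for s > 0 and is well defined for s == 0. */
    static uint64_t hi_of_shift(uint64_t a1, uint64_t a0, int s)
    {
        uint64_t mask = (uint64_t)(-(int64_t)s >> 63);
        return (a1 << s) | (((a0 >> 1) >> (63 - s)) & mask);
    }

    int main(void)
    {
        assert(hi_of_shift(0x1, 0x8000000000000000ull, 0) == 0x1);
        assert(hi_of_shift(0x1, 0x8000000000000000ull, 1) == 0x3);
        return 0;
    }
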
@@ -1506,13 +1509,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
@@ -1540,21 +1545,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,7 +1572,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
@@ -2667,9 +2675,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2679,8 +2684,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
}
}
- if (is_big(res)) {
- hp += (big_arity(res) + 1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res) + 1);
+ }
}
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_done;
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index d80111822e..da31876d75 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
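
The hunks above (small_to_big, erts_sint64_to_big, ERTS_SINT64_HEAP_SIZE) and the erl_bif_binary.c hunks further down all replace a plain x = -x with a negation through the unsigned type. A minimal standalone sketch of why, assuming ordinary C99 fixed-width types instead of ERTS's Sint/Uint and the usual two's-complement conversion back to signed; the function names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Magnitude of a signed 64-bit value.  Writing -x directly overflows
     * (undefined behaviour) when x == INT64_MIN; negating in the unsigned
     * type is defined for every input and yields the correct magnitude,
     * the same idiom the patch introduces as -(Uint64)x. */
    static uint64_t magnitude(int64_t x)
    {
        return x >= 0 ? (uint64_t)x : -(uint64_t)x;
    }

    /* The erl_bif_binary.c hunks use the same trick for their overflow
     * check: inside the "if (len < 0)" branch, after len_u = -(uint64_t)len,
     * a negative (int64_t)len_u can only mean len was INT64_MIN, i.e. its
     * magnitude does not fit in the signed type. */
    static int negated_fits(int64_t len)   /* call only with len < 0 */
    {
        uint64_t len_u = -(uint64_t)len;
        return (int64_t)len_u >= 0;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)magnitude(INT64_MIN)); /* 9223372036854775808 */
        printf("%d\n", negated_fits(INT64_MIN));                    /* 0 */
        return 0;
    }
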
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 7d4f52ee23..08265b590d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p)
p->current[1],
p->current[2]);
}
+ erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix);
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 05ac24e04d..90cd227fae 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
size = va_arg(argp, Uint);
va_end(argp);
erl_exit(1,
- "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
- allctr_str, op, size, t_str);
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n",
+ allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX());
break;
}
case ERTS_ALC_E_NOALLCTR:
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 17ac6316b7..37354b7f8d 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -357,6 +357,7 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD_LOW CODE nif_trap_export_entry
type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
@@ -375,6 +376,7 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 7e0e825a0d..3bf78adce7 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 6915765dab..6efe9d9550 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3055,6 +3055,25 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ }
+ BIF_ERROR(p, BADARG);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 5ced8c5ca0..f9938fc66c 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -133,7 +133,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 1
/*
* The emulator will refuse to load a driver with a major version
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 3f829ea7ea..4e8c6dc68b 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -35,6 +35,7 @@ typedef struct {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
typedef struct {
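
The new dirty_scheduler_support field above is only meaningful when the emulator reports the bumped driver minor version from the erl_driver.h hunk. A hedged sketch of how a driver might read it through the existing driver_system_info() call; the version guard and the dirty_schedulers_available name are illustrative choices, not something this patch adds:

    #include "erl_driver.h"

    /* Ask the emulator for its system info and check whether dirty
     * schedulers are available.  The emulator fills in ErlDrvSysInfo up to
     * the size it knows about, so the new field is trusted only when the
     * reported extended driver version is at least 3.1. */
    static int dirty_schedulers_available(void)
    {
        ErlDrvSysInfo si;
        driver_system_info(&si, sizeof(si));
        if (si.driver_major_version > 3
            || (si.driver_major_version == 3 && si.driver_minor_version >= 1))
            return si.dirty_scheduler_support;
        return 0;
    }
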
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index aa15d2cc57..0db42d4325 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF has saved arguments, they need to be added
+ */
+ if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
+ Eterm* argv;
+ int argc;
+ if (erts_setup_nif_gc(p, &argv, &argc)) {
+ roots[n].v = argv;
+ roots[n].sz = argc;
+ n++;
+ }
+ }
+
ASSERT(n <= rootset->size);
mp = p->msg.first;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 5e6d812242..88c4006934 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2066,8 +2066,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
system_cleanup(flush_async);
save_statistics();
-
- an = abs(n);
+ if (n < 0)
+ an = -(unsigned int)n;
+ else
+ an = n;
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index c13eb87012..b105ece6f1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
{ "atom_tab", NULL },
{ "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
@@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -227,8 +227,7 @@ rw_op_str(Uint16 flags)
case ERTS_LC_FLG_LO_READ:
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Only write flag present");
default:
break;
}
@@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -325,8 +323,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -366,11 +363,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -1330,7 +1327,7 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 17ddfdc8ae..cf6996ea06 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -151,6 +151,9 @@ static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
erts_lcnt_thread_data_t *eltd;
eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ if (!eltd) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
@@ -272,6 +275,9 @@ void erts_lcnt_init() {
/* init lcnt structure */
erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ if (!erts_lcnt_data) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
erts_lcnt_data->current_locks = erts_lcnt_list_init();
erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
@@ -293,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
erts_lcnt_lock_list_t *list;
list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ if (!list) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
list->head = NULL;
list->tail = NULL;
list->n = 0;
@@ -399,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* copy structure and insert the copy */
deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ if (!deleted_lock) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
deleted_lock->next = NULL;
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 59a677a12c..8870fac7d9 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr,
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr,
+#ifdef ERTS_SMP
+ *rcvr_locks
+#else
+ 0
+#endif
+ );
}
}
@@ -542,7 +548,13 @@ queue_message(Process *c_p,
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver);
+ erts_proc_notify_new_message(receiver,
+#ifdef ERTS_SMP
+ *receiver_locks
+#else
+ 0
+#endif
+ );
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ff551ea3af..ede5f335dc 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -472,6 +472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
+
+ if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
+ if (sb->is_writable) {
+ ProcBin* pb = (ProcBin*) binary_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ sb->is_writable = 0;
+ }
+ }
+ }
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
@@ -1513,72 +1525,263 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(env->proc) == 0;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-
-/* NIFs exports need one more item than the Export struct provides, the
- * erl_module_nif*, so the DirtyNifExport below adds that. The Export
- * member must be first in the struct.
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. The saved_mfa, saved_argc, nif_level, alloced_argv_sz and argv
+ * members are used to track the MFA and arguments of the top NIF in case a
+ * chain of one or more enif_schedule_nif() calls results in an exception,
+ * since in that case the original MFA and registers have to be restored
+ * before returning to Erlang to ensure stacktrace information associated
+ * with the exception is correct.
*/
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
typedef struct {
Export exp;
struct erl_module_nif* m;
-} DirtyNifExport;
+ NativeFunPtr fp;
+ Eterm saved_mfa[3];
+ int saved_argc;
+ int alloced_argv_sz;
+ Eterm argv[1];
+} NifExport;
-static void
-alloc_proc_psd(Process* proc, DirtyNifExport **ep)
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. This function is declared in erl_process.h.
+ */
+int
+erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ int gc = (ep && ep->saved_argc > 0);
+
+ if (gc) {
+ *objv = ep->argv;
+ *nobj = ep->saved_argc;
+ }
+ return gc;
+}
+
+/*
+ * Allocate a NifExport and set it in proc specific data
+ */
+static NifExport*
+allocate_nif_sched_data(Process* proc, int argc)
+{
+ NifExport* ep;
+ size_t argv_extra, total;
int i;
- if (!*ep) {
- *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport));
- sys_memset((void*) *ep, 0, sizeof(DirtyNifExport));
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- (*ep)->exp.addressv[i] = &(*ep)->exp.code[3];
- }
- (*ep)->exp.code[3] = (BeamInstr) em_call_nif;
+
+ argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0;
+ total = sizeof(NifExport) + argv_extra;
+ ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
+ sys_memset((void*) ep, 0, total);
+ ep->alloced_argv_sz = argc;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->exp.addressv[i] = &ep->exp.code[3];
}
- (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp);
+ ep->exp.code[3] = (BeamInstr) em_call_nif;
+ (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep);
+ return ep;
+}
+
+static ERTS_INLINE void
+destroy_nif_export(NifExport *nif_export)
+{
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
}
+void
+erts_destroy_nif_export(void *nif_export)
+{
+ destroy_nif_export((NifExport *) nif_export);
+}
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save MFA and registers only if the need_save
+ * parameter is true.
+ */
static ERL_NIF_TERM
-execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array;
- ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0];
- typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM);
- FinalizerFP fp;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- enif_get_ulong(env, reg[1], (unsigned long *) &fp);
-#endif
- result = (*fp)(env, dirty_result);
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0
- && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- return result;
+ Process* proc = env->proc;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ NifExport* ep;
+ int i;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ if (!ep)
+ ep = allocate_nif_sched_data(proc, argc);
+ else if (need_save && ep->alloced_argv_sz < argc) {
+ NifExport* new_ep = allocate_nif_sched_data(proc, argc);
+ destroy_nif_export(ep);
+ ep = new_ep;
+ }
+ ERTS_VBUMP_ALL_REDS(proc);
+ for (i = 0; i < argc; i++) {
+ if (need_save)
+ ep->argv[i] = reg[i];
+ reg[i] = (Eterm) argv[i];
+ }
+ if (need_save) {
+ ep->saved_mfa[0] = proc->current[0];
+ ep->saved_mfa[1] = proc->current[1];
+ ep->saved_mfa[2] = proc->current[2];
+ ep->saved_argc = argc;
+ }
+ proc->i = (BeamInstr*) ep->exp.addressv[0];
+ ep->exp.code[0] = (BeamInstr) proc->current[0];
+ ep->exp.code[1] = (BeamInstr) proc->current[1];
+ ep->exp.code[2] = argc;
+ ep->exp.code[4] = (BeamInstr) direct_fp;
+ ep->m = env->mod_nif;
+ ep->fp = indirect_fp;
+ proc->freason = TRAP;
+ return THE_NON_VALUE;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+/*
+ * Restore saved MFA and registers. Registers are restored only when the
+ * exception flag is true.
+ */
+static void
+restore_nif_mfa(Process* proc, NifExport* ep, int exception)
+{
+ int i;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ proc->current[0] = ep->saved_mfa[0];
+ proc->current[1] = ep->saved_mfa[1];
+ proc->current[2] = ep->saved_mfa[2];
+ if (exception)
+ for (i = 0; i < ep->saved_argc; i++)
+ reg[i] = ep->argv[i];
+ ep->saved_argc = 0;
+ ep->saved_mfa[0] = THE_NON_VALUE;
+}
-ERL_NIF_TERM
-enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
- int argc, const ERL_NIF_TERM argv[])
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
+static ERL_NIF_TERM
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 0);
+ return argv[0];
+}
+
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 1);
+ return enif_make_badarg(env);
+}
+
+/*
+ * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
+ * then check the result and schedule the appropriate finalizer function
+ * where needed. Also restore the original NIF MFA when appropriate.
+ */
+static ERL_NIF_TERM
+execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+
+ /*
+ * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
+ close_lib(env->mod_nif);
+ /*
+ * If no more NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF calling
+ * context. Reuse fp essentially as a boolean for this, passing it to
+ * init_nif_sched_data below. Both dirty_nif_exception and
+ * dirty_nif_finalizer then check ep->fp to decide whether or not to
+ * restore the original calling context.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ fp = NULL;
+ if (is_non_value(result)) {
+ if (proc->freason != TRAP) {
+ ASSERT(proc->freason == BADARG);
+ return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
+ } else {
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, 1);
+ return result;
+ }
+ }
+ else
+ return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+}
+
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
+ * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
+ * type (CPU or I/O) is indicated in flags parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
erts_aint32_t state, n, a;
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep = NULL;
- int i;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ int need_save;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- return enif_make_badarg(env);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
@@ -1590,7 +1793,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
*/
n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
n |= ERTS_PSFLG_DIRTY_IO_PROC;
@@ -1598,85 +1801,106 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
if (a == state)
break;
}
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = argc;
- for (i = 0; i < argc; i++) {
- reg[i] = (Eterm) argv[i];
- }
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) fp;
- ep->m = env->mod_nif;
- proc->freason = TRAP;
-
erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+ return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+}
- return THE_NON_VALUE;
-#else
- return (*fp)(env, argc, argv);
-#endif
+static ERL_NIF_TERM
+schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ /*
+ * If no NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF MFA.
+ */
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP);
+ return result;
}
ERL_NIF_TERM
-enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep;
+ NifExport* ep;
+ ERL_NIF_TERM fun_name_atom, result;
+ int need_save;
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- |ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
- |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = 2;
- reg[0] = (Eterm) result;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp);
-#endif
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer;
- proc->freason = TRAP;
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
- return THE_NON_VALUE;
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+
+ if (flags) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ NativeFunPtr sched_fun;
+ int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
+ if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
+ sched_fun = schedule_dirty_io_nif;
+ else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ sched_fun = schedule_dirty_cpu_nif;
+ else
+ return enif_make_badarg(env);
+ result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return (*fp)(env, result);
+ return enif_make_badarg(env);
#endif
-}
+ }
+ else
+ result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-/* A simple finalizer that just returns its result argument */
-ERL_NIF_TERM
-enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)
-{
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->exp.code[1] = (BeamInstr) fun_name_atom;
return result;
}
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
}
-int
-enif_have_dirty_schedulers()
-{
-#ifdef USE_THREADS
- return 1;
-#else
- return 0;
-#endif
-}
-
#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
/* Maps */
@@ -1977,6 +2201,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+/*
+ * The function below is for looping through ErlNifFunc arrays, helping
+ * provide backwards compatibility across the version 2.7 change that added
+ * the "flags" field to ErlNifFunc.
+ */
+static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+{
+ ASSERT(incrp);
+ if (!*incrp) {
+ if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ *incrp = sizeof(ErlNifFunc);
+ else {
+ /*
+ * ErlNifFuncV1 below is what ErlNifFunc was before the
+ * addition of the flags field for 2.7, and is needed to handle
+ * backward compatibility.
+ */
+ typedef struct {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }ErlNifFuncV1;
+ *incrp = sizeof(ErlNifFuncV1);
+ }
+ }
+ return (ErlNifFunc*) ((char*)func + *incrp);
+}
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
@@ -2086,22 +2339,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
+
+ int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (maybe_dirty_nifs && f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+#else
+ ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
+ mod_atom, f->name, f->arity);
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (code_pp[1] - code_pp[0] < (5+4))
+#else
+ else if (code_pp[1] - code_pp[0] < (5+3))
+#endif
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
+ f = next_func(entry, &incr, f);
}
}
@@ -2127,7 +2406,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* is deprecated and was only ment as a development feature not to
* be used in production systems. (See warning below)
*/
- int k;
+ int k, old_incr = 0;
+ ErlNifFunc* old_func;
lib->priv_data = mod->curr.nif->priv_data;
ASSERT(mod->curr.nif->entry != NULL);
@@ -2136,13 +2416,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
/* Check that no NIF is removed */
+ old_func = mod->curr.nif->entry->funcs;
for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ if (old_func->arity == f->arity
+ && sys_strcmp(old_func->name, f->name) == 0) {
break;
}
+ f = next_func(entry, &incr, f);
}
if (i == entry->num_of_funcs) {
ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
@@ -2150,7 +2433,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- }
+ old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ }
erts_pre_nif(&env, BIF_P, lib);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
@@ -2197,13 +2481,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
+
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
@@ -2211,10 +2499,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
GenericBp* g = (GenericBp *) code_ptr[1];
ASSERT(code_ptr[5+0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ }
+ if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ code_ptr[5+3] = (BeamInstr) f->fptr;
+ code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) schedule_dirty_io_nif :
+ (BeamInstr) schedule_dirty_cpu_nif;
+#endif
+ }
+ else
+ code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
+ f = next_func(entry, &incr, f);
}
}
else {
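
The rewritten erl_nif.c above routes both regular and dirty scheduling through init_nif_sched_data(), with execute_nif()/execute_dirty_nif() as the wrappers stored in the trap export. From a NIF author's point of view the visible API is just the new enif_schedule_nif(). A hedged sketch of using it to split a long job into reschedulable slices; the module name my_nif, the count_down function and the slice size are illustrative, not taken from this patch:

    #include "erl_nif.h"

    /* Process the work in bounded slices; when a slice is done but work
     * remains, reschedule ourselves via enif_schedule_nif() instead of
     * hogging the scheduler.  flags == 0 means "run on a normal scheduler". */
    static ERL_NIF_TERM count_down(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        unsigned long n;
        if (argc != 1 || !enif_get_ulong(env, argv[0], &n))
            return enif_make_badarg(env);
        if (n > 1000000) {                       /* illustrative slice size */
            ERL_NIF_TERM next[1];
            next[0] = enif_make_ulong(env, n - 1000000);
            return enif_schedule_nif(env, "count_down", 0, count_down, 1, next);
        }
        return enif_make_atom(env, "done");
    }

    static ErlNifFunc nif_funcs[] = {
        {"count_down", 1, count_down, 0}         /* flags 0: regular NIF */
    };

    ERL_NIF_INIT(my_nif, nif_funcs, NULL, NULL, NULL, NULL)

A production NIF would typically also consult enif_consume_timeslice() to decide when to yield; the fixed slice above just keeps the sketch short.
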
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 5b93c2398e..226fc199a1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -42,9 +42,13 @@
** 2.5: R17 Maps API additions
** 2.6: R17 with maps
** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 6
+#define ERL_NIF_MINOR_VERSION 7
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -125,8 +129,10 @@ typedef struct
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
+
typedef struct enif_entry_t
{
int major;
@@ -139,8 +145,11 @@ typedef struct enif_entry_t
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
const char* vm_variant;
+ unsigned options;
}ErlNifEntry;
+/* Field bits for ErlNifEntry options */
+#define ERL_NIF_DIRTY_NIF_OPTION 1
typedef struct
@@ -232,10 +241,21 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# else
# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
# endif
-# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY do { \
+ memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)); \
+ entry.options = ERL_NIF_DIRTY_NIF_OPTION; \
+ } while(0)
+# else
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# endif
#else
# define ERL_NIF_INIT_GLOB
-# define ERL_NIF_INIT_BODY
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY entry.options = ERL_NIF_DIRTY_NIF_OPTION
+# else
+# define ERL_NIF_INIT_BODY
+# endif
# ifdef STATIC_ERLANG_NIF
# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void)
# else
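
With the 2.7 additions above, a NIF is marked dirty declaratively through the new ErlNifFunc flags field instead of calling the removed enif_schedule_dirty_nif() API. A hedged sketch of a dirty CPU-bound NIF; the my_dirty_nif and heavy_work names are illustrative, and it assumes an emulator built with dirty scheduler support, since otherwise load_nif_2() rejects a non-zero flags field as shown earlier:

    #include "erl_nif.h"

    /* A CPU-heavy NIF that should run on a dirty CPU scheduler. */
    static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        (void)argc; (void)argv;
        /* ... long-running, CPU-bound computation ... */
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc nif_funcs[] = {
        /* name, arity, fptr, flags (flags field is new in NIF API 2.7) */
        {"heavy_work", 0, heavy_work, ERL_NIF_DIRTY_JOB_CPU_BOUND}
    };

    /* ERL_NIF_INIT sets entry.options = ERL_NIF_DIRTY_NIF_OPTION when the
     * header is compiled with ERL_NIF_DIRTY_SCHEDULER_SUPPORT, which is
     * what load_nif_2() checks before honouring the flags field. */
    ERL_NIF_INIT(my_dirty_nif, nif_funcs, NULL, NULL, NULL, NULL)
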
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index d7c554e60b..630cefae93 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -22,7 +22,7 @@
#endif
/*
-** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
** to keep compatibility on Windows!!!
**
** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
@@ -141,14 +141,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM)));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM));
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void));
-#endif
-
ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env));
@@ -163,12 +155,22 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMap
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value));
-
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
/*
-** Add new entries here to keep compatibility on Windows!!!
+** ADD NEW ENTRIES HERE (before this comment) !!!
*/
+
+
+/*
+ * Conditional EXPERIMENTAL stuff always last.
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
#endif
+#endif /* ERL_NIF_API_FUNC_DECL */
/*
** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
@@ -282,21 +284,12 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
#endif
-
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif)
-# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer)
-# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer)
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers)
-#endif
-
# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map)
# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size)
# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map)
@@ -311,11 +304,21 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next)
# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev)
# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
/*
-** Add new entries here
+** ADD NEW ENTRIES HERE (before this comment)
*/
+
+/*
+ * Conditional EXPERIMENTAL stuff always last
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
#endif
+#endif /* ERL_NIF_API_FUNC_MACRO */
#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 31d9a1e26e..682f6f8f4b 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -68,6 +68,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
+ ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PT_STATE_SCHEDULED 0
@@ -798,12 +805,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
static ERTS_INLINE void
abort_nosuspend_task(Port *pp,
ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
@@ -991,6 +999,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -1007,6 +1016,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1018,12 +1031,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
@@ -1345,7 +1370,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1369,6 +1394,7 @@ erts_port_task_schedule(Eterm id,
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
if (pthp && erts_port_task_is_scheduled(pthp)) {
ASSERT(0);
@@ -1457,6 +1483,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1481,6 +1511,13 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
@@ -1489,8 +1526,10 @@ erts_port_task_schedule(Eterm id,
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
+ /* Emigrate port ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = erts_port_runq(pp);
@@ -1500,10 +1539,6 @@ erts_port_task_schedule(Eterm id,
#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
erts_smp_runq_unlock(runq);
@@ -1511,6 +1546,9 @@ erts_port_task_schedule(Eterm id,
done:
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
@@ -1525,7 +1563,7 @@ abort_nosuspend:
erts_port_dec_refc(pp);
#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
@@ -1609,6 +1647,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto done;
}
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+
erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1765,10 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
@@ -1791,11 +1827,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
+ if (active) {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
@@ -1804,6 +1836,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
@@ -1811,7 +1845,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
+ /* Emigrate port... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 1d30465ec9..9ef0cfcedc 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_EXEC_IMM \
| ERTS_PTS_FLG_FORCE_SCHED \
| ERTS_PTS_FLG_EXITING)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b73f9b7f92..20a88ec581 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -590,12 +590,10 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
= ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -2211,6 +2209,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
@@ -3755,17 +3756,25 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
} else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
@@ -5874,6 +5883,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))) {
@@ -5987,7 +5999,8 @@ change_proc_schedule_state(Process *p,
erts_aint32_t clear_state_flags,
erts_aint32_t set_state_flags,
erts_aint32_t *statep,
- erts_aint32_t *enq_prio_p)
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
/*
* NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
@@ -5996,6 +6009,11 @@ change_proc_schedule_state(Process *p,
*/
erts_aint32_t a = *statep, n;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -6005,6 +6023,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ if (lock_status)
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
while (1) {
erts_aint32_t e;
n = e = a;
@@ -6040,7 +6061,9 @@ change_proc_schedule_state(Process *p,
break;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
@@ -6053,15 +6076,18 @@ change_proc_schedule_state(Process *p,
profile_runnable_proc(p, am_active);
}
+ if (lock_status)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
*statep = a;
return enqueue;
}
static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
{
erts_aint32_t enq_prio = -1;
erts_aint32_t state = in_state;
@@ -6069,7 +6095,8 @@ schedule_process(Process *p, erts_aint32_t in_state)
0,
ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -6077,16 +6104,27 @@ schedule_process(Process *p, erts_aint32_t in_state)
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state);
+ schedule_process(p, state, locks);
}
static void
schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
{
+ /*
+ * Expects status lock to be locked when called, and
+ * returns with status lock unlocked...
+ */
erts_aint32_t a = state, n, enq_prio = -1;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
ASSERT(!(state & ERTS_PSFLG_PROXY));
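schedule_process_sys_task() now uses a lock hand-off: the caller enters with the status lock held and the callee releases it on every return path, either immediately when runnable_procs profiling is off or after the trace message has been emitted. A minimal sketch of that contract with plain pthreads (not ERTS locks, names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static int runnable = 0;

/* Expects status_lock to be held on entry; returns with it released. */
static void schedule_with_handoff(int make_runnable)
{
    if (make_runnable)
        runnable = 1;                        /* state change guarded by the lock */
    pthread_mutex_unlock(&status_lock);      /* unlock on every return path */
}

int main(void)
{
    pthread_mutex_lock(&status_lock);
    schedule_with_handoff(1);                /* caller must not unlock afterwards */
    printf("runnable=%d\n", runnable);
    return 0;
}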
@@ -6095,7 +6133,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
n = e = a;
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
+ goto cleanup; /* We don't want to schedule free processes... */
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6108,7 +6146,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
goto cleanup;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
@@ -6118,6 +6156,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
profile_runnable_proc(p, am_active);
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ prof_runnable_procs = 0;
}
if (enqueue != ERTS_ENQUEUE_NOT) {
@@ -6132,8 +6172,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
cleanup:
+
+ if (prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (proxy)
free_proxy_proc(proxy);
+
+ ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
}
static ERTS_INLINE int
@@ -6200,7 +6246,7 @@ suspend_process(Process *c_p, Process *p)
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
erts_aint32_t state, enq_prio = -1;
int enqueue;
@@ -6217,7 +6263,8 @@ resume_process(Process *p)
ERTS_PSFLG_SUSPENDED,
0,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -7818,6 +7865,9 @@ erts_start_schedulers(void)
#ifdef ETHR_HAVE_THREAD_NAMES
opts.name = malloc(80);
+ if (!opts.name) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#endif
#ifdef ERTS_SMP
@@ -8030,7 +8080,8 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8065,7 +8116,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
@@ -8223,7 +8274,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
@@ -8583,7 +8635,8 @@ resume_process_1(BIF_ALIST_1)
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
@@ -8713,7 +8766,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
@@ -8732,7 +8785,7 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -9968,8 +10021,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
rp_state = n;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
+ /*
+ * schedule_process_sys_task() unlocks status
+ * lock on process.
+ */
schedule_process_sys_task(rp, rp_state, NULL);
if (free_stqs)
@@ -10714,7 +10769,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state);
+ schedule_process(p, state, 0);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -11035,7 +11090,8 @@ set_proc_exiting(Process *p,
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -11076,7 +11132,8 @@ set_proc_self_exiting(Process *c_p)
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
ASSERT(!enqueue);
return state;
@@ -11721,8 +11778,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
@@ -11814,6 +11872,7 @@ erts_continue_exit_process(Process *p)
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
erts_aint32_t state;
+ void *nif_export;
#ifdef DEBUG
int yield_allowed = 1;
@@ -11964,6 +12023,7 @@ erts_continue_exit_process(Process *p)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -12011,6 +12071,9 @@ erts_continue_exit_process(Process *p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
delete_process(p);
#ifdef ERTS_SMP
@@ -12055,7 +12118,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state);
+ schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index ed6dadbffa..3b0798207e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -734,13 +734,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DIST_ENTRY 3
#define ERTS_PSD_CALL_TIME_BP 4
#define ERTS_PSD_DELAYED_GC_TASK_QS 5
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6
+#define ERTS_PSD_NIF_TRAP_EXPORT 6
#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
-#endif
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -767,10 +763,8 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#endif
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -1367,6 +1361,9 @@ Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
+int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
+void erts_destroy_nif_export(void *); /* see erl_nif.c */
+
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1704,17 +1701,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
-void erts_schedule_process(Process *, erts_aint32_t);
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
#endif
@@ -1817,12 +1814,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \
- ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT))
-#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \
- ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE)))
-#endif
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, NTE) \
+ erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 8d240355b0..9b9b4b2a62 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1925,6 +1925,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
real_size = endp - bytes;
result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size);
+ result_bin->orig_size = real_size;
level = context->s.ec.level;
BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
if (level == 0 || real_size < 6) { /* We are done */
@@ -2004,6 +2005,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
erl_zlib_deflate_finish(&(context->s.cc.stream));
result_bin = erts_bin_realloc(context->s.cc.destination_bin,
context->s.cc.dest_len+6);
+ result_bin->orig_size = context->s.cc.dest_len+6;
context->s.cc.destination_bin = NULL;
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index d028737664..9ae973e108 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1218,9 +1218,10 @@ typedef struct {
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_out);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
@@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
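try_imm_drv_call() now claims the port for immediate execution by setting ERTS_PTS_FLG_EXEC_IMM in a compare-and-swap loop, bailing out if any invalid scheduling flag is already set; the read-modify-write retries until no other thread has changed sched.flags in between. The retry pattern in isolation, as a C11 sketch with invented flag names rather than the ERTS ones:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLG_IN_RUNQ   (1u << 0)
#define FLG_EXEC      (1u << 1)
#define FLG_EXITING   (1u << 2)
#define FLG_EXEC_IMM  (1u << 3)

/* Set FLG_EXEC_IMM unless an invalid flag is present; retry on races. */
static bool try_set_exec_imm(atomic_uint *flags, unsigned invalid)
{
    unsigned act = atomic_load_explicit(flags, memory_order_relaxed);
    for (;;) {
        if (act & invalid)
            return false;                    /* caller must take the failure path */
        /* On failure 'act' is reloaded with the current flag word. */
        if (atomic_compare_exchange_weak(flags, &act, act | FLG_EXEC_IMM))
            return true;
    }
}

int main(void)
{
    atomic_uint flags = FLG_IN_RUNQ;         /* pretend the port is queued */
    printf("%d\n", try_set_exec_imm(&flags, FLG_EXITING));  /* 1: claimed */
    printf("%d\n", try_set_exec_imm(&flags, FLG_IN_RUNQ));  /* 0: rejected */
    return 0;
}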
@@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
@@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_in);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -7166,7 +7212,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
void
@@ -7228,6 +7274,18 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->nif_major_version = ERL_NIF_MAJOR_VERSION;
sip->nif_minor_version = ERL_NIF_MINOR_VERSION;
}
+ /*
+ * 'dirty_scheduler_support' is the last field in the 4th version
+ * (driver version 3.1, NIF version 2.7)
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
+#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
+ sip->dirty_scheduler_support = 1;
+#else
+ sip->dirty_scheduler_support = 0;
+#endif
+ }
+
}
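ERL_DRV_SYS_INFO_SIZE now uses offsetof() to compute the struct size up to and including a field, and driver_system_info() fills in only the fields that fit in the si_size supplied by the caller, so older drivers keep working while newer ones can see dirty_scheduler_support. A sketch (not taken from OTP) of how a driver compiled against the new erl_driver.h could probe the field:

#include <string.h>
#include "erl_driver.h"

static int runtime_has_dirty_schedulers(void)
{
    ErlDrvSysInfo si;
    memset(&si, 0, sizeof(si));              /* an older emulator leaves the tail as-is */
    driver_system_info(&si, sizeof(si));     /* fills at most sizeof(si) bytes */
    return si.dirty_scheduler_support;       /* still 0 when the emulator predates it */
}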
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 72092ec7b0..55f9e68e78 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3948,6 +3948,9 @@ erts_save_emu_args(int argc, char **argv)
size += sz+1;
}
ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#ifdef DEBUG
end_ptr = ptr + size;
#endif
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 09d90f4984..891589d1c5 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -5778,7 +5778,7 @@ done:
ia_p->Ipv6IfIndex &&
ia_p->Ipv6IfIndex != index)
{
- /* Oops, there was an other interface for IPv6. Possible? XXX */
+ /* Oops, there was another interface for IPv6. Possible? XXX */
index = ia_p->Ipv6IfIndex;
goto index;
}
diff --git a/erts/emulator/drivers/unix/multi_drv.c b/erts/emulator/drivers/unix/multi_drv.c
index 822c96730c..724d325ed5 100644
--- a/erts/emulator/drivers/unix/multi_drv.c
+++ b/erts/emulator/drivers/unix/multi_drv.c
@@ -20,7 +20,7 @@
/* Purpose: Multidriver interface
This is an example of a driver which allows multiple instances of itself.
I.e have one erlang process execute open_port(multi......) and
- at the same time have an other erlang process open an other port
+ at the same time have another erlang process open another port
running multi there as well.
*/
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index 0de69a617f..a3219c7586 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -39,7 +39,10 @@ define(HANDLE_GOT_MBUF,`
jmp 2b')
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) movq $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+# define CALL_BIF(F) \
+ movq CSYM(F)@GOTPCREL(%rip), %r11; \
+ movq %r11, P_BIF_CALLEE(P); \
+ call CSYM(hipe_debug_bif_wrapper)
#else
# define CALL_BIF(F) call CSYM(F)
#endif'
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 0a58a625b2..aa412a20c8 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -2157,7 +2157,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Entering erts_poll_wait(), timeout=%d\n",
- (int) tv->tv_sec*1000 + tv->tv_usec/1000);
+ (int) tvp->tv_sec*1000 + tvp->tv_usec/1000);
#endif
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 0b0568c31a..dfbe47786a 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -31,6 +31,7 @@ MODULES= \
a_SUITE \
after_SUITE \
alloc_SUITE \
+ async_ports_SUITE \
beam_SUITE \
beam_literals_SUITE \
bif_SUITE \
diff --git a/erts/emulator/test/async_ports_SUITE.erl b/erts/emulator/test/async_ports_SUITE.erl
new file mode 100644
index 0000000000..c89b3655ff
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE.erl
@@ -0,0 +1,118 @@
+-module(async_ports_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(PACKET_SIZE, (10 * 1024 * 8)).
+-define(CPORT_DELAY, 100).
+-define(TEST_LOOPS_COUNT, 100000).
+-define(SLEEP_BEFORE_CHECK, 1000).
+-define(TEST_PROCS_COUNT, 2).
+-define(TC_TIMETRAP_SECONDS, 10).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ permanent_busy_test
+ ].
+
+permanent_busy_test(Config) ->
+ ct:timetrap({seconds, ?TC_TIMETRAP_SECONDS}),
+ ExePath = filename:join(?config(data_dir, Config), "cport"),
+
+ Self = self(),
+ spawn_link(
+ fun() ->
+ Block = <<0:?PACKET_SIZE>>,
+
+ Port = open_port(ExePath),
+
+ Testers =
+ lists:map(
+ fun(_) ->
+ erlang:spawn_link(?MODULE, run_loop,
+ [Self,
+ Port,
+ Block,
+ ?TEST_LOOPS_COUNT,
+ 0])
+ end,
+ lists:seq(1, ?TEST_PROCS_COUNT)),
+ Self ! {test_info, Port, Testers},
+ endless_flush(Port)
+ end),
+
+ receive
+ {test_info, Port, Testers} ->
+ MaxWaitTime = round(0.7 * ?TC_TIMETRAP_SECONDS * 1000),
+ ct:log("wait testers, maximum ~w mcsec~n", [MaxWaitTime]),
+ ok = wait_testers(MaxWaitTime, Testers),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, <<"test">>, [nosuspend]) of
+ false ->
+ exit(port_dead);
+ true ->
+ ok
+ end
+ end.
+
+wait_testers(Timeout, Testers) ->
+ lists:foldl(
+ fun(Pid, AccIn) ->
+ StartWait = os:timestamp(),
+ receive
+ {Pid, port_dead} ->
+ recalc_timeout(AccIn, StartWait)
+ after AccIn ->
+ Pid ! stop,
+ recalc_timeout(AccIn, StartWait)
+ end
+ end, Timeout, Testers),
+ ok.
+
+recalc_timeout(TimeoutIn, WaitStart) ->
+ erlang:max(0, TimeoutIn - round(timer:now_diff(os:timestamp(), WaitStart)) div 1000).
+
+open_port(ExePath) ->
+ erlang:open_port({spawn, ExePath ++ " 100"}, [{packet, 4}, eof, exit_status, use_stdio, binary]).
+
+run_loop(RootProc, Port, Block, CheckLimit, BusyCnt) ->
+ receive
+ stop ->
+ ok
+ after 0 ->
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ if
+ BusyCnt + 1 > CheckLimit ->
+ check_dead(RootProc, Port, Block, CheckLimit);
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, BusyCnt + 1)
+ end
+ end
+ end.
+
+check_dead(RootProc, Port, Block, CheckLimit) ->
+ ct:log("~p: check port dead~n", [self()]),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ ct:log("not dead~n"),
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ ct:log("port dead: ~p~n", [Port]),
+ RootProc ! {self(), port_dead},
+ ok
+ end.
+
+endless_flush(Port) ->
+ receive
+ {Port, {data, _}} ->
+ endless_flush(Port);
+ {Port, SomethingWrong} ->
+ erlang:error({something_wrong, SomethingWrong})
+ end.
diff --git a/erts/emulator/test/async_ports_SUITE_data/Makefile.src b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..56da3fbe12
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
@@ -0,0 +1,15 @@
+CC = @CC@
+LD = @LD@
+CFLAGS = @CFLAGS@ @DEFS@
+CROSSLDFLAGS = @CROSSLDFLAGS@
+
+PROGS = cport@exe@
+
+
+all: $(PROGS)
+
+cport@exe@: cport@obj@
+ $(LD) $(CROSSLDFLAGS) -o cport cport@obj@ @LIBS@
+
+cport@obj@: cport.c
+ $(CC) -c -o cport@obj@ $(CFLAGS) cport.c
diff --git a/erts/emulator/test/async_ports_SUITE_data/cport.c b/erts/emulator/test/async_ports_SUITE_data/cport.c
new file mode 100644
index 0000000000..033aff382a
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/cport.c
@@ -0,0 +1,81 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#ifdef __WIN32__
+# include "windows.h"
+# include "winbase.h"
+#else
+# include <unistd.h>
+#endif
+
+typedef unsigned char byte;
+
+int read_cmd(byte *buf)
+{
+ int len;
+ if (read_exact(buf, 4) != 4)
+ return(-1);
+
+ len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+ return read_exact(buf, len);
+}
+
+int write_cmd(byte *buf, int len)
+{
+ byte li[4];
+ li[0] = (len >> 24) & 0xff;
+ li[1] = (len >> 16) & 0xff;
+ li[2] = (len >> 8) & 0xff;
+ li[3] = len & 0xff;
+ write_exact(&li, 4);
+
+ return write_exact(buf, len);
+}
+
+int read_exact(byte *buf, int len)
+{
+ int i, got=0;
+ do {
+ if ((i = read(0, buf+got, len-got)) <= 0)
+ {
+ return(i);
+ }
+ got += i;
+ } while (got<len);
+ return len;
+}
+
+int write_exact(byte *buf, int len)
+{
+ int i, wrote = 0;
+ do {
+ if ((i = write(1, buf+wrote, len-wrote)) < 0)
+ return (i);
+ wrote += i;
+ } while (wrote<len);
+ return len;
+}
+
+byte static_buf[31457280]; // 30 mb
+
+int main(int argc, char **argv) {
+ int sleep_time = atoi(argv[1]);
+ int fn, arg, res;
+ byte *buf = &static_buf[0];
+ int len = 0;
+ if (sleep_time <= 0)
+ sleep_time = 0;
+#ifdef __WIN32__
+ else
+ sleep_time = ((sleep_time - 1) / 1000) + 1; /* Milliseconds */
+#endif
+ while ((len = read_cmd(buf)) > 0) {
+#ifdef __WIN32__
+ Sleep((DWORD) sleep_time);
+#else
+ usleep(sleep_time);
+#endif
+ write_cmd(buf, len);
+ }
+}
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 4b4af0babe..2ed5aaa0d0 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -98,8 +98,10 @@ generator(0, Writer, _Data) ->
%% Calling process_info(Pid, current_function) on a suspended process
%% used to crash Beam.
- {current_function, {erlang, send, 2}} =
- process_info(Writer, current_function),
+ case process_info(Writer, [status,current_function]) of
+ [{status,suspended},{current_function,{erlang,send,2}}] -> ok;
+ [{status,suspended},{current_function,{erlang,bif_return_trap,_}}] -> ok
+ end,
unlock_slave();
generator(N, Writer, Data) ->
Writer ! {exec, Data},
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index c62bc0c454..336b6188f6 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -1062,10 +1062,9 @@ otp_6602(Config) when is_list(Config) ->
%% Inet driver use port locking...
{ok, S} = gen_udp:open(0),
{ok, Fd} = inet:getfd(S),
- {ok, Port} = inet:port(S),
%% Steal fd (lock checker used to
%% trigger here).
- {ok, _S2} = gen_udp:open(Port,[{fd,Fd}]),
+ {ok, _S2} = gen_udp:open(0,[{fd,Fd}]),
Parent ! Done
end),
?line receive Done -> ok end,
@@ -1085,7 +1084,15 @@ otp_6602(Config) when is_list(Config) ->
["async_thrs",
"sched_thrs"])).
--define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES2).
+-define(EXPECTED_SYSTEM_INFO_NAMES3,
+ (?EXPECTED_SYSTEM_INFO_NAMES2 ++
+ ["emu_nif_vsn"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES4,
+ (?EXPECTED_SYSTEM_INFO_NAMES3 ++
+ ["dirty_sched"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES4).
'driver_system_info_base_ver'(doc) ->
[];
@@ -1133,16 +1140,18 @@ check_driver_system_info_result(Result) ->
drv_vsn_str2tup(erlang:system_info(driver_version))} of
{DDVSN, DDVSN} ->
?line [] = Ns;
- {{1, 0}, _} ->
+ %% {{1, 0}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES1),
+ %% ?line ExpNs = lists:sort(Ns);
+ %% {{1, 1}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES2),
+ %% ?line ExpNs = lists:sort(Ns);
+ {{3, 0}, _} ->
?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES1),
- ?line ExpNs = lists:sort(Ns);
- {{1, 1}, _} ->
- ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES2),
- ?line ExpNs = lists:sort(Ns);
- {{2, 0}, _} ->
- ?line [] = Ns
+ -- ?EXPECTED_SYSTEM_INFO_NAMES3),
+ ?line ExpNs = lists:sort(Ns)
end.
chk_sis(SIs, Ns) ->
@@ -1189,6 +1198,14 @@ check_si_res(["async_thrs", Value]) ->
check_si_res(["sched_thrs", Value]) ->
?line Value = integer_to_list(erlang:system_info(schedulers));
+%% Data added in 3rd version of driver_system_info() (driver version 1.5)
+check_si_res(["emu_nif_vsn", _Value]) ->
+ true;
+
+%% Data added in 4th version of driver_system_info() (driver version 3.1)
+check_si_res(["dirty_sched", _Value]) ->
+ true;
+
check_si_res(Unexpected) ->
?line ?t:fail({unexpected_result, Unexpected}).
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
index e44c7dbd5e..964034f5a6 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
index 5bbc966932..6d2c47fdaf 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
@@ -40,7 +40,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d " \
+ "dirty_sched=%s"
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -54,6 +56,8 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
+ slen += 5; /* dirty_sched */
return slen;
}
@@ -71,7 +75,10 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version,
+ sip->dirty_scheduler_support ? "true" : "false");
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
index 63c69f751c..2271d7027b 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index 8ad5f290ed..2968f5bebb 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -30,7 +30,7 @@
fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1,
refc/1,refc_ets/1,refc_dist/1,
const_propagation/1,t_arity/1,t_is_function2/1,
- t_fun_info/1]).
+ t_fun_info/1,t_fun_info_mfa/1]).
-export([nothing/0]).
@@ -42,7 +42,8 @@ all() ->
[bad_apply, bad_fun_call, badarity, ext_badarity,
equality, ordering, fun_to_port, t_hash, t_phash,
t_phash2, md5, refc, refc_ets, refc_dist,
- const_propagation, t_arity, t_is_function2, t_fun_info].
+ const_propagation, t_arity, t_is_function2, t_fun_info,
+ t_fun_info_mfa].
groups() ->
[].
@@ -824,6 +825,24 @@ t_fun_info(Config) when is_list(Config) ->
?line bad_info(<<1,2>>),
ok.
+t_fun_info_mfa(Config) when is_list(Config) ->
+ Fun1 = fun spawn_call/2,
+ {module,M1} = erlang:fun_info(Fun1, module),
+ {name,F1} = erlang:fun_info(Fun1, name),
+ {arity,A1} = erlang:fun_info(Fun1, arity),
+ {M1,F1,A1=2} = erlang:fun_info_mfa(Fun1),
+ %% Module fun.
+ Fun2 = fun ?MODULE:t_fun_info/1,
+ {module,M2} = erlang:fun_info(Fun2, module),
+ {name,F2} = erlang:fun_info(Fun2, name),
+ {arity,A2} = erlang:fun_info(Fun2, arity),
+ {M2,F2,A2=1} = erlang:fun_info_mfa(Fun2),
+
+ %% Not fun.
+ {'EXIT',_} = (catch erlang:fun_info_mfa(id(d))),
+ ok.
+
+
bad_info(Term) ->
try erlang:fun_info(Term, module) of
Any ->
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index b2da6f58af..14e6585220 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -37,7 +37,9 @@
threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1,
is_checks/1,
get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
- otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1
+ otp_9828/1,
+ otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1,
+ dirty_nif_exception/1, nif_schedule/1
]).
-export([many_args_100/100]).
@@ -64,7 +66,9 @@ all() ->
resource_takeover, threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
make_string,reverse_list_test,
- otp_9668, consume_timeslice, dirty_nif, dirty_nif_send
+ otp_9828,
+ otp_9668, consume_timeslice,
+ nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception
].
groups() ->
@@ -1440,6 +1444,20 @@ otp_9668(Config) ->
?line verify_tmpmem(TmpMem),
ok.
+otp_9828(doc) -> ["Copy of writable binary"];
+otp_9828(Config) ->
+ ensure_lib_loaded(Config, 1),
+
+ otp_9828_loop(<<"I'm alive!">>, 1000).
+
+otp_9828_loop(Bin, 0) ->
+ ok;
+otp_9828_loop(Bin, Val) ->
+ WrtBin = <<Bin/binary, Val:32>>,
+ ok = otp_9828_nif(WrtBin),
+ otp_9828_loop(WrtBin, Val-1).
+
+
consume_timeslice(Config) when is_list(Config) ->
CONTEXT_REDS = 2000,
Me = self(),
@@ -1524,6 +1542,20 @@ consume_timeslice(Config) when is_list(Config) ->
ok.
+nif_schedule(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+ A = "this is a string",
+ B = {this,is,a,tuple},
+ {B,A} = call_nif_schedule(A, B),
+ ok = try call_nif_schedule(1, 2)
+ catch
+ error:badarg ->
+ [{?MODULE,call_nif_schedule,[1,2],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end,
+ ok.
+
dirty_nif(Config) when is_list(Config) ->
try erlang:system_info(dirty_cpu_schedulers) of
N when is_integer(N) ->
@@ -1556,6 +1588,24 @@ dirty_nif_send(Config) when is_list(Config) ->
{skipped,"No dirty scheduler support"}
end.
+dirty_nif_exception(Config) when is_list(Config) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ N when is_integer(N) ->
+ ensure_lib_loaded(Config),
+ try
+ call_dirty_nif_exception(),
+ ?t:fail(expected_badarg)
+ catch
+ error:badarg ->
+ [{?MODULE,call_dirty_nif_exception,[],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end
+ catch
+ error:badarg ->
+ {skipped,"No dirty scheduler support"}
+ end.
+
next_msg(_Pid) ->
receive
M -> M
@@ -1684,9 +1734,12 @@ reverse_list(_) -> ?nif_stub.
echo_int(_) -> ?nif_stub.
type_sizes() -> ?nif_stub.
otp_9668_nif(_) -> ?nif_stub.
+otp_9828_nif(_) -> ?nif_stub.
consume_timeslice_nif(_,_) -> ?nif_stub.
+call_nif_schedule(_,_) -> ?nif_stub.
call_dirty_nif(_,_,_) -> ?nif_stub.
send_from_dirty_nif(_) -> ?nif_stub.
+call_dirty_nif_exception() -> ?nif_stub.
%% maps
is_map_nif(_) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 955dc64189..291c903947 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1473,6 +1473,26 @@ static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return atom_ok;
}
+static ERL_NIF_TERM otp_9828_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ /* Copying a writable binary could reallocate it due to "emasculation"
+ and thereby render a previous inspection invalid.
+ */
+ ErlNifBinary bin1;
+ ErlNifEnv* myenv;
+
+ if (!enif_inspect_binary(env, argv[0], &bin1)) {
+ return enif_make_badarg(env);
+ }
+
+ myenv = enif_alloc_env();
+ enif_make_copy(myenv, argv[0]);
+ enif_free_env(myenv);
+
+ return memcmp(bin1.data, "I'm alive!", 10)==0 ? atom_ok : atom_false;
+}
+
+
static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int percent;
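otp_9828_nif() exercises exactly what its comment warns about: enif_make_copy() of a writable binary may reallocate ("emasculate") it, leaving an ErlNifBinary from an earlier enif_inspect_binary() pointing at stale memory. A NIF that needs both the bytes and a copy should copy first and inspect afterwards; a hedged sketch with an invented function name:

#include "erl_nif.h"

static ERL_NIF_TERM copy_then_inspect(ErlNifEnv* env, int argc,
                                      const ERL_NIF_TERM argv[])
{
    ErlNifEnv* priv_env = enif_alloc_env();
    ERL_NIF_TERM copy;
    ErlNifBinary bin;

    (void)argc;
    copy = enif_make_copy(priv_env, argv[0]);        /* may reallocate argv[0]'s binary */
    if (!enif_inspect_binary(env, argv[0], &bin)) {  /* inspect only afterwards */
        enif_free_env(priv_env);
        return enif_make_badarg(env);
    }
    /* bin.data/bin.size are valid here; 'copy' lives in priv_env until it is freed */
    (void)copy;
    enif_free_env(priv_env);
    return enif_make_atom(env, "ok");
}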
@@ -1493,25 +1513,57 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI
}
}
+static ERL_NIF_TERM nif_sched2(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ char s[64];
+ if (!enif_get_string(env, argv[2], s, sizeof s, ERL_NIF_LATIN1))
+ return enif_make_badarg(env);
+ return enif_make_tuple2(env, argv[3], argv[2]);
+}
+
+static ERL_NIF_TERM nif_sched1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM new_argv[4];
+ new_argv[0] = enif_make_atom(env, "garbage0");
+ new_argv[1] = enif_make_atom(env, "garbage1");
+ new_argv[2] = argv[0];
+ new_argv[3] = argv[1];
+ return enif_schedule_nif(env, "nif_sched2", 0, nif_sched2, 4, new_argv);
+}
+
+static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (argc != 2)
+ return enif_make_atom(env, "false");
+ return enif_schedule_nif(env, "nif_sched1", 0, nif_sched1, argc, argv);
+}
+
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
+static int have_dirty_schedulers(void)
+{
+ ErlNifSysInfo si;
+ enif_system_info(&si, sizeof(si));
+ return si.dirty_scheduler_support;
+}
+
static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int n;
char s[10];
ErlNifBinary b;
ERL_NIF_TERM result;
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
assert(enif_is_on_dirty_scheduler(env));
}
assert(argc == 3);
enif_get_int(env, argv[0], &n);
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1);
enif_inspect_binary(env, argv[2], &b);
- result = enif_make_tuple3(env,
- enif_make_int(env, n),
- enif_make_string(env, s, ERL_NIF_LATIN1),
- enif_make_binary(env, &b));
- return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer);
+ return enif_make_tuple3(env,
+ enif_make_int(env, n),
+ enif_make_string(env, s, ERL_NIF_LATIN1),
+ enif_make_binary(env, &b));
}
static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -1522,11 +1574,11 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM
assert(!enif_is_on_dirty_scheduler(env));
if (argc != 3)
return enif_make_badarg(env);
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
if (enif_get_int(env, argv[0], &n) &&
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) &&
enif_inspect_binary(env, argv[2], &b))
- return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
+ return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
else
return enif_make_badarg(env);
} else {
@@ -1534,35 +1586,42 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM
}
}
-static ERL_NIF_TERM dirty_sender(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ERL_NIF_TERM result;
ErlNifPid pid;
ErlNifEnv* menv;
int res;
- enif_get_local_pid(env, argv[0], &pid);
+ if (!enif_get_local_pid(env, argv[0], &pid))
+ return enif_make_badarg(env);
result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid));
menv = enif_alloc_env();
res = enif_send(env, &pid, menv, result);
enif_free_env(menv);
if (!res)
- /* Note the next line will crash, since dirty nifs can't return exceptions.
- * This is intentional, since enif_send should not fail if the test succeeds.
- */
- return enif_schedule_dirty_nif_finalizer(env, enif_make_badarg(env), enif_dirty_nif_finalizer);
+ return enif_make_badarg(env);
else
- return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer);
+ return result;
}
-static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- ERL_NIF_TERM result;
- ErlNifPid pid;
-
- if (!enif_get_local_pid(env, argv[0], &pid))
+ switch (argc) {
+ case 0: {
+ ERL_NIF_TERM args[255];
+ int i;
+ for (i = 0; i < 255; i++)
+ args[i] = enif_make_int(env, i);
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, 255, argv);
+ }
+ case 1:
return enif_make_badarg(env);
- return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_sender, argc, argv);
+ default:
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, argc-1, argv);
+ }
}
#endif
@@ -1741,10 +1800,13 @@ static ErlNifFunc nif_funcs[] =
{"echo_int", 1, echo_int},
{"type_sizes", 0, type_sizes},
{"otp_9668_nif", 1, otp_9668_nif},
+ {"otp_9828_nif", 1, otp_9828_nif},
{"consume_timeslice_nif", 2, consume_timeslice_nif},
+ {"call_nif_schedule", 2, call_nif_schedule},
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
{"call_dirty_nif", 3, call_dirty_nif},
- {"send_from_dirty_nif", 1, send_from_dirty_nif},
+ {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"call_dirty_nif_exception", 0, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND},
#endif
{"is_map_nif", 1, is_map_nif},
{"get_map_size_nif", 1, get_map_size_nif},
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index ff8d18eef8..8cf8377c30 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -394,18 +394,15 @@ t_string_to_integer(Config) when is_list(Config) ->
test_sti(268435455),
test_sti(-268435455),
- %% 1 bsl 28 - 1, just before 32 bit bignum
- test_sti(1 bsl 28 - 1),
- %% 1 bsl 28, just beyond 32 bit small
- test_sti(1 bsl 28),
- %% 1 bsl 33, just beyond 32 bit
- test_sti(1 bsl 33),
- %% 1 bsl 60 - 1, just before 64 bit bignum
- test_sti(1 bsl 60 - 1),
- %% 1 bsl 60, just beyond 64 bit small
- test_sti(1 bsl 60),
- %% 1 bsl 65, just beyond 64 bit
- test_sti(1 bsl 65),
+ %% Interesting values around powers of 2, such as MIN_SMALL and MAX_SMALL.
+ lists:foreach(fun(Bits) ->
+ N = 1 bsl Bits,
+ test_sti(N - 1),
+ test_sti(N),
+ test_sti(N + 1)
+ end,
+ lists:seq(16, 130)),
+
%% Bignums.
test_sti(123456932798748738738,16),
test_sti(list_to_integer(lists:duplicate(2000, $1))),
@@ -454,10 +451,11 @@ test_sti(Num) ->
end|| Base <- lists:seq(2,36)].
test_sti(Num,Base) ->
- Num = list_to_integer(int2list(Num,Base),Base),
- Num = -1*list_to_integer(int2list(Num*-1,Base),Base),
- Num = binary_to_integer(int2bin(Num,Base),Base),
- Num = -1*binary_to_integer(int2bin(Num*-1,Base),Base).
+ Neg = -Num,
+ Num = list_to_integer(int2list(Num,Base),Base),
+ Neg = list_to_integer(int2list(Num*-1,Base),Base),
+ Num = binary_to_integer(int2bin(Num,Base),Base),
+ Neg = binary_to_integer(int2bin(Num*-1,Base),Base).
% Calling this function (which is not supposed to be inlined) prevents
% the compiler from calculating the answer, so we don't test the compiler
diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl
index 46ece41096..f627eea07f 100644
--- a/erts/emulator/test/tuple_SUITE.erl
+++ b/erts/emulator/test/tuple_SUITE.erl
@@ -21,8 +21,9 @@
init_per_group/2,end_per_group/2,
t_size/1, t_tuple_size/1, t_element/1, t_setelement/1,
t_insert_element/1, t_delete_element/1,
- t_list_to_tuple/1, t_tuple_to_list/1,
- t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1,
+ t_list_to_tuple/1, t_list_to_upper_boundry_tuple/1, t_tuple_to_list/1,
+ t_make_tuple_2/1, t_make_upper_boundry_tuple_2/1, t_make_tuple_3/1,
+ t_append_element/1, t_append_element_upper_boundry/1,
build_and_match/1, tuple_with_case/1, tuple_in_guard/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -40,8 +41,10 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[build_and_match, t_size, t_tuple_size, t_list_to_tuple,
+ t_list_to_upper_boundry_tuple,
t_tuple_to_list, t_element, t_setelement,
- t_make_tuple_2, t_make_tuple_3, t_append_element,
+ t_make_tuple_2, t_make_upper_boundry_tuple_2, t_make_tuple_3,
+ t_append_element, t_append_element_upper_boundry,
t_insert_element, t_delete_element,
tuple_with_case, tuple_in_guard].
@@ -49,11 +52,21 @@ groups() ->
[].
init_per_suite(Config) ->
+ A0 = case application:start(sasl) of
+ ok -> [sasl];
+ _ -> []
+ end,
+ A = case application:start(os_mon) of
+ ok -> [os_mon|A0];
+ _ -> A0
+ end,
+ [{started_apps, A}|Config].
+
+end_per_suite(Config) ->
+ As = ?config(started_apps, Config),
+ lists:foreach(fun (A) -> application:stop(A) end, As),
Config.
-end_per_suite(_Config) ->
- ok.
-
init_per_group(_GroupName, Config) ->
Config.
@@ -176,14 +189,19 @@ t_list_to_tuple(Config) when is_list(Config) ->
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- MaxSize = size(MaxTuple),
-
{'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))),
ok.
+t_list_to_upper_boundry_tuple(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ MaxSize = size(MaxTuple),
+ ok
+ end).
+
%% Tests tuple_to_list/1.
t_tuple_to_list(Config) when is_list(Config) ->
@@ -214,8 +232,6 @@ t_make_tuple_2(Config) when is_list(Config) ->
t_make_tuple1({a}),
t_make_tuple1(erlang:make_tuple(400, [])),
- % test upper boundry, 16777215 elements
- t_make_tuple(1 bsl 24 - 1, a),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)),
@@ -225,6 +241,13 @@ t_make_tuple_2(Config) when is_list(Config) ->
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)),
ok.
+t_make_upper_boundry_tuple_2(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ t_make_tuple(1 bsl 24 - 1, a)
+ end).
+
t_make_tuple1(Element) ->
lists:foreach(fun(Size) -> t_make_tuple(Size, Element) end,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 256, 511, 512, 999,
@@ -309,13 +332,17 @@ t_delete_element(Config) when is_list(Config) ->
%% Tests the append_element/2 BIF.
t_append_element(Config) when is_list(Config) ->
- ok = t_append_element({}, 2048, 2048),
-
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
- ok.
+ ok = t_append_element({}, 2048, 2048).
+
+t_append_element_upper_boundry(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+ %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
+ ok
+ end).
t_append_element(_Tuple, 0, _High) -> ok;
t_append_element(Tuple, N, High) ->
@@ -371,3 +398,31 @@ tuple_in_guard(Config) when is_list(Config) ->
%% Use this function to avoid compile-time evaluation of an expression.
id(I) -> I.
+
+sys_mem_cond_run(ReqSizeMB, TestFun) when is_integer(ReqSizeMB) ->
+ case total_memory() of
+ TotMem when is_integer(TotMem), TotMem >= ReqSizeMB ->
+ TestFun();
+ TotMem when is_integer(TotMem) ->
+ {skipped, "Not enough memory ("++integer_to_list(TotMem)++" MB)"};
+ undefined ->
+ {skipped, "Could not retrieve memory information"}
+ end.
+
+
+total_memory() ->
+ %% Total memory in MB.
+ try
+ MemoryData = memsup:get_system_memory_data(),
+ case lists:keysearch(total_memory, 1, MemoryData) of
+ {value, {total_memory, TM}} ->
+ TM div (1024*1024);
+ false ->
+ {value, {system_total_memory, STM}} =
+ lists:keysearch(system_total_memory, 1, MemoryData),
+ STM div (1024*1024)
+ end
+ catch
+ _ : _ ->
+ undefined
+ end.