Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names          |    2
-rw-r--r--  erts/emulator/beam/beam_emu.c          |    8
-rw-r--r--  erts/emulator/beam/beam_load.c         |    6
-rw-r--r--  erts/emulator/beam/bif.c               |   11
-rw-r--r--  erts/emulator/beam/bif.h               |  118
-rw-r--r--  erts/emulator/beam/bif.tab             |   12
-rw-r--r--  erts/emulator/beam/big.c               |   24
-rw-r--r--  erts/emulator/beam/big.h               |    2
-rw-r--r--  erts/emulator/beam/binary.c            | 1157
-rw-r--r--  erts/emulator/beam/break.c             |    1
-rw-r--r--  erts/emulator/beam/erl_alloc.c         |    4
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c    |    9
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c    |   27
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/erl_bif_info.c |   72
-rw-r--r--  erts/emulator/beam/erl_binary.h        |   33
-rw-r--r--  erts/emulator/beam/erl_gc.c            |   14
-rw-r--r--  erts/emulator/beam/erl_init.c          |    8
-rw-r--r--  erts/emulator/beam/erl_lock_check.c    |   19
-rw-r--r--  erts/emulator/beam/erl_lock_count.c    |  161
-rw-r--r--  erts/emulator/beam/erl_lock_count.h    |   20
-rw-r--r--  erts/emulator/beam/erl_message.c       |   53
-rw-r--r--  erts/emulator/beam/erl_nif.c           |  513
-rw-r--r--  erts/emulator/beam/erl_nif.h           |   26
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h |    8
-rw-r--r--  erts/emulator/beam/erl_port_task.c     |   74
-rw-r--r--  erts/emulator/beam/erl_port_task.h     |    2
-rw-r--r--  erts/emulator/beam/erl_process.c       |  132
-rw-r--r--  erts/emulator/beam/erl_process.h       |   32
-rw-r--r--  erts/emulator/beam/erl_unicode.c       |    2
-rw-r--r--  erts/emulator/beam/external.c          |  144
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/global.h |   53
-rw-r--r--  erts/emulator/beam/io.c                |   82
-rw-r--r--  erts/emulator/beam/sys.h               |    2
-rw-r--r--  erts/emulator/beam/utils.c             |  493
34 files changed, 2489 insertions(+), 835 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index d28e519ae1..5d06a32941 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -116,6 +116,7 @@ atom binary_longest_prefix_trap
atom binary_longest_suffix_trap
atom binary_match_trap
atom binary_matches_trap
+atom binary_to_list_continue
atom binary_to_term_trap
atom block
atom blocked
@@ -315,6 +316,7 @@ atom line_length
atom linked_in_driver
atom links
atom list
+atom list_to_binary_continue
atom little
atom loaded
atom load_cancelled
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 1026e5f649..8bfb7d2ad2 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3503,6 +3503,7 @@ get_map_elements_fail:
* I[0]: &&call_nif
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
*/
BifFunction vbf;
@@ -3523,13 +3524,6 @@ get_map_elements_fail:
reg[0] = r(0);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
erts_post_nif(&env);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (is_non_value(nif_bif_result) && c_p->freason == TRAP) {
- Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p);
- ep->code[0] = I[-3];
- ep->code[1] = I[-2];
- }
-#endif
}
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index e96177cfd9..cfc6146b0a 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -2363,7 +2363,11 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
+#ifdef ERTS_DIRTY_SCHEDULERS
+ enum { MIN_FUNC_SZ = 4 };
+#else
+ enum { MIN_FUNC_SZ = 3 };
+#endif
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
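The padding above makes room for the four-word call_nif stub described by the I[0]..I[3] comment in the beam_emu.c hunk earlier. A purely illustrative sketch of that layout (hypothetical type, not a real emulator struct):

    /* Illustrative only: the words a loaded NIF stub occupies, per the
     * I[0]..I[3] comment in beam_emu.c above. MIN_FUNC_SZ must cover
     * all operand words, hence 4 when dirty schedulers are enabled. */
    typedef UWord BeamInstrWord;          /* hypothetical alias        */
    typedef struct {
        BeamInstrWord call_nif;           /* I[0]: &&call_nif          */
        BeamInstrWord nif_fun;            /* I[1]: NIF C function      */
        BeamInstrWord module_nif;         /* I[2]: erl_module_nif ptr  */
        BeamInstrWord dirty_nif_fun;      /* I[3]: dirty NIF variant   */
    } nif_stub_sketch;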
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 06a1230ca0..f3c05d047d 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_pid_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
}
return remote_send(p, dep, to, to, msg, suspend);
@@ -1886,8 +1888,13 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm id = erts_whereis_name_to_id(p, to);
rp = erts_proc_lookup(id);
- if (rp)
+ if (rp) {
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
goto send_message;
+ }
pt = erts_port_lookup(id,
(erts_port_synchronous_ops
@@ -1907,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1917,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
} else if (is_internal_port(to)) {
int ret_val;
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 51b77a95ed..72c55ccb55 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -124,12 +124,85 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ return THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \
do { \
(Proc)->freason = (Reason); \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
@@ -392,6 +465,51 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifndef HIPE
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY)
+
+#else
+
+#include "erl_fun.h"
+#include "hipe_mode_switch.h"
+
+/*
+ * HiPE wrappers used by native code for BIFs that disable GC while trapping.
+ * Remember to also add usage of the wrapper in ../hipe/hipe_bif_list.m4.
+ *
+ * Problem:
+ * When native code calls a BIF that traps, hipe_mode_switch will push a
+ * "trap frame" on the Erlang stack in order to find its way from beam_emu
+ * back to the native caller when finally done. If GC is disabled and the
+ * stack/heap is full, there is no place to push the "trap frame".
+ *
+ * Solution:
+ * We reserve space on the stack for the "trap frame" here before the BIF
+ * is called. If the BIF does not trap, the space is reclaimed here before
+ * returning. If the BIF traps, hipe_push_beam_trap_frame() will detect
+ * that a "trap frame" is already reserved and use it.
+ */
+
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args); \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args) \
+{ \
+ BIF_RETTYPE res; \
+ hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
+ res = BIF_NAME ## _ ## ARITY (c_p, args); \
+ if (is_value(res) || c_p->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(c_p); \
+ } \
+ return res; \
+}
+
+#endif
+
+
#include "erl_bif_table.h"
#endif
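To see what the wrapper macro produces, here is its hand-expanded instantiation for list_to_binary/1 (illustrative only; the real code is generated by token pasting, and the body follows directly from the macro definition above):

    BIF_RETTYPE hipe_wrapper_list_to_binary_1(Process* c_p, Eterm* args);
    BIF_RETTYPE hipe_wrapper_list_to_binary_1(Process* c_p, Eterm* args)
    {
        BIF_RETTYPE res;
        /* Reserve stack for a trap frame while the stack can still grow. */
        hipe_reserve_beam_trap_frame(c_p, args, 1);
        res = list_to_binary_1(c_p, args);
        if (is_value(res) || c_p->freason != TRAP) {
            /* No trap: hand the reserved space back. */
            hipe_unreserve_beam_trap_frame(c_p);
        }
        return res;
    }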
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index fbdddf09db..e68b8e6274 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -45,6 +45,7 @@ bif erlang:apply/3
bif erlang:atom_to_list/1
bif erlang:binary_to_list/1
bif erlang:binary_to_list/3
+bif erlang:binary_to_term/1
bif erlang:crc32/1
bif erlang:crc32/2
bif erlang:crc32_combine/3
@@ -151,8 +152,6 @@ bif erts_internal:port_command/3
bif erts_internal:port_control/3
bif erts_internal:port_close/1
bif erts_internal:port_connect/2
-bif erts_internal:binary_to_term/1
-bif erts_internal:binary_to_term/2
bif erts_internal:request_system_task/3
bif erts_internal:check_process_code/2
@@ -481,6 +480,11 @@ bif erlang:call_on_load_function/1
bif erlang:finish_after_on_load/2
#
+# New Bifs in R13B04
+#
+bif erlang:binary_to_term/2
+
+#
# The binary match bifs (New in R14A - EEP9)
#
@@ -597,6 +601,10 @@ bif maps:values/1
bif erts_internal:cmp_term/2
#
+# New in 17.1.
+#
+bif erlang:fun_info_mfa/1
+#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 41a041eba6..e62caa6b22 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -274,6 +274,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
+    /* Sometimes _s is 0, which triggers undefined behaviour in the \
+       (_a0>>(D_EXP-_s)) shift; this is ok because the \
+       (-_s >> (D_EXP-1)) mask zeroes the result anyway. */ \
_un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
@@ -1506,13 +1509,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
@@ -1540,21 +1545,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,7 +1572,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
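The -(Uint)x rewrites above avoid signed negation overflow: for the most negative Sint, -x is undefined behaviour, while unsigned negation is defined to wrap and yields the correct magnitude. A stand-alone illustration (hypothetical example, not OTP code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t x = INT64_MIN;
        /* uint64_t bad = (uint64_t)-x;   undefined: -INT64_MIN overflows */
        uint64_t ux = -(uint64_t)x;    /* defined: wraps to 2^63 */
        printf("%llu\n", (unsigned long long)ux);  /* 9223372036854775808 */
        return 0;
    }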
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index d80111822e..da31876d75 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index c7926f18af..f50d484576 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -31,12 +31,11 @@
#include "erl_binary.h"
#include "erl_bits.h"
-#ifdef DEBUG
-static int list_to_bitstr_buf(Eterm obj, char* buf, Uint len);
-#else
-static int list_to_bitstr_buf(Eterm obj, char* buf);
-#endif
-static int bitstr_list_len(Eterm obj, Uint* num_bytes);
+static Export binary_to_list_continue_export;
+static Export list_to_binary_continue_export;
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1);
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1);
void
erts_init_binary(void)
@@ -49,6 +48,15 @@ erts_init_binary(void)
"Internal error: Address of orig_bytes[0] of a Binary"
" is *not* 8-byte aligned\n");
}
+
+ erts_init_trap_export(&binary_to_list_continue_export,
+ am_erts_internal, am_binary_to_list_continue, 1,
+ &binary_to_list_continue);
+
+ erts_init_trap_export(&list_to_binary_continue_export,
+ am_erts_internal, am_list_to_binary_continue, 1,
+ &list_to_binary_continue);
+
}
/*
@@ -333,6 +341,132 @@ BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1)
BIF_RET(res);
}
+#define ERTS_B2L_BYTES_PER_REDUCTION 256
+
+typedef struct {
+ Eterm res;
+ Eterm *hp;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+ byte *bytes;
+ Uint size;
+ Uint bitoffs;
+} ErtsB2LState;
+
+static void b2l_state_destructor(Binary *mbp)
+{
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+}
+
+static BIF_RETTYPE
+binary_to_list_chunk(Process *c_p,
+ Eterm mb_eterm,
+ ErtsB2LState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ BIF_RETTYPE ret;
+ int bump_reds;
+ Uint size;
+ byte *bytes;
+
+ size = (reds_left + 1)*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (size > sp->size)
+ size = sp->size;
+ bytes = sp->bytes + (sp->size - size);
+
+ bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+
+ ASSERT(is_list(sp->res) || is_nil(sp->res));
+
+ sp->res = erts_bin_bytes_to_list(sp->res,
+ sp->hp,
+ bytes,
+ size,
+ sp->bitoffs);
+ sp->size -= size;
+ sp->hp += 2*size;
+
+ if (sp->size > 0) {
+
+ if (!gc_disabled)
+ erts_set_gc_state(c_p, 0);
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+ ASSERT(is_value(mb_eterm));
+ ERTS_BIF_PREP_TRAP1(ret,
+ &binary_to_list_continue_export,
+ c_p,
+ mb_eterm);
+ }
+ else {
+
+ ASSERT(sp->hp == sp->hp_end);
+ ASSERT(sp->size == 0);
+
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, sp->res);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, sp->res);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ }
+
+ return ret;
+}
+
+static ERTS_INLINE BIF_RETTYPE
+binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes, Uint size, Uint bitoffs)
+{
+ int reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ if (size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION) {
+ Eterm res;
+ BIF_RETTYPE ret;
+ int bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+ res = erts_bin_bytes_to_list(tail, hp, bytes, size, bitoffs);
+ ERTS_BIF_PREP_RET(ret, res);
+ return ret;
+ }
+ else {
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsB2LState),
+ b2l_state_destructor);
+ ErtsB2LState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm mb;
+
+ sp->res = tail;
+ sp->hp = hp;
+#ifdef DEBUG
+ sp->hp_end = sp->hp + 2*size;
+#endif
+ sp->bytes = bytes;
+ sp->size = size;
+ sp->bitoffs = bitoffs;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
+ }
+}
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return binary_to_list_chunk(BIF_P,
+ BIF_ARG_1,
+ (ErtsB2LState*) ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1)
+
BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -354,14 +488,15 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
} else {
Eterm* hp = HAlloc(BIF_P, 2 * size);
byte* bytes = binary_bytes(real_bin)+offset;
-
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes, size, bitoffs));
+ return binary_to_list(BIF_P, hp, NIL, bytes, size, bitoffs);
}
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 3)
+
BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
{
byte* bytes;
@@ -387,12 +522,13 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
}
i = stop-start+1;
hp = HAlloc(BIF_P, 2*i);
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs));
-
+ return binary_to_list(BIF_P, hp, NIL, bytes+start-1, i, bitoffs);
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(bitstring_to_list, 1)
+
BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -431,124 +567,441 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
previous = CONS(hp, make_binary(last), previous);
hp += 2;
}
- BIF_RET(erts_bin_bytes_to_list(previous, hp, bytes, size, bitoffs));
+
+ return binary_to_list(BIF_P, hp, previous, bytes, size, bitoffs);
}
/* Turn a possibly deep list of ints (and binaries) into */
/* One large binary object */
-/*
- * This bif also exists in the binary module, under the name
- * binary:list_to_bin/1, why it's divided into interface and
- * implementation. Also the backend for iolist_to_binary_1.
- */
+typedef enum {
+ ERTS_L2B_OK,
+ ERTS_L2B_YIELD,
+ ERTS_L2B_TYPE_ERROR,
+ ERTS_L2B_OVERFLOW_ERROR
+} ErtsL2BResult;
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
-{
+#define ERTS_L2B_STATE_INITER(C_P, ARG, BIF, SZFunc, TBufFunc) \
+ {ERTS_IOLIST2BUF_STATE_INITER((C_P), (ARG)), \
+ (ARG), THE_NON_VALUE, (BIF), (SZFunc), (TBufFunc)}
+
+#define ERTS_L2B_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsL2BState))
+
+typedef struct ErtsL2BState_ ErtsL2BState;
+
+struct ErtsL2BState_ {
+ ErtsIOList2BufState buf;
+ Eterm arg;
Eterm bin;
- Eterm h,t;
- ErlDrvSizeT size;
- byte* bytes;
-#ifdef DEBUG
- ErlDrvSizeT offset;
-#endif
+ Export *bif;
+ int (*iolist_to_buf_size)(ErtsIOListState *);
+ ErlDrvSizeT (*iolist_to_buf)(ErtsIOList2BufState *);
+};
+
+static ERTS_INLINE ErtsL2BResult
+list_to_binary_engine(ErtsL2BState *sp)
+{
+ ErlDrvSizeT res;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ /*
+ * have_size == 0 while sp->iolist_to_buf_size()
+ * has not finished the calculation.
+ */
+
+ if (!sp->buf.iolist.have_size) {
+ switch (sp->iolist_to_buf_size(&sp->buf.iolist)) {
+ case ERTS_IOLIST_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TYPE:
+ return ERTS_L2B_TYPE_ERROR;
+ case ERTS_IOLIST_OK:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(sp->buf.iolist.have_size);
+
+ /*
+ * Size calculated... Setup state for
+ * sp->iolist_to_buf_*()
+ */
+
+ sp->bin = new_binary(c_p,
+ (byte *) NULL,
+ sp->buf.iolist.size);
+
+ if (sp->buf.iolist.size == 0)
+ return ERTS_L2B_OK;
+
+ sp->buf.buf = (char *) binary_bytes(sp->bin);
+ sp->buf.len = sp->buf.iolist.size;
+ sp->buf.iolist.obj = sp->arg;
- if (is_nil(arg)) {
- BIF_RET(new_binary(p,(byte*)"",0));
+ if (sp->buf.iolist.reds_left <= 0) {
+ BUMP_ALL_REDS(c_p);
+ return ERTS_L2B_YIELD;
+ }
}
- if (is_not_list(arg)) {
- goto error;
+
+ ASSERT(sp->buf.iolist.size != 0);
+ ASSERT(is_value(sp->bin));
+ ASSERT(sp->buf.buf);
+
+ res = sp->iolist_to_buf(&sp->buf);
+
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res)) {
+ ASSERT(res == 0);
+ return ERTS_L2B_OK;
}
- /* check for [binary()] case */
- h = CAR(list_val(arg));
- t = CDR(list_val(arg));
- if (is_binary(h) && is_nil(t) && !(
- HEADER_SUB_BIN == *(binary_val(h)) && (
- ((ErlSubBin *)binary_val(h))->bitoffs != 0 ||
- ((ErlSubBin *)binary_val(h))->bitsize != 0
- ))) {
- return h;
- }
- switch (erts_iolist_size(arg, &size)) {
- case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT);
- case ERTS_IOLIST_TYPE: goto error;
- default: ;
- }
- bin = new_binary(p, (byte *)NULL, size);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset =
-#endif
- erts_iolist_to_buf(arg, (char*) bytes, size);
- ASSERT(offset == 0);
- BIF_RET(bin);
+ switch (res) {
+ case ERTS_IOLIST_TO_BUF_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_TO_BUF_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TO_BUF_TYPE_ERROR:
+ return ERTS_L2B_TYPE_ERROR;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from iolist_to_buf_yielding()");
+ return ERTS_L2B_TYPE_ERROR;
+ }
+}
+
+static void
+l2b_state_destructor(Binary *mbp)
+{
+ ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+}
+
+static ERTS_INLINE Eterm
+l2b_final_touch(Process *c_p, ErtsL2BState *sp)
+{
+ Eterm *hp;
+ ErlSubBin* sbin;
+ if (sp->buf.offset == 0)
+ return sp->bin;
+
+ hp = HAlloc(c_p, ERL_SUB_BIN_SIZE);
+ ASSERT(sp->buf.offset > 0);
+ sbin = (ErlSubBin *) hp;
+ sbin->thing_word = HEADER_SUB_BIN;
+ sbin->size = sp->buf.iolist.size-1;
+ sbin->offs = 0;
+ sbin->orig = sp->bin;
+ sbin->bitoffs = 0;
+ sbin->bitsize = sp->buf.offset;
+ sbin->is_writable = 0;
+ return make_binary(sbin);
+}
+
+static BIF_RETTYPE
+list_to_binary_chunk(Eterm mb_eterm,
+ ErtsL2BState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ Eterm err = BADARG;
+ BIF_RETTYPE ret;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ sp->buf.iolist.reds_left = reds_left;
- error:
- BIF_ERROR(p, BADARG);
+ switch (list_to_binary_engine(sp)) {
+
+ case ERTS_L2B_OK: {
+ Eterm result = l2b_final_touch(c_p, sp);
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, result);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, result);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+ }
+ case ERTS_L2B_YIELD:
+ if (!gc_disabled) {
+ /* first yield... */
+ Eterm *hp;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsL2BState),
+ l2b_state_destructor);
+ ErtsL2BState *new_sp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_L2B_STATE_MOVE(new_sp, sp);
+ sp = new_sp;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+
+ ASSERT(is_value(mb_eterm));
+
+ erts_set_gc_state(c_p, 0);
+ }
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+
+ ERTS_BIF_PREP_TRAP1(ret,
+ &list_to_binary_continue_export,
+ c_p,
+ mb_eterm);
+ break;
+
+ case ERTS_L2B_OVERFLOW_ERROR:
+ err = SYSTEM_LIMIT;
+ /* fall through */
+
+ case ERTS_L2B_TYPE_ERROR:
+ if (!gc_disabled)
+ ERTS_BIF_PREP_ERROR(ret, c_p, err);
+ else {
+ if (erts_set_gc_state(c_p, 1))
+ ERTS_VBUMP_ALL_REDS(c_p);
+
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret,
+ c_p,
+ err,
+ sp->bif,
+ sp->arg);
+ }
+
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from list_to_binary_engine()");
+ ERTS_BIF_PREP_ERROR(ret,c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
+ return ret;
}
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return list_to_binary_chunk(BIF_ARG_1,
+ ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+BIF_RETTYPE erts_list_to_binary_bif(Process *c_p, Eterm arg, Export *bif)
+{
+ BIF_RETTYPE ret;
+
+ if (is_nil(arg))
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) "", 0));
+ else if (is_not_list(arg))
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ else {
+ /* check for [binary()] case */
+ Eterm h = CAR(list_val(arg));
+ Eterm t = CDR(list_val(arg));
+ if (is_binary(h)
+ && is_nil(t)
+ && !(HEADER_SUB_BIN == *(binary_val(h))
+ && (((ErlSubBin *)binary_val(h))->bitoffs != 0
+ || ((ErlSubBin *)binary_val(h))->bitsize != 0))) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(c_p,
+ arg,
+ bif,
+ erts_iolist_size_yielding,
+ erts_iolist_to_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(c_p);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding iolist_to_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (erts_iolist_size_yielding(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+ Eterm bin;
+ char *buf;
+
+ if (size == 0) {
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) NULL, 0));
+ break; /* done */
+ }
+
+ bin = new_binary(c_p, (byte *) NULL, size);
+ buf = (char *) binary_bytes(bin);
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = erts_iolist_to_buf(arg, buf, size);
+ if (res == 0) {
+ BUMP_REDS(c_p, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since size has been computed list_to_binary_chunk() expects
+ * state prepared for iolist_to_buf.
+ */
+ state.bin = bin;
+ state.buf.buf = buf;
+ state.buf.len = size;
+ state.buf.iolist.obj = arg;
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_binary, 1)
+
BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_list_to_binary_1]);
}
-/* Turn a possibly deep list of ints (and binaries) into */
-/* One large binary object */
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
BIF_RET(BIF_ARG_1);
}
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
+static int bitstr_list_len(ErtsIOListState *);
+static ErlDrvSizeT list_to_bitstr_buf_yielding(ErtsIOList2BufState *);
+static ErlDrvSizeT list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *);
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_bitstring, 1)
+
BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
{
- Eterm bin;
- Uint sz;
- int offset;
- byte* bytes;
- ErlSubBin* sb1;
- Eterm* hp;
-
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
- }
- if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
- }
- switch (bitstr_list_len(BIF_ARG_1, &sz)) {
- case ERTS_IOLIST_TYPE:
- goto error;
- case ERTS_IOLIST_OVERFLOW:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- bin = new_binary(BIF_P, (byte *)NULL, sz);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, sz);
-#else
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes);
-#endif
- ASSERT(offset >= 0);
- if (offset > 0) {
- hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = sz-1;
- sb1->offs = 0;
- sb1->orig = bin;
- sb1->bitoffs = 0;
- sb1->bitsize = offset;
- sb1->is_writable = 0;
- bin = make_binary(sb1);
+ BIF_RETTYPE ret;
+
+ if (is_nil(BIF_ARG_1))
+ ERTS_BIF_PREP_RET(ret, new_binary(BIF_P, (byte *) "", 0));
+ else if (is_not_list(BIF_ARG_1))
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+ /* check for [bitstring()] case */
+ Eterm h = CAR(list_val(BIF_ARG_1));
+ Eterm t = CDR(list_val(BIF_ARG_1));
+ if (is_binary(h) && is_nil(t)) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(BIF_P,
+ BIF_ARG_1,
+ bif_export[BIF_list_to_bitstring_1],
+ bitstr_list_len,
+ list_to_bitstr_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding list_to_bitstr_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (bitstr_list_len(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+
+ state.bin = new_binary(BIF_P, (byte *) NULL, size);
+ state.buf.buf = (char *) binary_bytes(state.bin);
+ state.buf.len = size;
+ state.buf.iolist.obj = BIF_ARG_1;
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over) estimation of reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = list_to_bitstr_buf_not_yielding(&state.buf);
+ if (res == 0) {
+ Eterm res_bin = l2b_final_touch(BIF_P, &state);
+ BUMP_REDS(BIF_P, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, res_bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since size has been computed list_to_binary_chunk() expects
+ * the state prepared for list_to_bitstr_buf.
+ */
+
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+ }
}
-
- BIF_RET(bin);
+
+ return ret;
}
BIF_RETTYPE split_binary_2(BIF_ALIST_2)
@@ -605,123 +1058,353 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
* Local functions.
*/
+static int
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
/*
* The input list is assumed to be type-correct and the buffer is
* assumed to be of sufficient size. Those assumptions are verified in
* the DEBUG-built emulator.
*/
-static int
+static ErlDrvSizeT
+list_to_bitstr_buf(int yield_support, ErtsIOList2BufState *state)
+{
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
#ifdef DEBUG
-list_to_bitstr_buf(Eterm obj, char* buf, Uint len)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG \
+ len -= size + (offset>7);
#else
-list_to_bitstr_buf(Eterm obj, char* buf)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG
#endif
-{
- Eterm* objp;
- int offset = 0;
+#define LIST_TO_BITSTR_BUF_BCOPY(CONSP) \
+ do { \
+ byte* bptr; \
+ Uint bitsize; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ size_t size = binary_size(obj); \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ ASSERT(size <= len); \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ num_bits = 8*size+bitsize; \
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); \
+ offset += bitsize; \
+ buf += size + (offset>7); \
+ LIST_TO_BITSTR_BUF_BCOPY_DBG; \
+ offset = offset & 7; \
+ } while(0)
+
+#ifdef DEBUG
+ ErlDrvSizeT len;
+#endif
+ Eterm obj;
+ char *buf;
+ Eterm *objp = NULL;
+ int offset;
+ int init_yield_count = 0, yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- ASSERT(len > 0);
- if (offset == 0) {
- *buf++ = unsigned_val(obj);
- } else {
- *buf = (char)((unsigned_val(obj) >> offset) |
- ((*buf >> (8-offset)) << (8-offset)));
- buf++;
- *buf = (unsigned_val(obj) << (8-offset));
- }
+
+ obj = state->iolist.obj;
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len--;
+ len = state->len;
#endif
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ if (list_to_bitstr_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ /* Yield again... */
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ }
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len -= size + (offset>7);
+ len = state->len;
#endif
- offset = offset & 7;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else {
- ASSERT(is_nil(obj));
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size+(offset>7);
+ objp = state->objp;
+ state->objp = NULL;
+
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ ASSERT(len > 0);
+ if (offset == 0) {
+ *buf++ = unsigned_val(obj);
+ } else {
+ *buf = (char)((unsigned_val(obj) >> offset) |
+ ((*buf >> (8-offset)) << (8-offset)));
+ buf++;
+ *buf = (unsigned_val(obj) << (8-offset));
+ }
#ifdef DEBUG
- len -= size+(offset>7);
+ len--;
#endif
- offset = offset & 7;
- } else {
- ASSERT(is_nil(obj));
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
+ }
+
+ L_tail:
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
-#ifdef DEBUG
- len -= size + (offset>7);
-#endif
- offset = offset & 7;
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
} else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
ASSERT(is_nil(obj));
}
}
DESTROY_ESTACK(s);
- return offset;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+ state->buf = buf;
+ state->offset = offset;
+ return 0;
+
+L_bcopy_yield:
+
+ state->buf = buf;
+ state->offset = offset;
+#ifdef DEBUG
+ state->len = len;
+#endif
+
+ if (list_to_bitstr_buf_bcopy(state, obj, &yield_count) == 0)
+ ERTS_INTERNAL_ERROR("Missing yield");
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->offset = offset;
+ ESTACK_SAVE(s, &state->iolist.estack);
+#ifdef DEBUG
+ state->len = len;
+#endif
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
+
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(1, state);
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(0, state);
}
static int
-bitstr_list_len(Eterm obj, Uint* num_bytes)
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ int res;
+ char *buf = state->buf;
+ char *next_buf;
+ int offset = state->offset;
+ int next_offset;
+#ifdef DEBUG
+ ErlDrvSizeT len = state->len;
+ ErlDrvSizeT next_len;
+#endif
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ Uint bitsize;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ bitsize = state->bcopy.bitsize;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+
+ ASSERT(size <= len);
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ }
+
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ next_offset = offset + bitsize;
+ next_buf = buf + size+(next_offset>7);
+#ifdef DEBUG
+ next_len = len - size+(next_offset>7);
+#endif
+ next_offset &= 7;
+ num_bits = 8*size+bitsize;
+ res = 0;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.bitsize = bitsize;
+ state->bcopy.size = size - max_size;
+ next_buf = buf + max_size;
+#ifdef DEBUG
+ next_len = len - max_size;
+#endif
+ next_offset = offset;
+ num_bits = 8*max_size;
+ size = max_size;
+ res = 1;
+ }
+
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+
+ state->offset = next_offset;
+ state->buf = next_buf;
+#ifdef DEBUG
+ state->len = next_len;
+#endif
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+static int
+bitstr_list_len(ErtsIOListState *state)
{
Eterm* objp;
- Uint len = 0;
- Uint offs = 0;
+ Eterm obj;
+ Uint len, offs;
+ int res, init_yield_count, yield_count;
DECLARE_ESTACK(s);
+
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+
+ len = (Uint) state->size;
+ offs = state->offs;
+ obj = state->obj;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore estack... */
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -748,46 +1431,55 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- len++;
- if (len == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (--yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ if (len == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
+ } else {
+ if (--yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj)) {
SAFE_ADD(len, binary_size(obj));
SAFE_ADD_BITSIZE(offs, obj);
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
#undef SAFE_ADD_BITSIZE
- DESTROY_ESTACK(s);
-
/*
* Make sure that the number of bits in the bitstring will fit
* in an Uint to ensure that the binary can be matched using
@@ -800,15 +1492,42 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
if (len << 3 < len) {
goto L_overflow_error;
}
- *num_bytes = len;
- return ERTS_IOLIST_OK;
+ state->size = len;
- L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_OK;
+
+ L_return: {
+ int yc = init_yield_count - yield_count;
+ int reds;
+
+ DESTROY_ESTACK(s);
+ CLEAR_SAVED_ESTACK(&state->estack);
+
+ reds = (yc - 1)/ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) len;
+ state->have_size = 1;
+ return res;
+ }
L_overflow_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+ res = ERTS_IOLIST_OVERFLOW;
+ len = 0;
+ goto L_return;
+
+ L_type_error:
+ res = ERTS_IOLIST_TYPE;
+ len = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = len;
+ state->offs = offs;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
}
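The new code above follows one pattern throughout: do a bounded chunk of work per invocation, account for it with BUMP_REDS, and when work remains, park the state in a magic binary and trap to a continuation export (with GC disabled so the half-built heap data stays put). A condensed sketch of that skeleton, using names from this diff (illustrative; process_chunk() is a hypothetical helper and error handling is omitted):

    static BIF_RETTYPE chunked(Process *c_p, Eterm state_bin,
                               ErtsB2LState *sp, int reds_left,
                               int gc_disabled)
    {
        Uint chunk = (reds_left + 1) * ERTS_B2L_BYTES_PER_REDUCTION;
        if (chunk > sp->size)
            chunk = sp->size;
        process_chunk(sp, chunk);                    /* hypothetical */
        BUMP_REDS(c_p, (chunk - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1);

        if (sp->size > 0) {                /* more to do: yield */
            if (!gc_disabled)
                erts_set_gc_state(c_p, 0); /* pin the half-built result */
            BIF_TRAP1(&binary_to_list_continue_export, c_p, state_bin);
        }
        if (gc_disabled)
            erts_set_gc_state(c_p, 1);     /* done: re-enable GC */
        BIF_RET(sp->res);
    }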
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 7d4f52ee23..08265b590d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p)
p->current[1],
p->current[2]);
}
+ erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix);
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 05ac24e04d..90cd227fae 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
size = va_arg(argp, Uint);
va_end(argp);
erl_exit(1,
- "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
- allctr_str, op, size, t_str);
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n",
+ allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX());
break;
}
case ERTS_ALC_E_NOALLCTR:
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 45f0cc4312..a4e164bf51 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -3274,6 +3274,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
+ if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
+ /* Do an overly conservative _overflow_ check here so we don't
+ * have to deal with it from here on. I guess we could be more accurate
+ * but I don't think the need to allocate over 99% of the address space
+ * will ever arise on any machine, neither 32 nor 64 bit.
+ */
+ return NULL;
+ }
+
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index ff775691b3..3bf78adce7 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2294,18 +2294,11 @@ BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
BIF_ERROR(BIF_P,BADARG);
}
-/*
- * Ok, erlang:list_to_binary does not interrupt, and we really don't want
- * an alternative implementation for the exact same thing, why we
- * have descided to use the old non-restarting implementation for now.
- * In reality, there are seldom many iterations involved in doing this, so the
- * problem of long-running bifs is not really that big in this case.
- * So, for now we use the old implementation also in the module binary.
- */
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_binary_list_to_bin_1]);
}
typedef struct {
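The rewritten checks above first take the magnitude with an unsigned negation and then test (Sint)lentmp < 0: that single comparison fires exactly when len was the most negative Sint, whose magnitude does not fit back into a signed word. A stand-alone illustration (hypothetical example; the conversion back to signed is implementation-defined but behaves as two's complement on all ERTS targets):

    #include <stdint.h>
    #include <stdio.h>

    /* For len < 0: 1 if -len fits in int64_t, 0 only for INT64_MIN. */
    static int negatable(int64_t len)
    {
        uint64_t mag = -(uint64_t)len;   /* true magnitude, no UB  */
        return (int64_t)mag >= 0;        /* 2^63 converts negative */
    }

    int main(void)
    {
        printf("%d %d\n", negatable(-42), negatable(INT64_MIN)); /* 1 0 */
        return 0;
    }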
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 2adba9b240..6efe9d9550 100755..100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2691,6 +2691,11 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
+ BIF_RET(erts_disable_tolerant_timeofday
+ ? am_disabled
+ : am_enabled);
+ }
BIF_ERROR(BIF_P, BADARG);
}
@@ -3050,6 +3055,25 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ }
+ BIF_ERROR(p, BADARG);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
@@ -3851,16 +3875,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
+ unsigned int i;
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
+ Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
+ * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }]
*/
-
+
tries = (Uint) ethr_atomic_read(&stats->tries);
colls = (Uint) ethr_atomic_read(&stats->colls);
@@ -3869,23 +3896,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
timer_ns = stats->timer.ns;
timer_n = stats->timer_n;
- af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
+ af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
uil = erts_bld_uint( hpp, szp, line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
-
+ uit = erts_bld_uint( hpp, szp, tries);
+ uic = erts_bld_uint( hpp, szp, colls);
+
uits = erts_bld_uint( hpp, szp, timer_s);
uitns = erts_bld_uint( hpp, szp, timer_ns);
uitn = erts_bld_uint( hpp, szp, timer_n);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
-
- t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
+ vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ }
+ thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
+
+ t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3906,13 +3937,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
ASSERT(ltype);
- type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
+ type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
if (lock->flag & ERTS_LCNT_LT_ALLOC) {
/* use allocator types names as id's for allocator locks */
ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
@@ -3923,16 +3954,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
id = lock->id;
}
} else {
- id = lock->id;
+ id = lock->id;
}
-
+
for (i = 0; i < lock->n_stats; i++) {
stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
}
-
- t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3952,12 +3982,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da
dtns = erts_bld_uint( hpp, szp, data->duration.ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
- adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+ adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
- aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+ aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 819b19e566..06dfeb1260 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -166,7 +166,7 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size,
* Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1
*/
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg);
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif);
BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
@@ -236,6 +236,8 @@ erts_bin_drv_alloc_fnf(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -246,6 +248,8 @@ erts_bin_drv_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -257,6 +261,8 @@ erts_bin_nrml_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -267,11 +273,12 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ return NULL;
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -281,17 +288,14 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ erts_realloc_enomem(type, bp, size);
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
if (!nbp)
- erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV
- ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY),
- bp,
- bsize);
+ erts_realloc_enomem(type, bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -312,6 +316,7 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
{
Uint bsize = ERTS_MAGIC_BIN_SIZE(size);
Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ ASSERT(bsize > size);
if (!bptr)
erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
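The bsize < size guards above work because unsigned arithmetic wraps: bsize is size plus a fixed header overhead, so the only way the sum can come out smaller than size is if it wrapped around. A stand-alone illustration (hypothetical sizes):

    #include <limits.h>
    #include <stdio.h>

    #define HEADER_OVERHEAD 64UL   /* stand-in for sizeof(Binary)+pad */

    int main(void)
    {
        unsigned long size  = ULONG_MAX - 10UL;       /* absurd request */
        unsigned long bsize = size + HEADER_OVERHEAD; /* wraps to 53    */
        if (bsize < size)
            puts("overflow detected, refuse allocation");
        return 0;
    }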
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index aa15d2cc57..0db42d4325 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF has saved arguments, they need to be added
+ */
+ if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
+ Eterm* argv;
+ int argc;
+ if (erts_setup_nif_gc(p, &argv, &argc)) {
+ roots[n].v = argv;
+ roots[n].sz = argc;
+ n++;
+ }
+ }
+
ASSERT(n <= rootset->size);
mp = p->msg.first;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index d54658f1ea..88c4006934 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -324,7 +324,6 @@ erl_init(int ncpu,
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
erts_init_trace();
- erts_init_binary();
erts_init_bits();
erts_code_ix_init();
erts_init_fun_table();
@@ -337,6 +336,7 @@ erl_init(int ncpu,
erts_ddll_init();
init_emulator();
erts_ptab_init(); /* Must be after init_emulator() */
+ erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -2066,8 +2066,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
system_cleanup(flush_async);
save_statistics();
-
- an = abs(n);
+ if (n < 0)
+ an = -(unsigned int)n;
+ else
+ an = n;
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
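
The rewritten negation sidesteps abs(n), which is undefined behaviour when n == INT_MIN because the magnitude is not representable as an int; negating after converting to unsigned is well defined. A tiny stand-alone illustration (hypothetical, not ERTS code):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int n = INT_MIN;
        /* abs(n) would be undefined behaviour here; converting to
         * unsigned first makes the negation well defined. */
        unsigned int an = n < 0 ? -(unsigned int)n : (unsigned int)n;
        printf("%u\n", an);   /* 2147483648 with a 32-bit int */
        return 0;
    }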
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index c13eb87012..b105ece6f1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
{ "atom_tab", NULL },
{ "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
@@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -227,8 +227,7 @@ rw_op_str(Uint16 flags)
case ERTS_LC_FLG_LO_READ:
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Only write flag present");
default:
break;
}
@@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -325,8 +323,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -366,11 +363,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -1330,7 +1327,7 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 6f44bf097b..cf6996ea06 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) {
ethr_mutex_unlock(&lcnt_data_lock);
}
+const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+static ERTS_INLINE int lcnt_log2(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
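
The new lcnt_log2() computes floor(log2(v)) branch-free: the shift cascade smears the top bit downward, v - (v >> 1) isolates it, and a de Bruijn-style multiply-and-shift indexes the 64-entry table. A stand-alone cross-check against a naive loop (assuming unsigned long long is 64 bits, as Uint64 is):

    #include <assert.h>
    typedef unsigned long long u64;

    static const int tab64[64] = {
        63,  0, 58,  1, 59, 47, 53,  2, 60, 39, 48, 27, 54, 33, 42,  3,
        61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22,  4,
        62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21,
        56, 45, 25, 31, 35, 16,  9, 12, 44, 24, 15,  8, 23,  7,  6,  5 };

    static int log2_64(u64 v)              /* floor(log2(v)), v > 0 */
    {
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16; v |= v >> 32;
        return tab64[((v - (v >> 1)) * 0x07EDD5E59A4E28C2ULL) >> 58];
    }

    int main(void)
    {
        int i;
        for (i = 0; i < 64; i++) {
            u64 v = (1ULL << i) | 1ULL;    /* exercises every slot */
            int n = 0; u64 t = v;
            while (t >>= 1) n++;           /* naive floor(log2) */
            assert(log2_64(v) == n);
        }
        return 0;
    }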
static char* lcnt_lock_type(Uint16 flag) {
switch(flag & ERTS_LCNT_LT_ALL) {
@@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
stats->timer_n = 0;
stats->file = (char *)str_undefined;
stats->line = 0;
+ sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
}
static void lcnt_time(erts_lcnt_time_t *time) {
-#ifdef HAVE_GETHRTIME
+#if 0 || defined(HAVE_GETHRTIME)
SysHrTime hr_time;
hr_time = sys_gethrtime();
time->s = (unsigned long)(hr_time / 1000000000LL);
time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
-#else
- SysTimeval tv;
- sys_gettimeofday(&tv);
- time->s = tv.tv_sec;
- time->ns = tv.tv_usec*1000LL;
+#else
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ time->s = tv.tv_sec;
+ time->ns = tv.tv_usec*1000LL;
#endif
}
@@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_
dns += 1000000000LL;
}
+ ASSERT(ds >= 0);
+
d->s = ds;
d->ns = dns;
}
-/* difference d must be positive */
+/* difference d must be non-negative */
static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- unsigned long ngns = 0;
-
t->s += d->s;
t->ns += d->ns;
- ngns = t->ns / 1000000000LL;
+ t->s += t->ns / 1000000000LL;
t->ns = t->ns % 1000000000LL;
-
- t->s += ngns;
}
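
The simplified lcnt_time_add() folds the nanosecond carry straight into the seconds field, so adding {0 s, 3e8 ns} to {0 s, 9e8 ns} yields {1 s, 2e8 ns}. A stand-alone check of the same normalization:

    #include <assert.h>

    typedef struct { long s; long ns; } lcnt_time;

    /* Same carry normalization as the rewritten lcnt_time_add(). */
    static void time_add(lcnt_time *t, const lcnt_time *d)
    {
        t->s  += d->s;
        t->ns += d->ns;
        t->s  += t->ns / 1000000000L;
        t->ns  = t->ns % 1000000000L;
    }

    int main(void)
    {
        lcnt_time t = { 0, 900000000L };   /* 0.9 s */
        lcnt_time d = { 0, 300000000L };   /* 0.3 s */
        time_add(&t, &d);
        assert(t.s == 1 && t.ns == 200000000L);
        return 0;
    }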
static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
erts_lcnt_thread_data_t *eltd;
eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ if (!eltd) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
@@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) {
return "--";
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- erts_aint_t colls, tries, w_state, r_state;
- erts_lcnt_lock_stats_t *stats = NULL;
-
+static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
+ erts_aint_t w_state, r_state;
char *type;
- int i;
-
+
+ if (strcmp(lock->name, "run_queue") != 0) return;
type = lcnt_lock_type(lock->flag);
r_state = ethr_atomic_read(&lock->r_state);
w_state = ethr_atomic_read(&lock->w_state);
-
if (lock->flag & flag) {
- erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- lock->id,
- extra);
+ erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
+ action,
+ lock->name,
+ r_state,
+ w_state,
+ type,
+ lock->id);
}
}
-
-static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- if (strcmp(lock->name, "proc_main") == 0) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
- }
-}
-
#endif
static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
unsigned int i;
erts_lcnt_lock_stats_t *stats = NULL;
-
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
+ for (i = 0; i < lock->n_stats; i++) {
+ if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
+ return &(lock->stats[i]);
+ }
+ }
+ if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ stats = &lock->stats[lock->n_stats];
+ lock->n_stats++;
+ stats->file = file;
+ stats->line = line;
+ return stats;
+ }
}
return &lock->stats[0];
+}
+static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
+ int idx;
+ unsigned long r;
+
+ if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+ if (r) idx = lcnt_log2(r);
+ else idx = 0;
+ }
+ hist->ns[idx]++;
}
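
With the gettimeofday-based settings from erl_lock_count.h (RSHIFT 10, 20 slots), a wait lands in slot floor(log2(ns >> 10)): a 3 us wait goes to slot 1 (3000 >> 10 = 2), and anything over ERTS_LCNT_HISTOGRAM_MAX_NS (about 268 ms) is clamped into the last slot. A hypothetical stand-alone version of the bucketing:

    #define HIST_SLOTS  20
    #define HIST_RSHIFT 10
    #define HIST_MAX_NS ((1UL << 28) - 1)   /* ~268 ms */

    static int floor_log2(unsigned long v)  /* v > 0 */
    {
        int n = 0;
        while (v >>= 1) n++;
        return n;
    }

    /* Maps a wait time to its histogram slot, mirroring
     * lcnt_update_stats_hist() above. */
    static int hist_slot(unsigned long sec, unsigned long ns)
    {
        unsigned long r;
        if (sec > 0 || ns > HIST_MAX_NS)
            return HIST_SLOTS - 1;          /* clamp long waits */
        r = ns >> HIST_RSHIFT;              /* ~1 us per step */
        return r ? floor_log2(r) : 0;
    }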
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
+static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
+ erts_lcnt_time_t *time_wait) {
ethr_atomic_inc(&stats->tries);
@@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic
if (time_wait) {
lcnt_time_add(&(stats->timer), time_wait);
stats->timer_n++;
+ lcnt_update_stats_hist(&stats->hist,time_wait);
}
}
@@ -248,6 +275,9 @@ void erts_lcnt_init() {
/* init lcnt structure */
erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ if (!erts_lcnt_data) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
erts_lcnt_data->current_locks = erts_lcnt_list_init();
erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
@@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
erts_lcnt_lock_list_t *list;
list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ if (!list) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
list->head = NULL;
list->tail = NULL;
list->n = 0;
@@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
/* interface to erl_threads.h */
/* only lock on init and destroy, all others should use atomics */
void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+ erts_lcnt_init_lock_x(lock, name, flag, NIL);
}
+
void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
if (!name) {
@@ -360,7 +394,6 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
}
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
-
lcnt_unlock();
}
@@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* copy structure and insert the copy */
deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ if (!deleted_lock) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
deleted_lock->next = NULL;
@@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
} else {
eltd->lock_in_conflict = 0;
@@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc( &lock->w_state);
+ ethr_atomic_inc(&lock->w_state);
eltd = lcnt_get_thread_data();
@@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
* 'atomically'. All other locks will block the thread if w_state > 0
* i.e. locked.
*/
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
-
} else {
eltd->lock_in_conflict = 0;
}
@@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
/* should check if this thread was "waiting" */
-
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- ethr_atomic_dec( &lock->w_state);
+ ethr_atomic_dec(&lock->w_state);
}
/* erts_lcnt_lock_post
@@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
+ ethr_atomic_inc(&lock->flowstate);
}
#endif
@@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
ASSERT(eltd);
/* if lock was in conflict, time it */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- stats = lcnt_get_lock_stats(lock, file, line);
- } else {
- stats = &lock->stats[0];
- }
-
+ stats = lcnt_get_lock_stats(lock, file, line);
if (eltd->timer_set) {
lcnt_time(&timer);
lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
-
eltd->timer_set--;
ASSERT(eltd->timer_set >= 0);
} else {
@@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
/* flowstate */
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
- ethr_atomic_dec( &lock->flowstate);
+ ethr_atomic_dec(&lock->flowstate);
/* write state */
w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0)
+ ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
}
@@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
ethr_atomic_inc( &lock->flowstate);
#endif
ethr_atomic_inc(&lock->w_state);
-
lcnt_update_stats(&(lock->stats[0]), 0, NULL);
-
} else {
ethr_atomic_inc(&lock->stats[0].tries);
ethr_atomic_inc(&lock->stats[0].colls);
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 75f7cd028b..ffbb93da1b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -35,6 +35,10 @@
* | | | - collisions (including trylock busy)
* | | | - timer (time spent in waiting for lock)
* | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
*
* Each instance of a lock is the unique lock, i.e. set and id in that set.
* For each lock there is a set of statistics with where and what impact
@@ -68,8 +72,17 @@
#include "ethread.h"
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+/* histogram */
+#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
+#if 0 || defined(HAVE_GETHRTIME)
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
+#else
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
+#endif
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
@@ -104,6 +117,10 @@ typedef struct {
extern erts_lcnt_time_t timer_start;
+typedef struct {
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 histogram of nanosecond occurrences */
+} erts_lcnt_hist_t;
+
typedef struct erts_lcnt_lock_stats_s {
/* "tries" and "colls" needs to be atomic since
* trylock busy does not acquire a lock and there
@@ -118,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s {
unsigned long timer_n; /* #times waited for lock */
erts_lcnt_time_t timer; /* total wait time for lock */
+ erts_lcnt_hist_t hist;
} erts_lcnt_lock_stats_t;
/* rw locks uses both states, other locks only uses w_state */
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 0eb8117980..8870fac7d9 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr,
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr,
+#ifdef ERTS_SMP
+ *rcvr_locks
+#else
+ 0
+#endif
+ );
}
}
@@ -542,7 +548,13 @@ queue_message(Process *c_p,
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver);
+ erts_proc_notify_new_message(receiver,
+#ifdef ERTS_SMP
+ *receiver_locks
+#else
+ 0
+#endif
+ );
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
@@ -1032,7 +1044,6 @@ erts_send_message(Process* sender,
}
BM_SWAP_TIMER(send,system);
} else {
-#ifdef ERTS_SMP
ErlOffHeap *ohp;
Eterm *hp;
erts_aint32_t state;
@@ -1064,42 +1075,6 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
-#else
- ErlMessage* mp = message_alloc();
- Eterm *hp;
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
-
- if (receiver->stop - receiver->htop <= msize) {
- BM_SWAP_TIMER(send,system);
- erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
- BM_SWAP_TIMER(system,send);
- }
- hp = receiver->htop;
- receiver->htop = hp + msize;
- BM_SWAP_TIMER(send,copy);
- message = copy_struct(message, msize, &hp, &receiver->off_heap);
- BM_MESSAGE_COPIED(msize);
- BM_SWAP_TIMER(copy,send);
- DTRACE6(message_send, sender_name, receiver_name,
- (uint32_t)msize, tok_label, tok_lastcnt, tok_serial);
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
-#endif
- mp->next = NULL;
- mp->data.attached = NULL;
- LINK_MESSAGE(receiver, mp);
- res = receiver->msg.len;
- erts_proc_notify_new_message(receiver);
-
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
- BM_SWAP_TIMER(send,system);
-#endif /* #ifndef ERTS_SMP */
}
return res;
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ff551ea3af..1414744763 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1513,72 +1513,251 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(env->proc) == 0;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-
-/* NIFs exports need one more item than the Export struct provides, the
- * erl_module_nif*, so the DirtyNifExport below adds that. The Export
- * member must be first in the struct.
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. The saved_mfa, saved_argc, alloced_argv_sz and argv
+ * members are used to track the MFA and arguments of the top NIF in case a
+ * chain of one or more enif_schedule_nif() calls results in an exception,
+ * since in that case the original MFA and registers have to be restored
+ * before returning to Erlang to ensure stacktrace information associated
+ * with the exception is correct.
*/
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
typedef struct {
Export exp;
struct erl_module_nif* m;
-} DirtyNifExport;
+ NativeFunPtr fp;
+ Eterm saved_mfa[3];
+ int saved_argc;
+ int alloced_argv_sz;
+ Eterm argv[1];
+} NifExport;
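
The trailing argv[1] member is the pre-C99 flexible-array idiom: allocate_nif_sched_data() below over-allocates by sizeof(Eterm)*(argc-1) so that argv can hold argc terms in place. A hypothetical minimal version of the pattern:

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long Eterm;    /* stand-in for the ERTS type */

    typedef struct {
        int   argc;
        Eterm argv[1];              /* storage continues past the struct */
    } saved_args;

    static saved_args *save_args(int argc, const Eterm *src)
    {
        size_t extra = argc > 1 ? sizeof(Eterm) * (argc - 1) : 0;
        saved_args *sa = malloc(sizeof(saved_args) + extra);
        if (!sa)
            return NULL;
        sa->argc = argc;
        if (argc > 0)
            memcpy(sa->argv, src, sizeof(Eterm) * (size_t)argc);
        return sa;
    }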
-static void
-alloc_proc_psd(Process* proc, DirtyNifExport **ep)
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. This function is declared in erl_process.h.
+ */
+int
+erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ int gc = (ep && ep->saved_argc > 0);
+
+ if (gc) {
+ *objv = ep->argv;
+ *nobj = ep->saved_argc;
+ }
+ return gc;
+}
+
+/*
+ * Allocate a NifExport and set it in proc specific data
+ */
+static NifExport*
+allocate_nif_sched_data(Process* proc, int argc)
{
+ NifExport* ep;
+ size_t argv_extra, total;
int i;
- if (!*ep) {
- *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport));
- sys_memset((void*) *ep, 0, sizeof(DirtyNifExport));
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- (*ep)->exp.addressv[i] = &(*ep)->exp.code[3];
- }
- (*ep)->exp.code[3] = (BeamInstr) em_call_nif;
+
+ argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0;
+ total = sizeof(NifExport) + argv_extra;
+ ep = erts_alloc(ERTS_ALC_T_PSD, total);
+ sys_memset((void*) ep, 0, total);
+ ep->alloced_argv_sz = argc;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->exp.addressv[i] = &ep->exp.code[3];
}
- (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp);
+ ep->exp.code[3] = (BeamInstr) em_call_nif;
+ (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &ep->exp);
+ return ep;
}
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save MFA and registers only if the need_save
+ * parameter is true.
+ */
static ERL_NIF_TERM
-execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array;
- ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0];
- typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM);
- FinalizerFP fp;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- enif_get_ulong(env, reg[1], (unsigned long *) &fp);
-#endif
- result = (*fp)(env, dirty_result);
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0
- && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- return result;
+ Process* proc = env->proc;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ NifExport* ep;
+ int i;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ if (!ep)
+ ep = allocate_nif_sched_data(proc, argc);
+ else if (need_save && ep->alloced_argv_sz < argc) {
+ NifExport* new_ep = allocate_nif_sched_data(proc, argc);
+ erts_free(ERTS_ALC_T_PSD, (void*) ep);
+ ep = new_ep;
+ }
+ ERTS_VBUMP_ALL_REDS(proc);
+ for (i = 0; i < argc; i++) {
+ if (need_save)
+ ep->argv[i] = reg[i];
+ reg[i] = (Eterm) argv[i];
+ }
+ if (need_save) {
+ ep->saved_mfa[0] = proc->current[0];
+ ep->saved_mfa[1] = proc->current[1];
+ ep->saved_mfa[2] = proc->current[2];
+ ep->saved_argc = argc;
+ }
+ proc->i = (BeamInstr*) ep->exp.addressv[0];
+ ep->exp.code[0] = (BeamInstr) proc->current[0];
+ ep->exp.code[1] = (BeamInstr) proc->current[1];
+ ep->exp.code[2] = argc;
+ ep->exp.code[4] = (BeamInstr) direct_fp;
+ ep->m = env->mod_nif;
+ ep->fp = indirect_fp;
+ proc->freason = TRAP;
+ return THE_NON_VALUE;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+/*
+ * Restore saved MFA and registers. Registers are restored only when the
+ * exception flag is true.
+ */
+static void
+restore_nif_mfa(Process* proc, NifExport* ep, int exception)
+{
+ int i;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ proc->current[0] = ep->saved_mfa[0];
+ proc->current[1] = ep->saved_mfa[1];
+ proc->current[2] = ep->saved_mfa[2];
+ if (exception)
+ for (i = 0; i < ep->saved_argc; i++)
+ reg[i] = ep->argv[i];
+ ep->saved_argc = 0;
+ ep->saved_mfa[0] = THE_NON_VALUE;
+}
-ERL_NIF_TERM
-enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
- int argc, const ERL_NIF_TERM argv[])
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
+static ERL_NIF_TERM
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 0);
+ return argv[0];
+}
+
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 1);
+ return enif_make_badarg(env);
+}
+
+/*
+ * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
+ * then check the result and schedule the appropriate finalizer function
+ * where needed. Also restore the original NIF MFA when appropriate.
+ */
+static ERL_NIF_TERM
+execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+
+ /*
+ * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
+ close_lib(env->mod_nif);
+ /*
+ * If no more NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF calling
+ * context. Reuse fp essentially as a boolean for this, passing it to
+ * init_nif_sched_data below. Both dirty_nif_exception and
+ * dirty_nif_finalizer then check ep->fp to decide whether or not to
+ * restore the original calling context.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ fp = NULL;
+ if (is_non_value(result)) {
+ if (proc->freason != TRAP) {
+ ASSERT(proc->freason == BADARG);
+ return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
+ } else {
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, 1);
+ return result;
+ }
+ }
+ else
+ return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+}
+
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
+ * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
+ * type (CPU or I/O) is indicated in the flags parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
erts_aint32_t state, n, a;
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep = NULL;
- int i;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ int need_save;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- return enif_make_badarg(env);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
@@ -1590,7 +1769,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
*/
n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
n |= ERTS_PSFLG_DIRTY_IO_PROC;
@@ -1598,69 +1777,100 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
if (a == state)
break;
}
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = argc;
- for (i = 0; i < argc; i++) {
- reg[i] = (Eterm) argv[i];
- }
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) fp;
- ep->m = env->mod_nif;
- proc->freason = TRAP;
-
erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+ return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+}
- return THE_NON_VALUE;
-#else
- return (*fp)(env, argc, argv);
-#endif
+static ERL_NIF_TERM
+schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ /*
+ * If no NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF MFA.
+ */
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP);
+ return result;
}
ERL_NIF_TERM
-enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep;
+ NifExport* ep;
+ ERL_NIF_TERM fun_name_atom, result;
+ int need_save;
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- |ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
- |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = 2;
- reg[0] = (Eterm) result;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp);
-#endif
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer;
- proc->freason = TRAP;
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
- return THE_NON_VALUE;
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+
+ if (flags) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ NativeFunPtr sched_fun;
+ int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
+ if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
+ sched_fun = schedule_dirty_io_nif;
+ else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ sched_fun = schedule_dirty_cpu_nif;
+ else
+ return enif_make_badarg(env);
+ result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return (*fp)(env, result);
+ return enif_make_badarg(env);
#endif
-}
+ }
+ else
+ result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-/* A simple finalizer that just returns its result argument */
-ERL_NIF_TERM
-enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)
-{
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->exp.code[1] = (BeamInstr) fun_name_atom;
return result;
}
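
From a NIF author's perspective, enif_schedule_nif() is the yield point: do a bounded slice of work, then reschedule yourself (or a continuation function) with the remaining arguments. A hypothetical minimal NIF using the new API (module and function names are made up):

    #include "erl_nif.h"

    /* Counts down in slices of at most 1000 steps, rescheduling
     * itself between slices so it never monopolizes a scheduler. */
    static ERL_NIF_TERM countdown(ErlNifEnv* env, int argc,
                                  const ERL_NIF_TERM argv[])
    {
        unsigned long n;
        if (argc != 1 || !enif_get_ulong(env, argv[0], &n))
            return enif_make_badarg(env);
        if (n > 1000) {
            ERL_NIF_TERM rest = enif_make_ulong(env, n - 1000);
            /* flags == 0: continue on a regular scheduler */
            return enif_schedule_nif(env, "countdown", 0,
                                     countdown, 1, &rest);
        }
        return enif_make_atom(env, "done");
    }

    static ErlNifFunc funcs[] = {
        {"countdown", 1, countdown, 0}   /* 4th field: flags (2.7) */
    };

    ERL_NIF_INIT(countdown_example, funcs, NULL, NULL, NULL, NULL)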
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
@@ -1977,6 +2187,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+/*
+ * The function below is for looping through ErlNifFunc arrays, helping
+ * provide backwards compatibility across the version 2.7 change that added
+ * the "flags" field to ErlNifFunc.
+ */
+static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+{
+ ASSERT(incrp);
+ if (!*incrp) {
+ if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ *incrp = sizeof(ErlNifFunc);
+ else {
+ /*
+ * ErlNifFuncV1 below is what ErlNifFunc was before the
+ * addition of the flags field for 2.7, and is needed to handle
+ * backward compatibility.
+ */
+ typedef struct {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }ErlNifFuncV1;
+ *incrp = sizeof(ErlNifFuncV1);
+ }
+ }
+ return (ErlNifFunc*) ((char*)func + *incrp);
+}
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
@@ -2086,22 +2325,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
+
+ int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (maybe_dirty_nifs && f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+#else
+ ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
+ mod_atom, f->name, f->arity);
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (code_pp[1] - code_pp[0] < (5+4))
+#else
+ else if (code_pp[1] - code_pp[0] < (5+3))
+#endif
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
+ f = next_func(entry, &incr, f);
}
}
@@ -2127,7 +2392,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* is deprecated and was only meant as a development feature not to
* be used in production systems. (See warning below)
*/
- int k;
+ int k, old_incr = 0;
+ ErlNifFunc* old_func;
lib->priv_data = mod->curr.nif->priv_data;
ASSERT(mod->curr.nif->entry != NULL);
@@ -2136,13 +2402,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
/* Check that no NIF is removed */
+ old_func = mod->curr.nif->entry->funcs;
for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ if (old_func->arity == f->arity
+ && sys_strcmp(old_func->name, f->name) == 0) {
break;
}
+ f = next_func(entry, &incr, f);
}
if (i == entry->num_of_funcs) {
ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
@@ -2150,7 +2419,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- }
+ old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ }
erts_pre_nif(&env, BIF_P, lib);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
@@ -2197,13 +2467,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
+
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
@@ -2211,10 +2485,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
GenericBp* g = (GenericBp *) code_ptr[1];
ASSERT(code_ptr[5+0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ }
+ if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ code_ptr[5+3] = (BeamInstr) f->fptr;
+ code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) schedule_dirty_io_nif :
+ (BeamInstr) schedule_dirty_cpu_nif;
+#endif
+ }
+ else
+ code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
+ f = next_func(entry, &incr, f);
}
}
else {
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 5b93c2398e..226fc199a1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -42,9 +42,13 @@
** 2.5: R17 Maps API additions
** 2.6: R17 with maps
** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 6
+#define ERL_NIF_MINOR_VERSION 7
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -125,8 +129,10 @@ typedef struct
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
+
typedef struct enif_entry_t
{
int major;
@@ -139,8 +145,11 @@ typedef struct enif_entry_t
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
const char* vm_variant;
+ unsigned options;
}ErlNifEntry;
+/* Field bits for ErlNifEntry options */
+#define ERL_NIF_DIRTY_NIF_OPTION 1
typedef struct
@@ -232,10 +241,21 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# else
# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
# endif
-# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY do { \
+ memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)); \
+ entry.options = ERL_NIF_DIRTY_NIF_OPTION; \
+ } while(0)
+# else
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# endif
#else
# define ERL_NIF_INIT_GLOB
-# define ERL_NIF_INIT_BODY
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY entry.options = ERL_NIF_DIRTY_NIF_OPTION
+# else
+# define ERL_NIF_INIT_BODY
+# endif
# ifdef STATIC_ERLANG_NIF
# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void)
# else
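
With the 2.7 header additions above, marking a function dirty is declarative: set the new flags field in its ErlNifFunc entry and the loader patches in the dirty scheduling wrapper (see load_nif_2 earlier). A hypothetical library using it, which loads only on an emulator built with dirty scheduler support:

    #include "erl_nif.h"

    static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc,
                                   const ERL_NIF_TERM argv[])
    {
        /* ... long-running, CPU-bound native code ... */
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc funcs[] = {
        /* name, arity, fptr, flags -- the new fourth field */
        {"heavy_work", 0, heavy_work, ERL_NIF_DIRTY_JOB_CPU_BOUND}
    };

    /* ERL_NIF_INIT_BODY sets entry.options = ERL_NIF_DIRTY_NIF_OPTION
     * automatically when dirty scheduler support is compiled in. */
    ERL_NIF_INIT(dirty_example, funcs, NULL, NULL, NULL, NULL)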
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index d7c554e60b..be39816a64 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -141,10 +141,8 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM)));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM));
ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void));
#endif
@@ -289,10 +287,8 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif)
-# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer)
-# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer)
# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers)
#endif
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 31d9a1e26e..682f6f8f4b 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -68,6 +68,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
+ ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PT_STATE_SCHEDULED 0
@@ -798,12 +805,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
static ERTS_INLINE void
abort_nosuspend_task(Port *pp,
ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
@@ -991,6 +999,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -1007,6 +1016,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1018,12 +1031,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
@@ -1345,7 +1370,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1369,6 +1394,7 @@ erts_port_task_schedule(Eterm id,
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
if (pthp && erts_port_task_is_scheduled(pthp)) {
ASSERT(0);
@@ -1457,6 +1483,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1481,6 +1511,13 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
@@ -1489,8 +1526,10 @@ erts_port_task_schedule(Eterm id,
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
+ /* Emigrate port ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = erts_port_runq(pp);
@@ -1500,10 +1539,6 @@ erts_port_task_schedule(Eterm id,
#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
erts_smp_runq_unlock(runq);
@@ -1511,6 +1546,9 @@ erts_port_task_schedule(Eterm id,
done:
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
@@ -1525,7 +1563,7 @@ abort_nosuspend:
erts_port_dec_refc(pp);
#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
@@ -1609,6 +1647,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto done;
}
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+
erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1765,10 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
@@ -1791,11 +1827,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
+ if (active) {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
@@ -1804,6 +1836,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
@@ -1811,7 +1845,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
+ /* Emigrate port... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 1d30465ec9..9ef0cfcedc 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_EXEC_IMM \
| ERTS_PTS_FLG_FORCE_SCHED \
| ERTS_PTS_FLG_EXITING)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b73f9b7f92..685004f267 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -590,12 +590,10 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
= ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -2211,6 +2209,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
@@ -3755,17 +3756,25 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
} else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
@@ -5874,6 +5883,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))) {
@@ -5987,7 +5999,8 @@ change_proc_schedule_state(Process *p,
erts_aint32_t clear_state_flags,
erts_aint32_t set_state_flags,
erts_aint32_t *statep,
- erts_aint32_t *enq_prio_p)
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
/*
* NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
@@ -5996,6 +6009,11 @@ change_proc_schedule_state(Process *p,
*/
erts_aint32_t a = *statep, n;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -6005,6 +6023,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ if (lock_status)
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
while (1) {
erts_aint32_t e;
n = e = a;
@@ -6040,7 +6061,9 @@ change_proc_schedule_state(Process *p,
break;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
@@ -6053,15 +6076,18 @@ change_proc_schedule_state(Process *p,
profile_runnable_proc(p, am_active);
}
+ if (lock_status)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
*statep = a;
return enqueue;
}
static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
{
erts_aint32_t enq_prio = -1;
erts_aint32_t state = in_state;
@@ -6069,7 +6095,8 @@ schedule_process(Process *p, erts_aint32_t in_state)
0,
ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -6077,16 +6104,27 @@ schedule_process(Process *p, erts_aint32_t in_state)
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state);
+ schedule_process(p, state, locks);
}
static void
schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
{
+ /*
+ * Expects status lock to be locked when called, and
+ * returns with status lock unlocked...
+ */
erts_aint32_t a = state, n, enq_prio = -1;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6095,7 +6133,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
n = e = a;
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
+ goto cleanup; /* We don't want to schedule free processes... */
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6108,7 +6146,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
goto cleanup;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
@@ -6118,6 +6156,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
profile_runnable_proc(p, am_active);
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ prof_runnable_procs = 0;
}
if (enqueue != ERTS_ENQUEUE_NOT) {
@@ -6132,8 +6172,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
cleanup:
+
+ if (prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (proxy)
free_proxy_proc(proxy);
+
+ ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
}
static ERTS_INLINE int
@@ -6200,7 +6246,7 @@ suspend_process(Process *c_p, Process *p)
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
erts_aint32_t state, enq_prio = -1;
int enqueue;
@@ -6217,7 +6263,8 @@ resume_process(Process *p)
ERTS_PSFLG_SUSPENDED,
0,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -7818,6 +7865,9 @@ erts_start_schedulers(void)
#ifdef ETHR_HAVE_THREAD_NAMES
opts.name = malloc(80);
+ if (!opts.name) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#endif
#ifdef ERTS_SMP
@@ -8030,7 +8080,8 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8065,7 +8116,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
@@ -8223,7 +8274,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
@@ -8583,7 +8635,8 @@ resume_process_1(BIF_ALIST_1)
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
@@ -8713,7 +8766,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
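
The new locks argument carries the caller's held-lock set down into resume_process()/schedule_process() so lock checking can verify that the status lock is held while "runnable proc" trace messages are sent. A sketch of the assumed invariant (erts_proc_lc_my_proc_locks() is used the same way elsewhere in this diff):

    ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_STATUS);
    ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
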
@@ -8732,7 +8785,7 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -9968,8 +10021,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
rp_state = n;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
+    /*
+     * schedule_process_sys_task() releases the status
+     * lock on the process.
+     */
schedule_process_sys_task(rp, rp_state, NULL);
if (free_stqs)
@@ -10714,7 +10769,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state);
+ schedule_process(p, state, 0);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -11035,7 +11090,8 @@ set_proc_exiting(Process *p,
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -11076,7 +11132,8 @@ set_proc_self_exiting(Process *c_p)
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
ASSERT(!enqueue);
return state;
@@ -11721,8 +11778,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
@@ -12055,7 +12113,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state);
+ schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index ed6dadbffa..9b740f049e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -734,13 +734,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DIST_ENTRY 3
#define ERTS_PSD_CALL_TIME_BP 4
#define ERTS_PSD_DELAYED_GC_TASK_QS 5
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6
+#define ERTS_PSD_NIF_TRAP_EXPORT 6
#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
-#endif
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -767,10 +763,8 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#endif
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -1367,6 +1361,8 @@ Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
+int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
+
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1704,17 +1700,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
-void erts_schedule_process(Process *, erts_aint32_t);
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
#endif
@@ -1817,12 +1813,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \
- ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT))
-#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \
- ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE)))
-#endif
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ ((Export *) erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT))
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, DSTE) \
+ ((Export *) erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (DSTE)))
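
A hedged usage sketch of the renamed accessors; per the *_GET_LOCKS/*_SET_LOCKS definitions above, both sides require the main lock (ep is a hypothetical Export pointer):

    /* Sketch only: caller holds ERTS_PROC_LOCK_MAIN. */
    Export *old = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCK_MAIN, ep);
    Export *cur = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
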
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 3a968594f3..f8e1431a53 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -2126,6 +2126,8 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
mac = 1;
case ERL_FILENAME_UTF8:
size = strlen((char *) bytes);
+ if (size == 0)
+ return NIL;
if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
goto noconvert;
}
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 656de7c49a..8d240355b0 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -44,9 +44,6 @@
#include "erl_zlib.h"
#include "erl_map.h"
-#ifdef HIPE
-#include "hipe_mode_switch.h"
-#endif
#define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define MAX_STRING_LEN 0xffff
@@ -111,26 +108,17 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm
static Export binary_to_term_trap_export;
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b);
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif, Eterm arg0, Eterm arg1);
void erts_init_external(void) {
-#if 1 /* In R16 */
erts_init_trap_export(&term_to_binary_trap_export,
- am_erlang, am_term_to_binary_trap, 1,
+ am_erts_internal, am_term_to_binary_trap, 1,
&term_to_binary_trap_1);
erts_init_trap_export(&binary_to_term_trap_export,
- am_erlang, am_binary_to_term_trap, 1,
+ am_erts_internal, am_binary_to_term_trap, 1,
&binary_to_term_trap_1);
-#else
- sys_memset((void *) &term_to_binary_trap_export, 0, sizeof(Export));
- term_to_binary_trap_export.address = &term_to_binary_trap_export.code[3];
- term_to_binary_trap_export.code[0] = am_erlang;
- term_to_binary_trap_export.code[1] = am_term_to_binary_trap;
- term_to_binary_trap_export.code[2] = 1;
- term_to_binary_trap_export.code[3] = (BeamInstr) em_apply_bif;
- term_to_binary_trap_export.code[4] = (BeamInstr) &term_to_binary_trap_1;
-#endif
return;
}
@@ -1069,6 +1057,8 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1)
+
BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL);
@@ -1081,6 +1071,8 @@ BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
}
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 2)
+
BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
Process* p = BIF_P;
@@ -1185,6 +1177,8 @@ typedef struct B2TContext_t {
Uint32 flags;
SWord reds;
Eterm trap_bin;
+ Export *bif;
+ Eterm arg[2];
enum B2TState state;
union {
B2TSizeContext sc;
@@ -1356,7 +1350,8 @@ static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
- return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin);
+ return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
+ THE_NON_VALUE, THE_NON_VALUE);
}
@@ -1391,8 +1386,10 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
return ctx;
}
-static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b)
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif_init, Eterm arg0, Eterm arg1)
{
+ BIF_RETTYPE ret_val;
#ifdef EXTREME_B2T_TRAPPING
SWord initial_reds = 1 + b2t_rand() % 4;
#else
@@ -1409,6 +1406,9 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
ctx->state = B2TPrepare;
ctx->aligned_alloc = NULL;
ctx->flags = flags;
+ ctx->bif = bif_init;
+ ctx->arg[0] = arg0;
+ ctx->arg[1] = arg1;
IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;)
} else {
is_first_call = 0;
@@ -1504,12 +1504,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start);
/*fall through*/
case B2TBadArg:
- b2t_destroy_context(ctx);
- if (!is_first_call) {
- erts_set_gc_state(p, 1);
- }
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- BIF_ERROR(p, BADARG & ~EXF_SAVETRACE);
+
+ ASSERT(ctx->bif == bif_export[BIF_binary_to_term_1]
+ || ctx->bif == bif_export[BIF_binary_to_term_2]);
+
+ if (is_first_call)
+ ERTS_BIF_PREP_ERROR(ret_val, p, BADARG);
+ else {
+ erts_set_gc_state(p, 1);
+ if (is_non_value(ctx->arg[1]))
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0]);
+ else
+ ERTS_BIF_PREP_ERROR_TRAPPED2(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0], ctx->arg[1]);
+ }
+ b2t_destroy_context(ctx);
+ return ret_val;
case B2TDone:
b2t_destroy_context(ctx);
@@ -1524,7 +1536,8 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
erts_set_gc_state(p, 1);
}
BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- return ctx->u.dc.res;
+ ERTS_BIF_PREP_RET(ret_val, ctx->u.dc.res);
+ return ret_val;
default:
ASSERT(!"Unknown state in binary_to_term");
@@ -1541,15 +1554,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con
erts_set_gc_state(p, 0);
}
BUMP_ALL_REDS(p);
- BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin);
+
+ ERTS_BIF_PREP_TRAP1(ret_val, &binary_to_term_trap_export,
+ p, ctx->trap_bin);
+
+ return ret_val;
}
-BIF_RETTYPE erts_internal_binary_to_term_1(BIF_ALIST_1)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1)
+
+BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
{
- return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1],
+ BIF_ARG_1, THE_NON_VALUE);
}
-BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2)
+
+BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
{
Eterm opts;
Eterm opt;
@@ -1570,7 +1592,8 @@ BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2)
if (is_not_nil(opts))
goto error;
- return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL);
+ return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2],
+ BIF_ARG_1, BIF_ARG_2);
error:
BIF_ERROR(BIF_P, BADARG);
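
The bif and arg fields saved into B2TContext make it possible to report a badarg detected after the BIF has already trapped as if it were raised by the original call. A reduced sketch of the pattern (ctx is the saved context, as above):

    BIF_RETTYPE ret;
    if (is_first_call)
        ERTS_BIF_PREP_ERROR(ret, p, BADARG);          /* ordinary raise */
    else                                              /* restarted after a trap: */
        ERTS_BIF_PREP_ERROR_TRAPPED1(ret, p, BADARG,  /* stacktrace shows the */
                                     ctx->bif,        /* original BIF call    */
                                     ctx->arg[0]);
    return ret;
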
@@ -4440,66 +4463,3 @@ error:
#undef SKIP2
#undef CHKSIZE
}
-
-
-#ifdef HIPE
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1);
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2);
-
-/* Hipe wrappers used by native code for BIFs that disable GC while trapping.
- *
- * Problem:
- * When native code calls a BIF that traps, hipe_mode_switch will push a
- * "trap frame" on the Erlang stack in order to find its way back from beam_emu
- * back to native caller when finally done. If GC is disabled and stack/heap
- * is full there is no place to push the "trap frame".
- *
- * Solution:
- * We reserve space on stack for the "trap frame" here before the BIF is called.
- * If the BIF does not trap, the space is reclaimed here before returning.
- * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame"
- * already is reserved and use it.
- */
-BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = term_to_binary_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = term_to_binary_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
- res = erts_internal_binary_to_term_1(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2)
-{
- Eterm res;
- hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2);
- res = erts_internal_binary_to_term_2(BIF_P, BIF__ARGS);
- if (is_value(res) || BIF_P->freason != TRAP) {
- hipe_unreserve_beam_trap_frame(BIF_P);
- }
- return res;
-}
-#endif /*HIPE*/
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 8fcb95d0e2..891046a8b5 100755..100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -435,6 +435,8 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_ESTACK(estack) ((void) ((estack)->start = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
@@ -551,6 +553,8 @@ do {\
}\
} while(0)
+#define CLEAR_SAVED_WSTACK(wstack) ((void) ((wstack)->wstart = NULL))
+
/*
* Use on empty stack, only the allocator can be changed before this.
* The src stack is reset to NULL.
@@ -951,20 +955,67 @@ struct Sint_buf {
};
char* Sint_to_buf(Sint, struct Sint_buf*);
+#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \
+ {(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0}
+
+#define ERTS_IOLIST_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOListState))
+
+#define ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED 8
+
+typedef struct {
+ Process *c_p;
+ ErlDrvSizeT size;
+ Uint offs;
+ Eterm obj;
+ ErtsEStack estack;
+ int reds_left;
+ int have_size;
+} ErtsIOListState;
+
+#define ERTS_IOLIST2BUF_STATE_INITER(C_P, OBJ) \
+ {ERTS_IOLIST_STATE_INITER((C_P), (OBJ)), {NULL, 0, 0, 0}, NULL, 0, NULL, 0}
+
+#define ERTS_IOLIST2BUF_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOList2BufState))
+
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT 32
+#define ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED 8
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_RED \
+ (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED*ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT)
+
+typedef struct {
+ ErtsIOListState iolist;
+ struct {
+ byte *bptr;
+ size_t size;
+ Uint bitoffs;
+ Uint bitsize;
+ } bcopy;
+ char *buf;
+ ErlDrvSizeT len;
+ Eterm *objp;
+ int offset;
+} ErtsIOList2BufState;
+
#define ERTS_IOLIST_OK 0
#define ERTS_IOLIST_OVERFLOW 1
#define ERTS_IOLIST_TYPE 2
+#define ERTS_IOLIST_YIELD 3
Eterm buf_to_intlist(Eterm**, const char*, size_t, Eterm); /* most callers pass plain char*'s */
#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_YIELD (~((ErlDrvSizeT) 2))
#define ERTS_IOLIST_TO_BUF_FAILED(R) \
- (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+ (((R) & (~((ErlDrvSizeT) 3))) == (~((ErlDrvSizeT) 3)))
#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
(!ERTS_IOLIST_TO_BUF_FAILED((R)))
ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *);
+int erts_iolist_size_yielding(ErtsIOListState *state);
int erts_iolist_size(Eterm, ErlDrvSizeT *);
int is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
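
A hedged usage sketch of the yielding size calculation declared above; the trap plumbing (saving the state in a magic binary and trapping to a continuation export) is elided:

    ErtsIOListState state = ERTS_IOLIST_STATE_INITER(BIF_P, BIF_ARG_1);
    state.reds_left = ERTS_BIF_REDS_LEFT(BIF_P); /* assumed reduction budget */
    switch (erts_iolist_size_yielding(&state)) {
    case ERTS_IOLIST_OK:       /* state.size now holds the byte size */
        break;
    case ERTS_IOLIST_YIELD:    /* out of reductions: ERTS_IOLIST_STATE_MOVE()
                                  the state somewhere stable and trap */
        break;
    case ERTS_IOLIST_OVERFLOW: /* size would not fit in a Uint */
    case ERTS_IOLIST_TYPE:     /* not a well-formed iolist */
        break;
    }
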
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index edf4a28784..ae053fc191 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1218,9 +1218,10 @@ typedef struct {
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+ act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_out);
+	/*
+	 * The status lock is not held while sending "runnable
+	 * proc" trace messages. That is safe in this case,
+	 * since only this thread can send such messages for
+	 * this process until the process has been scheduled
+	 * out.
+	 */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
@@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
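
Reduced to its skeleton, the discipline above takes the port-task scheduler lock only when runnable_ports profiling is enabled, keeping profile messages ordered without penalizing the common case (sketch only):

    if (erts_system_profile_flags.runnable_ports)
        erts_port_task_sched_lock(&prt->sched);
    act = erts_smp_atomic32_read_nob(&prt->sched.flags);
    /* decide, cmpxchg in ERTS_PTS_FLG_EXEC_IMM, emit profile messages */
    if (erts_system_profile_flags.runnable_ports)
        erts_port_task_sched_unlock(&prt->sched);
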
@@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
@@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_in);
+	/*
+	 * The status lock is not held while sending "runnable
+	 * proc" trace messages. That is safe in this case,
+	 * since only this thread can send such messages for
+	 * this process until the process has been scheduled
+	 * out.
+	 */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -6129,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init(&pdl->mtx, "port_data_lock");
+ erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7166,7 +7212,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
void
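
The offsetof() form computes the same value as the old null-pointer cast, but without relying on undefined behavior; for illustration (smp_support is an existing ErlDrvSysInfo field, used here only as an example):

    size_t sz = ERL_DRV_SYS_INFO_SIZE(smp_support); /* bytes up to and
                                                       including that field */
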
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 05f07e57b2..3d8dd9c6d0 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -274,6 +274,7 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
typedef unsigned int Eterm;
typedef unsigned int Uint;
typedef int Sint;
+#define ERTS_UINT_MAX UINT_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#else
@@ -347,6 +348,7 @@ typedef long long Sint;
typedef Uint UWord;
typedef Sint SWord;
+#define ERTS_UINT_MAX ERTS_UWORD_MAX
#endif /* HALFWORD_HEAP */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 738f793020..55f9e68e78 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3197,106 +3197,303 @@ buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
**
*/
-ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+typedef enum {
+ ERTS_IL2B_BCOPY_OK,
+ ERTS_IL2B_BCOPY_YIELD,
+ ERTS_IL2B_BCOPY_OVERFLOW,
+ ERTS_IL2B_BCOPY_TYPE_ERROR
+} ErtsIL2BBCopyRes;
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
+static ERTS_INLINE ErlDrvSizeT
+iolist_to_buf(const int yield_support,
+ ErtsIOList2BufState *state,
+ Eterm obj,
+ char* buf,
+ ErlDrvSizeT alloced_len)
{
- ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
- Eterm* objp;
+#undef IOLIST_TO_BUF_BCOPY
+#define IOLIST_TO_BUF_BCOPY(CONSP) \
+do { \
+ size_t size = binary_size(obj); \
+ if (size > 0) { \
+ Uint bitsize; \
+ byte* bptr; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ if (len < size) \
+ goto L_overflow; \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ if (bitsize != 0) \
+ goto L_type_error; \
+ num_bits = 8*size; \
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \
+ buf += size; \
+ len -= size; \
+ } \
+} while (0)
+
+ ErlDrvSizeT res, len;
+ Eterm* objp = NULL;
+ int init_yield_count;
+ int yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0) {
- goto L_overflow;
- }
- *buf++ = unsigned_val(obj);
- len--;
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- if (len < size) {
+
+ len = (ErlDrvSizeT) alloced_len;
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ break;
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
+ obj = state->iolist.obj;
+ buf = state->buf;
+ len = state->len;
+ objp = state->objp;
+ state->objp = NULL;
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0) {
+ goto L_overflow;
+ }
+ *buf++ = unsigned_val(obj);
+ len--;
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+
+ L_tail:
+
+ obj = CDR(objp);
+
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(NULL);
+ } else if (is_not_nil(obj)) {
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
- goto L_type_error;
- }
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
+ IOLIST_TO_BUF_BCOPY(NULL);
} else if (is_not_nil(obj)) {
goto L_type_error;
- }
+ } else if (yield_support && --yield_count <= 0)
+ goto L_yield;
}
+ res = len;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return len;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+
+ return res;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ goto L_return;
L_overflow:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_OVERFLOW;
+ res = ERTS_IOLIST_TO_BUF_OVERFLOW;
+ goto L_return;
+
+ L_bcopy_yield:
+
+ state->buf = buf;
+ state->len = len;
+
+ switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ ERTS_INTERNAL_ERROR("Missing yield");
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
+ goto L_overflow;
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
+ goto L_type_error;
+ }
+
+ L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->len = len;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+#undef IOLIST_TO_BUF_BCOPY
+}
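
A hedged sketch of the ESTACK save/restore idiom the function above relies on; all of these macros appear elsewhere in this diff:

    DECLARE_ESTACK(s);
    ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); /* heap-allocated */
    if (state->estack.start)
        ESTACK_RESTORE(s, &state->estack);  /* resuming after a yield */
    /* work; on yield: */
    ESTACK_SAVE(s, &state->estack);         /* keep stack across the trap */
    /* on completion: */
    DESTROY_ESTACK(s);
    CLEAR_SAVED_ESTACK(&state->estack);     /* nothing left to restore */
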
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ ErtsIL2BBCopyRes res;
+ char *buf = state->buf;
+ ErlDrvSizeT len = state->len;
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+ Uint bitsize;
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+ if (size <= 0)
+ return ERTS_IL2B_BCOPY_OK;
+
+ if (len < size)
+ return ERTS_IL2B_BCOPY_OVERFLOW;
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0)
+ return ERTS_IL2B_BCOPY_TYPE_ERROR;
+ }
+
+ ASSERT(size > 0);
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ res = ERTS_IL2B_BCOPY_OK;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.size = size - max_size;
+ size = max_size;
+ res = ERTS_IL2B_BCOPY_YIELD;
+ }
+
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ state->buf += size;
+ state->len -= size;
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
+{
+ return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
+}
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+{
+ return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
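
Given the constants in global.h, a worked example of the reduction accounting above (arithmetic only; no new API assumed):

    /* ERTS_IOLIST_TO_BUF_BYTES_PER_RED = 8 * 32 = 256 bytes/reduction.
     * Copying a 64 KiB binary costs 65536/32 = 2048 yield counts,
     * i.e. 2048/8 = 256 reductions, and iolist_to_buf() yields as
     * soon as yield_count reaches zero. */
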
/*
@@ -3307,11 +3504,32 @@ ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
* Any input term error detected in erts_iolist_to_buf should also
* be detected in this function!
*/
-int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+
+static ERTS_INLINE int
+iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
+ int res, init_yield_count, yield_count;
Eterm* objp;
- Uint size = 0; /* Intentionally Uint due to halfword heap */
+ Uint size = (Uint) *sizep; /* Intentionally Uint due to halfword heap */
DECLARE_ESTACK(s);
+
+ if (!yield_support)
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ else {
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->estack);
+ size = (Uint) state->size;
+ obj = state->obj;
+ }
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -3327,51 +3545,101 @@ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- size++;
- if (size == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ size++;
+ if (size == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
- SAFE_ADD(size, binary_size(obj));
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ } else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
SAFE_ADD(size, binary_size(obj));
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
- SAFE_ADD(size, binary_size(obj));
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
- DESTROY_ESTACK(s);
*sizep = (ErlDrvSizeT) size;
- return ERTS_IOLIST_OK;
- L_overflow_error:
+ res = ERTS_IOLIST_OK;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+
+ if (yield_support) {
+ int yc, reds;
+ CLEAR_SAVED_ESTACK(&state->estack);
+ yc = init_yield_count - yield_count;
+ reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) size;
+ state->have_size = 1;
+ }
+
+ return res;
+
+ L_overflow_error:
+ res = ERTS_IOLIST_OVERFLOW;
+ size = 0;
+ goto L_return;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_TYPE;
+ size = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = size;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
+}
+
+int erts_iolist_size_yielding(ErtsIOListState *state)
+{
+ ErlDrvSizeT size = state->size;
+ return iolist_size(1, state, state->obj, &size);
+}
+
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+{
+ *sizep = 0;
+ return iolist_size(0, NULL, obj, sizep);
}
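
And the matching hedged sketch for the buffer-filling side: size the iolist first (possibly with the yielding variant above), allocate the buffer, then drive erts_iolist_to_buf_yielding() until it stops yielding:

    ErtsIOList2BufState s = ERTS_IOLIST2BUF_STATE_INITER(c_p, list);
    ErlDrvSizeT r;
    s.iolist.reds_left = reds;  /* assumed reduction budget */
    s.buf = buf;                /* pre-allocated buffer of 'size' bytes */
    s.len = size;
    r = erts_iolist_to_buf_yielding(&s);
    if (r == ERTS_IOLIST_TO_BUF_YIELD) {
        /* ERTS_IOLIST2BUF_STATE_MOVE() the state and trap; call
           erts_iolist_to_buf_yielding(&s) again on restart */
    } else if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
        /* ERTS_IOLIST_TO_BUF_OVERFLOW or ERTS_IOLIST_TO_BUF_TYPE_ERROR */
    } else {
        /* success: r == unused bytes remaining in buf */
    }
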
/* return 0 if item is not a non-empty flat list of bytes */
@@ -3680,6 +3948,9 @@ erts_save_emu_args(int argc, char **argv)
size += sz+1;
}
ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#ifdef DEBUG
end_ptr = ptr + size;
#endif