Diffstat (limited to 'erts/emulator/beam')
-rw-r--r-- | erts/emulator/beam/beam_load.c          |  31
-rw-r--r-- | erts/emulator/beam/bif.c                |  18
-rw-r--r-- | erts/emulator/beam/dist.c               |   6
-rw-r--r-- | erts/emulator/beam/erl_alloc.types      |   1
-rw-r--r-- | erts/emulator/beam/erl_bif_trace.c      |  13
-rw-r--r-- | erts/emulator/beam/erl_message.c        |   2
-rw-r--r-- | erts/emulator/beam/erl_nif.c            |   2
-rw-r--r-- | erts/emulator/beam/erl_proc_sig_queue.c |   2
-rw-r--r-- | erts/emulator/beam/erl_process.c        |  62
-rw-r--r-- | erts/emulator/beam/erl_process.h        |   2
-rw-r--r-- | erts/emulator/beam/erl_trace.c          |   3
-rw-r--r-- | erts/emulator/beam/erl_trace.h          |   4
-rw-r--r-- | erts/emulator/beam/erl_utils.h          |   1
-rw-r--r-- | erts/emulator/beam/instrs.tab           |   9
-rw-r--r-- | erts/emulator/beam/ops.tab              |  69
-rw-r--r-- | erts/emulator/beam/sys.h                |   6
-rw-r--r-- | erts/emulator/beam/utils.c              | 723
17 files changed, 682 insertions, 272 deletions
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 941c3ebbbe..0de694f449 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -3131,27 +3131,6 @@ mixed_types(LoaderState* stp, GenOpArg Size, GenOpArg* Rest) return 0; } -static int -is_killed_apply(LoaderState* stp, GenOpArg Reg, GenOpArg Live) -{ - return Reg.type == TAG_x && Live.type == TAG_u && - Live.val+2 <= Reg.val; -} - -static int -is_killed(LoaderState* stp, GenOpArg Reg, GenOpArg Live) -{ - return Reg.type == TAG_x && Live.type == TAG_u && - Live.val <= Reg.val; -} - -static int -is_killed_by_call_fun(LoaderState* stp, GenOpArg Reg, GenOpArg Live) -{ - return Reg.type == TAG_x && Live.type == TAG_u && - Live.val+1 <= Reg.val; -} - /* * Test whether register Reg is killed by make_fun instruction that * creates the fun given by index idx. @@ -3172,16 +3151,6 @@ is_killed_by_make_fun(LoaderState* stp, GenOpArg Reg, GenOpArg idx) } /* - * Test whether register Reg is killed by the send instruction that follows. - */ - -static int -is_killed_by_send(LoaderState* stp, GenOpArg Reg) -{ - return Reg.type == TAG_x && 2 <= Reg.val; -} - -/* * Generate an instruction for element/2. */ diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index b81056c774..dd04018ce6 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -1987,7 +1987,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm return_term, Eterm *refp, trace_send(p, portid, msg); if (have_seqtrace(SEQ_TRACE_TOKEN(p))) { - seq_trace_update_send(p); + seq_trace_update_serial(p); seq_trace_output(SEQ_TRACE_TOKEN(p), msg, SEQ_TRACE_SEND, portid, p); } @@ -4866,9 +4866,13 @@ BIF_RETTYPE phash_2(BIF_ALIST_2) BIF_RETTYPE phash2_1(BIF_ALIST_1) { Uint32 hash; - - hash = make_hash2(BIF_ARG_1); - BIF_RET(make_small(hash & ((1L << 27) - 1))); + Eterm trap_state = THE_NON_VALUE; + hash = trapping_make_hash2(BIF_ARG_1, &trap_state, BIF_P); + if (trap_state == THE_NON_VALUE) { + BIF_RET(make_small(hash & ((1L << 27) - 1))); + } else { + BIF_TRAP1(bif_export[BIF_phash2_1], BIF_P, trap_state); + } } BIF_RETTYPE phash2_2(BIF_ALIST_2) @@ -4876,6 +4880,7 @@ BIF_RETTYPE phash2_2(BIF_ALIST_2) Uint32 hash; Uint32 final_hash; Uint32 range; + Eterm trap_state = THE_NON_VALUE; /* Check for special case 2^32 */ if (term_equals_2pow32(BIF_ARG_2)) { @@ -4887,7 +4892,10 @@ BIF_RETTYPE phash2_2(BIF_ALIST_2) } range = (Uint32) u; } - hash = make_hash2(BIF_ARG_1); + hash = trapping_make_hash2(BIF_ARG_1, &trap_state, BIF_P); + if (trap_state != THE_NON_VALUE) { + BIF_TRAP2(bif_export[BIF_phash2_2], BIF_P, trap_state, BIF_ARG_2); + } if (range) { final_hash = hash % range; /* [0..range-1] */ } else { diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index ff19ef018e..1329d9d55f 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1051,7 +1051,7 @@ erts_dsig_send_msg(ErtsDSigSendContext* ctx, Eterm remote, Eterm message) #endif if (have_seqtrace(SEQ_TRACE_TOKEN(sender))) { - seq_trace_update_send(sender); + seq_trace_update_serial(sender); token = SEQ_TRACE_TOKEN(sender); seq_trace_output(token, message, SEQ_TRACE_SEND, remote, sender); } @@ -1125,7 +1125,7 @@ erts_dsig_send_reg_msg(ErtsDSigSendContext* ctx, Eterm remote_name, Eterm messag #endif if (have_seqtrace(SEQ_TRACE_TOKEN(sender))) { - seq_trace_update_send(sender); + seq_trace_update_serial(sender); token = SEQ_TRACE_TOKEN(sender); seq_trace_output(token, message, SEQ_TRACE_SEND, remote_name, sender); } @@ -1184,7 +1184,7 @@ 
erts_dsig_send_exit_tt(ErtsDSigSendContext *ctx, Eterm local, Eterm remote, msg = reason; if (have_seqtrace(token)) { - seq_trace_update_send(ctx->c_p); + seq_trace_update_serial(ctx->c_p); seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local); if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD) { ctl = TUPLE4(&ctx->ctl_heap[0], diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 92e5069c71..58d586453c 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -277,6 +277,7 @@ type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state type CONT_EXIT_TRAP SHORT_LIVED PROCESSES continue_exit_trap_state type SEQ_YIELD_STATE SHORT_LIVED SYSTEM dist_seq_yield_state +type PHASH2_TRAP SHORT_LIVED PROCESSES phash2_trap_state type ENVIRONMENT SYSTEM SYSTEM environment diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 711e62c795..cf5339feb9 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -1858,6 +1858,8 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2, if (arg1 == am_send) { current_flag = SEQ_TRACE_SEND; + } else if (arg1 == am_spawn) { + current_flag = SEQ_TRACE_SPAWN; } else if (arg1 == am_receive) { current_flag = SEQ_TRACE_RECEIVE; } else if (arg1 == am_print) { @@ -1966,8 +1968,9 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item) } if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) { - if ((item == am_send) || (item == am_receive) || - (item == am_print) || (item == am_timestamp) + if ((item == am_send) || (item == am_spawn) || + (item == am_receive) || (item == am_print) + || (item == am_timestamp) || (item == am_monotonic_timestamp) || (item == am_strict_monotonic_timestamp)) { hp = HAlloc(p,3); @@ -1982,6 +1985,8 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item) if (item == am_send) { current_flag = SEQ_TRACE_SEND; + } else if (item == am_spawn) { + current_flag = SEQ_TRACE_SPAWN; } else if (item == am_receive) { current_flag = SEQ_TRACE_RECEIVE; } else if (item == am_print) { @@ -2031,7 +2036,7 @@ BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1) if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) { BIF_RET(am_false); } - seq_trace_update_send(BIF_P); + seq_trace_update_serial(BIF_P); seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_1, SEQ_TRACE_PRINT, NIL, BIF_P); BIF_RET(am_true); @@ -2055,7 +2060,7 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2) } if (SEQ_TRACE_TOKEN_LABEL(BIF_P) != BIF_ARG_1) BIF_RET(am_false); - seq_trace_update_send(BIF_P); + seq_trace_update_serial(BIF_P); seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_2, SEQ_TRACE_PRINT, NIL, BIF_P); BIF_RET(am_true); diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 6645341512..c27c3b5423 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -674,7 +674,7 @@ erts_send_message(Process* sender, * Make sure we don't use the heap between those instances. 
*/ if (have_seqtrace(stoken)) { - seq_trace_update_send(sender); + seq_trace_update_serial(sender); seq_trace_output(stoken, message, SEQ_TRACE_SEND, receiver->common.id, sender); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 1fbe362330..ce43cb9e71 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -815,7 +815,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, } #endif if (have_seqtrace(stoken)) { - seq_trace_update_send(c_p); + seq_trace_update_serial(c_p); seq_trace_output(stoken, msg, SEQ_TRACE_SEND, rp->common.id, c_p); } diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c index f58a606d57..fb8a860b27 100644 --- a/erts/emulator/beam/erl_proc_sig_queue.c +++ b/erts/emulator/beam/erl_proc_sig_queue.c @@ -995,7 +995,7 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag, seq_trace = c_p && have_seqtrace(token); if (seq_trace) - seq_trace_update_send(c_p); + seq_trace_update_serial(c_p); #ifdef USE_VM_PROBES utag_sz = 0; diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 1f6adb98ef..70f48ddd97 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -11019,8 +11019,13 @@ erts_set_gc_state(Process *c_p, int enable) ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); if (!enable) { - c_p->flags |= F_DISABLE_GC; - return 0; + /* Strictly speaking it's not illegal to disable the GC when it's + * already disabled, but we risk enabling the GC prematurely if (for + * example) a BIF were to blindly disable it when trapping and then + * re-enable it before returning its result. */ + ASSERT(!(c_p->flags & F_DISABLE_GC)); + c_p->flags |= F_DISABLE_GC; + return 0; } c_p->flags &= ~F_DISABLE_GC; @@ -11610,9 +11615,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->mbuf_sz = 0; erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); p->dictionary = NULL; - p->seq_trace_lastcnt = 0; - p->seq_trace_clock = 0; - SEQ_TRACE_TOKEN(p) = NIL; #ifdef USE_VM_PROBES DT_UTAG(p) = NIL; DT_UTAG_FLAGS(p) = 0; @@ -11633,6 +11635,45 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->fp_exception = 0; #endif + /* seq_trace is handled before regular tracing as the latter may touch the + * trace token. */ + if (have_seqtrace(SEQ_TRACE_TOKEN(parent))) { + Eterm token; + Uint token_sz; + Eterm *hp; + + ASSERT(SEQ_TRACE_TOKEN_ARITY(parent) == 5); + ASSERT(is_immed(SEQ_TRACE_TOKEN_FLAGS(parent))); + ASSERT(is_immed(SEQ_TRACE_TOKEN_SERIAL(parent))); + ASSERT(is_immed(SEQ_TRACE_TOKEN_LASTCNT(parent))); + + seq_trace_update_serial(parent); + + token = SEQ_TRACE_TOKEN(parent); + token_sz = size_object(token); + + hp = HAlloc(p, token_sz); + SEQ_TRACE_TOKEN(p) = copy_struct(token, token_sz, &hp, &MSO(p)); + + /* The counters behave the same way on spawning as they do on messages; + * we don't inherit our parent's lastcnt. 
*/ + p->seq_trace_lastcnt = parent->seq_trace_clock; + p->seq_trace_clock = parent->seq_trace_clock; + + ASSERT((locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) == + (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)); + + locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + + seq_trace_output(token, NIL, SEQ_TRACE_SPAWN, p->common.id, parent); + } else { + SEQ_TRACE_TOKEN(p) = NIL; + p->seq_trace_lastcnt = 0; + p->seq_trace_clock = 0; + } + if (IS_TRACED(parent)) { if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) { ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS); @@ -11654,9 +11695,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). } } if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) { - locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + /* The locks may already be released if seq_trace is enabled as + * well. */ + if ((locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) + == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) { + locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + } trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args); if (so->flags & SPO_LINK) trace_proc(parent, locks, parent, am_link, p->common.id); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 6118c671ee..93a3215c17 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -1481,6 +1481,8 @@ extern int erts_system_profile_ts_type; #define SEQ_TRACE_SEND (1 << 0) #define SEQ_TRACE_RECEIVE (1 << 1) #define SEQ_TRACE_PRINT (1 << 2) +/* (This three-bit gap contains the timestamp.) */ +#define SEQ_TRACE_SPAWN (1 << 6) #define ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT 3 diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index c85a7df5ec..6e462b64ef 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -826,7 +826,7 @@ trace_receive(Process* receiver, } int -seq_trace_update_send(Process *p) +seq_trace_update_serial(Process *p) { ErtsTracer seq_tracer = erts_get_system_seq_tracer(); ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p)))); @@ -894,6 +894,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type, switch (type) { case SEQ_TRACE_SEND: type_atom = am_send; break; + case SEQ_TRACE_SPAWN: type_atom = am_spawn; break; case SEQ_TRACE_PRINT: type_atom = am_print; break; case SEQ_TRACE_RECEIVE: type_atom = am_receive; break; default: diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h index b7844d1cb0..bb5c9ac276 100644 --- a/erts/emulator/beam/erl_trace.h +++ b/erts/emulator/beam/erl_trace.h @@ -163,7 +163,9 @@ seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom)) void seq_trace_output_generic(Eterm token, Eterm msg, Uint type, Eterm receiver, Process *process, Eterm exitfrom); -int seq_trace_update_send(Process *process); +/* Bump the sequence number if tracing is enabled; must be used before sending + * send/spawn trace messages. 
*/ +int seq_trace_update_serial(Process *process); Eterm erts_seq_trace(Process *process, Eterm atom_type, Eterm atom_true_or_false, diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 430ac305c5..449243a9b7 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -70,6 +70,7 @@ int erts_fit_in_bits_uint(Uint); Sint erts_list_length(Eterm); int erts_is_builtin(Eterm, Eterm, int); Uint32 make_hash2(Eterm); +Uint32 trapping_make_hash2(Eterm, Eterm*, struct process*); Uint32 make_hash(Eterm); Uint32 make_internal_hash(Eterm, Uint32 salt); diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab index 7cffe7fb5c..bc8c1189a8 100644 --- a/erts/emulator/beam/instrs.tab +++ b/erts/emulator/beam/instrs.tab @@ -683,10 +683,11 @@ swap(R1, R2) { $R2 = V; } -swap_temp(R1, R2, Tmp) { - Eterm V = $R1; - $R1 = $R2; - $R2 = $Tmp = V; +swap2(R1, R2, R3) { + Eterm V = $R2; + $R2 = $R1; + $R1 = $R3; + $R3 = V; } test_heap(Nh, Live) { diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 10ca74cd60..1beeb67c1f 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -324,76 +324,15 @@ move_src_window2 y x x move_src_window3 y x x x move_src_window4 y x x x x -# Swap registers. -move R1=xy Tmp=x | move R2=xy R1 | move Tmp R2 => swap_temp R1 R2 Tmp - -# The compiler uses x(1022) when swapping registers. It will definitely -# not be used again. -swap_temp R1 R2 Tmp=x==1022 => swap R1 R2 - -swap_temp R1 R2 Tmp | move Src Tmp => swap R1 R2 | move Src Tmp - -swap_temp R1 R2 Tmp | line Loc | apply Live | is_killed_apply(Tmp, Live) => \ - swap R1 R2 | line Loc | apply Live -swap_temp R1 R2 Tmp | line Loc | apply_last Live D | is_killed_apply(Tmp, Live) => \ - swap R1 R2 | line Loc | apply_last Live D - -swap_temp R1 R2 Tmp | line Loc | call_fun Live | is_killed_by_call_fun(Tmp, Live) => \ - swap R1 R2 | line Loc | call_fun Live -swap_temp R1 R2 Tmp | make_fun2 OldIndex=u | is_killed_by_make_fun(Tmp, OldIndex) => \ - swap R1 R2 | make_fun2 OldIndex - -swap_temp R1 R2 Tmp | line Loc | call Live Addr | is_killed(Tmp, Live) => \ - swap R1 R2 | line Loc | call Live Addr -swap_temp R1 R2 Tmp | call_only Live Addr | \ - is_killed(Tmp, Live) => swap R1 R2 | call_only Live Addr -swap_temp R1 R2 Tmp | call_last Live Addr D | \ - is_killed(Tmp, Live) => swap R1 R2 | call_last Live Addr D - -swap_temp R1 R2 Tmp | line Loc | call_ext Live Addr | is_killed(Tmp, Live) => \ - swap R1 R2 | line Loc | call_ext Live Addr -swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \ - is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_only Live Addr -swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \ - is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D - -swap_temp R1 R2 Tmp | call_ext Live Addr | is_killed(Tmp, Live) => \ - swap R1 R2 | call_ext Live Addr -swap_temp R1 R2 Tmp | call_ext_only Live Addr | is_killed(Tmp, Live) => \ - swap R1 R2 | call_ext_only Live Addr -swap_temp R1 R2 Tmp | call_ext_last Live Addr D | is_killed(Tmp, Live) => \ - swap R1 R2 | call_ext_last Live Addr D - -swap_temp R1 R2 Tmp | move Src Any | line Loc | call Live Addr | \ - is_killed(Tmp, Live) | distinct(Tmp, Src) => \ - swap R1 R2 | move Src Any | line Loc | call Live Addr -swap_temp R1 R2 Tmp | move Src Any | line Loc | call_ext Live Addr | \ - is_killed(Tmp, Live) | distinct(Tmp, Src) => \ - swap R1 R2 | move Src Any | line Loc | call_ext Live Addr -swap_temp R1 R2 Tmp | move Src Any | call_only 
Live Addr | \ - is_killed(Tmp, Live) | distinct(Tmp, Src) => \ - swap R1 R2 | move Src Any | call_only Live Addr -swap_temp R1 R2 Tmp | move Src Any | line Loc | call_ext_only Live Addr | \ - is_killed(Tmp, Live) | distinct(Tmp, Src) => \ - swap R1 R2 | move Src Any | line Loc | call_ext_only Live Addr -swap_temp R1 R2 Tmp | move Src Any | line Loc | call_fun Live | \ - is_killed(Tmp, Live) | distinct(Tmp, Src) => \ - swap R1 R2 | move Src Any | line Loc | call_fun Live - -swap_temp R1 R2 Tmp | line Loc | send | is_killed_by_send(Tmp) => \ - swap R1 R2 | line Loc | send - -# swap_temp/3 with Y register operands are rare. -swap_temp R1 R2=y Tmp => swap R1 R2 | move R2 Tmp -swap_temp R1=y R2 Tmp => swap R1 R2 | move R2 Tmp - swap R1=x R2=y => swap R2 R1 -swap_temp x x x - swap xy x swap y y +swap R1=x R2=x | swap R3=x R1 => swap2 R1 R2 R3 + +swap2 x x x + # move_shift move SD=x D=x | move Src=cxy SD=x | distinct(D, Src) => move_shift Src SD D diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index c261c8e117..acc321aa51 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -92,6 +92,12 @@ # define ERTS_GLB_INLINE_INCL_FUNC_DEF 0 #endif +#ifdef __GNUC__ +# define ERTS_NOINLINE __attribute__((__noinline__)) +#else +# define ERTS_NOINLINE +#endif + #if defined(VALGRIND) && !defined(NO_FPE_SIGNALS) # define NO_FPE_SIGNALS #endif diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 0bbae65e28..88cdcc2675 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -1069,54 +1069,237 @@ do { \ #define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */ -static Uint32 -block_hash(byte *k, Uint length, Uint32 initval) +typedef struct { + Uint32 a,b,c; +} ErtsBlockHashHelperCtx; + +#define BLOCK_HASH_BYTES_PER_ITER 12 + +/* The three functions below are separated into different functions even + though they are always used together to make trapping and handling + of unaligned binaries easier. 
Examples of how they are used can be + found in block_hash and make_hash2_helper.*/ +static ERTS_INLINE +void block_hash_setup(Uint32 initval, + ErtsBlockHashHelperCtx* ctx /* out parameter */) +{ + ctx->a = ctx->b = HCONST; + ctx->c = initval; /* the previous hash value */ +} + +static ERTS_INLINE +void block_hash_buffer(byte *buf, + Uint buf_length, + ErtsBlockHashHelperCtx* ctx /* out parameter */) { - Uint32 a,b,c; - Uint len; - - /* Set up the internal state */ - len = length; - a = b = HCONST; - c = initval; /* the previous hash value */ - - while (len >= 12) - { - a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24)); - b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24)); - c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24)); - MIX(a,b,c); - k += 12; len -= 12; - } - - c += length; - switch(len) /* all the case statements fall through */ - { - case 11: c+=((Uint32)k[10]<<24); - case 10: c+=((Uint32)k[9]<<16); - case 9 : c+=((Uint32)k[8]<<8); - /* the first byte of c is reserved for the length */ - case 8 : b+=((Uint32)k[7]<<24); - case 7 : b+=((Uint32)k[6]<<16); - case 6 : b+=((Uint32)k[5]<<8); - case 5 : b+=k[4]; - case 4 : a+=((Uint32)k[3]<<24); - case 3 : a+=((Uint32)k[2]<<16); - case 2 : a+=((Uint32)k[1]<<8); - case 1 : a+=k[0]; - /* case 0: nothing left to add */ - } - MIX(a,b,c); - return c; + Uint len = buf_length; + byte *k = buf; + ASSERT(buf_length % BLOCK_HASH_BYTES_PER_ITER == 0); + while (len >= BLOCK_HASH_BYTES_PER_ITER) { + ctx->a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24)); + ctx->b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24)); + ctx->c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24)); + MIX(ctx->a,ctx->b,ctx->c); + k += BLOCK_HASH_BYTES_PER_ITER; len -= BLOCK_HASH_BYTES_PER_ITER; + } } +static ERTS_INLINE +Uint32 block_hash_final_bytes(byte *buf, + Uint buf_length, + Uint full_length, + ErtsBlockHashHelperCtx* ctx) +{ + Uint len = buf_length; + byte *k = buf; + ctx->c += full_length; + switch(len) + { /* all the case statements fall through */ + case 11: ctx->c+=((Uint32)k[10]<<24); + case 10: ctx->c+=((Uint32)k[9]<<16); + case 9 : ctx->c+=((Uint32)k[8]<<8); + /* the first byte of c is reserved for the length */ + case 8 : ctx->b+=((Uint32)k[7]<<24); + case 7 : ctx->b+=((Uint32)k[6]<<16); + case 6 : ctx->b+=((Uint32)k[5]<<8); + case 5 : ctx->b+=k[4]; + case 4 : ctx->a+=((Uint32)k[3]<<24); + case 3 : ctx->a+=((Uint32)k[2]<<16); + case 2 : ctx->a+=((Uint32)k[1]<<8); + case 1 : ctx->a+=k[0]; + /* case 0: nothing left to add */ + } + MIX(ctx->a,ctx->b,ctx->c); + return ctx->c; +} + +static Uint32 -make_hash2(Eterm term) +block_hash(byte *block, Uint block_length, Uint32 initval) { + ErtsBlockHashHelperCtx ctx; + Uint no_bytes_not_in_loop = + (block_length % BLOCK_HASH_BYTES_PER_ITER); + Uint no_bytes_to_process_in_loop = + block_length - no_bytes_not_in_loop; + byte *final_bytes = block + no_bytes_to_process_in_loop; + block_hash_setup(initval, &ctx); + block_hash_buffer(block, + no_bytes_to_process_in_loop, + &ctx); + return block_hash_final_bytes(final_bytes, + no_bytes_not_in_loop, + block_length, + &ctx); +} + +typedef enum { + tag_primary_list, + arityval_subtag, + hamt_subtag_head_flatmap, + map_subtag, + fun_subtag, + neg_big_subtag, + sub_binary_subtag_1, + sub_binary_subtag_2, + hash2_common_1, + hash2_common_2, + hash2_common_3, +} ErtsMakeHash2TrapLocation; + +typedef struct { + int c; + Uint32 sh; + Eterm* ptr; +} 
ErtsMakeHash2Context_TAG_PRIMARY_LIST; + +typedef struct { + int i; + int arity; + Eterm* elem; +} ErtsMakeHash2Context_ARITYVAL_SUBTAG; + +typedef struct { + Eterm *ks; + Eterm *vs; + int i; + Uint size; +} ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP; + +typedef struct { + Eterm* ptr; + int i; +} ErtsMakeHash2Context_MAP_SUBTAG; + +typedef struct { + Uint num_free; + Eterm* bptr; +} ErtsMakeHash2Context_FUN_SUBTAG; + +typedef struct { + Eterm* ptr; + Uint i; + Uint n; + Uint32 con; +} ErtsMakeHash2Context_NEG_BIG_SUBTAG; + +typedef struct { + byte* bptr; + Uint sz; + Uint bitsize; + Uint bitoffs; + Uint no_bytes_processed; + ErtsBlockHashHelperCtx block_hash_ctx; + /* The following fields are only used when bitoffs != 0 */ + byte* buf; + int done; + +} ErtsMakeHash2Context_SUB_BINARY_SUBTAG; + +typedef struct { + int dummy__; /* Empty structs are not supported on all platforms */ +} ErtsMakeHash2Context_EMPTY; + +typedef struct { + ErtsMakeHash2TrapLocation trap_location; + /* specific to the trap location: */ + union { + ErtsMakeHash2Context_TAG_PRIMARY_LIST tag_primary_list; + ErtsMakeHash2Context_ARITYVAL_SUBTAG arityval_subtag; + ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP hamt_subtag_head_flatmap; + ErtsMakeHash2Context_MAP_SUBTAG map_subtag; + ErtsMakeHash2Context_FUN_SUBTAG fun_subtag; + ErtsMakeHash2Context_NEG_BIG_SUBTAG neg_big_subtag; + ErtsMakeHash2Context_SUB_BINARY_SUBTAG sub_binary_subtag_1; + ErtsMakeHash2Context_SUB_BINARY_SUBTAG sub_binary_subtag_2; + ErtsMakeHash2Context_EMPTY hash2_common_1; + ErtsMakeHash2Context_EMPTY hash2_common_2; + ErtsMakeHash2Context_EMPTY hash2_common_3; + } trap_location_state; + /* same for all trap locations: */ + Eterm term; Uint32 hash; Uint32 hash_xor_pairs; - DeclareTmpHeapNoproc(tmp_big,2); + ErtsEStack stack; +} ErtsMakeHash2Context; + +static int make_hash2_ctx_bin_dtor(Binary *context_bin) { + ErtsMakeHash2Context* context = ERTS_MAGIC_BIN_DATA(context_bin); + DESTROY_SAVED_ESTACK(&context->stack); + if (context->trap_location == sub_binary_subtag_2 && + context->trap_location_state.sub_binary_subtag_2.buf != NULL) { + erts_free(ERTS_ALC_T_PHASH2_TRAP, context->trap_location_state.sub_binary_subtag_2.buf); + } + return 1; +} +/* hash2_save_trap_state is called seldom so we want to avoid inlining */ +static ERTS_NOINLINE +Eterm hash2_save_trap_state(Eterm state_mref, + Uint32 hash_xor_pairs, + Uint32 hash, + Process* p, + Eterm term, + Eterm* ESTK_DEF_STACK(s), + ErtsEStack s, + ErtsMakeHash2TrapLocation trap_location, + void* trap_location_state_ptr, + size_t trap_location_state_size) { + Binary* state_bin; + ErtsMakeHash2Context* context; + if (state_mref == THE_NON_VALUE) { + Eterm* hp; + state_bin = erts_create_magic_binary(sizeof(ErtsMakeHash2Context), + make_hash2_ctx_bin_dtor); + hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); + state_mref = erts_mk_magic_ref(&hp, &MSO(p), state_bin); + } else { + state_bin = erts_magic_ref2bin(state_mref); + } + context = ERTS_MAGIC_BIN_DATA(state_bin); + context->term = term; + context->hash = hash; + context->hash_xor_pairs = hash_xor_pairs; + ESTACK_SAVE(s, &context->stack); + context->trap_location = trap_location; + sys_memcpy(&context->trap_location_state, + trap_location_state_ptr, + trap_location_state_size); + erts_set_gc_state(p, 0); + BUMP_ALL_REDS(p); + return state_mref; +} +#undef NOINLINE_HASH2_SAVE_TRAP_STATE + +/* Writes back a magic reference to *state_mref_write_back when the + function traps */ +static ERTS_INLINE Uint32 +make_hash2_helper(Eterm term_param, const int 
can_trap, Eterm* state_mref_write_back, Process* p) +{ + static const Uint ITERATIONS_PER_RED = 64; + Uint32 hash; + Uint32 hash_xor_pairs; + Eterm term = term_param; ERTS_UNDEF(hash_xor_pairs, 0); /* (HCONST * {2, ..., 22}) mod 2^32 */ @@ -1168,12 +1351,63 @@ make_hash2(Eterm term) #define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2) +#define NOT_SSMALL28_HASH(SMALL) \ + do { \ + Uint64 t; \ + Uint32 x, y; \ + Uint32 con; \ + if (SMALL < 0) { \ + con = HCONST_10; \ + t = (Uint64)(SMALL * (-1)); \ + } else { \ + con = HCONST_11; \ + t = SMALL; \ + } \ + x = t & 0xffffffff; \ + y = t >> 32; \ + UINT32_HASH_2(x, y, con); \ + } while(0) + #ifdef ARCH_64 # define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst) #else # define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst) #endif +#define TRAP_LOCATION_NO_RED(location_name) \ + do { \ + if(can_trap && iterations_until_trap <= 0) { \ + *state_mref_write_back = \ + hash2_save_trap_state(state_mref, \ + hash_xor_pairs, \ + hash, \ + p, \ + term, \ + ESTK_DEF_STACK(s), \ + s, \ + location_name, \ + &ctx, \ + sizeof(ctx)); \ + return 0; \ + L_##location_name: \ + ctx = context->trap_location_state. location_name; \ + } \ + } while(0) + +#define TRAP_LOCATION(location_name) \ + do { \ + if (can_trap) { \ + iterations_until_trap--; \ + TRAP_LOCATION_NO_RED(location_name); \ + } \ + } while(0) + +#define TRAP_LOCATION_NO_CTX(location_name) \ + do { \ + ErtsMakeHash2Context_EMPTY ctx; \ + TRAP_LOCATION(location_name); \ + } while(0) + /* Optimization. Simple cases before declaration of estack. */ if (primary_tag(term) == TAG_PRIMARY_IMMED1) { switch (term & _TAG_IMMED1_MASK) { @@ -1186,51 +1420,94 @@ make_hash2(Eterm term) break; case _TAG_IMMED1_SMALL: { - Sint x = signed_val(term); - - if (SMALL_BITS > 28 && !IS_SSMALL28(x)) { - term = small_to_big(x, tmp_big); - break; + Sint small = signed_val(term); + if (SMALL_BITS > 28 && !IS_SSMALL28(small)) { + hash = 0; + NOT_SSMALL28_HASH(small); + return hash; } hash = 0; - SINT32_HASH(x, HCONST); + SINT32_HASH(small, HCONST); return hash; } } }; { Eterm tmp; + long max_iterations = 0; + long iterations_until_trap = 0; + Eterm state_mref = THE_NON_VALUE; + ErtsMakeHash2Context* context = NULL; DECLARE_ESTACK(s); - - UseTmpHeapNoproc(2); + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + if(can_trap){ +#ifdef DEBUG + (void)ITERATIONS_PER_RED; + iterations_until_trap = max_iterations = + (1103515245 * (ERTS_BIF_REDS_LEFT(p)) + 12345) % 227; +#else + iterations_until_trap = max_iterations = + ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(p); +#endif + } + if (can_trap && is_internal_magic_ref(term)) { + Binary* state_bin; + state_mref = term; + state_bin = erts_magic_ref2bin(state_mref); + if (ERTS_MAGIC_BIN_DESTRUCTOR(state_bin) == make_hash2_ctx_bin_dtor) { + /* Restore state after a trap */ + context = ERTS_MAGIC_BIN_DATA(state_bin); + term = context->term; + hash = context->hash; + hash_xor_pairs = context->hash_xor_pairs; + ESTACK_RESTORE(s, &context->stack); + ASSERT(p->flags & F_DISABLE_GC); + erts_set_gc_state(p, 1); + switch (context->trap_location) { + case hash2_common_3: goto L_hash2_common_3; + case tag_primary_list: goto L_tag_primary_list; + case arityval_subtag: goto L_arityval_subtag; + case hamt_subtag_head_flatmap: goto L_hamt_subtag_head_flatmap; + case map_subtag: goto L_map_subtag; + case fun_subtag: goto L_fun_subtag; + case neg_big_subtag: goto L_neg_big_subtag; + case sub_binary_subtag_1: goto L_sub_binary_subtag_1; + case 
sub_binary_subtag_2: goto L_sub_binary_subtag_2; + case hash2_common_1: goto L_hash2_common_1; + case hash2_common_2: goto L_hash2_common_2; + } + } + } hash = 0; for (;;) { switch (primary_tag(term)) { case TAG_PRIMARY_LIST: { - int c = 0; - Uint32 sh = 0; - Eterm* ptr = list_val(term); - while (is_byte(*ptr)) { + ErtsMakeHash2Context_TAG_PRIMARY_LIST ctx = { + .c = 0, + .sh = 0, + .ptr = list_val(term)}; + while (is_byte(*ctx.ptr)) { /* Optimization for strings. */ - sh = (sh << 8) + unsigned_val(*ptr); - if (c == 3) { - UINT32_HASH(sh, HCONST_4); - c = sh = 0; + ctx.sh = (ctx.sh << 8) + unsigned_val(*ctx.ptr); + if (ctx.c == 3) { + UINT32_HASH(ctx.sh, HCONST_4); + ctx.c = ctx.sh = 0; } else { - c++; + ctx.c++; } - term = CDR(ptr); + term = CDR(ctx.ptr); if (is_not_list(term)) break; - ptr = list_val(term); + ctx.ptr = list_val(term); + TRAP_LOCATION(tag_primary_list); } - if (c > 0) - UINT32_HASH(sh, HCONST_4); + if (ctx.c > 0) + UINT32_HASH(ctx.sh, HCONST_4); if (is_list(term)) { - tmp = CDR(ptr); + tmp = CDR(ctx.ptr); ESTACK_PUSH(s, tmp); - term = CAR(ptr); + term = CAR(ctx.ptr); } } break; @@ -1241,34 +1518,39 @@ make_hash2(Eterm term) switch (hdr & _TAG_HEADER_MASK) { case ARITYVAL_SUBTAG: { - int i; - int arity = header_arity(hdr); - Eterm* elem = tuple_val(term); - UINT32_HASH(arity, HCONST_9); - if (arity == 0) /* Empty tuple */ + ErtsMakeHash2Context_ARITYVAL_SUBTAG ctx = { + .i = 0, + .arity = header_arity(hdr), + .elem = tuple_val(term)}; + UINT32_HASH(ctx.arity, HCONST_9); + if (ctx.arity == 0) /* Empty tuple */ goto hash2_common; - for (i = arity; ; i--) { - term = elem[i]; - if (i == 1) + for (ctx.i = ctx.arity; ; ctx.i--) { + term = ctx.elem[ctx.i]; + if (ctx.i == 1) break; ESTACK_PUSH(s, term); + TRAP_LOCATION(arityval_subtag); } } break; case MAP_SUBTAG: { - Eterm* ptr = boxed_val(term) + 1; Uint size; - int i; + ErtsMakeHash2Context_MAP_SUBTAG ctx = { + .ptr = boxed_val(term) + 1, + .i = 0}; switch (hdr & _HEADER_MAP_SUBTAG_MASK) { case HAMT_SUBTAG_HEAD_FLATMAP: { flatmap_t *mp = (flatmap_t *)flatmap_val(term); - Eterm *ks = flatmap_get_keys(mp); - Eterm *vs = flatmap_get_values(mp); - size = flatmap_get_size(mp); - UINT32_HASH(size, HCONST_16); - if (size == 0) + ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP ctx = { + .ks = flatmap_get_keys(mp), + .vs = flatmap_get_values(mp), + .i = 0, + .size = flatmap_get_size(mp)}; + UINT32_HASH(ctx.size, HCONST_16); + if (ctx.size == 0) goto hash2_common; /* We want a portable hash function that is *independent* of @@ -1281,17 +1563,18 @@ make_hash2(Eterm term) ESTACK_PUSH(s, HASH_MAP_TAIL); hash = 0; hash_xor_pairs = 0; - for (i = size - 1; i >= 0; i--) { + for (ctx.i = ctx.size - 1; ctx.i >= 0; ctx.i--) { ESTACK_PUSH(s, HASH_MAP_PAIR); - ESTACK_PUSH(s, vs[i]); - ESTACK_PUSH(s, ks[i]); + ESTACK_PUSH(s, ctx.vs[ctx.i]); + ESTACK_PUSH(s, ctx.ks[ctx.i]); + TRAP_LOCATION(hamt_subtag_head_flatmap); } goto hash2_common; } case HAMT_SUBTAG_HEAD_ARRAY: case HAMT_SUBTAG_HEAD_BITMAP: - size = *ptr++; + size = *ctx.ptr++; UINT32_HASH(size, HCONST_16); if (size == 0) goto hash2_common; @@ -1303,27 +1586,28 @@ make_hash2(Eterm term) } switch (hdr & _HEADER_MAP_SUBTAG_MASK) { case HAMT_SUBTAG_HEAD_ARRAY: - i = 16; + ctx.i = 16; break; case HAMT_SUBTAG_HEAD_BITMAP: case HAMT_SUBTAG_NODE_BITMAP: - i = hashmap_bitcount(MAP_HEADER_VAL(hdr)); + ctx.i = hashmap_bitcount(MAP_HEADER_VAL(hdr)); break; default: erts_exit(ERTS_ERROR_EXIT, "bad header"); } - while (i) { - if (is_list(*ptr)) { - Eterm* cons = list_val(*ptr); + while (ctx.i) { + if 
(is_list(*ctx.ptr)) { + Eterm* cons = list_val(*ctx.ptr); ESTACK_PUSH(s, HASH_MAP_PAIR); ESTACK_PUSH(s, CDR(cons)); ESTACK_PUSH(s, CAR(cons)); } else { - ASSERT(is_boxed(*ptr)); - ESTACK_PUSH(s, *ptr); + ASSERT(is_boxed(*ctx.ptr)); + ESTACK_PUSH(s, *ctx.ptr); } - i--; ptr++; + ctx.i--; ctx.ptr++; + TRAP_LOCATION(map_subtag); } goto hash2_common; } @@ -1344,22 +1628,25 @@ make_hash2(Eterm term) case FUN_SUBTAG: { ErlFunThing* funp = (ErlFunThing *) fun_val(term); - Uint num_free = funp->num_free; + ErtsMakeHash2Context_FUN_SUBTAG ctx = { + .num_free = funp->num_free, + .bptr = NULL}; UINT32_HASH_2 - (num_free, + (ctx.num_free, atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue, HCONST); UINT32_HASH_2 (funp->fe->old_index, funp->fe->old_uniq, HCONST); - if (num_free == 0) { + if (ctx.num_free == 0) { goto hash2_common; } else { - Eterm* bptr = funp->env + num_free - 1; - while (num_free-- > 1) { - term = *bptr--; + ctx.bptr = funp->env + ctx.num_free - 1; + while (ctx.num_free-- > 1) { + term = *ctx.bptr--; ESTACK_PUSH(s, term); + TRAP_LOCATION(fun_subtag); } - term = *bptr; + term = *ctx.bptr; } } break; @@ -1367,70 +1654,190 @@ make_hash2(Eterm term) case HEAP_BINARY_SUBTAG: case SUB_BINARY_SUBTAG: { - byte* bptr; - unsigned sz = binary_size(term); +#define BYTE_BITS 8 + ErtsMakeHash2Context_SUB_BINARY_SUBTAG ctx = { + .bptr = 0, + /* !!!!!!!!!!!!!!!!!!!! OBS !!!!!!!!!!!!!!!!!!!! + * + * The size is truncated to 32 bits on the line + * below so that the code is compatible with old + * versions of the code. This means that hash + * values for binaries with a size greater than + * 4GB do not take all bytes in consideration. + * + * !!!!!!!!!!!!!!!!!!!! OBS !!!!!!!!!!!!!!!!!!!! + */ + .sz = (0xFFFFFFFF & binary_size(term)), + .bitsize = 0, + .bitoffs = 0, + .no_bytes_processed = 0 + }; Uint32 con = HCONST_13 + hash; - Uint bitoffs; - Uint bitsize; - - ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize); - if (sz == 0 && bitsize == 0) { + Uint iters_for_bin = MAX(1, ctx.sz / BLOCK_HASH_BYTES_PER_ITER); + ERTS_GET_BINARY_BYTES(term, ctx.bptr, ctx.bitoffs, ctx.bitsize); + if (ctx.sz == 0 && ctx.bitsize == 0) { hash = con; - } else { - if (bitoffs == 0) { - hash = block_hash(bptr, sz, con); - if (bitsize > 0) { - UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)), - HCONST_15); - } - } else { - byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, - sz + (bitsize != 0)); - erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize); - hash = block_hash(buf, sz, con); - if (bitsize > 0) { - UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)), - HCONST_15); - } - erts_free(ERTS_ALC_T_TMP, (void *) buf); - } + } else if (ctx.bitoffs == 0 && + (!can_trap || + (iterations_until_trap - iters_for_bin) > 0)) { + /* No need to trap while hashing binary */ + if (can_trap) iterations_until_trap -= iters_for_bin; + hash = block_hash(ctx.bptr, ctx.sz, con); + if (ctx.bitsize > 0) { + UINT32_HASH_2(ctx.bitsize, + (ctx.bptr[ctx.sz] >> (BYTE_BITS - ctx.bitsize)), + HCONST_15); + } + } else if (ctx.bitoffs == 0) { + /* Need to trap while hashing binary */ + ErtsBlockHashHelperCtx* block_hash_ctx = &ctx.block_hash_ctx; + block_hash_setup(con, block_hash_ctx); + do { + Uint max_bytes_to_process = + iterations_until_trap <= 0 ? 
BLOCK_HASH_BYTES_PER_ITER : + iterations_until_trap * BLOCK_HASH_BYTES_PER_ITER; + Uint bytes_left = ctx.sz - ctx.no_bytes_processed; + Uint even_bytes_left = + bytes_left - (bytes_left % BLOCK_HASH_BYTES_PER_ITER); + Uint bytes_to_process = + MIN(max_bytes_to_process, even_bytes_left); + block_hash_buffer(&ctx.bptr[ctx.no_bytes_processed], + bytes_to_process, + block_hash_ctx); + ctx.no_bytes_processed += bytes_to_process; + iterations_until_trap -= + MAX(1, bytes_to_process / BLOCK_HASH_BYTES_PER_ITER); + TRAP_LOCATION_NO_RED(sub_binary_subtag_1); + block_hash_ctx = &ctx.block_hash_ctx; /* Restore after trap */ + } while ((ctx.sz - ctx.no_bytes_processed) >= + BLOCK_HASH_BYTES_PER_ITER); + hash = block_hash_final_bytes(ctx.bptr + + ctx.no_bytes_processed, + ctx.sz - ctx.no_bytes_processed, + ctx.sz, + block_hash_ctx); + if (ctx.bitsize > 0) { + UINT32_HASH_2(ctx.bitsize, + (ctx.bptr[ctx.sz] >> (BYTE_BITS - ctx.bitsize)), + HCONST_15); + } + } else if (/* ctx.bitoffs != 0 && */ + (!can_trap || + (iterations_until_trap - iters_for_bin) > 0)) { + /* No need to trap while hashing binary */ + Uint nr_of_bytes = ctx.sz + (ctx.bitsize != 0); + byte *buf = erts_alloc(ERTS_ALC_T_TMP, nr_of_bytes); + Uint nr_of_bits_to_copy = ctx.sz*BYTE_BITS+ctx.bitsize; + if (can_trap) iterations_until_trap -= iters_for_bin; + erts_copy_bits(ctx.bptr, + ctx.bitoffs, 1, buf, 0, 1, nr_of_bits_to_copy); + hash = block_hash(buf, ctx.sz, con); + if (ctx.bitsize > 0) { + UINT32_HASH_2(ctx.bitsize, + (buf[ctx.sz] >> (BYTE_BITS - ctx.bitsize)), + HCONST_15); + } + erts_free(ERTS_ALC_T_TMP, buf); + } else /* ctx.bitoffs != 0 && */ { +#ifdef DEBUG +#define BINARY_BUF_SIZE (BLOCK_HASH_BYTES_PER_ITER * 3) +#else +#define BINARY_BUF_SIZE (BLOCK_HASH_BYTES_PER_ITER * 256) +#endif +#define BINARY_BUF_SIZE_BITS (BINARY_BUF_SIZE*BYTE_BITS) + /* Need to trap while hashing binary */ + ErtsBlockHashHelperCtx* block_hash_ctx = &ctx.block_hash_ctx; + Uint nr_of_bytes = ctx.sz + (ctx.bitsize != 0); + ERTS_CT_ASSERT(BINARY_BUF_SIZE % BLOCK_HASH_BYTES_PER_ITER == 0); + ctx.buf = erts_alloc(ERTS_ALC_T_PHASH2_TRAP, + MIN(nr_of_bytes, BINARY_BUF_SIZE)); + block_hash_setup(con, block_hash_ctx); + do { + Uint bytes_left = + ctx.sz - ctx.no_bytes_processed; + Uint even_bytes_left = + bytes_left - (bytes_left % BLOCK_HASH_BYTES_PER_ITER); + Uint bytes_to_process = + MIN(BINARY_BUF_SIZE, even_bytes_left); + Uint nr_of_bits_left = + (ctx.sz*BYTE_BITS+ctx.bitsize) - + ctx.no_bytes_processed*BYTE_BITS; + Uint nr_of_bits_to_copy = + MIN(nr_of_bits_left, BINARY_BUF_SIZE_BITS); + ctx.done = nr_of_bits_left == nr_of_bits_to_copy; + erts_copy_bits(ctx.bptr + ctx.no_bytes_processed, + ctx.bitoffs, 1, ctx.buf, 0, 1, + nr_of_bits_to_copy); + block_hash_buffer(ctx.buf, + bytes_to_process, + block_hash_ctx); + ctx.no_bytes_processed += bytes_to_process; + iterations_until_trap -= + MAX(1, bytes_to_process / BLOCK_HASH_BYTES_PER_ITER); + TRAP_LOCATION_NO_RED(sub_binary_subtag_2); + block_hash_ctx = &ctx.block_hash_ctx; /* Restore after trap */ + } while (!ctx.done); + nr_of_bytes = ctx.sz + (ctx.bitsize != 0); + hash = block_hash_final_bytes(ctx.buf + + (ctx.no_bytes_processed - + ((nr_of_bytes-1) / BINARY_BUF_SIZE) * BINARY_BUF_SIZE), + ctx.sz - ctx.no_bytes_processed, + ctx.sz, + block_hash_ctx); + if (ctx.bitsize > 0) { + Uint last_byte_index = + nr_of_bytes - (((nr_of_bytes-1) / BINARY_BUF_SIZE) * BINARY_BUF_SIZE) -1; + UINT32_HASH_2(ctx.bitsize, + (ctx.buf[last_byte_index] >> (BYTE_BITS - ctx.bitsize)), + HCONST_15); + } + 
erts_free(ERTS_ALC_T_PHASH2_TRAP, ctx.buf); + context->trap_location_state.sub_binary_subtag_2.buf = NULL; } goto hash2_common; +#undef BYTE_BITS +#undef BINARY_BUF_SIZE +#undef BINARY_BUF_SIZE_BITS } break; case POS_BIG_SUBTAG: case NEG_BIG_SUBTAG: { - Eterm* ptr = big_val(term); - Uint i = 0; - Uint n = BIG_SIZE(ptr); - Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11; + Eterm* big_val_ptr = big_val(term); + ErtsMakeHash2Context_NEG_BIG_SUBTAG ctx = { + .ptr = big_val_ptr, + .i = 0, + .n = BIG_SIZE(big_val_ptr), + .con = BIG_SIGN(big_val_ptr) ? HCONST_10 : HCONST_11}; #if D_EXP == 16 do { Uint32 x, y; - x = i < n ? BIG_DIGIT(ptr, i++) : 0; - x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16; - y = i < n ? BIG_DIGIT(ptr, i++) : 0; - y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16; - UINT32_HASH_2(x, y, con); - } while (i < n); + x = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0; + x += (Uint32)(ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0) << 16; + y = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0; + y += (Uint32)(ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0) << 16; + UINT32_HASH_2(x, y, ctx.con); + TRAP_LOCATION(neg_big_subtag); + } while (ctx.i < ctx.n); #elif D_EXP == 32 do { Uint32 x, y; - x = i < n ? BIG_DIGIT(ptr, i++) : 0; - y = i < n ? BIG_DIGIT(ptr, i++) : 0; - UINT32_HASH_2(x, y, con); - } while (i < n); + x = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0; + y = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0; + UINT32_HASH_2(x, y, ctx.con); + TRAP_LOCATION(neg_big_subtag); + } while (ctx.i < ctx.n); #elif D_EXP == 64 do { Uint t; Uint32 x, y; - ASSERT(i < n); - t = BIG_DIGIT(ptr, i++); + ASSERT(ctx.i < ctx.n); + t = BIG_DIGIT(ctx.ptr, ctx.i++); x = t & 0xffffffff; y = t >> 32; - UINT32_HASH_2(x, y, con); - } while (i < n); + UINT32_HASH_2(x, y, ctx.con); + TRAP_LOCATION(neg_big_subtag); + } while (ctx.i < ctx.n); #else #error "unsupported D_EXP size" #endif @@ -1508,13 +1915,13 @@ make_hash2(Eterm term) } case _TAG_IMMED1_SMALL: { - Sint x = signed_val(term); + Sint small = signed_val(term); + if (SMALL_BITS > 28 && !IS_SSMALL28(small)) { + NOT_SSMALL28_HASH(small); + } else { + SINT32_HASH(small, HCONST); + } - if (SMALL_BITS > 28 && !IS_SSMALL28(x)) { - term = small_to_big(x, tmp_big); - break; - } - SINT32_HASH(x, HCONST); goto hash2_common; } } @@ -1529,7 +1936,10 @@ make_hash2(Eterm term) if (ESTACK_ISEMPTY(s)) { DESTROY_ESTACK(s); - UnUseTmpHeapNoproc(2); + if (can_trap) { + BUMP_REDS(p, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED); + ASSERT(!(p->flags & F_DISABLE_GC)); + } return hash; } @@ -1540,18 +1950,37 @@ make_hash2(Eterm term) hash = (Uint32) ESTACK_POP(s); UINT32_HASH(hash_xor_pairs, HCONST_19); hash_xor_pairs = (Uint32) ESTACK_POP(s); + TRAP_LOCATION_NO_CTX(hash2_common_1); goto hash2_common; } case HASH_MAP_PAIR: hash_xor_pairs ^= hash; hash = 0; + TRAP_LOCATION_NO_CTX(hash2_common_2); goto hash2_common; default: break; } + } + TRAP_LOCATION_NO_CTX(hash2_common_3); } } +#undef TRAP_LOCATION_NO_RED +#undef TRAP_LOCATION +#undef TRAP_LOCATION_NO_CTX +} + +Uint32 +make_hash2(Eterm term) +{ + return make_hash2_helper(term, 0, NULL, NULL); +} + +Uint32 +trapping_make_hash2(Eterm term, Eterm* state_mref_write_back, Process* p) +{ + return make_hash2_helper(term, 1, state_mref_write_back, p); } /* Term hash function for internal use. |
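
Note on the hashing changes in utils.c above: make_hash2 becomes resumable. Work is metered against the calling process's reduction budget (ERTS_BIF_REDS_LEFT(p) times ITERATIONS_PER_RED), and when the budget runs out hash2_save_trap_state stores the intermediate hash, the explicit ESTACK and a trap-location tag in a magic binary, disables garbage collection, and lets phash2_1/phash2_2 trap back to themselves with the magic reference until the whole term has been hashed. The stand-alone sketch below only illustrates that save-state-and-resume pattern outside the emulator; the context struct, the fixed 4096-byte budget and the FNV-1a hash are invented stand-ins, not the ERTS magic-binary machinery or the Jenkins-style block_hash used in the diff.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins: the emulator keeps this state in a magic
     * binary (ErtsMakeHash2Context) and resumes via ESTACK_RESTORE and
     * goto; a plain struct and FNV-1a are enough to show the pattern. */
    typedef struct {
        const unsigned char *data;  /* input being hashed     */
        size_t len;                 /* total input length     */
        size_t pos;                 /* bytes already consumed */
        uint32_t hash;              /* intermediate hash      */
    } resumable_hash_ctx;

    static void hash_init(resumable_hash_ctx *ctx, const void *data, size_t len)
    {
        ctx->data = data;
        ctx->len = len;
        ctx->pos = 0;
        ctx->hash = 2166136261u;            /* FNV-1a offset basis */
    }

    /* Consume at most `budget` bytes; return 1 when the input is fully
     * hashed, 0 when the caller must "trap" and call again later. */
    static int hash_resume(resumable_hash_ctx *ctx, size_t budget)
    {
        size_t end = ctx->pos + budget;
        if (end > ctx->len)
            end = ctx->len;
        while (ctx->pos < end) {
            ctx->hash ^= ctx->data[ctx->pos++];
            ctx->hash *= 16777619u;          /* FNV-1a prime */
        }
        return ctx->pos == ctx->len;
    }

    int main(void)
    {
        static unsigned char buf[100000];
        resumable_hash_ctx ctx;
        int traps = 0;

        memset(buf, 0x5a, sizeof(buf));
        hash_init(&ctx, buf, sizeof(buf));

        /* The emulator would return to the scheduler between calls
         * (BIF_TRAP1 in bif.c above); here we simply loop. */
        while (!hash_resume(&ctx, 4096))
            traps++;

        printf("hash=0x%08x after %d traps\n", (unsigned) ctx.hash, traps);
        return 0;
    }

In the real code the per-call budget is derived from the remaining reductions, the destructor registered for the magic binary (make_hash2_ctx_bin_dtor) frees any temporary copy buffer, and garbage collection stays disabled between traps, presumably so the term and stack entries captured in the trap state remain valid until hashing completes.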