Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names | 24
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 147
-rw-r--r--  erts/emulator/beam/beam_bp.c | 11
-rw-r--r--  erts/emulator/beam/beam_debug.c | 5
-rw-r--r--  erts/emulator/beam/beam_emu.c | 655
-rw-r--r--  erts/emulator/beam/beam_load.c | 108
-rw-r--r--  erts/emulator/beam/beam_load.h | 1
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 2
-rw-r--r--  erts/emulator/beam/bif.c | 190
-rw-r--r--  erts/emulator/beam/bif.h | 118
-rw-r--r--  erts/emulator/beam/bif.tab | 45
-rw-r--r--  erts/emulator/beam/big.c | 86
-rw-r--r--  erts/emulator/beam/big.h | 6
-rw-r--r--  erts/emulator/beam/binary.c | 1144
-rw-r--r--  erts/emulator/beam/break.c | 21
-rw-r--r--  erts/emulator/beam/code_ix.c | 3
-rw-r--r--  erts/emulator/beam/copy.c | 35
-rw-r--r--  erts/emulator/beam/dist.c | 88
-rw-r--r--  erts/emulator/beam/dist.h | 7
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 26
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 538
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 48
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 35
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 3313
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 202
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 522
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 14
-rw-r--r--  erts/emulator/beam/erl_async.c | 55
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 93
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.h | 5
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 30
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 32
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 23
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/erl_bif_info.c | 408
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 12
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 22
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 292
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 471
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 27
-rw-r--r--  erts/emulator/beam/erl_binary.h | 41
-rw-r--r--  erts/emulator/beam/erl_bits.c | 145
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 2
-rw-r--r--  erts/emulator/beam/erl_db.c | 104
-rw-r--r--  erts/emulator/beam/erl_db.h | 2
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 2
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 4
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 25
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 2
-rw-r--r--  erts/emulator/beam/erl_debug.c | 6
-rw-r--r--  erts/emulator/beam/erl_driver.h | 65
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 8
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 47
-rw-r--r--  erts/emulator/beam/erl_gc.c | 208
-rw-r--r--  erts/emulator/beam/erl_gc.h | 37
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 41
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.h | 4
-rw-r--r--  erts/emulator/beam/erl_init.c | 616
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 103
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 119
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 26
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 163
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 26
-rw-r--r--  erts/emulator/beam/erl_map.c | 837
-rw-r--r--  erts/emulator/beam/erl_map.h | 72
-rw-r--r--  erts/emulator/beam/erl_message.c | 61
-rw-r--r--  erts/emulator/beam/erl_message.h | 5
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 20
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 2
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 44
-rw-r--r--  erts/emulator/beam/erl_nif.c | 867
-rw-r--r--  erts/emulator/beam/erl_nif.h | 81
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 58
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 10
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 8
-rw-r--r--  erts/emulator/beam/erl_port.h | 22
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 188
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 27
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 52
-rw-r--r--  erts/emulator/beam/erl_process.c | 4019
-rw-r--r--  erts/emulator/beam/erl_process.h | 392
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 83
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 64
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 25
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 393
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 17
-rw-r--r--  erts/emulator/beam/erl_smp.h | 85
-rw-r--r--  erts/emulator/beam/erl_term.c | 6
-rw-r--r--  erts/emulator/beam/erl_term.h | 55
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 5
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 4
-rw-r--r--  erts/emulator/beam/erl_threads.h | 128
-rw-r--r--  erts/emulator/beam/erl_trace.c | 199
-rw-r--r--  erts/emulator/beam/erl_trace.h | 5
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 176
-rw-r--r--  erts/emulator/beam/erl_utils.h | 62
-rw-r--r--  erts/emulator/beam/erl_vm.h | 15
-rw-r--r--  erts/emulator/beam/erl_zlib.c | 84
-rw-r--r--  erts/emulator/beam/erl_zlib.h | 16
-rw-r--r--  erts/emulator/beam/external.c | 1500
-rw-r--r--  erts/emulator/beam/external.h | 6
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/beam/global.h | 400
-rw-r--r--  erts/emulator/beam/io.c | 558
-rw-r--r--  erts/emulator/beam/ops.tab | 101
-rw-r--r--  erts/emulator/beam/sys.h | 95
-rw-r--r--  erts/emulator/beam/utils.c | 1119
110 files changed, 18204 insertions(+), 4439 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index ce60bb9bbc..5d06a32941 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -71,6 +71,7 @@ atom ac
atom active
atom all
atom all_but_first
+atom all_names
atom alloc_info
atom alloc_sizes
atom allocated
@@ -78,6 +79,7 @@ atom allocated_areas
atom allocator
atom allocator_sizes
atom alloc_util_allocators
+atom allow_gc
atom allow_passive_connect
atom already_loaded
atom amd64
@@ -114,6 +116,8 @@ atom binary_longest_prefix_trap
atom binary_longest_suffix_trap
atom binary_match_trap
atom binary_matches_trap
+atom binary_to_list_continue
+atom binary_to_term_trap
atom block
atom blocked
atom bm
@@ -139,6 +143,7 @@ atom caseless
atom catchlevel
atom cd
atom cdr
+atom cflags
atom characters_to_binary_int
atom characters_to_list_int
atom clear
@@ -150,6 +155,7 @@ atom compact
atom compat_rel
atom compile
atom compressed
+atom config_h
atom connect
atom connected
atom connection_closed
@@ -174,10 +180,12 @@ atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_cpu_schedulers_online
atom disable_trace
atom disabled
atom display_items
atom dist
+atom dist_cmd
atom Div='/'
atom div
atom dlink
@@ -248,6 +256,7 @@ atom get_data
atom get_seq_token
atom get_tcw
atom getenv
+atom gather_gc_info_result
atom gather_sched_wall_time_result
atom getting_linked
atom getting_unlinked
@@ -299,6 +308,7 @@ atom label
atom large_heap
atom last_calls
atom latin1
+atom ldflags
atom Le='=<'
atom lf
atom line
@@ -306,22 +316,27 @@ atom line_length
atom linked_in_driver
atom links
atom list
+atom list_to_binary_continue
atom little
atom loaded
atom load_cancelled
atom load_failure
atom local
atom long_gc
+atom long_schedule
atom low
atom Lt='<'
atom machine
atom match
+atom match_limit
+atom match_limit_recursion
atom match_spec
atom max
atom maximum
atom max_tables max_processes
atom mbuf_size
atom memory
+atom memory_internal
atom memory_types
atom message
atom message_binary
@@ -344,11 +359,13 @@ atom multi_scheduling
atom multiline
atom name
atom named_table
+atom namelist
atom native_addresses
atom Neq='=/='
atom Neqeq='/='
atom net_kernel
atom net_kernel_terminated
+atom never_utf
atom new
atom new_index
atom new_uniq
@@ -374,6 +391,7 @@ atom nosuspend
atom no_float
atom no_integer
atom no_network
+atom no_start_optimize
atom not
atom not_a_list
atom not_loaded
@@ -384,6 +402,7 @@ atom notalive
atom notbol
atom noteol
atom notempty
+atom notempty_atstart
atom notify
atom notsup
atom nouse_stdio
@@ -430,6 +449,7 @@ atom port
atom ports
atom port_count
atom port_limit
+atom port_op
atom print
atom priority
atom private
@@ -441,6 +461,7 @@ atom process_display
atom process_limit
atom process_dump
atom procs
+atom proc_sig
atom profile
atom protected
atom protection
@@ -466,6 +487,7 @@ atom register
atom registered_name
atom reload
atom rem
+atom report_errors
atom reset
atom restart
atom return_from
@@ -528,6 +550,7 @@ atom system_version
atom system_architecture
atom SYSTEM='SYSTEM'
atom table
+atom term_to_binary_trap
atom this
atom thread_pool_size
atom threads
@@ -547,6 +570,7 @@ atom true
atom tuple
atom type
atom ucompile
+atom ucp
atom undef
atom ungreedy
atom unicode
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 649594a334..df1983a83d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -36,7 +36,7 @@
#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp);
+static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp);
static void delete_code(Module* modp);
static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
@@ -201,7 +201,7 @@ finish_loading_1(BIF_ALIST_1)
* to keep the elements in.
*/
- n = list_length(BIF_ARG_1);
+ n = erts_list_length(BIF_ARG_1);
if (n == -1) {
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
goto done;
@@ -427,69 +427,82 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-check_process_code_2(BIF_ALIST_2)
+erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
{
- Process* rp;
Module* modp;
+ Eterm res;
+ ErtsCodeIndex code_ix;
- if (is_not_atom(BIF_ARG_2)) {
- goto error;
- }
- if (is_internal_pid(BIF_ARG_1)) {
- Eterm res;
- ErtsCodeIndex code_ix;
-
- code_ix = erts_active_code_ix();
- modp = erts_get_module(BIF_ARG_2, code_ix);
- if (modp == NULL) { /* Doesn't exist. */
- return am_false;
- }
- erts_rlock_old_code(code_ix);
- if (modp->old.code == NULL) { /* No old code. */
- erts_runlock_old_code(code_ix);
- return am_false;
- }
- erts_runlock_old_code(code_ix);
-
-#ifdef ERTS_SMP
- rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
-#else
- rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0);
-#endif
- if (!rp) {
- BIF_RET(am_false);
- }
- if (rp == ERTS_PROC_LOCK_BUSY) {
- ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- }
- erts_rlock_old_code(code_ix);
- if (modp->old.code != NULL) { /* must check again */
- res = check_process_code(rp, modp);
- }
- else {
- res = am_false;
- }
- erts_runlock_old_code(code_ix);
-#ifdef ERTS_SMP
- if (BIF_P != rp) {
- erts_resume(rp, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ (*redsp)++;
+
+ ASSERT(is_atom(module));
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(module, code_ix);
+ if (!modp)
+ return am_false;
+ erts_rlock_old_code(code_ix);
+ res = modp->old.code ? check_process_code(c_p, modp, allow_gc, redsp) : am_false;
+ erts_runlock_old_code(code_ix);
+
+ return res;
+}
+
+BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+{
+ int reds = 0;
+ Eterm res;
+ Eterm olist = BIF_ARG_2;
+ int allow_gc = 1;
+
+ if (is_not_atom(BIF_ARG_1))
+ goto badarg;
+
+ while (is_list(olist)) {
+ Eterm *lp = list_val(olist);
+ Eterm opt = CAR(lp);
+ if (is_tuple(opt)) {
+ Eterm* tp = tuple_val(opt);
+ switch (arityval(tp[0])) {
+ case 2:
+ switch (tp[1]) {
+ case am_allow_gc:
+ switch (tp[2]) {
+ case am_false:
+ allow_gc = 0;
+ break;
+ case am_true:
+ allow_gc = 1;
+ break;
+ default:
+ goto badarg;
+ }
+ break;
+ default:
+ goto badarg;
+ }
+ break;
+ default:
+ goto badarg;
+ }
}
-#endif
- BIF_RET(res);
- }
- else if (is_external_pid(BIF_ARG_1)
- && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
- BIF_RET(am_false);
+ else
+ goto badarg;
+ olist = CDR(lp);
}
+ if (is_not_nil(olist))
+ goto badarg;
+
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, allow_gc, &reds);
+
+ ASSERT(is_value(res));
+
+ BIF_RET2(res, reds);
- error:
+badarg:
BIF_ERROR(BIF_P, BADARG);
}
-
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;
@@ -710,7 +723,7 @@ set_default_trace_pattern(Eterm module)
}
static Eterm
-check_process_code(Process* rp, Module* modp)
+check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
{
BeamInstr* start;
char* mod_start;
@@ -773,6 +786,16 @@ check_process_code(Process* rp, Module* modp)
}
}
+ if (rp->flags & F_DISABLE_GC) {
+ /*
+ * Cannot proceed. Process has disabled gc in order to
+ * safely leave inconsistent data on the heap and/or
+ * off heap lists. Need to wait for gc to be enabled
+ * again.
+ */
+ return THE_NON_VALUE;
+ }
+
/*
* See if there are funs that refer to the old version of the module.
*/
@@ -786,6 +809,8 @@ check_process_code(Process* rp, Module* modp)
if (done_gc) {
return am_true;
} else {
+ if (!allow_gc)
+ return am_aborted;
/*
* Try to get rid of this fun by garbage collecting.
* Clear both fvalue and ftrace to make sure they
@@ -796,7 +821,7 @@ check_process_code(Process* rp, Module* modp)
rp->ftrace = NIL;
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
goto rescan;
}
}
@@ -850,6 +875,9 @@ check_process_code(Process* rp, Module* modp)
Uint lit_size;
struct erl_off_heap_header* oh;
+ if (!allow_gc)
+ return am_aborted;
+
/*
* Try to get rid of constants by garbage collecting.
* Clear both fvalue and ftrace.
@@ -859,11 +887,12 @@ check_process_code(Process* rp, Module* modp)
rp->ftrace = NIL;
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
literals = (Eterm *) modp->old.code[MI_LITERALS_START];
lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
oh = (struct erl_off_heap_header *)
modp->old.code[MI_LITERALS_OFF_HEAP];
+ *redsp += lit_size / 10; /* Need a better value... */
erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
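
Note: the erts_check_process_code() refactor above stops discarding the garbage-collection cost and instead reports it to the caller through the redsp out-parameter, so the BIF can charge the process with BIF_RET2(res, reds). A minimal standalone sketch of that accounting pattern (illustrative names, not the emulator's real types):

    #include <stdio.h>

    /* Hypothetical worker in the style of check_process_code() above:
     * it does its job and accumulates its cost into an out-parameter
     * instead of discarding it. */
    static int do_scan(int items, int *redsp)
    {
        *redsp += 1;          /* base cost of the call itself */
        *redsp += items / 10; /* cost proportional to the work done */
        return items > 0;
    }

    int main(void)
    {
        int reds = 0;
        int found = do_scan(1000, &reds);
        /* A BIF would now return with BIF_RET2(result, reds) so the
         * scheduler deducts the reductions from the calling process. */
        printf("found=%d cost=%d reductions\n", found, reds);
        return 0;
    }
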
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 9b17de34ec..4e711c89e0 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,7 +46,8 @@
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
@@ -641,7 +642,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -729,7 +730,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ IS_TRACED_FL(p, F_TRACE_CALLS) &&
+ erts_is_tracer_proc_valid(p)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
@@ -1580,6 +1582,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
+ bp_meta_unref(bp->meta_pid);
}
if (common & ERTS_BPF_COUNT) {
ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
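
Note: the ERTS_SMP_REQ_PROC_MAIN_LOCK change above passes __FILE__ and __LINE__ through to the lock checker, so lock-order violations can be reported with the call site that required the lock. A sketch of that macro pattern under assumed names (the real erts_proc_lc_require_lock() takes a process pointer and a lock id):

    #include <stdio.h>

    /* Stand-in for the lock-checker hook; the extra file/line arguments
     * are what the diff above adds to the real function. */
    static void require_lock(const char *lock, const char *file, int line)
    {
        printf("lock %s required at %s:%d\n", lock, file, line);
    }

    #define REQ_MAIN_LOCK(p) \
        do { if (p) require_lock("main", __FILE__, __LINE__); } while (0)

    int main(void)
    {
        int proc = 1;
        int *p = &proc;
        REQ_MAIN_LOCK(p);
        return 0;
    }
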
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index e36ec2a93e..a3cd08834f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -635,6 +635,11 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
case op_i_put_tuple_rI:
case op_i_put_tuple_xI:
case op_i_put_tuple_yI:
+ case op_new_map_jdII:
+ case op_update_map_assoc_jsdII:
+ case op_update_map_exact_jsdII:
+ case op_i_has_map_fields_fsI:
+ case op_i_get_map_elements_fsI:
{
int n = unpacked[-1];
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 944ed6da81..8bfb7d2ad2 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,6 +31,7 @@
#include "big.h"
#include "beam_load.h"
#include "erl_binary.h"
+#include "erl_map.h"
#include "erl_bits.h"
#include "dist.h"
#include "beam_bp.h"
@@ -48,7 +49,7 @@
# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
# define OpCode(OpCode) ((Uint*)op_##OpCode)
-# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
+# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;}
# define LabelAddr(Addr) &&##Addr
#else
# define OpCase(OpCode) lb_##OpCode
@@ -70,7 +71,8 @@ do { \
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
# else
@@ -133,7 +135,7 @@ do { \
/* We don't check the range if an ordinary switch is used */
#ifdef NO_JUMP_TABLE
-#define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
+#define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10))
#else
#define VALID_INSTR(IP) \
((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
@@ -217,6 +219,7 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
+BeamInstr* em_call_nif;
/* NOTE These should be the only variables containing trace instructions.
@@ -700,6 +703,19 @@ extern int count_instructions;
Fail; \
}
+#define IsMap(Src, Fail) if (is_not_map(Src)) { Fail; }
+
+#define HasMapField(Src, Key, Fail) if (has_not_map_field(Src, Key)) { Fail; }
+
+#define GetMapElement(Src, Key, Dst, Fail) \
+ do { \
+ Eterm _res = get_map_element(Src, Key); \
+ if (is_non_value(_res)) { \
+ Fail; \
+ } \
+ Dst = _res; \
+ } while (0)
+
#define IsFunction(X, Action) \
do { \
if ( !(is_any_fun(X)) ) { \
@@ -924,6 +940,7 @@ extern int count_instructions;
# define NOINLINE
#endif
+
/*
* The following functions are called directly by process_main().
* Don't inline them.
@@ -942,7 +959,13 @@ static BeamInstr* apply_fun(Process* p, Eterm fun,
Eterm args, Eterm* reg) NOINLINE;
static Eterm new_fun(Process* p, Eterm* reg,
ErlFunEntry* fe, int num_free) NOINLINE;
-
+static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE;
+static Eterm update_map_assoc(Process* p, Eterm* reg,
+ Eterm map, BeamInstr* I) NOINLINE;
+static Eterm update_map_exact(Process* p, Eterm* reg,
+ Eterm map, BeamInstr* I) NOINLINE;
+static int has_not_map_field(Eterm map, Eterm key);
+static Eterm get_map_element(Eterm map, Eterm key);
/*
* Functions not directly called by process_main(). OK to inline.
@@ -1153,6 +1176,9 @@ void process_main(void)
Eterm pt_arity; /* Used by do_put_tuple */
+ Uint64 start_time = 0; /* Monitor long schedule */
+ BeamInstr* start_time_i = NULL;
+
ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
@@ -1165,16 +1191,35 @@ void process_main(void)
* c_p->arg_reg before calling the scheduler.
*/
if (!init_done) {
+ /* This should only be reached during the init phase when only the main
+ * process is running. I.e. there is no race for init_done.
+ */
init_done = 1;
goto init_emulator;
}
+
c_p = NULL;
reds_used = 0;
+
goto do_schedule1;
do_schedule:
reds_used = REDS_IN(c_p) - FCALLS;
do_schedule1:
+
+ if (start_time != 0) {
+ Sint64 diff = erts_timestamp_millis() - start_time;
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
+#ifdef ERTS_DIRTY_SCHEDULERS
+ && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#endif
+ ) {
+ BeamInstr *inptr = find_function_from_pc(start_time_i);
+ BeamInstr *outptr = find_function_from_pc(c_p->i);
+ monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
+ }
+ }
+
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
#if HALFWORD_HEAP
@@ -1183,11 +1228,18 @@ void process_main(void)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
c_p = schedule(c_p, reds_used);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ start_time = 0;
#ifdef DEBUG
pid = c_p->common.id; /* Save for debugging purposes */
#endif
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ if (erts_system_monitor_long_schedule != 0) {
+ start_time = erts_timestamp_millis();
+ start_time_i = c_p->i;
+ }
+
reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
#if !HEAP_ON_C_STACK
@@ -1249,7 +1301,7 @@ void process_main(void)
(Eterm)fptr[1], (Uint)fptr[2],
NULL, fun_buf);
} else {
- erts_snprintf(fun_buf, sizeof(fun_buf),
+ erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
"<unknown/%p>", next);
}
}
@@ -2301,6 +2353,175 @@ void process_main(void)
Goto(*I);
}
+ OpCase(new_map_jdII): {
+ Eterm res;
+
+ x(0) = r(0);
+ SWAPOUT;
+ res = new_map(c_p, reg, I);
+ SWAPIN;
+ r(0) = x(0);
+ StoreResult(res, Arg(1));
+ Next(4+Arg(3));
+ }
+
+ OpCase(i_has_map_fields_fsI): {
+ map_t* mp;
+ Eterm map;
+ Eterm field;
+ Eterm *ks;
+ BeamInstr* fs;
+ Uint sz,n;
+
+ GetArg1(1, map);
+
+ /* this instruction assumes Arg1 is a map,
+ * i.e. that it follows a test is_map if needed.
+ */
+
+ mp = (map_t *)map_val(map);
+ sz = map_get_size(mp);
+
+ if (sz == 0) {
+ SET_I((BeamInstr *) Arg(0));
+ goto has_map_fields_fail;
+ }
+
+ ks = map_get_keys(mp);
+ n = (Uint)Arg(2);
+ fs = &Arg(3); /* pattern fields */
+
+ ASSERT(n>0);
+
+ while(sz) {
+ field = (Eterm)*fs;
+ if (EQ(field,*ks)) {
+ n--;
+ fs++;
+ if (n == 0) break;
+ }
+ ks++; sz--;
+ }
+
+ if (n) {
+ SET_I((BeamInstr *) Arg(0));
+ goto has_map_fields_fail;
+ }
+
+ I += 4 + Arg(2);
+has_map_fields_fail:
+ ASSERT(VALID_INSTR(*I));
+ Goto(*I);
+ }
+
+#define PUT_TERM_REG(term, desc) \
+do { \
+ switch ((desc) & _TAG_IMMED1_MASK) { \
+ case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ r(0) = (term); \
+ break; \
+ case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ x((desc) >> _TAG_IMMED1_SIZE) = (term); \
+ break; \
+ case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ y((desc) >> _TAG_IMMED1_SIZE) = (term); \
+ break; \
+ default: \
+ ASSERT(0); \
+ break; \
+ } \
+} while(0)
+
+ OpCase(i_get_map_elements_fsI): {
+ Eterm map;
+ map_t *mp;
+ Eterm field;
+ Eterm *ks;
+ Eterm *vs;
+ BeamInstr *fs;
+ Uint sz,n;
+
+ GetArg1(1, map);
+
+ /* this instruction assumes Arg1 is a map,
+ * i.e. that it follows a test is_map if needed.
+ */
+
+ mp = (map_t *)map_val(map);
+ sz = map_get_size(mp);
+
+ if (sz == 0) {
+ SET_I((BeamInstr *) Arg(0));
+ goto get_map_elements_fail;
+ }
+
+ n = (Uint)Arg(2) / 2;
+ fs = &Arg(3); /* pattern fields and target registers */
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ while(sz) {
+ field = (Eterm)*fs;
+ if (EQ(field,*ks)) {
+ PUT_TERM_REG(*vs, fs[1]);
+ n--;
+ fs += 2;
+ /* no more values to fetch, we are done */
+ if (n == 0) break;
+ }
+ ks++; sz--;
+ vs++;
+ }
+
+ if (n) {
+ SET_I((BeamInstr *) Arg(0));
+ goto get_map_elements_fail;
+ }
+
+ I += 4 + Arg(2);
+get_map_elements_fail:
+ ASSERT(VALID_INSTR(*I));
+ Goto(*I);
+ }
+#undef PUT_TERM_REG
+
+ OpCase(update_map_assoc_jsdII): {
+ Eterm res;
+ Eterm map;
+
+ GetArg1(1, map);
+ x(0) = r(0);
+ SWAPOUT;
+ res = update_map_assoc(c_p, reg, map, I);
+ SWAPIN;
+ if (is_value(res)) {
+ r(0) = x(0);
+ StoreResult(res, Arg(2));
+ Next(5+Arg(4));
+ } else {
+ goto badarg;
+ }
+ }
+
+ OpCase(update_map_exact_jsdII): {
+ Eterm res;
+ Eterm map;
+
+ GetArg1(1, map);
+ x(0) = r(0);
+ SWAPOUT;
+ res = update_map_exact(c_p, reg, map, I);
+ SWAPIN;
+ if (is_value(res)) {
+ r(0) = x(0);
+ StoreResult(res, Arg(2));
+ Next(5+Arg(4));
+ } else {
+ goto badarg;
+ }
+ }
+
+
/*
* All guards with zero arguments have special instructions:
* self/0
@@ -3282,6 +3503,7 @@ void process_main(void)
* I[0]: &&call_nif
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
*/
BifFunction vbf;
@@ -4305,7 +4527,19 @@ void process_main(void)
flags = Arg(2);
BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
if (size >= SMALL_BITS) {
- Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ Uint wordsneeded;
+ /* check bits size before potential gc.
+ * We do not want a gc and then realize we don't need
+ * the allocated space (i.e. if the op fails)
+ *
+ * remember to reacquire the matchbuffer after gc.
+ */
+
+ mb = ms_matchbuffer(tmp_arg1);
+ if (mb->size - mb->offset < size) {
+ ClauseFail();
+ }
+ wordsneeded = 1+WSIZE(NBYTES((Uint) size));
TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
}
mb = ms_matchbuffer(tmp_arg1);
@@ -4931,6 +5165,7 @@ void process_main(void)
em_call_error_handler = OpCode(call_error_handler);
em_apply_bif = OpCode(apply_bif);
+ em_call_nif = OpCode(call_nif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
beam_apply[1] = (BeamInstr) OpCode(normal_exit);
@@ -4989,6 +5224,8 @@ translate_gc_bif(void* gcf)
return bit_size_1;
} else if (gcf == erts_gc_byte_size_1) {
return byte_size_1;
+ } else if (gcf == erts_gc_map_size_1) {
+ return map_size_1;
} else if (gcf == erts_gc_abs_1) {
return abs_1;
} else if (gcf == erts_gc_float_1) {
@@ -5633,7 +5870,6 @@ build_stacktrace(Process* c_p, Eterm exc) {
return res;
}
-
static BeamInstr*
call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
{
@@ -5681,7 +5917,6 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
return ep->addressv[erts_active_code_ix()];
}
-
static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
{
@@ -6151,6 +6386,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
+
static Eterm
new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
{
@@ -6186,7 +6422,397 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
return make_fun(funp);
}
-
+static int has_not_map_field(Eterm map, Eterm key)
+{
+ map_t* mp;
+ Eterm* keys;
+ Uint i;
+ Uint n;
+
+ mp = (map_t *)map_val(map);
+ keys = map_get_keys(mp);
+ n = map_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (keys[i] == key) {
+ return 0;
+ }
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(keys[i], key)) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static Eterm get_map_element(Eterm map, Eterm key)
+{
+ map_t *mp;
+ Eterm* ks, *vs;
+ Uint i;
+ Uint n;
+
+ mp = (map_t *)map_val(map);
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+ n = map_get_size(mp);
+ if (is_immed(key)) {
+ for (i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ return vs[i];
+ }
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ return vs[i];
+ }
+ }
+ }
+ return THE_NON_VALUE;
+}
+
+#define GET_TERM(term, dest) \
+do { \
+ Eterm src = (Eterm)(term); \
+ switch (src & _TAG_IMMED1_MASK) { \
+ case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ dest = x(0); \
+ break; \
+ case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ dest = x(src >> _TAG_IMMED1_SIZE); \
+ break; \
+ case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
+ dest = y(src >> _TAG_IMMED1_SIZE); \
+ break; \
+ default: \
+ dest = src; \
+ break; \
+ } \
+} while(0)
+
+
+static Eterm
+new_map(Process* p, Eterm* reg, BeamInstr* I)
+{
+ Uint n = Arg(3);
+ Uint i;
+ Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
+ Eterm keys;
+ Eterm *mhp,*thp;
+ Eterm *E;
+ BeamInstr *ptr;
+ map_t *mp;
+
+ if (HeapWordsLeft(p) < need) {
+ erts_garbage_collect(p, need, reg, Arg(2));
+ }
+
+ thp = p->htop;
+ mhp = thp + 1 + n/2;
+ E = p->stop;
+ ptr = &Arg(4);
+ keys = make_tuple(thp);
+ *thp++ = make_arityval(n/2);
+
+ mp = (map_t *)mhp; mhp += MAP_HEADER_SIZE;
+ mp->thing_word = MAP_HEADER;
+ mp->size = n/2;
+ mp->keys = keys;
+
+ for (i = 0; i < n/2; i++) {
+ GET_TERM(*ptr++, *thp++);
+ GET_TERM(*ptr++, *mhp++);
+ }
+ p->htop = mhp;
+ return make_map(mp);
+}
+
+static Eterm
+update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+{
+ Uint n;
+ Uint num_old;
+ Uint num_updates;
+ Uint need;
+ map_t *old_mp, *mp;
+ Eterm res;
+ Eterm* hp;
+ Eterm* E;
+ Eterm* old_keys;
+ Eterm* old_vals;
+ BeamInstr* new_p;
+ Eterm new_key;
+ Eterm* kp;
+
+ if (is_not_map(map)) {
+ return THE_NON_VALUE;
+ }
+
+ old_mp = (map_t *) map_val(map);
+ num_old = map_get_size(old_mp);
+
+ /*
+ * If the old map is empty, create a new map.
+ */
+
+ if (num_old == 0) {
+ return new_map(p, reg, I+1);
+ }
+
+ /*
+ * Allocate heap space for the worst case (i.e. all keys in the
+ * update list are new).
+ */
+
+ num_updates = Arg(4) / 2;
+ need = 2*(num_old+num_updates) + 1 + MAP_HEADER_SIZE;
+ if (HeapWordsLeft(p) < need) {
+ Uint live = Arg(3);
+ reg[live] = map;
+ erts_garbage_collect(p, need, reg, live+1);
+ map = reg[live];
+ old_mp = (map_t *)map_val(map);
+ }
+
+ /*
+ * Build the skeleton for the map, ready to be filled in.
+ *
+ * +-----------------------------------+
+ * | (Space for arityval for keys) | <-----------+
+ * +-----------------------------------+ |
+ * | (Space for key 1) | | <-- kp
+ * +-----------------------------------+ |
+ * . |
+ * . |
+ * . |
+ * +-----------------------------------+ |
+ * | (Space for last key) | |
+ * +-----------------------------------+ |
+ * | MAP_HEADER | |
+ * +-----------------------------------+ |
+ * | (Space for number of keys/values) | |
+ * +-----------------------------------+ |
+ * | Boxed tuple pointer >----------------+
+ * +-----------------------------------+
+ * | (Space for value 1) | <-- hp
+ * +-----------------------------------+
+ */
+
+ E = p->stop;
+ kp = p->htop + 1; /* Point to first key */
+ hp = kp + num_old + num_updates;
+
+ res = make_map(hp);
+ mp = (map_t *)hp;
+ hp += MAP_HEADER_SIZE;
+ mp->thing_word = MAP_HEADER;
+ mp->keys = make_tuple(kp-1);
+
+ old_vals = map_get_values(old_mp);
+ old_keys = map_get_keys(old_mp);
+
+ new_p = &Arg(5);
+ GET_TERM(*new_p, new_key);
+ n = num_updates;
+
+ /*
+ * Fill in keys and values, until we run out of either updates
+ * or old values and keys.
+ */
+
+ for (;;) {
+ Eterm key;
+ Sint c;
+
+ ASSERT(kp < (Eterm *)mp);
+ key = *old_keys;
+ if ((c = CMP_TERM(key, new_key)) < 0) {
+ /* Copy old key and value */
+ *kp++ = key;
+ *hp++ = *old_vals;
+ old_keys++, old_vals++, num_old--;
+ } else { /* Replace or insert new */
+ GET_TERM(new_p[1], *hp++);
+ if (c > 0) { /* If new key */
+ *kp++ = new_key;
+ } else { /* If replacement */
+ *kp++ = key;
+ old_keys++, old_vals++, num_old--;
+ }
+ n--;
+ if (n == 0) {
+ break;
+ } else {
+ new_p += 2;
+ GET_TERM(*new_p, new_key);
+ }
+ }
+ if (num_old == 0) {
+ break;
+ }
+ }
+
+ /*
+ * At this point, we have run out of either old keys and values,
+ * or the update list. In other words, at least one of n and
+ * num_old must be zero.
+ */
+
+ if (n > 0) {
+ /*
+ * All old keys and values have been copied, but there
+ * are still new keys and values in the update list that
+ * must be copied.
+ */
+ ASSERT(num_old == 0);
+ while (n-- > 0) {
+ GET_TERM(new_p[0], *kp++);
+ GET_TERM(new_p[1], *hp++);
+ new_p += 2;
+ }
+ } else {
+ /*
+ * All updates are now done. We may still have old
+ * keys and values that we must copy.
+ */
+ ASSERT(n == 0);
+ while (num_old-- > 0) {
+ ASSERT(kp < (Eterm *)mp);
+ *kp++ = *old_keys++;
+ *hp++ = *old_vals++;
+ }
+ }
+
+ /*
+ * Calculate how many values are unused at the end of the
+ * key tuple and fill it out with a bignum header.
+ */
+ if ((n = (Eterm *)mp - kp) > 0) {
+ *kp = make_pos_bignum_header(n-1);
+ }
+
+ /*
+ * Fill in the size of the map in both the key tuple and in the map.
+ */
+
+ n = kp - p->htop - 1; /* Actual number of keys/values */
+ *p->htop = make_arityval(n);
+ mp->size = n;
+ p->htop = hp;
+ return res;
+}
+
+/*
+ * Update values for keys that already exist in the map.
+ */
+
+static Eterm
+update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+{
+ Uint n;
+ Uint i;
+ Uint num_old;
+ Uint need;
+ map_t *old_mp, *mp;
+ Eterm res;
+ Eterm* hp;
+ Eterm* E;
+ Eterm* old_keys;
+ Eterm* old_vals;
+ BeamInstr* new_p;
+ Eterm new_key;
+
+ if (is_not_map(map)) {
+ return THE_NON_VALUE;
+ }
+
+ old_mp = (map_t *) map_val(map);
+ num_old = map_get_size(old_mp);
+
+ /*
+ * If the old map is empty, create a new map.
+ */
+
+ if (num_old == 0) {
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Allocate the exact heap space needed.
+ */
+
+ need = num_old + MAP_HEADER_SIZE;
+ if (HeapWordsLeft(p) < need) {
+ Uint live = Arg(3);
+ reg[live] = map;
+ erts_garbage_collect(p, need, reg, live+1);
+ map = reg[live];
+ old_mp = (map_t *)map_val(map);
+ }
+
+ /*
+ * Update map, keeping the old key tuple.
+ */
+
+ hp = p->htop;
+ E = p->stop;
+
+ old_vals = map_get_values(old_mp);
+ old_keys = map_get_keys(old_mp);
+
+ res = make_map(hp);
+ mp = (map_t *)hp;
+ hp += MAP_HEADER_SIZE;
+ mp->thing_word = MAP_HEADER;
+ mp->size = num_old;
+ mp->keys = old_mp->keys;
+
+ /* Get array of key/value pairs to be updated */
+ new_p = &Arg(5);
+ GET_TERM(*new_p, new_key);
+
+ /* Update all values */
+ n = Arg(4) / 2; /* Number of values to be updated */
+ ASSERT(n > 0);
+ for (i = 0; i < num_old; i++) {
+ if (!EQ(*old_keys, new_key)) {
+ /* Not same keys */
+ *hp++ = *old_vals;
+ } else {
+ GET_TERM(new_p[1], *hp);
+ hp++;
+ n--;
+ if (n == 0) {
+ /*
+ * All updates done. Copy remaining values
+ * and return the result.
+ */
+ for (i++, old_vals++; i < num_old; i++) {
+ *hp++ = *old_vals++;
+ }
+ ASSERT(hp == p->htop + need);
+ p->htop = hp;
+ return res;
+ } else {
+ new_p += 2;
+ GET_TERM(*new_p, new_key);
+ }
+ }
+ old_vals++, old_keys++;
+ }
+
+ /*
+ * Updates left. That means that at least one of the keys in the
+ * update list did not previously exist.
+ */
+ ASSERT(hp == p->htop + need);
+ return THE_NON_VALUE;
+}
+#undef GET_TERM
int catchlevel(Process *p)
{
@@ -6234,3 +6860,12 @@ erts_current_reductions(Process *current, Process *p)
}
}
+int
+erts_beam_jump_table(void)
+{
+#if defined(NO_JUMP_TABLE)
+ return 0;
+#else
+ return 1;
+#endif
+}
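
Note: update_map_assoc() above is a classic merge of two sorted key sequences: the old map's keys and the (sorted) update list are walked in parallel, copying the smaller key, replacing the value on equal keys, and draining whichever side is left over. Keeping the key tuple sorted is what lets this merge and the has_map_fields/get_map_elements scans run in a single linear pass. A self-contained sketch of the loop, with plain ints standing in for Eterm keys and CMP_TERM (illustrative only):

    #include <stdio.h>

    /* Merge sorted (key, value) arrays: old pairs plus an update list,
     * replacing on equal keys and inserting new ones, as in
     * update_map_assoc() above. */
    static int merge(const int *ok, const int *ov, int nold,
                     const int *uk, const int *uv, int nupd,
                     int *rk, int *rv)
    {
        int n = 0;
        while (nold > 0 && nupd > 0) {
            if (*ok < *uk) {                 /* keep old pair */
                rk[n] = *ok++; rv[n++] = *ov++; nold--;
            } else {
                rk[n] = *uk;                 /* insert or replace */
                rv[n++] = *uv;
                if (*ok == *uk) { ok++; ov++; nold--; }  /* replaced */
                uk++; uv++; nupd--;
            }
        }
        while (nold-- > 0) { rk[n] = *ok++; rv[n++] = *ov++; }
        while (nupd-- > 0) { rk[n] = *uk++; rv[n++] = *uv++; }
        return n;  /* actual size; the real code patches arityval/size */
    }

    int main(void)
    {
        int ok[] = {1, 3, 5}, ov[] = {10, 30, 50};
        int uk[] = {3, 4},    uv[] = {33, 44};
        int rk[5], rv[5];
        int n = merge(ok, ov, 3, uk, uv, 2, rk, rv);
        for (int i = 0; i < n; i++)
            printf("%d => %d\n", rk[i], rv[i]); /* 1=>10 3=>33 4=>44 5=>50 */
        return 0;
    }
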
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index bd4e5a52d0..cfc6146b0a 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -409,7 +409,7 @@ typedef struct LoaderState {
__result = __result << 8 | *Stp->file_p++; \
} \
Dest = __result; \
- } while (0)
+ }
#define GetByte(Stp, Dest) \
if ((Stp)->file_left < 1) { \
@@ -506,6 +506,9 @@ static GenOp* gen_select_literals(LoaderState* stp, GenOpArg S,
static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest);
+static GenOp* gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
+ GenOpArg Size, GenOpArg* Rest);
+
static int freeze_code(LoaderState* stp);
static void final_touch(LoaderState* stp);
@@ -535,7 +538,6 @@ static int must_swap_floats;
Uint erts_total_code_size;
/**********************************************************************/
-
void init_load(void)
{
FloatDef f;
@@ -1209,7 +1211,6 @@ verify_chunks(LoaderState* stp)
return 0;
}
-
static int
load_atom_table(LoaderState* stp)
{
@@ -1255,7 +1256,6 @@ load_atom_table(LoaderState* stp)
return 0;
}
-
static int
load_import_table(LoaderState* stp)
{
@@ -1308,7 +1308,6 @@ load_import_table(LoaderState* stp)
return 0;
}
-
static int
read_export_table(LoaderState* stp)
{
@@ -1641,7 +1640,6 @@ read_line_table(LoaderState* stp)
return 0;
}
-
static int
read_code_header(LoaderState* stp)
{
@@ -1711,7 +1709,6 @@ read_code_header(LoaderState* stp)
return 0;
}
-
#define VerifyTag(Stp, Actual, Expected) \
if (Actual != Expected) { \
LoadError2(Stp, "bad tag %d; expected %d", Actual, Expected); \
@@ -1730,7 +1727,6 @@ read_code_header(LoaderState* stp)
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
-
static int
load_code(LoaderState* stp)
{
@@ -2367,7 +2363,11 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
+#ifdef ERTS_DIRTY_SCHEDULERS
+ enum { MIN_FUNC_SZ = 4 };
+#else
+ enum { MIN_FUNC_SZ = 3 };
+#endif
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
@@ -2512,7 +2512,6 @@ load_code(LoaderState* stp)
return retval;
}
-
#define succ(St, X, Y) ((X).type == (Y).type && (X).val + 1 == (Y).val)
#define succ2(St, X, Y) ((X).type == (Y).type && (X).val + 2 == (Y).val)
#define succ3(St, X, Y) ((X).type == (Y).type && (X).val + 3 == (Y).val)
@@ -2964,19 +2963,19 @@ gen_put_integer(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
NEW_GENOP(stp, op);
NATIVE_ENDIAN(Flags);
- if (Size.type == TAG_i && Size.val < 0) {
- error:
/* Negative size must fail */
- op->op = genop_badarg_1;
- op->arity = 1;
- op->a[0] = Fail;
- } else if (Size.type == TAG_i) {
+ if (Size.type == TAG_i) {
op->op = genop_i_new_bs_put_integer_imm_4;
op->arity = 4;
op->a[0] = Fail;
op->a[1].type = TAG_u;
if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
- goto error;
+ error:
+ op->op = genop_badarg_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ op->next = NULL;
+ return op;
}
op->a[1].val = Size.val * Unit.val;
op->a[2].type = Flags.type;
@@ -3791,6 +3790,8 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
} else if (bf == byte_size_1) {
op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
+ } else if (bf == map_size_1) {
+ op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1;
} else if (bf == abs_1) {
op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
} else if (bf == float_1) {
@@ -3957,8 +3958,50 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
return op;
}
+/*
+ * Replace a get_map_elements with one key to an instruction with one
+ * element
+ */
+
+static GenOp*
+gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+
+ ASSERT(Size.type == TAG_u);
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_get_map_element_4;
+ op->arity = 4;
+
+ op->a[0] = Fail;
+ op->a[1] = Src;
+ op->a[2] = Rest[0];
+ op->a[3] = Rest[1];
+ return op;
+}
+
+static GenOp*
+gen_has_map_field(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+
+ ASSERT(Size.type == TAG_u);
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_has_map_field_3;
+ op->arity = 4;
+
+ op->a[0] = Fail;
+ op->a[1] = Src;
+ op->a[2] = Rest[0];
+ return op;
+}
-
/*
* Freeze the code in memory, move the string table into place,
* resolve all labels.
@@ -4276,7 +4319,6 @@ freeze_code(LoaderState* stp)
return 0;
}
-
static void
final_touch(LoaderState* stp)
{
@@ -4378,7 +4420,6 @@ final_touch(LoaderState* stp)
}
}
-
static int
transform_engine(LoaderState* st)
{
@@ -4387,6 +4428,7 @@ transform_engine(LoaderState* st)
Uint* restart; /* Where to restart if current match fails. */
GenOpArg def_vars[TE_MAX_VARS]; /* Default buffer for variables. */
GenOpArg* var = def_vars;
+ int num_vars = 0;
int i; /* General index. */
Uint mask;
GenOp* instr;
@@ -4589,9 +4631,9 @@ transform_engine(LoaderState* st)
{
int n = *pc++;
int formal_arity = gen_opc[instr->op].arity;
- int num_vars = n + (instr->arity - formal_arity);
int j = formal_arity;
+ num_vars = n + (instr->arity - formal_arity);
var = erts_alloc(ERTS_ALC_T_LOADER_TMP,
num_vars * sizeof(GenOpArg));
for (i = 0; i < n; i++) {
@@ -4603,7 +4645,6 @@ transform_engine(LoaderState* st)
}
break;
#endif
-
case TOP_next_arg:
ap++;
break;
@@ -4691,6 +4732,20 @@ transform_engine(LoaderState* st)
instr->a[ap].val = var[i].val;
ap++;
break;
+#if defined(TOP_store_rest_args)
+ case TOP_store_rest_args:
+ {
+ int n = *pc++;
+ int num_extra = num_vars - n;
+
+ ASSERT(n <= num_vars);
+ GENOP_ARITY(instr, instr->arity+num_extra);
+ memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg));
+ memcpy(instr->a+ap, var+n, num_extra*sizeof(GenOpArg));
+ ap += num_extra;
+ }
+ break;
+#endif
case TOP_try_me_else:
restart = pc + 1;
restart += *pc++;
@@ -4716,7 +4771,6 @@ transform_engine(LoaderState* st)
return rval;
}
-
static void
short_file(int line, LoaderState* stp, unsigned needed)
{
@@ -4724,7 +4778,6 @@ short_file(int line, LoaderState* stp, unsigned needed)
stp->file_name, needed);
}
-
static void
load_printf(int line, LoaderState* context, char *fmt,...)
{
@@ -5190,7 +5243,6 @@ native_addresses(Process* p, Eterm mod)
return result;
}
-
/*
* Builds a list of all exported functions in the given module:
* [{Name, Arity},...]
@@ -5240,7 +5292,6 @@ exported_from_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
* Returns a list of all attributes for the module.
*
@@ -5281,7 +5332,6 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
* Returns a list containing compilation information.
*
@@ -5824,7 +5874,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
Funcs = tp[1];
Patchlist = tp[2];
- if ((n = list_length(Funcs)) < 0) {
+ if ((n = erts_list_length(Funcs)) < 0) {
goto error;
}
if ((bytes = erts_get_aligned_binary_bytes(Beam, &temp_alloc)) == NULL) {
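
Note: the new TOP_store_rest_args transform op above lets a transformation rule capture a variable number of trailing arguments and splice them back into the rewritten instruction: the argument array is grown with GENOP_ARITY and the matched variables beyond the first n are appended with memcpy. A standalone sketch of that splice (toy types, not the loader's):

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    /* Sketch of the TOP_store_rest_args step: append the "rest"
     * variables (those past the first n) after the args already
     * stored for the rewritten instruction. */
    int main(void)
    {
        int var[] = {10, 11, 12, 13, 14};  /* matched variables */
        int num_vars = 5, n = 2;           /* first n consumed elsewhere */
        int num_extra = num_vars - n;

        int ap = 3;                        /* args already emitted */
        int *args = malloc((ap + num_extra) * sizeof *args);
        args[0] = 1; args[1] = 2; args[2] = 3;

        /* append var[n .. num_vars) after the existing args */
        memcpy(args + ap, var + n, num_extra * sizeof *args);
        ap += num_extra;

        for (int i = 0; i < ap; i++)
            printf("%d ", args[i]);        /* 1 2 3 12 13 14 */
        printf("\n");
        free(args);
        return 0;
    }
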
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 65a8f26d7c..bd22b0c4de 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,6 +49,7 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
+extern BeamInstr* em_call_nif;
/*
* The following variables keep a sorted list of address ranges for
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 0f2d5d0c2a..cb6470638f 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -282,7 +282,7 @@ find_range(BeamInstr* pc)
while (low < high) {
if (pc < mid->start) {
high = mid;
- } else if (pc > RANGE_END(mid)) {
+ } else if (pc >= RANGE_END(mid)) {
low = mid + 1;
} else {
erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
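
Note: the find_range() fix above is about interval convention: a module's code range is half-open, [start, RANGE_END), so a pc equal to RANGE_END(mid) belongs to a later range and the binary search must move right — hence `>=` instead of `>`. A sketch with simplified types:

    #include <stdio.h>

    struct range { unsigned start, end; };  /* half-open: [start, end) */

    static const struct range *find_range(const struct range *r, int n,
                                          unsigned pc)
    {
        const struct range *low = r, *high = r + n, *mid = low + n / 2;
        while (low < high) {
            if (pc < mid->start)
                high = mid;
            else if (pc >= mid->end)   /* end is one past the range */
                low = mid + 1;
            else
                return mid;
            mid = low + (high - low) / 2;
        }
        return NULL;
    }

    int main(void)
    {
        struct range r[] = { {0, 100}, {100, 200}, {200, 300} };
        unsigned pc = 200;
        const struct range *m = find_range(r, 3, pc);
        printf("pc=%u in [%u,%u)\n", pc, m->start, m->end);
        return 0;
    }

With the old `>` comparison, pc == 200 would have matched [100, 200) even though that address is one past the range's last instruction.
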
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 9c438679ea..42dd160e38 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -160,7 +160,10 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
if (is_internal_port(BIF_ARG_1)) {
int send_link_signal = 0;
- Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ Port *prt = erts_port_lookup(BIF_ARG_1,
+ (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP));
if (!prt) {
goto res_no_proc;
}
@@ -1363,11 +1366,22 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
if (is_internal_port(BIF_ARG_1)) {
- Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ Eterm ref, *refp;
+ Uint32 invalid_flags;
+ Port *prt;
+
+ if (erts_port_synchronous_ops) {
+ refp = &ref;
+ invalid_flags = ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP;
+ }
+ else {
+ refp = NULL;
+ invalid_flags = ERTS_PORT_SFLGS_INVALID_LOOKUP;
+ }
+
+ prt = erts_port_lookup(BIF_ARG_1, invalid_flags);
if (prt) {
- Eterm ref;
- Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
ErtsPortOpResult res;
#ifdef DEBUG
@@ -1855,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1865,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_pid_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
}
return remote_send(p, dep, to, to, msg, suspend);
@@ -1872,10 +1888,18 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm id = erts_whereis_name_to_id(p, to);
rp = erts_proc_lookup(id);
- if (rp)
+ if (rp) {
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
goto send_message;
+ }
- pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ pt = erts_port_lookup(id,
+ (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP));
if (pt) {
portid = id;
goto port_common;
@@ -1890,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1900,12 +1925,16 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
} else if (is_internal_port(to)) {
int ret_val;
portid = to;
- pt = erts_port_lookup(portid, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ pt = erts_port_lookup(portid,
+ (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP));
port_common:
ret_val = 0;
@@ -1994,7 +2023,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
rp = erts_proc_lookup_raw(id);
if (rp)
goto send_message;
- pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ pt = erts_port_lookup(id,
+ (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP));
if (pt) {
portid = id;
goto port_common;
@@ -2543,7 +2575,7 @@ BIF_RETTYPE insert_element_3(BIF_ALIST_3)
Eterm* hp;
Uint arity;
Eterm res;
- Sint ix;
+ Sint ix, c1, c2;
if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -2561,14 +2593,12 @@ BIF_RETTYPE insert_element_3(BIF_ALIST_3)
res = make_tuple(hp);
*hp = make_arityval(arity + 1);
- ix--;
- arity -= ix;
-
- while (ix--) { *++hp = *++ptr; }
+ c1 = ix - 1;
+ c2 = arity - ix + 1;
+ while (c1--) { *++hp = *++ptr; }
*++hp = BIF_ARG_3;
-
- while(arity--) { *++hp = *++ptr; }
+ while (c2--) { *++hp = *++ptr; }
BIF_RET(res);
}
@@ -2579,7 +2609,7 @@ BIF_RETTYPE delete_element_2(BIF_ALIST_3)
Eterm* hp;
Uint arity;
Eterm res;
- Sint ix;
+ Sint ix, c1, c2;
if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -2597,14 +2627,12 @@ BIF_RETTYPE delete_element_2(BIF_ALIST_3)
res = make_tuple(hp);
*hp = make_arityval(arity - 1);
- ix--;
- arity -= ix;
-
- while (ix--) { *++hp = *++ptr; }
+ c1 = ix - 1;
+ c2 = arity - ix;
+ while (c1--) { *++hp = *++ptr; }
++ptr;
-
- while(arity--) { *++hp = *++ptr; }
+ while (c2--) { *++hp = *++ptr; }
BIF_RET(res);
}
@@ -2656,7 +2684,7 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i > MAX_ATOM_CHARACTERS) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
@@ -2744,6 +2772,7 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
Eterm *integer, Eterm *rest)
{
Sint i = 0;
+ Uint ui = 0;
int skip = 0;
int neg = 0;
int n = 0;
@@ -2797,8 +2826,8 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
unsigned_val(CAR(list_val(lst))) > '9') {
break;
}
- i = i * 10;
- i = i + unsigned_val(CAR(list_val(lst))) - '0';
+ ui = ui * 10;
+ ui = ui + unsigned_val(CAR(list_val(lst))) - '0';
n++;
lst = CDR(list_val(lst));
if (is_nil(lst)) {
@@ -2822,7 +2851,8 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
*/
if (n <= SMALL_DIGITS) { /* It must be small */
- if (neg) i = -i;
+ if (neg) i = -(Sint)ui;
+ else i = (Sint)ui;
res = make_small(i);
} else {
lg2 = (n+1)*230/69+1;
@@ -2863,9 +2893,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2875,8 +2902,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
}
}
- if (is_big(res)) {
- hp += (big_arity(res)+1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
}
HRelease(p,hp_end,hp);
}
@@ -2934,7 +2965,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
char *buf = NULL;
int base;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3273,7 +3304,7 @@ BIF_RETTYPE list_to_float_1(BIF_ALIST_1)
Eterm res;
char *buf = NULL;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3388,7 +3419,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* hp;
int len;
- if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
+ if ((len = erts_list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -3745,45 +3776,6 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
-{
- int reds;
- Process *rp;
-
- if (is_not_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (BIF_P->common.id == BIF_ARG_1)
- rp = BIF_P;
- else {
-#ifdef ERTS_SMP
- rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1);
-#else
- rp = erts_proc_lookup(BIF_ARG_1);
-#endif
- if (!rp)
- BIF_RET(am_false);
- }
-
- /* The GC cost is taken for the process executing this BIF. */
-
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- reds = erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
-
-#ifdef ERTS_SMP
- if (BIF_P != rp) {
- erts_resume(rp, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
- }
-#endif
-
- BIF_RET2(am_true, reds);
-}
-
BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
{
int reds;
@@ -3938,7 +3930,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
{
Sint code;
Eterm optlist = BIF_ARG_2;
- int flush = 0;
+ int flush = 1;
for (optlist = BIF_ARG_2;
is_list(optlist);
@@ -4353,7 +4345,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
- &old_no)) {
+ &old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , 0
+#endif
+ )) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -4485,6 +4481,33 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
ref,
old ? am_true : am_false);
}
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
+ Sint old_no;
+ if (!is_small(BIF_ARG_2))
+ goto error;
+ switch (erts_set_schedulers_online(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ signed_val(BIF_ARG_2),
+ &old_no,
+ 1)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(old_no));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
+ am_dirty_cpu_schedulers_online);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
+#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
@@ -4508,7 +4531,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_P->group_leader,
"A call to erlang:system_flag(cpu_topology, _) was made.\n"
"The cpu_topology argument is deprecated and scheduled\n"
- "for removal in erts-5.10/OTP-R16. For more information\n"
+ "for removal in Erlang/OTP 18. For more information\n"
"see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
@@ -4516,7 +4539,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_P->group_leader,
"A call to erlang:system_flag(scheduler_bind_type, _) was\n"
"made. The scheduler_bind_type argument is deprecated and\n"
- "scheduled for removal in erts-5.10/OTP-R16. For more\n"
+ "scheduled for removal in Erlang/OTP 18. For more\n"
"information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
}
@@ -4635,6 +4658,17 @@ BIF_RETTYPE bump_reductions_1(BIF_ALIST_1)
BIF_RET2(am_true, reds);
}
+BIF_RETTYPE erts_internal_cmp_term_2(BIF_ALIST_2) {
+ int res = CMP_TERM(BIF_ARG_1,BIF_ARG_2);
+
+ /* ensure -1, 0, 1 result */
+ if (res < 0) {
+ BIF_RET(make_small(-1));
+ } else if (res > 0) {
+ BIF_RET(make_small(1));
+ }
+ BIF_RET(make_small(0));
+}
/*
* Processes doing yield on return in a bif end up in bif_return_trap().
*/
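
Note: the do_list_to_integer() change above switches the digit accumulator from Sint to Uint. Signed overflow is undefined behaviour in C, while unsigned arithmetic is defined to wrap, so the magnitude is accumulated unsigned and the sign applied only afterwards. A standalone sketch of the idiom:

    #include <stdio.h>

    int main(void)
    {
        const char *digits = "123456789";
        unsigned long ui = 0;
        int neg = 1;

        /* accumulate unsigned: wraps instead of invoking UB */
        for (const char *p = digits; *p; p++)
            ui = ui * 10 + (unsigned long)(*p - '0');

        long i = neg ? -(long)ui : (long)ui;  /* like -(Sint)ui above */
        printf("%ld\n", i);                   /* -123456789 */
        return 0;
    }
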
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 51b77a95ed..72c55ccb55 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -124,12 +124,85 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ return THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \
do { \
(Proc)->freason = (Reason); \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->freason = (Reason); \
+ (Proc)->current = (Bif)->code; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
@@ -392,6 +465,51 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifndef HIPE
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY)
+
+#else
+
+#include "erl_fun.h"
+#include "hipe_mode_switch.h"
+
+/*
+ * Hipe wrappers used by native code for BIFs that disable GC while trapping.
+ * Also add usage of the wrapper in ../hipe/hipe_bif_list.m4
+ *
+ * Problem:
+ * When native code calls a BIF that traps, hipe_mode_switch will push a
+ * "trap frame" on the Erlang stack in order to find its way back from beam_emu
+ * back to native caller when finally done. If GC is disabled and stack/heap
+ * is full there is no place to push the "trap frame".
+ *
+ * Solution:
+ * We reserve space on the stack for the "trap frame" here, before the BIF
+ * is called. If the BIF does not trap, the space is reclaimed here before
+ * returning. If the BIF traps, hipe_push_beam_trap_frame() will detect that
+ * a "trap frame" is already reserved and use it.
+ */
+
+
+#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args); \
+BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
+ Eterm* args) \
+{ \
+ BIF_RETTYPE res; \
+ hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
+ res = BIF_NAME ## _ ## ARITY (c_p, args); \
+ if (is_value(res) || c_p->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(c_p); \
+ } \
+ return res; \
+}
+
+#endif
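Usage mirrors binary.c later in this patch: the wrapper macro is placed right
before the BIF it wraps, so native code calls hipe_wrapper_<name>_<arity>()
instead of the BIF itself:

    HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1)

    BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
    {
        /* ... BIF body as below ... */
    }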
+
+
#include "erl_bif_table.h"
#endif
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 8bc994c8c3..e68b8e6274 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -45,8 +45,7 @@ bif erlang:apply/3
bif erlang:atom_to_list/1
bif erlang:binary_to_list/1
bif erlang:binary_to_list/3
-bif erlang:binary_to_term/1
-bif erlang:check_process_code/2
+bif erlang:binary_to_term/1
bif erlang:crc32/1
bif erlang:crc32/2
bif erlang:crc32_combine/3
@@ -67,7 +66,6 @@ bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
bif erlang:garbage_collect/0
-bif erlang:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
@@ -154,8 +152,15 @@ bif erts_internal:port_command/3
bif erts_internal:port_control/3
bif erts_internal:port_close/1
bif erts_internal:port_connect/2
-bif erts_internal:port_set_data/2
-bif erts_internal:port_get_data/1
+
+bif erts_internal:request_system_task/3
+bif erts_internal:check_process_code/2
+
+bif erts_internal:map_to_tuple_keys/1
+
+# inet_db support
+bif erlang:port_set_data/2
+bif erlang:port_get_data/1
# Tracing & debugging.
bif erlang:trace_pattern/2
@@ -475,7 +480,7 @@ bif erlang:call_on_load_function/1
bif erlang:finish_after_on_load/2
#
-# New Bifs in R13B4
+# New Bifs in R13B04
#
bif erlang:binary_to_term/2
@@ -570,7 +575,35 @@ bif erlang:float_to_binary/2
bif erlang:binary_to_float/1
bif io:printable_range/0
+bif os:unsetenv/1
+
+#
+# New in R17A
+#
+bif re:inspect/2
+
+ubif erlang:is_map/1
+ubif erlang:map_size/1
+bif maps:to_list/1
+bif maps:find/2
+bif maps:get/2
+bif maps:from_list/1
+bif maps:is_key/2
+bif maps:keys/1
+bif maps:merge/2
+bif maps:new/0
+bif maps:put/3
+bif maps:remove/2
+bif maps:update/3
+bif maps:values/1
+
+bif erts_internal:cmp_term/2
+
+#
+# New in 17.1.
+#
+bif erlang:fun_info_mfa/1
#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index acfcc845e4..de7d370938 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -150,14 +150,14 @@
#define D2GTE(a1,a0,b1,b0) (!D2LT(a1,a0,b1,b0))
#define D2LTE(a1,a0,b1,b0) (!D2GT(a1,a0,b1,b0))
-// Add (A+B), A=(a1B+a0) B=(b1B+b0)
+/* Add (A+B), A=(a1B+a0) B=(b1B+b0) */
#define D2ADD(a1,a0,b1,b0,c1,c0) do { \
ErtsDigit __ci = 0; \
DSUM(a0,b0,__ci,c0); \
DSUMc(a1,b1,__ci,c1); \
} while(0)
-// Subtract (A-B), A=(a1B+a0), B=(b1B+b0) (A>=B)
+/* Subtract (A-B), A=(a1B+a0), B=(b1B+b0) (A>=B) */
#define D2SUB(a1,a0,b1,b0,c1,c0) do { \
ErtsDigit __bi; \
DSUB(a0,b0,__bi,c0); \
@@ -274,7 +274,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
- _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ /* The guard avoids an undefined full-width shift when _s == 0 */ \
+ if (_s) _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ else _un32 = _a1; \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
_un0 = _un10 & LO_MASK; \
@@ -1325,9 +1327,9 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
return 1;
}
else {
- SWord ay = (y < 0) ? -y : y;
- int bw = ay / D_EXP;
- int sw = ay % D_EXP;
+ Uint ay = (y < 0) ? -y : y;
+ Uint bw = ay / D_EXP;
+ Uint sw = ay % D_EXP;
dsize_t rl;
ErtsDigit a1=0;
ErtsDigit a0=0;
@@ -1337,7 +1339,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
while(bw--)
*r++ = 0;
- if (sw) { // NOTE! x >> 32 is not = 0!
+ if (sw) { /* NOTE! x >> 32 is not = 0! */
while(xl--) {
a0 = (*x << sw) | a1;
a1 = (*x >> (D_EXP - sw));
@@ -1368,7 +1370,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
}
if (sign) {
- int zl = bw;
+ Uint zl = bw;
ErtsDigit* z = x;
while(zl--) {
@@ -1384,7 +1386,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
x += (xl-1);
r += (rl-1);
xl -= bw;
- if (sw) { // NOTE! x >> 32 is not = 0!
+ if (sw) { /* NOTE! x >> 32 is not = 0! */
while(xl--) {
a1 = (*x >> sw) | a0;
a0 = (*x << (D_EXP-sw));
@@ -1506,13 +1508,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
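The detour through an unsigned type is what makes the negation safe: for the
most negative Sint, -x is signed overflow (undefined behaviour), while
-(Uint)x is well-defined modular arithmetic that yields the magnitude. A
self-contained demonstration, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t x = INT64_MIN;       /* -x here would be signed overflow */
        uint64_t xu = -(uint64_t)x;  /* well-defined: 2^63 */
        printf("%llu\n", (unsigned long long) xu);  /* 9223372036854775808 */
        return 0;
    }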
@@ -1540,21 +1544,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,7 +1571,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
@@ -1603,9 +1610,11 @@ big_to_double(Wterm x, double* resp)
/*
* Logic has been copied from erl_bif_guard.c and slightly
* modified to use a static instead of dynamic heap
+ *
+ * HALFWORD: Return relative term with 'heap' as base.
*/
Eterm
-double_to_big(double x, Eterm *heap)
+double_to_big(double x, Eterm *heap, Uint hsz)
{
int is_negative;
int ds;
@@ -1633,9 +1642,10 @@ double_to_big(double x, Eterm *heap)
sz = BIG_NEED_SIZE(ds); /* number of words including arity */
hp = heap;
- res = make_big(hp);
+ res = make_big_rel(hp, heap);
xp = (ErtsDigit*) (hp + 1);
+ ASSERT(ds < hsz);
for (i = ds - 1; i >= 0; i--) {
ErtsDigit d;
@@ -2468,7 +2478,7 @@ int term_equals_2pow32(Eterm x)
if (!is_big(x))
return 0;
bp = big_val(x);
-#if D_EXP == 16 // 16 bit platfrom not really supported!!!
+#if D_EXP == 16 /* 16 bit platform not really supported!!! */
return (BIG_SIZE(bp) == 3) && !BIG_DIGIT(bp,0) && !BIG_DIGIT(bp,1) &&
BIG_DIGIT(bp,2) == 1;
#elif D_EXP == 32
@@ -2528,17 +2538,18 @@ const byte d_base_exp_lookup[] = { 31, 19, 15, 13, 11, 11, 10, 9, 9, 8, 8, 8, 8,
* end
* How much can the characters which fit in 31 bit represent
*/
-const Uint d_base_base_lookup[] = { 2147483648, 1162261467, 1073741824,
- 1220703125, 362797056, 1977326743, 1073741824, 387420489, 1000000000,
- 214358881, 429981696, 815730721, 1475789056, 170859375, 268435456,
- 410338673, 612220032, 893871739, 1280000000, 1801088541, 113379904,
- 148035889, 191102976, 244140625, 308915776, 387420489, 481890304,
- 594823321, 729000000, 887503681, 1073741824, 1291467969, 1544804416,
- 1838265625, 60466176, 69343957, 79235168, 90224199, 102400000,
- 115856201, 130691232, 147008443, 164916224, 184528125, 205962976,
- 229345007, 254803968, 282475249, 312500000, 345025251, 380204032,
- 418195493, 459165024, 503284375, 550731776, 601692057, 656356768,
- 714924299, 777600000, 844596301, 916132832, 992436543, 1073741824 };
+const Uint d_base_base_lookup[] = { 2147483648u, 1162261467u, 1073741824u,
+ 1220703125u, 362797056u, 1977326743u, 1073741824u, 387420489u,
+ 1000000000u, 214358881u, 429981696u, 815730721u, 1475789056u,
+ 170859375u, 268435456u, 410338673u, 612220032u, 893871739u, 1280000000u,
+ 1801088541u, 113379904u, 148035889u, 191102976u, 244140625u, 308915776u,
+ 387420489u, 481890304u, 594823321u, 729000000u, 887503681u, 1073741824u,
+ 1291467969u, 1544804416u, 1838265625u, 60466176u, 69343957u, 79235168u,
+ 90224199u, 102400000u, 115856201u, 130691232u, 147008443u, 164916224u,
+ 184528125u, 205962976u, 229345007u, 254803968u, 282475249u, 312500000u,
+ 345025251u, 380204032u, 418195493u, 459165024u, 503284375u, 550731776u,
+ 601692057u, 656356768u, 714924299u, 777600000u, 844596301u, 916132832u,
+ 992436543u, 1073741824u };
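The added u suffixes are significant on 32-bit targets, where 2147483648 does
not fit in int or long: an unsuffixed constant is then promoted by the
compiler (to unsigned long in C90, to long long in C99), which provokes
warnings when it initializes a Uint table. A minimal illustration, not patch
code:

    #include <stdint.h>
    static const uint32_t tbl[] = { 2147483648u };  /* type is unambiguous */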
Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
Uint size, const int base) {
@@ -2663,9 +2674,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2675,8 +2683,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
}
}
- if (is_big(res)) {
- hp += (big_arity(res) + 1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res) + 1);
+ }
}
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_done;
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 1a7b14170f..da31876d75 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
@@ -141,7 +141,7 @@ Eterm big_lshift(Eterm, Sint, Eterm*);
int big_comp (Wterm, Wterm);
int big_ucomp (Eterm, Eterm);
int big_to_double(Wterm x, double* resp);
-Eterm double_to_big(double, Eterm*);
+Eterm double_to_big(double, Eterm*, Uint hsz);
Eterm small_to_big(Sint, Eterm*);
Eterm uint_to_big(Uint, Eterm*);
Eterm uword_to_big(UWord, Eterm*);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 33abac2f3d..f50d484576 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -31,12 +31,11 @@
#include "erl_binary.h"
#include "erl_bits.h"
-#ifdef DEBUG
-static int list_to_bitstr_buf(Eterm obj, char* buf, Uint len);
-#else
-static int list_to_bitstr_buf(Eterm obj, char* buf);
-#endif
-static int bitstr_list_len(Eterm obj, Uint* num_bytes);
+static Export binary_to_list_continue_export;
+static Export list_to_binary_continue_export;
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1);
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1);
void
erts_init_binary(void)
@@ -49,6 +48,15 @@ erts_init_binary(void)
"Internal error: Address of orig_bytes[0] of a Binary"
" is *not* 8-byte aligned\n");
}
+
+ erts_init_trap_export(&binary_to_list_continue_export,
+ am_erts_internal, am_binary_to_list_continue, 1,
+ &binary_to_list_continue);
+
+ erts_init_trap_export(&list_to_binary_continue_export,
+ am_erts_internal, am_list_to_binary_continue, 1,
+ &list_to_binary_continue);
+
}
/*
@@ -333,6 +341,132 @@ BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1)
BIF_RET(res);
}
+#define ERTS_B2L_BYTES_PER_REDUCTION 256
+
+typedef struct {
+ Eterm res;
+ Eterm *hp;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+ byte *bytes;
+ Uint size;
+ Uint bitoffs;
+} ErtsB2LState;
+
+static void b2l_state_destructor(Binary *mbp)
+{
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+}
+
+static BIF_RETTYPE
+binary_to_list_chunk(Process *c_p,
+ Eterm mb_eterm,
+ ErtsB2LState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ BIF_RETTYPE ret;
+ int bump_reds;
+ Uint size;
+ byte *bytes;
+
+ size = (reds_left + 1)*ERTS_B2L_BYTES_PER_REDUCTION;
+ if (size > sp->size)
+ size = sp->size;
+ bytes = sp->bytes + (sp->size - size);
+
+ bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+
+ ASSERT(is_list(sp->res) || is_nil(sp->res));
+
+ sp->res = erts_bin_bytes_to_list(sp->res,
+ sp->hp,
+ bytes,
+ size,
+ sp->bitoffs);
+ sp->size -= size;
+ sp->hp += 2*size;
+
+ if (sp->size > 0) {
+
+ if (!gc_disabled)
+ erts_set_gc_state(c_p, 0);
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+ ASSERT(is_value(mb_eterm));
+ ERTS_BIF_PREP_TRAP1(ret,
+ &binary_to_list_continue_export,
+ c_p,
+ mb_eterm);
+ }
+ else {
+
+ ASSERT(sp->hp == sp->hp_end);
+ ASSERT(sp->size == 0);
+
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, sp->res);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, sp->res);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ }
+
+ return ret;
+}
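To put the constant in perspective, assuming the conventional CONTEXT_REDS of
2000 from erl_vm.h, a freshly scheduled process converts at most

    2000 * ERTS_B2L_BYTES_PER_REDUCTION = 2000 * 256 = 512000 bytes

of binary data, roughly half a megabyte, per time slice before trapping out
through binary_to_list_continue.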
+
+static ERTS_INLINE BIF_RETTYPE
+binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes, Uint size, Uint bitoffs)
+{
+ int reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ if (size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION) {
+ Eterm res;
+ BIF_RETTYPE ret;
+ int bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1;
+ BUMP_REDS(c_p, bump_reds);
+ res = erts_bin_bytes_to_list(tail, hp, bytes, size, bitoffs);
+ ERTS_BIF_PREP_RET(ret, res);
+ return ret;
+ }
+ else {
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsB2LState),
+ b2l_state_destructor);
+ ErtsB2LState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm mb;
+
+ sp->res = tail;
+ sp->hp = hp;
+#ifdef DEBUG
+ sp->hp_end = sp->hp + 2*size;
+#endif
+ sp->bytes = bytes;
+ sp->size = size;
+ sp->bitoffs = bitoffs;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
+ }
+}
+
+static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return binary_to_list_chunk(BIF_P,
+ BIF_ARG_1,
+ (ErtsB2LState*) ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1)
+
BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -354,14 +488,15 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
} else {
Eterm* hp = HAlloc(BIF_P, 2 * size);
byte* bytes = binary_bytes(real_bin)+offset;
-
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes, size, bitoffs));
+ return binary_to_list(BIF_P, hp, NIL, bytes, size, bitoffs);
}
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 3)
+
BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
{
byte* bytes;
@@ -387,12 +522,13 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
}
i = stop-start+1;
hp = HAlloc(BIF_P, 2*i);
- BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs));
-
+ return binary_to_list(BIF_P, hp, NIL, bytes+start-1, i, bitoffs);
error:
BIF_ERROR(BIF_P, BADARG);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(bitstring_to_list, 1)
+
BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
{
Eterm real_bin;
@@ -431,113 +567,441 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
previous = CONS(hp, make_binary(last), previous);
hp += 2;
}
- BIF_RET(erts_bin_bytes_to_list(previous, hp, bytes, size, bitoffs));
+
+ return binary_to_list(BIF_P, hp, previous, bytes, size, bitoffs);
}
/* Turn a possibly deep list of ints (and binaries) into */
/* One large binary object */
-/*
- * This bif also exists in the binary module, under the name
- * binary:list_to_bin/1, why it's divided into interface and
- * implementation. Also the backend for iolist_to_binary_1.
- */
+typedef enum {
+ ERTS_L2B_OK,
+ ERTS_L2B_YIELD,
+ ERTS_L2B_TYPE_ERROR,
+ ERTS_L2B_OVERFLOW_ERROR
+} ErtsL2BResult;
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
-{
+#define ERTS_L2B_STATE_INITER(C_P, ARG, BIF, SZFunc, TBufFunc) \
+ {ERTS_IOLIST2BUF_STATE_INITER((C_P), (ARG)), \
+ (ARG), THE_NON_VALUE, (BIF), (SZFunc), (TBufFunc)}
+
+#define ERTS_L2B_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsL2BState))
+
+typedef struct ErtsL2BState_ ErtsL2BState;
+
+struct ErtsL2BState_ {
+ ErtsIOList2BufState buf;
+ Eterm arg;
Eterm bin;
- ErlDrvSizeT size;
- byte* bytes;
-#ifdef DEBUG
- ErlDrvSizeT offset;
-#endif
+ Export *bif;
+ int (*iolist_to_buf_size)(ErtsIOListState *);
+ ErlDrvSizeT (*iolist_to_buf)(ErtsIOList2BufState *);
+};
- if (is_nil(arg)) {
- BIF_RET(new_binary(p,(byte*)"",0));
+static ERTS_INLINE ErtsL2BResult
+list_to_binary_engine(ErtsL2BState *sp)
+{
+ ErlDrvSizeT res;
+ Process *c_p = sp->buf.iolist.c_p;
+
+ /*
+ * have_size stays 0 until sp->iolist_to_buf_size()
+ * has finished the size calculation.
+ */
+
+ if (!sp->buf.iolist.have_size) {
+ switch (sp->iolist_to_buf_size(&sp->buf.iolist)) {
+ case ERTS_IOLIST_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TYPE:
+ return ERTS_L2B_TYPE_ERROR;
+ case ERTS_IOLIST_OK:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(sp->buf.iolist.have_size);
+
+ /*
+ * Size calculated... Set up the state for
+ * sp->iolist_to_buf_*().
+ */
+
+ sp->bin = new_binary(c_p,
+ (byte *) NULL,
+ sp->buf.iolist.size);
+
+ if (sp->buf.iolist.size == 0)
+ return ERTS_L2B_OK;
+
+ sp->buf.buf = (char *) binary_bytes(sp->bin);
+ sp->buf.len = sp->buf.iolist.size;
+ sp->buf.iolist.obj = sp->arg;
+
+ if (sp->buf.iolist.reds_left <= 0) {
+ BUMP_ALL_REDS(c_p);
+ return ERTS_L2B_YIELD;
+ }
}
- if (is_not_list(arg)) {
- goto error;
+
+ ASSERT(sp->buf.iolist.size != 0);
+ ASSERT(is_value(sp->bin));
+ ASSERT(sp->buf.buf);
+
+ res = sp->iolist_to_buf(&sp->buf);
+
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res)) {
+ ASSERT(res == 0);
+ return ERTS_L2B_OK;
}
- switch (erts_iolist_size(arg, &size)) {
- case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT);
- case ERTS_IOLIST_TYPE: goto error;
- default: ;
+
+ switch (res) {
+ case ERTS_IOLIST_TO_BUF_YIELD:
+ return ERTS_L2B_YIELD;
+ case ERTS_IOLIST_TO_BUF_OVERFLOW:
+ return ERTS_L2B_OVERFLOW_ERROR;
+ case ERTS_IOLIST_TO_BUF_TYPE_ERROR:
+ return ERTS_L2B_TYPE_ERROR;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from iolist_to_buf_yielding()");
+ return ERTS_L2B_TYPE_ERROR;
}
- bin = new_binary(p, (byte *)NULL, size);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset =
-#endif
- erts_iolist_to_buf(arg, (char*) bytes, size);
+}
+
+static void
+l2b_state_destructor(Binary *mbp)
+{
+ ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+}
+
+static ERTS_INLINE Eterm
+l2b_final_touch(Process *c_p, ErtsL2BState *sp)
+{
+ Eterm *hp;
+ ErlSubBin* sbin;
+ if (sp->buf.offset == 0)
+ return sp->bin;
+
+ hp = HAlloc(c_p, ERL_SUB_BIN_SIZE);
+ ASSERT(sp->buf.offset > 0);
+ sbin = (ErlSubBin *) hp;
+ sbin->thing_word = HEADER_SUB_BIN;
+ sbin->size = sp->buf.iolist.size-1;
+ sbin->offs = 0;
+ sbin->orig = sp->bin;
+ sbin->bitoffs = 0;
+ sbin->bitsize = sp->buf.offset;
+ sbin->is_writable = 0;
+ return make_binary(sbin);
+}
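When the accumulated data does not end on a byte boundary, the result must be
a bitstring, so the whole buffer is wrapped in a sub-binary of size - 1 whole
bytes plus offset trailing bits. Worked by hand as an illustration:
list_to_bitstring([<<1:4>>, <<2:5>>]) accumulates 9 bits, the engine
allocates a 2-byte binary, and the sub-binary records size = 1 and
bitsize = 1, i.e. 8 + 1 = 9 bits.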
+
+static BIF_RETTYPE
+list_to_binary_chunk(Eterm mb_eterm,
+ ErtsL2BState* sp,
+ int reds_left,
+ int gc_disabled)
+{
+ Eterm err = BADARG;
+ BIF_RETTYPE ret;
+ Process *c_p = sp->buf.iolist.c_p;
- ASSERT(offset == 0);
- BIF_RET(bin);
+ sp->buf.iolist.reds_left = reds_left;
- error:
- BIF_ERROR(p, BADARG);
+ switch (list_to_binary_engine(sp)) {
+
+ case ERTS_L2B_OK: {
+ Eterm result = l2b_final_touch(c_p, sp);
+ if (!gc_disabled || !erts_set_gc_state(c_p, 1))
+ ERTS_BIF_PREP_RET(ret, result);
+ else
+ ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, result);
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+ }
+ case ERTS_L2B_YIELD:
+ if (!gc_disabled) {
+ /* first yield... */
+ Eterm *hp;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsL2BState),
+ l2b_state_destructor);
+ ErtsL2BState *new_sp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_L2B_STATE_MOVE(new_sp, sp);
+ sp = new_sp;
+
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+
+ ASSERT(is_value(mb_eterm));
+
+ erts_set_gc_state(c_p, 0);
+ }
+
+ ASSERT(c_p->flags & F_DISABLE_GC);
+
+ ERTS_BIF_PREP_TRAP1(ret,
+ &list_to_binary_continue_export,
+ c_p,
+ mb_eterm);
+ break;
+
+ case ERTS_L2B_OVERFLOW_ERROR:
+ err = SYSTEM_LIMIT;
+ /* fall through */
+
+ case ERTS_L2B_TYPE_ERROR:
+ if (!gc_disabled)
+ ERTS_BIF_PREP_ERROR(ret, c_p, err);
+ else {
+ if (erts_set_gc_state(c_p, 1))
+ ERTS_VBUMP_ALL_REDS(c_p);
+
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret,
+ c_p,
+ err,
+ sp->bif,
+ sp->arg);
+ }
+
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid return value from list_to_binary_engine()");
+ ERTS_BIF_PREP_ERROR(ret,c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
+ return ret;
+}
+
+static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
+{
+ Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ return list_to_binary_chunk(BIF_ARG_1,
+ ERTS_MAGIC_BIN_DATA(mbp),
+ ERTS_BIF_REDS_LEFT(BIF_P),
+ 1);
+}
+
+BIF_RETTYPE erts_list_to_binary_bif(Process *c_p, Eterm arg, Export *bif)
+{
+ BIF_RETTYPE ret;
+
+ if (is_nil(arg))
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) "", 0));
+ else if (is_not_list(arg))
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ else {
+ /* check for [binary()] case */
+ Eterm h = CAR(list_val(arg));
+ Eterm t = CDR(list_val(arg));
+ if (is_binary(h)
+ && is_nil(t)
+ && !(HEADER_SUB_BIN == *(binary_val(h))
+ && (((ErlSubBin *)binary_val(h))->bitoffs != 0
+ || ((ErlSubBin *)binary_val(h))->bitsize != 0))) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(c_p,
+ arg,
+ bif,
+ erts_iolist_size_yielding,
+ erts_iolist_to_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(c_p);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding iolist_to_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (erts_iolist_size_yielding(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+ Eterm bin;
+ char *buf;
+
+ if (size == 0) {
+ ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) NULL, 0));
+ break; /* done */
+ }
+
+ bin = new_binary(c_p, (byte *) NULL, size);
+ buf = (char *) binary_bytes(bin);
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over)estimate of the reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = erts_iolist_to_buf(arg, buf, size);
+ if (res == 0) {
+ BUMP_REDS(c_p, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since the size has been computed, list_to_binary_chunk() expects
+ * the state to be prepared for iolist_to_buf.
+ */
+ state.bin = bin;
+ state.buf.buf = buf;
+ state.buf.len = size;
+ state.buf.iolist.obj = arg;
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ break;
+ }
+ }
+ }
+ return ret;
}
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_binary, 1)
+
BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_list_to_binary_1]);
}
-/* Turn a possibly deep list of ints (and binaries) into */
-/* One large binary object */
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
BIF_RET(BIF_ARG_1);
}
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
+static int bitstr_list_len(ErtsIOListState *);
+static ErlDrvSizeT list_to_bitstr_buf_yielding(ErtsIOList2BufState *);
+static ErlDrvSizeT list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *);
+
+HIPE_WRAPPER_BIF_DISABLE_GC(list_to_bitstring, 1)
+
BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
{
- Eterm bin;
- Uint sz;
- int offset;
- byte* bytes;
- ErlSubBin* sb1;
- Eterm* hp;
-
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
- }
- if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
- }
- switch (bitstr_list_len(BIF_ARG_1, &sz)) {
- case ERTS_IOLIST_TYPE:
- goto error;
- case ERTS_IOLIST_OVERFLOW:
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- bin = new_binary(BIF_P, (byte *)NULL, sz);
- bytes = binary_bytes(bin);
-#ifdef DEBUG
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, sz);
-#else
- offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes);
-#endif
- ASSERT(offset >= 0);
- if (offset > 0) {
- hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE);
- sb1 = (ErlSubBin *) hp;
- sb1->thing_word = HEADER_SUB_BIN;
- sb1->size = sz-1;
- sb1->offs = 0;
- sb1->orig = bin;
- sb1->bitoffs = 0;
- sb1->bitsize = offset;
- sb1->is_writable = 0;
- bin = make_binary(sb1);
+ BIF_RETTYPE ret;
+
+ if (is_nil(BIF_ARG_1))
+ ERTS_BIF_PREP_RET(ret, new_binary(BIF_P, (byte *) "", 0));
+ else if (is_not_list(BIF_ARG_1))
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+ /* check for [bitstring()] case */
+ Eterm h = CAR(list_val(BIF_ARG_1));
+ Eterm t = CDR(list_val(BIF_ARG_1));
+ if (is_binary(h) && is_nil(t)) {
+ ERTS_BIF_PREP_RET(ret, h);
+ }
+ else {
+ ErtsL2BState state = ERTS_L2B_STATE_INITER(BIF_P,
+ BIF_ARG_1,
+ bif_export[BIF_list_to_bitstring_1],
+ bitstr_list_len,
+ list_to_bitstr_buf_yielding);
+ int orig_reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+
+ /*
+ * First try to do it all at once without having to use
+ * yielding list_to_bitstr_buf().
+ */
+ state.buf.iolist.reds_left = orig_reds_left;
+ switch (bitstr_list_len(&state.buf.iolist)) {
+ case ERTS_IOLIST_OK: {
+ ErlDrvSizeT size = state.buf.iolist.size;
+
+ state.bin = new_binary(BIF_P, (byte *) NULL, size);
+ state.buf.buf = (char *) binary_bytes(state.bin);
+ state.buf.len = size;
+ state.buf.iolist.obj = BIF_ARG_1;
+
+ if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) {
+ /* An (over)estimate of the reductions needed */
+ int reds_left = state.buf.iolist.reds_left;
+ int to_buf_reds = orig_reds_left - reds_left;
+ to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED;
+ if (to_buf_reds <= reds_left) {
+ ErlDrvSizeT res;
+
+ res = list_to_bitstr_buf_not_yielding(&state.buf);
+ if (res == 0) {
+ Eterm res_bin = l2b_final_touch(BIF_P, &state);
+ BUMP_REDS(BIF_P, to_buf_reds);
+ ERTS_BIF_PREP_RET(ret, res_bin);
+ break; /* done */
+ }
+ if (!ERTS_IOLIST_TO_BUF_FAILED(res))
+ ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch");
+ if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
+ goto overflow;
+ goto type_error;
+ }
+ }
+ /*
+ * Since the size has been computed, list_to_binary_chunk() expects
+ * the state to be prepared for list_to_bitstr_buf.
+ */
+
+ /* Fall through... */
+ }
+ case ERTS_IOLIST_YIELD:
+ ret = list_to_binary_chunk(THE_NON_VALUE,
+ &state,
+ state.buf.iolist.reds_left,
+ 0);
+ break;
+ case ERTS_IOLIST_OVERFLOW:
+ overflow:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ break;
+ case ERTS_IOLIST_TYPE:
+ type_error:
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+ }
}
-
- BIF_RET(bin);
+
+ return ret;
}
BIF_RETTYPE split_binary_2(BIF_ALIST_2)
@@ -594,123 +1058,353 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
* Local functions.
*/
+static int
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
/*
* The input list is assumed to be type-correct and the buffer is
* assumed to be of sufficient size. Those assumptions are verified in
* the DEBUG-built emulator.
*/
-static int
+static ErlDrvSizeT
+list_to_bitstr_buf(int yield_support, ErtsIOList2BufState *state)
+{
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
#ifdef DEBUG
-list_to_bitstr_buf(Eterm obj, char* buf, Uint len)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG \
+ len -= size + (offset>7);
#else
-list_to_bitstr_buf(Eterm obj, char* buf)
+#define LIST_TO_BITSTR_BUF_BCOPY_DBG
#endif
-{
- Eterm* objp;
- int offset = 0;
+#define LIST_TO_BITSTR_BUF_BCOPY(CONSP) \
+ do { \
+ byte* bptr; \
+ Uint bitsize; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ size_t size = binary_size(obj); \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ ASSERT(size <= len); \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ num_bits = 8*size+bitsize; \
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); \
+ offset += bitsize; \
+ buf += size + (offset>7); \
+ LIST_TO_BITSTR_BUF_BCOPY_DBG; \
+ offset = offset & 7; \
+ } while(0)
+
+#ifdef DEBUG
+ ErlDrvSizeT len;
+#endif
+ Eterm obj;
+ char *buf;
+ Eterm *objp = NULL;
+ int offset;
+ int init_yield_count = 0, yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- ASSERT(len > 0);
- if (offset == 0) {
- *buf++ = unsigned_val(obj);
- } else {
- *buf = (char)((unsigned_val(obj) >> offset) |
- ((*buf >> (8-offset)) << (8-offset)));
- buf++;
- *buf = (unsigned_val(obj) << (8-offset));
- }
+
+ obj = state->iolist.obj;
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len--;
+ len = state->len;
#endif
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ if (list_to_bitstr_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ /* Yield again... */
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ }
+ buf = state->buf;
+ offset = state->offset;
#ifdef DEBUG
- len -= size + (offset>7);
+ len = state->len;
#endif
- offset = offset & 7;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else {
- ASSERT(is_nil(obj));
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size+(offset>7);
+ objp = state->objp;
+ state->objp = NULL;
+
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ ASSERT(len > 0);
+ if (offset == 0) {
+ *buf++ = unsigned_val(obj);
+ } else {
+ *buf = (char)((unsigned_val(obj) >> offset) |
+ ((*buf >> (8-offset)) << (8-offset)));
+ buf++;
+ *buf = (unsigned_val(obj) << (8-offset));
+ }
#ifdef DEBUG
- len -= size+(offset>7);
+ len--;
#endif
- offset = offset & 7;
- } else {
- ASSERT(is_nil(obj));
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
+ }
+
+ L_tail:
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- ASSERT(size <= len);
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- num_bits = 8*size+bitsize;
- copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
- offset += bitsize;
- buf += size + (offset>7);
-#ifdef DEBUG
- len -= size + (offset>7);
-#endif
- offset = offset & 7;
+ LIST_TO_BITSTR_BUF_BCOPY(NULL);
} else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
ASSERT(is_nil(obj));
}
}
DESTROY_ESTACK(s);
- return offset;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+ state->buf = buf;
+ state->offset = offset;
+ return 0;
+
+L_bcopy_yield:
+
+ state->buf = buf;
+ state->offset = offset;
+#ifdef DEBUG
+ state->len = len;
+#endif
+
+ if (list_to_bitstr_buf_bcopy(state, obj, &yield_count) == 0)
+ ERTS_INTERNAL_ERROR("Missing yield");
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->offset = offset;
+ ESTACK_SAVE(s, &state->iolist.estack);
+#ifdef DEBUG
+ state->len = len;
+#endif
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+
+#undef LIST_TO_BITSTR_BUF_BCOPY_DBG
+#undef LIST_TO_BITSTR_BUF_BCOPY
+
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(1, state);
+}
+
+static ErlDrvSizeT
+list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *state)
+{
+ return list_to_bitstr_buf(0, state);
}
static int
-bitstr_list_len(Eterm obj, Uint* num_bytes)
+list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ int res;
+ char *buf = state->buf;
+ char *next_buf;
+ int offset = state->offset;
+ int next_offset;
+#ifdef DEBUG
+ ErlDrvSizeT len = state->len;
+ ErlDrvSizeT next_len;
+#endif
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ Uint bitsize;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ bitsize = state->bcopy.bitsize;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+
+ ASSERT(size <= len);
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ }
+
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ next_offset = offset + bitsize;
+ next_buf = buf + size+(next_offset>7);
+#ifdef DEBUG
+ next_len = len - size+(next_offset>7);
+#endif
+ next_offset &= 7;
+ num_bits = 8*size+bitsize;
+ res = 0;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.bitsize = bitsize;
+ state->bcopy.size = size - max_size;
+ next_buf = buf + max_size;
+#ifdef DEBUG
+ next_len = len - max_size;
+#endif
+ next_offset = offset;
+ num_bits = 8*max_size;
+ size = max_size;
+ res = 1;
+ }
+
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+
+ state->offset = next_offset;
+ state->buf = next_buf;
+#ifdef DEBUG
+ state->len = next_len;
+#endif
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+static int
+bitstr_list_len(ErtsIOListState *state)
{
Eterm* objp;
- Uint len = 0;
- Uint offs = 0;
+ Eterm obj;
+ Uint len, offs;
+ int res, init_yield_count, yield_count;
DECLARE_ESTACK(s);
+
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+
+ len = (Uint) state->size;
+ offs = state->offs;
+ obj = state->obj;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore estack... */
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -737,46 +1431,55 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- len++;
- if (len == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (--yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ if (len == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj)) {
+ SAFE_ADD(len, binary_size(obj));
+ SAFE_ADD_BITSIZE(offs, obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
+ } else {
+ if (--yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj)) {
SAFE_ADD(len, binary_size(obj));
SAFE_ADD_BITSIZE(offs, obj);
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj)) {
- SAFE_ADD(len, binary_size(obj));
- SAFE_ADD_BITSIZE(offs, obj);
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
#undef SAFE_ADD_BITSIZE
- DESTROY_ESTACK(s);
-
/*
* Make sure that the number of bits in the bitstring will fit
* in an Uint to ensure that the binary can be matched using
@@ -789,15 +1492,42 @@ bitstr_list_len(Eterm obj, Uint* num_bytes)
if (len << 3 < len) {
goto L_overflow_error;
}
- *num_bytes = len;
- return ERTS_IOLIST_OK;
+ state->size = len;
- L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_OK;
+
+ L_return: {
+ int yc = init_yield_count - yield_count;
+ int reds;
+
+ DESTROY_ESTACK(s);
+ CLEAR_SAVED_ESTACK(&state->estack);
+
+ reds = (yc - 1)/ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) len;
+ state->have_size = 1;
+ return res;
+ }
L_overflow_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+ res = ERTS_IOLIST_OVERFLOW;
+ len = 0;
+ goto L_return;
+
+ L_type_error:
+ res = ERTS_IOLIST_TYPE;
+ len = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = len;
+ state->offs = offs;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
}
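The reduction charge on the L_return path is a ceiling division of the
consumed yield counts, so even a trivial traversal is billed at least one
reduction:

    reds = (yc - 1)/K + 1 = ceil(yc/K),  K = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED

    e.g. with K = 4 (value chosen only for illustration):
         yc = 4  ->  1 reduction,   yc = 5  ->  2 reductions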
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 9aa1e5f30d..08265b590d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -76,7 +76,10 @@ process_info(int to, void *to_arg)
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
- if (!ERTS_PROC_IS_EXITING(p))
+ /* Do not include processes with no heap;
+ * they have most likely just been created and contain invalid data.
+ */
+ if (!ERTS_PROC_IS_EXITING(p) && p->heap != NULL)
print_process_info(to, to_arg, p);
}
}
@@ -109,10 +112,12 @@ process_killer(void)
erts_smp_proc_lock(rp, rp_locks);
state = erts_smp_atomic32_read_acqb(&rp->state);
if (state & (ERTS_PSFLG_FREE
- | ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_RUNNING)) {
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS)) {
erts_printf("Can only kill WAITING processes this way\n");
}
else {
@@ -251,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p)
p->current[1],
p->current[2]);
}
+ erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix);
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
@@ -331,6 +337,7 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
erts_print(to, to_arg, "OldHeap unused: %bpu\n",
(OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
+ erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p));
if (garbing) {
print_garb_info(to, to_arg, p);
@@ -753,7 +760,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
return; /* Can't create the crash dump, skip it */
time(&now);
- erts_fdprintf(fd, "=erl_crash_dump:0.1\n%s", ctime(&now));
+ erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));
if (file != NULL)
erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line);
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index c66d5a2f05..4344558348 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -58,7 +58,8 @@ void erts_code_ix_init(void)
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_key_create(&has_code_write_permission);
+ erts_tsd_key_create(&has_code_write_permission,
+ "erts_has_code_write_permission");
#endif
CIX_TRACE("init");
}
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 23c0fca6aa..50548850eb 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -27,6 +27,7 @@
#include "erl_process.h"
#include "erl_gc.h"
#include "big.h"
+#include "erl_map.h"
#include "erl_binary.h"
#include "erl_bits.h"
#include "dtrace-wrapper.h"
@@ -47,7 +48,8 @@ copy_object(Eterm obj, Process* to)
if (DTRACE_ENABLED(copy_object)) {
DTRACE_CHARBUF(proc_name, 64);
- erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
+ erts_snprintf(proc_name, sizeof(DTRACE_CHARBUF_NAME(proc_name)),
+ "%T", to->common.id);
DTRACE2(copy_object, proc_name, size);
}
#endif
@@ -150,6 +152,24 @@ Uint size_object(Eterm obj)
goto pop_next;
}
break;
+ case MAP_SUBTAG:
+ {
+ Uint n;
+ map_t *mp;
+ mp = (map_t*)map_val_rel(obj,base);
+ ptr = (Eterm *)mp;
+ n = map_get_size(mp) + 1;
+ sum += n + 2;
+ ptr += 2; /* hdr + size words */
+ while (n--) {
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ goto pop_next;
+ }
+ break;
case BIN_MATCHSTATE_SUBTAG:
erl_exit(ERTS_ABORT_EXIT,
"size_object: matchstate term not allowed");
@@ -318,6 +338,15 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
}
break;
+ case MAP_SUBTAG:
+ {
+ i = map_get_size(objp) + 3;
+ *argp = make_map_rel(htop, dst_base);
+ while (i--) {
+ *htop++ = *objp++;
+ }
+ }
+ break;
case REFC_BINARY_SUBTAG:
{
ProcBin* pb;
@@ -537,6 +566,10 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
goto off_heap_common;
+ case MAP_SUBTAG:
+ *hp++ = *tp++;
+ sz--;
+ break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 0781665f05..ec07ddcd9c 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -353,7 +353,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
static void doit_link_net_exits(ErtsLink *lnk, void *vnecp)
{
LinkNetExitsContext lnec = {(NetExitsContext *) vnecp, lnk};
- ASSERT(lnk->type == LINK_PID)
+ ASSERT(lnk->type == LINK_PID);
erts_sweep_links(ERTS_LINK_ROOT(lnk), &doit_link_net_exits_sub, (void *) &lnec);
#ifdef DEBUG
ERTS_LINK_ROOT(lnk) = NULL;
@@ -369,7 +369,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
Process *rp;
ErtsLink *rlnk;
Uint i,n;
- ASSERT(lnk->type == LINK_NODE)
+ ASSERT(lnk->type == LINK_NODE);
if (is_internal_pid(lnk->pid)) {
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
@@ -455,7 +455,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
Eterm nd_reason = (reason == am_no_network
? am_no_network
: am_net_kernel_terminated);
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next)
no_dist_port++;
@@ -464,7 +464,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
/* KILL all port controllers */
if (no_dist_port == 0)
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
else {
Eterm def_buf[128];
int i = 0;
@@ -483,7 +483,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
ASSERT(is_internal_port(tdep->cid));
dist_port[i++] = tdep->cid;
}
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
for (i = 0; i < no_dist_port; i++) {
Port *prt = erts_port_lookup(dist_port[i],
@@ -851,9 +851,12 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
#ifdef USE_VM_PROBES
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote);
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", remote);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
@@ -908,9 +911,11 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
#ifdef USE_VM_PROBES
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(receiver_name),
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
@@ -971,11 +976,14 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
#ifdef USE_VM_PROBES
*node_name = *sender_name = *remote_name = '\0';
if (DTRACE_ENABLED(process_exit_signal_remote)) {
- erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(remote_name, sizeof(remote_name),
+ erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
+ "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(remote_name, sizeof(DTRACE_CHARBUF_NAME(remote_name)),
"{%T,%s}", remote, node_name);
- erts_snprintf(reason_str, sizeof(reason), "%T", reason);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)),
+ "%T", reason);
if (token != NIL && token != am_have_dt_utag) {
tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
@@ -1509,12 +1517,12 @@ int erts_net_message(Port *prt,
break;
}
rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+
+ erts_destroy_monitor(mon);
if (rp == NULL) {
break;
}
- erts_destroy_monitor(mon);
-
mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (mon == NULL) {
@@ -1797,8 +1805,9 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", cid);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", dep->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
@@ -1855,9 +1864,11 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
DTRACE_CHARBUF(remote_str, 64);
DTRACE_CHARBUF(pid_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", cid);
- erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", cid);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
+ "%T", dep->sysname);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)),
+ "%T", c_p->common.id);
DTRACE4(dist_port_busy, erts_this_node_sysname,
port_str, remote_str, pid_str);
}
@@ -1890,8 +1901,9 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
remote_str, size);
@@ -1944,8 +1956,9 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
remote_str, size);
@@ -2280,8 +2293,9 @@ erts_dist_port_not_busy(Port *prt)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
- erts_snprintf(remote_str, sizeof(remote_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
"%T", prt->dist_entry->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
@@ -2560,9 +2574,9 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN);
#ifdef DEBUG
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
#endif
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -2912,7 +2926,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
length = 0;
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
ASSERT(erts_no_of_not_connected_dist_entries >= 0);
ASSERT(erts_no_of_hidden_dist_entries >= 0);
@@ -2929,7 +2943,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
result = NIL;
if (length == 0) {
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
goto done;
}
@@ -2958,7 +2972,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
hp += 2;
}
ASSERT(endp == hp);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
done:
UnUseTmpHeap(2,BIF_P);
@@ -3246,10 +3260,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
DTRACE_CHARBUF(type_str, 12);
DTRACE_CHARBUF(reason_str, 64);
- erts_snprintf(what_str, sizeof(what_str), "%T", what);
- erts_snprintf(node_str, sizeof(node_str), "%T", node);
- erts_snprintf(type_str, sizeof(type_str), "%T", type);
- erts_snprintf(reason_str, sizeof(reason_str), "%T", reason);
+ erts_snprintf(what_str, sizeof(DTRACE_CHARBUF_NAME(what_str)), "%T", what);
+ erts_snprintf(node_str, sizeof(DTRACE_CHARBUF_NAME(node_str)), "%T", node);
+ erts_snprintf(type_str, sizeof(DTRACE_CHARBUF_NAME(type_str)), "%T", type);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)), "%T", reason);
DTRACE5(dist_monitor, erts_this_node_sysname,
what_str, node_str, type_str, reason_str);
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index ff8f5e106f..f32b999198 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,13 +40,16 @@
#define DFLAG_SMALL_ATOM_TAGS 0x4000
#define DFLAG_INTERNAL_TAGS 0x8000
#define DFLAG_UTF8_ATOMS 0x10000
+#define DFLAG_MAP_TAG 0x20000
/* All flags that should be enabled when term_to_binary/1 is used. */
#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
| DFLAG_NEW_FUN_TAGS \
+ | DFLAG_NEW_FLOATS \
| DFLAG_EXTENDED_PIDS_PORTS \
| DFLAG_EXPORT_PTR_TAG \
- | DFLAG_BIT_BINARIES)
+ | DFLAG_BIT_BINARIES \
+ | DFLAG_MAP_TAG)
/* opcodes used in distribution messages */
#define DOP_LINK 1
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index 5850f80843..eca4e3b3bb 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -48,10 +48,9 @@ struct AFFreeBlock_t_ {
#define MIN_MBC_FIRST_FREE_SZ (4*1024)
/* Prototypes of callback functions */
-static Block_t * get_free_block (Allctr_t *, Uint,
- Block_t *, Uint, Uint32);
-static void link_free_block (Allctr_t *, Block_t *, Uint32);
-static void unlink_free_block (Allctr_t *, Block_t *, Uint32);
+static Block_t * get_free_block (Allctr_t *, Uint, Block_t *, Uint);
+static void link_free_block (Allctr_t *, Block_t *);
+static void unlink_free_block (Allctr_t *, Block_t *);
static Eterm info_options (Allctr_t *, char *, int *,
@@ -84,8 +83,7 @@ erts_afalc_start(AFAllctr_t *afallctr,
sys_memcpy((void *) afallctr, (void *) &zero.allctr, sizeof(AFAllctr_t));
- init->sbmbct = 0; /* Small mbc not supported by afit */
-
+ allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(AFFreeBlock_t);
@@ -100,6 +98,9 @@ erts_afalc_start(AFAllctr_t *afallctr,
allctr->get_next_mbc_size = NULL;
allctr->creating_mbc = NULL;
allctr->destroying_mbc = NULL;
+ allctr->add_mbc = NULL;
+ allctr->remove_mbc = NULL;
+ allctr->largest_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -116,8 +117,7 @@ erts_afalc_start(AFAllctr_t *afallctr,
}
static Block_t *
-get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size,
- Uint32 flags)
+get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size)
{
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
@@ -135,7 +135,7 @@ get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size,
}
static void
-link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+link_free_block(Allctr_t *allctr, Block_t *block)
{
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
@@ -156,7 +156,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
static void
-unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+unlink_free_block(Allctr_t *allctr, Block_t *block)
{
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
@@ -259,10 +259,10 @@ info_options(Allctr_t *allctr,
* to erts_afalc_test() *
\* */
-unsigned long
-erts_afalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+UWord
+erts_afalc_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- default: ASSERT(0); return ~((unsigned long) 0);
+ default: ASSERT(0); return ~((UWord) 0);
}
}
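
The switch from unsigned long to UWord in the test interface is portability, not cosmetics: on LP64 Unix systems unsigned long is 64 bits wide, but on 64-bit Windows (LLP64) it is only 32, so pointer-sized operands would be truncated. A sketch of the distinction, with uintptr_t standing in for UWord (an assumption; UWord is pointer-sized):

    #include <stdint.h>

    typedef uintptr_t UWord;  /* stand-in: pointer-sized on every ABI   */

    /* On Win64: sizeof(unsigned long) == 4 but sizeof(UWord) == 8, so a
     * pointer round-tripped through unsigned long would lose bits.     */
    static UWord from_pointer(void *p) { return (UWord) p; }
    static void *to_pointer(UWord w)   { return (void *) w; }
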
diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h
index cf7b99c463..b90ac8f7c5 100644
--- a/erts/emulator/beam/erl_afit_alloc.h
+++ b/erts/emulator/beam/erl_afit_alloc.h
@@ -56,7 +56,7 @@ struct AFAllctr_t_ {
AFFreeBlock_t * free_list;
};
-unsigned long erts_afalc_test(unsigned long, unsigned long, unsigned long);
+UWord erts_afalc_test(UWord, UWord, UWord);
#endif /* #if defined(GET_ERL_AF_ALLOC_IMPL)
&& !defined(ERL_AF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index d748f86d75..90cd227fae 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -71,6 +71,23 @@
#define AU_ALLOC_DEFAULT_ENABLE(X) (X)
#endif
+#define ERTS_ALC_DEFAULT_ENABLED_ACUL 60
+#define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
+#define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
+
+#define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
+#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
+#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
+
+#ifndef ERTS_SMP
+# undef ERTS_ALC_DEFAULT_ACUL
+# define ERTS_ALC_DEFAULT_ACUL 0
+# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC
+# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0
+# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC
+# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0
+#endif
+
#ifdef DEBUG
static Uint install_debug_functions(void);
#if 0
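
The new acul ("abandon carrier utilization limit") defaults above are percentages: once a multi-block carrier's utilization drops below the limit, it becomes a candidate for abandonment into the carrier pool, where other allocator instances can fetch it. A minimal sketch of that threshold check; the struct fields here are illustrative, not the actual Carrier_t layout:

    #include <stddef.h>

    struct crr_stats {
        size_t blocks_size;    /* bytes currently allocated in carrier */
        size_t carrier_size;   /* total carrier size in bytes          */
    };

    static int is_abandon_candidate(const struct crr_stats *c, unsigned acul)
    {
        if (acul == 0)         /* 0 disables carrier migration         */
            return 0;
        return (c->blocks_size * 100) / c->carrier_size < acul;
    }
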
@@ -83,6 +100,8 @@ static Uint install_debug_functions(void);
#endif
#endif
+static int lock_all_physical_memory = 0;
+
ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
@@ -101,11 +120,9 @@ typedef union {
char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
} ErtsAllocatorState_t;
-static ErtsAllocatorState_t sbmbc_alloc_state;
static ErtsAllocatorState_t std_alloc_state;
static ErtsAllocatorState_t ll_alloc_state;
#if HALFWORD_HEAP
-static ErtsAllocatorState_t sbmbc_low_alloc_state;
static ErtsAllocatorState_t std_low_alloc_state;
static ErtsAllocatorState_t ll_low_alloc_state;
#endif
@@ -120,6 +137,7 @@ static ErtsAllocatorState_t fix_alloc_state;
typedef struct {
erts_smp_atomic32_t refc;
int only_sz;
+ int internal;
Uint req_sched;
Process *proc;
Eterm ref;
@@ -162,6 +180,7 @@ enum allctr_type {
struct au_init {
int enable;
int thr_spec;
+ int carrier_migration_allowed;
enum allctr_type atype;
struct {
AllctrInit_t util;
@@ -200,7 +219,6 @@ typedef struct {
char *mtrace;
char *nodename;
} instr;
- struct au_init sbmbc_alloc;
struct au_init sl_alloc;
struct au_init std_alloc;
struct au_init ll_alloc;
@@ -211,13 +229,12 @@ typedef struct {
struct au_init driver_alloc;
struct au_init fix_alloc;
#if HALFWORD_HEAP
- struct au_init sbmbc_low_alloc;
struct au_init std_low_alloc;
struct au_init ll_low_alloc;
#endif
} erts_alc_hndl_args_init_t;
-#define ERTS_AU_INIT__ {0, 0, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
+#define ERTS_AU_INIT__ {0, 0, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
#define SET_DEFAULT_ALLOC_OPTS(IP) \
do { \
@@ -226,34 +243,6 @@ do { \
} while (0)
static void
-set_default_sbmbc_alloc_opts(struct au_init *ip)
-{
- SET_DEFAULT_ALLOC_OPTS(ip);
- ip->enable = 0;
- ip->thr_spec = 0;
- ip->atype = BESTFIT;
- ip->init.bf.ao = 1;
- ip->init.util.ramv = 0;
- ip->init.util.mmsbc = 0;
- ip->init.util.mmmbc = 500;
- ip->init.util.sbct = ~((UWord) 0);
- ip->init.util.name_prefix = "sbmbc_";
- ip->init.util.alloc_no = ERTS_ALC_A_SBMBC;
-#ifndef SMALL_MEMORY
- ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
-#else
- ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
-#endif
- ip->init.util.ts = ERTS_ALC_MTA_SBMBC;
- ip->init.util.asbcst = 0;
- ip->init.util.rsbcst = 0;
- ip->init.util.rsbcmt = 0;
- ip->init.util.rmbcmt = 0;
- ip->init.util.sbmbct = 0;
- ip->init.util.sbmbcs = 0;
-}
-
-static void
set_default_sl_alloc_opts(struct au_init *ip)
{
SET_DEFAULT_ALLOC_OPTS(ip);
@@ -261,7 +250,6 @@ set_default_sl_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->atype = GOODFIT;
ip->init.util.name_prefix = "sl_";
- ip->init.util.mmmbc = 5;
ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
#ifndef SMALL_MEMORY
ip->init.util.mmbcs = 128*1024; /* Main carrier size */
@@ -274,7 +262,7 @@ set_default_sl_alloc_opts(struct au_init *ip)
ip->init.util.force = 1;
ip->init.util.low_mem = 1;
#endif
-
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
static void
@@ -285,7 +273,6 @@ set_default_std_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->atype = BESTFIT;
ip->init.util.name_prefix = "std_";
- ip->init.util.mmmbc = 5;
ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
#ifndef SMALL_MEMORY
ip->init.util.mmbcs = 128*1024; /* Main carrier size */
@@ -293,6 +280,7 @@ set_default_std_alloc_opts(struct au_init *ip)
ip->init.util.mmbcs = 32*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
static void
@@ -305,7 +293,6 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.bf.ao = 1;
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
- ip->init.util.mmmbc = 0;
ip->init.util.sbct = ~((UWord) 0);
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
@@ -319,8 +306,7 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.rsbcst = 0;
ip->init.util.rsbcmt = 0;
ip->init.util.rmbcmt = 0;
- ip->init.util.sbmbct = 0;
- ip->init.util.sbmbcs = 0;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC;
}
static void
@@ -329,6 +315,7 @@ set_default_temp_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
+ ip->carrier_migration_allowed = 0;
ip->atype = AFIT;
ip->init.util.name_prefix = "temp_";
ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
@@ -353,7 +340,6 @@ set_default_eheap_alloc_opts(struct au_init *ip)
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
ip->atype = GOODFIT;
- ip->init.util.mmmbc = 100;
ip->init.util.name_prefix = "eheap_";
ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
#ifndef SMALL_MEMORY
@@ -367,6 +353,7 @@ set_default_eheap_alloc_opts(struct au_init *ip)
ip->init.util.force = 1;
ip->init.util.low_mem = 1;
#endif
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
}
static void
@@ -376,7 +363,6 @@ set_default_binary_alloc_opts(struct au_init *ip)
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
ip->atype = BESTFIT;
- ip->init.util.mmmbc = 50;
ip->init.util.name_prefix = "binary_";
ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
#ifndef SMALL_MEMORY
@@ -385,6 +371,7 @@ set_default_binary_alloc_opts(struct au_init *ip)
ip->init.util.mmbcs = 32*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_BINARY;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
static void
@@ -394,7 +381,6 @@ set_default_ets_alloc_opts(struct au_init *ip)
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
ip->atype = BESTFIT;
- ip->init.util.mmmbc = 100;
ip->init.util.name_prefix = "ets_";
ip->init.util.alloc_no = ERTS_ALC_A_ETS;
#ifndef SMALL_MEMORY
@@ -403,6 +389,7 @@ set_default_ets_alloc_opts(struct au_init *ip)
ip->init.util.mmbcs = 32*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_ETS;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
static void
@@ -420,6 +407,7 @@ set_default_driver_alloc_opts(struct au_init *ip)
ip->init.util.mmbcs = 32*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
static void
@@ -440,6 +428,7 @@ set_default_fix_alloc_opts(struct au_init *ip,
ip->init.util.mmbcs = 128*1024; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
+ ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
}
#ifdef ERTS_SMP
@@ -462,13 +451,6 @@ adjust_tpref(struct au_init *ip, int no_sched)
/* ... shrink smallest multi-block carrier size */
if (ip->default_.smbcs)
ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
- /* ... and more than three allocators shrink
- max mseg multi-block carriers */
- if (ip->default_.mmmbc && no_sched > 2) {
- ip->init.util.mmmbc /= ERTS_MIN(4, no_sched - 1);
- if (ip->init.util.mmmbc < 3)
- ip->init.util.mmmbc = 3;
- }
}
}
@@ -495,6 +477,70 @@ refuse_af_strategy(struct au_init *init)
static void hdbg_init(void);
#endif
+static void adjust_fix_alloc_sizes(UWord extra_block_size)
+{
+
+ if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
+ int j;
+
+#ifdef ERTS_SMP
+ if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
+ int i;
+ ErtsAllocatorThrSpec_t* tspec;
+
+ tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
+ ASSERT(tspec->enabled);
+
+ for (i=0; i < tspec->size; i++) {
+ Allctr_t* allctr = tspec->allctr[i];
+ for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
+ allctr->fix[j].type_size += extra_block_size;
+ }
+ }
+ }
+ else
+#endif
+ {
+ Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
+ for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
+ allctr->fix[j].type_size += extra_block_size;
+ }
+ }
+ }
+}
+
+static ERTS_INLINE int
+strategy_support_carrier_migration(struct au_init *auip)
+{
+ /*
+ * Currently only aoff, aoffcbf and aoffcaobf support carrier
+ * migration, i.e., type AOFIRSTFIT.

+ */
+ return auip->atype == AOFIRSTFIT;
+}
+
+static ERTS_INLINE void
+adjust_carrier_migration_support(struct au_init *auip)
+{
+#ifdef ERTS_SMP
+ if (auip->init.util.acul) {
+ auip->thr_spec = -1; /* Need thread preferred */
+
+ /*
+ * If strategy cannot handle carrier migration,
+ * default to a strategy that can...
+ */
+ if (!strategy_support_carrier_migration(auip)) {
+ /* Default to aoffcbf */
+ auip->atype = AOFIRSTFIT;
+ auip->init.aoff.flavor = AOFF_BF;
+ }
+ }
+#else
+ auip->init.util.acul = 0;
+#endif
+}
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
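
Given the parsing added to handle_au_arg() further down, carrier migration is driven entirely from the command line. A usage sketch, per the +M<S> switch syntax used throughout erl_alloc.c:

    erl +MBas aoffcbf +MBacul 60

selects the aoffcbf strategy and a 60 percent abandon limit for binary_alloc; the special value "de" picks the per-allocator default defined earlier.
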
@@ -515,9 +561,9 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= sizeof(Process);
#if !HALFWORD_HEAP
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR_SH)]
- = ERTS_MONITOR_SH_SIZE;
+ = ERTS_MONITOR_SH_SIZE * sizeof(Uint);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
- = ERTS_LINK_SH_SIZE;
+ = ERTS_LINK_SH_SIZE * sizeof(Uint);
#endif
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
= sizeof(ErtsDrvEventDataState);
@@ -533,15 +579,18 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
hdbg_init();
#endif
- erts_have_sbmbc_alloc = 0;
+ lock_all_physical_memory = 0;
+
ncpu = eaiop->ncpu;
if (ncpu < 1)
ncpu = 1;
+ erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
+ "erts_allctr_prelock_tsd_key");
+
erts_sys_alloc_init();
erts_init_utils_mem();
- set_default_sbmbc_alloc_opts(&init.sbmbc_alloc);
set_default_sl_alloc_opts(&init.sl_alloc);
set_default_std_alloc_opts(&init.std_alloc);
set_default_ll_alloc_opts(&init.ll_alloc);
@@ -556,8 +605,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
if (argc && argv)
handle_args(argc, argv, &init);
+ if (lock_all_physical_memory) {
+#ifdef HAVE_MLOCKALL
+ errno = 0;
+ if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
+ int err = errno;
+ char *errstr = err ? strerror(err) : "unknown";
+ erl_exit(-1, "Failed to lock physical memory: %s (%d)\n",
+ errstr, err);
+ }
+#else
+ erl_exit(-1, "Failed to lock physical memory: Not supported\n");
+#endif
+ }
+
#ifndef ERTS_SMP
- init.sbmbc_alloc.thr_spec = 0;
init.sl_alloc.thr_spec = 0;
init.std_alloc.thr_spec = 0;
init.ll_alloc.thr_spec = 0;
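
The mlockall() call above backs the new "+Mlpm all" switch parsed further down. A standalone sketch of the same request outside the emulator:

    /* Pin all current and future pages in RAM. On Linux this needs
     * CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK. */
    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    int main(void)
    {
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            fprintf(stderr, "mlockall: %s\n", strerror(errno));
            return 1;
        }
        return 0;
    }
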
@@ -568,9 +630,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.fix_alloc.thr_spec = 0;
#endif
+ /* Make adjustments for carrier migration support */
+ init.temp_alloc.init.util.acul = 0;
+ adjust_carrier_migration_support(&init.sl_alloc);
+ adjust_carrier_migration_support(&init.std_alloc);
+ adjust_carrier_migration_support(&init.ll_alloc);
+ adjust_carrier_migration_support(&init.eheap_alloc);
+ adjust_carrier_migration_support(&init.binary_alloc);
+ adjust_carrier_migration_support(&init.ets_alloc);
+ adjust_carrier_migration_support(&init.driver_alloc);
+ adjust_carrier_migration_support(&init.fix_alloc);
+
if (init.erts_alloc_config) {
/* Adjust flags that erts_alloc_config won't like */
- init.sbmbc_alloc.thr_spec = 0;
+
+ /* No thread specific instances */
init.temp_alloc.thr_spec = 0;
init.sl_alloc.thr_spec = 0;
init.std_alloc.thr_spec = 0;
@@ -579,7 +653,18 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.binary_alloc.thr_spec = 0;
init.ets_alloc.thr_spec = 0;
init.driver_alloc.thr_spec = 0;
- init.fix_alloc.thr_spec = 0;
+ init.fix_alloc.thr_spec = 0;
+
+ /* No carrier migration */
+ init.temp_alloc.init.util.acul = 0;
+ init.sl_alloc.init.util.acul = 0;
+ init.std_alloc.init.util.acul = 0;
+ init.ll_alloc.init.util.acul = 0;
+ init.eheap_alloc.init.util.acul = 0;
+ init.binary_alloc.init.util.acul = 0;
+ init.ets_alloc.init.util.acul = 0;
+ init.driver_alloc.init.util.acul = 0;
+ init.fix_alloc.init.util.acul = 0;
}
#ifdef ERTS_SMP
@@ -588,7 +673,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.temp_alloc.thr_spec = erts_no_schedulers;
/* Others must use thread preferred interface */
- adjust_tpref(&init.sbmbc_alloc, erts_no_schedulers);
adjust_tpref(&init.sl_alloc, erts_no_schedulers);
adjust_tpref(&init.std_alloc, erts_no_schedulers);
adjust_tpref(&init.ll_alloc, erts_no_schedulers);
@@ -607,7 +691,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
* The following allocators cannot be run with afit strategy.
* Make sure they don't...
*/
- refuse_af_strategy(&init.sbmbc_alloc);
refuse_af_strategy(&init.sl_alloc);
refuse_af_strategy(&init.std_alloc);
refuse_af_strategy(&init.ll_alloc);
@@ -627,6 +710,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.mseg.nos = erts_no_schedulers;
erts_mseg_init(&init.mseg);
#endif
+
erts_alcu_init(&init.alloc_util);
erts_afalc_init();
erts_bfalc_init();
@@ -651,11 +735,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#if HALFWORD_HEAP
/* Init low memory variants by cloning */
- init.sbmbc_low_alloc = init.sbmbc_alloc;
- init.sbmbc_low_alloc.init.util.name_prefix = "sbmbc_low_";
- init.sbmbc_low_alloc.init.util.alloc_no = ERTS_ALC_A_SBMBC_LOW;
- init.sbmbc_low_alloc.init.util.low_mem = 1;
-
init.std_low_alloc = init.std_alloc;
init.std_low_alloc.init.util.name_prefix = "std_low_";
init.std_low_alloc.init.util.alloc_no = ERTS_ALC_A_STANDARD_LOW;
@@ -668,13 +747,11 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.ll_low_alloc.init.util.force = 1;
init.ll_low_alloc.init.util.low_mem = 1;
- set_au_allocator(ERTS_ALC_A_SBMBC_LOW, &init.sbmbc_low_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc, ncpu);
#endif /* HALFWORD */
set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
- set_au_allocator(ERTS_ALC_A_SBMBC, &init.sbmbc_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
@@ -701,20 +778,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
- /* sbmbc_alloc() needs to be started first */
- start_au_allocator(ERTS_ALC_A_SBMBC,
- &init.sbmbc_alloc,
- &sbmbc_alloc_state);
-#if HALFWORD_HEAP
- start_au_allocator(ERTS_ALC_A_SBMBC_LOW,
- &init.sbmbc_low_alloc,
- &sbmbc_low_alloc_state);
- erts_have_sbmbc_alloc = (init.sbmbc_alloc.enable
- && init.sbmbc_low_alloc.enable);
-#else
- erts_have_sbmbc_alloc = init.sbmbc_alloc.enable;
-#endif
-
start_au_allocator(ERTS_ALC_A_TEMPORARY,
&init.temp_alloc,
&temp_alloc_state);
@@ -768,7 +831,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#ifdef DEBUG
extra_block_size += install_debug_functions();
#endif
-
+ adjust_fix_alloc_sizes(extra_block_size);
}
void
@@ -1104,6 +1167,26 @@ get_kb_value(char *param_end, char** argv, int* ip)
return ((Uint) tmp)*1024;
}
+static UWord
+get_mb_value(char *param_end, char** argv, int* ip)
+{
+ SWord tmp;
+ UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
+ char *rest;
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ errno = 0;
+ tmp = (SWord) ErtsStrToSint(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
+ bad_value(param, param_end, value);
+ if (max == (UWord) tmp)
+ return ~((UWord) 0);
+ else
+ return ((UWord) tmp)*1024*1024;
+}
+
+
+#if 0
static Uint
get_byte_value(char *param_end, char** argv, int* ip)
{
@@ -1117,6 +1200,7 @@ get_byte_value(char *param_end, char** argv, int* ip)
bad_value(param, param_end, value);
return (Uint) tmp;
}
+#endif
static Uint
get_amount_value(char *param_end, char** argv, int* ip)
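
get_mb_value() above guards against overflow by first computing the largest representable megabyte count; a request of exactly that maximum saturates to the all-ones value, which callers treat as "unlimited". The same conversion in isolation, with uintptr_t standing in for UWord:

    #include <stdint.h>

    typedef uintptr_t UWord;   /* assumption: UWord is pointer-sized */

    static UWord mb_to_bytes_saturating(UWord mb)
    {
        UWord max = ((~(UWord) 0) / (1024 * 1024)) + 1;
        return (mb >= max) ? ~((UWord) 0) : mb * 1024 * 1024;
    }
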
@@ -1132,17 +1216,57 @@ get_amount_value(char *param_end, char** argv, int* ip)
return (Uint) tmp;
}
+static Uint
+get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
+{
+ Sint tmp;
+ char *rest;
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ if (sys_strcmp(value, "de") == 0) {
+ switch (auip->init.util.alloc_no) {
+ case ERTS_ALC_A_LONG_LIVED:
+#if HALFWORD_HEAP
+ case ERTS_ALC_A_LONG_LIVED_LOW:
+#endif
+ return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
+ case ERTS_ALC_A_EHEAP:
+ return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
+ default:
+ return ERTS_ALC_DEFAULT_ENABLED_ACUL;
+ }
+ }
+ errno = 0;
+ tmp = (Sint) ErtsStrToSint(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0 || 100 < tmp)
+ bad_value(param, param_end, value);
+ return (Uint) tmp;
+}
+
static void
handle_au_arg(struct au_init *auip,
char* sub_param,
char** argv,
- int* ip)
+ int* ip,
+ int u_switch)
{
char *param = argv[*ip]+1;
switch (sub_param[0]) {
case 'a':
- if(has_prefix("asbcst", sub_param)) {
+ if (has_prefix("acul", sub_param)) {
+ if (!auip->carrier_migration_allowed) {
+ if (!u_switch)
+ goto bad_switch;
+ else {
+ /* ignore */
+ (void) get_acul_value(auip, sub_param + 4, argv, ip);
+ break;
+ }
+ }
+ auip->init.util.acul = get_acul_value(auip, sub_param + 4, argv, ip);
+ }
+ else if(has_prefix("asbcst", sub_param)) {
auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
}
else if(has_prefix("as", sub_param)) {
@@ -1163,21 +1287,27 @@ handle_au_arg(struct au_init *auip,
}
else if (strcmp("aoff", alg) == 0) {
auip->atype = AOFIRSTFIT;
+ auip->init.aoff.flavor = AOFF_AOFF;
+ }
+ else if (strcmp("aoffcbf", alg) == 0) {
+ auip->atype = AOFIRSTFIT;
+ auip->init.aoff.flavor = AOFF_BF;
+ }
+ else if (strcmp("aoffcaobf", alg) == 0) {
+ auip->atype = AOFIRSTFIT;
+ auip->init.aoff.flavor = AOFF_AOBF;
}
else {
bad_value(param, sub_param + 1, alg);
}
+ if (!strategy_support_carrier_migration(auip))
+ auip->init.util.acul = 0;
}
else
goto bad_switch;
break;
case 'e':
auip->enable = get_bool_value(sub_param+1, argv, ip);
-#if !HAVE_ERTS_SBMBC
- if (auip->init.util.alloc_no == ERTS_ALC_A_SBMBC) {
- auip->enable = 0;
- }
-#endif
break;
case 'l':
if (has_prefix("lmbcs", sub_param)) {
@@ -1237,18 +1367,6 @@ handle_au_arg(struct au_init *auip,
if(has_prefix("sbct", sub_param)) {
auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
}
- else if (has_prefix("sbmbcs", sub_param)) {
-#if HAVE_ERTS_SBMBC
- auip->init.util.sbmbcs =
-#endif
- get_byte_value(sub_param + 6, argv, ip);
- }
- else if (has_prefix("sbmbct", sub_param)) {
-#if HAVE_ERTS_SBMBC
- auip->init.util.sbmbct =
-#endif
- get_byte_value(sub_param + 6, argv, ip);
- }
else if (has_prefix("smbcs", sub_param)) {
auip->default_.smbcs = 0;
auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
@@ -1264,6 +1382,7 @@ handle_au_arg(struct au_init *auip,
}
else if (res == 0) {
auip->thr_spec = 0;
+ auip->init.util.acul = 0;
break;
}
goto bad_switch;
@@ -1278,7 +1397,6 @@ static void
handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
{
struct au_init *aui[] = {
- &init->sbmbc_alloc,
&init->binary_alloc,
&init->std_alloc,
&init->ets_alloc,
@@ -1305,25 +1423,22 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'M':
switch (argv[i][2]) {
case 'B':
- handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i);
- break;
- case 'C':
- handle_au_arg(&init->sbmbc_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
break;
case 'D':
- handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
break;
case 'E':
- handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0);
break;
case 'F':
- handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0);
break;
case 'H':
- handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0);
break;
case 'L':
- handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0);
break;
case 'M':
if (has_prefix("amcbf", argv[i]+3)) {
@@ -1344,18 +1459,42 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
#endif
get_amount_value(argv[i]+6, argv, &i);
}
+ else if (has_prefix("scs", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scs =
+#endif
+ get_mb_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("sco", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.sco =
+#endif
+ get_bool_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("scrpm", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scrpm =
+#endif
+ get_bool_value(argv[i]+8, argv, &i);
+ }
+ else if (has_prefix("scrfsd", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mmap.scrfsd =
+#endif
+ get_amount_value(argv[i]+9, argv, &i);
+ }
else {
bad_param(param, param+2);
}
break;
case 'R':
- handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0);
break;
case 'S':
- handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0);
break;
case 'T':
- handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i);
+ handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
break;
case 'Y': { /* sys_alloc */
if (has_prefix("tt", param+2)) {
@@ -1414,9 +1553,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
else if (strcmp("max", arg) == 0) {
for (a = 0; a < aui_sz; a++)
aui[a]->enable = 1;
-#if !HAVE_ERTS_SBMBC
- init->sbmbc_alloc.enable = 0;
-#endif
}
else if (strcmp("config", arg) == 0) {
init->erts_alloc_config = 1;
@@ -1444,8 +1580,8 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
for (a = 0; a < aui_sz; a++) {
aui[a]->thr_spec = 0;
+ aui[a]->init.util.acul = 0;
aui[a]->init.util.ramv = 0;
- aui[a]->init.util.mmmbc = 10;
aui[a]->init.util.lmbcs = 5*1024*1024;
}
}
@@ -1485,6 +1621,19 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
bad_param(param, param+2);
}
break;
+ case 'l':
+ if (has_prefix("pm", param+2)) {
+ arg = get_value(argv[i]+5, argv, &i);
+ if (strcmp("all", arg) == 0)
+ lock_all_physical_memory = 1;
+ else if (strcmp("no", arg) == 0)
+ lock_all_physical_memory = 0;
+ else
+ bad_value(param, param+4, arg);
+ break;
+ }
+ bad_param(param, param+2);
+ break;
case 'u':
if (has_prefix("ycs", argv[i]+3)) {
init->alloc_util.ycs
@@ -1494,6 +1643,10 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
init->alloc_util.mmc
= get_amount_value(argv[i]+6, argv, &i);
}
+ else if (has_prefix("sac", argv[i]+3)) {
+ init->alloc_util.sac
+ = get_bool_value(argv[i]+6, argv, &i);
+ }
else {
int a;
int start = i;
@@ -1508,7 +1661,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
argv[start + 1] = val;
i = start;
}
- handle_au_arg(aui[a], &argv[i][3], argv, &i);
+ handle_au_arg(aui[a], &argv[i][3], argv, &i, 1);
}
}
break;
@@ -1574,6 +1727,9 @@ erts_alloc_register_scheduler(void *vesdp)
int ix = (int) esdp->no;
int aix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
esdp->alloc_data.deallctr[aix] = NULL;
@@ -1717,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
size = va_arg(argp, Uint);
va_end(argp);
erl_exit(1,
- "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
- allctr_str, op, size, t_str);
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n",
+ allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX());
break;
}
case ERTS_ALC_E_NOALLCTR:
@@ -2054,15 +2210,11 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
return am_badarg;
}
- /* All alloc_util allocators except sbmbc_alloc *have* to be enabled */
+ /* All alloc_util allocators *have* to be enabled */
for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
switch (ai) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_A_SBMBC:
-#if HALFWORD_HEAP
- case ERTS_ALC_A_SBMBC_LOW:
-#endif
break;
default:
if (!erts_allctrs_info[ai].enabled
@@ -2102,12 +2254,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
* Often not thread safe and usually never
* contain any allocated memory.
*/
- case ERTS_ALC_A_SBMBC:
- /* Included in other allocators */
-#if HALFWORD_HEAP
- case ERTS_ALC_A_SBMBC_LOW:
- /* Included in other allocators */
-#endif
continue;
case ERTS_ALC_A_EHEAP:
save = &size.processes;
@@ -2142,7 +2288,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want_tot_or_sys || want.processes || want.processes_used) {
- int max_processes = erts_ptab_max(&erts_proc);
UWord tmp;
if (ERTS_MEM_NEED_ALL_ALCU)
@@ -2152,7 +2297,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi, ERTS_ALC_NO_FIXED_SIZES);
tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
}
- tmp += max_processes*sizeof(erts_smp_atomic_t);
+ tmp += erts_ptab_mem_size(&erts_proc);
tmp += erts_bif_timer_memory_size();
tmp += erts_tot_link_lh_size();
@@ -2278,13 +2423,11 @@ struct aa_values {
Eterm
erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
{
-#define MAX_AA_VALUES (23)
+#define MAX_AA_VALUES (24)
struct aa_values values[MAX_AA_VALUES];
Eterm res = THE_NON_VALUE;
int i, length;
Uint reserved_atom_space, atom_space;
- int max_processes = erts_ptab_max(&erts_proc);
- int max_ports = erts_ptab_max(&erts_port);
if (proc) {
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
@@ -2315,8 +2458,8 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "static";
- values[i].ui[0] =
- max_ports*sizeof(erts_smp_atomic_t) /* Port table */
+ values[i].ui[0] =
+ sizeof(ErtsPTab)*2 /* proc & port tables */
+ erts_timer_wheel_memory_size(); /* Timer wheel */
i++;
@@ -2395,7 +2538,12 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "process_table";
- values[i].ui[0] = max_processes*sizeof(Process*);
+ values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "port_table";
+ values[i].ui[0] = erts_ptab_mem_size(&erts_port);
i++;
values[i].arity = 2;
@@ -2549,7 +2697,7 @@ erts_allocator_info(int to, void *arg)
as = erts_allctr_thr_spec[a].allctr[ai];
}
/* Binary alloc has its own thread safety... */
- erts_alcu_info(as, 0, &to, arg, NULL, NULL);
+ erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL);
}
else {
switch (a) {
@@ -2575,6 +2723,7 @@ erts_allocator_info(int to, void *arg)
#if HAVE_ERTS_MSEG
{
+ struct erts_mmap_info_struct emis;
#ifdef ERTS_SMP
int max = (int) erts_no_schedulers;
#else
@@ -2585,6 +2734,8 @@ erts_allocator_info(int to, void *arg)
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
erts_mseg_info(i, &to, arg, 0, NULL, NULL);
}
+ erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n");
+ erts_mmap_info(&to, arg, NULL, NULL, &emis);
}
#endif
@@ -2609,8 +2760,8 @@ erts_allocator_options(void *proc)
#endif
Uint sz, *szp, *hp, **hpp;
Eterm res, features, settings;
- Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
- Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
+ Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
+ Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
int a, length;
SysAllocStat sas;
Uint *endp = NULL;
@@ -2708,6 +2859,11 @@ erts_allocator_options(void *proc)
terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
}
+ atoms[length] = am_atom_put("lock_physical_memory", 20);
+ terms[length++] = (lock_all_physical_memory
+ ? am_atom_put("all", 3)
+ : am_atom_put("no", 2));
+
settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
length = 0;
@@ -2723,6 +2879,9 @@ erts_allocator_options(void *proc)
if (use_mseg)
terms[length++] = am_atom_put("mseg_alloc", 10);
#endif
+#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ terms[length++] = am_atom_put("sys_aligned_alloc", 17);
+#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
@@ -2808,9 +2967,11 @@ reply_alloc_info(void *vair)
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
+ struct erts_mmap_info_struct emis;
int i;
Eterm (*info_func)(Allctr_t *,
int,
+ int,
int *,
void *,
Uint **,
@@ -2919,15 +3080,23 @@ reply_alloc_info(void *vair)
? NIL
: erts_mseg_info(0, NULL, NULL, hpp != NULL,
hpp, szp));
- ainfo = erts_bld_tuple(hpp, szp, 3,
- alloc_atom,
- make_small(0),
- ainfo);
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL : erts_mmap_info(NULL, NULL, hpp, szp, &emis));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"erts_mmap"),
+ ainfo);
#else
- ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
- am_false);
+ ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
+ am_false);
#endif
- break;
+ break;
default:
alloc_atom = erts_bld_atom(hpp, szp,
(char *) ERTS_ALC_A2AD(ai));
@@ -2939,8 +3108,8 @@ reply_alloc_info(void *vair)
allctr = erts_allctr_thr_spec[ai].allctr[0];
else
allctr = erts_allctrs_info[ai].extra;
- ainfo = info_func(allctr, hpp != NULL, NULL,
- NULL, hpp, szp);
+ ainfo = info_func(allctr, air->internal, hpp != NULL,
+ NULL, NULL, hpp, szp);
ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
make_small(0), ainfo);
}
@@ -2975,7 +3144,7 @@ reply_alloc_info(void *vair)
alloc_atom = erts_bld_atom(hpp, szp,
(char *) ERTS_ALC_A2AD(ai));
allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
- ainfo = info_func(allctr, hpp != NULL, NULL,
+ ainfo = info_func(allctr, air->internal, hpp != NULL, NULL,
NULL, hpp, szp);
ai_list = erts_bld_cons(hpp, szp,
erts_bld_tuple(
@@ -3031,7 +3200,8 @@ int
erts_request_alloc_info(struct process *c_p,
Eterm ref,
Eterm allocs,
- int only_sz)
+ int only_sz,
+ int internal)
{
ErtsAllocInfoReq *air = aireq_alloc();
Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0};
@@ -3043,6 +3213,8 @@ erts_request_alloc_info(struct process *c_p,
air->only_sz = only_sz;
+ air->internal = internal;
+
air->proc = c_p;
if (is_not_internal_ref(ref))
@@ -3107,6 +3279,55 @@ erts_request_alloc_info(struct process *c_p,
return 1;
}
+/*
+ * The allocator wrapper prelocking stuff below is about the locking order.
+ * It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks
+ * during alloc/realloc/free.
+ *
+ * Some query functions in erl_alloc_util.c lock the allocator mutex and then
+ * use erts_printf, which in turn may call the sys allocator through the
+ * wrappers. To avoid breaking the locking order, these query functions
+ * first "pre-lock" all allocator wrappers.
+ */
+ErtsAllocatorWrapper_t *erts_allctr_wrappers;
+int erts_allctr_wrapper_prelocked = 0;
+erts_tsd_key_t erts_allctr_prelock_tsd_key;
+
+void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
+{
+ ASSERT(wrapper->lock && wrapper->unlock);
+ wrapper->next = erts_allctr_wrappers;
+ erts_allctr_wrappers = wrapper;
+}
+
+void erts_allctr_wrapper_pre_lock(void)
+{
+ if (erts_allctr_wrappers) {
+ ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
+ for ( ; wrapper; wrapper = wrapper->next) {
+ wrapper->lock();
+ }
+ ASSERT(!erts_allctr_wrapper_prelocked);
+ erts_allctr_wrapper_prelocked = 1;
+ erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
+ }
+}
+
+void erts_allctr_wrapper_pre_unlock(void)
+{
+ if (erts_allctr_wrappers) {
+ ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
+
+ erts_allctr_wrapper_prelocked = 0;
+ erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
+ for ( ; wrapper; wrapper = wrapper->next) {
+ wrapper->unlock();
+ }
+ }
+}
+
+
+
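
A wrapper opts in to pre-locking by registering lock/unlock callbacks; erts_allctr_wrapper_prelock_init() links it into the list walked by the pre-lock/pre-unlock pair above. A minimal sketch mirroring what erl_mtrace.c and erl_instrument.c do (the callback bodies are illustrative):

    static void my_wrap_lock(void)   { /* take the wrapper's mutex    */ }
    static void my_wrap_unlock(void) { /* release the wrapper's mutex */ }

    static ErtsAllocatorWrapper_t my_wrapper;

    void my_wrapper_init(void)
    {
        my_wrapper.lock   = my_wrap_lock;
        my_wrapper.unlock = my_wrap_unlock;
        erts_allctr_wrapper_prelock_init(&my_wrapper);
    }
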
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Deprecated functions *
* *
@@ -3136,10 +3357,7 @@ void *safe_realloc(void *ptr, Uint sz)
\* */
#define ERTS_ALC_TEST_ABORT erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
-UWord erts_alc_test(UWord op,
- UWord a1,
- UWord a2,
- UWord a3)
+UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
{
switch (op >> 8) {
case 0x0: return erts_alcu_test(op, a1, a2);
@@ -3191,14 +3409,13 @@ UWord erts_alc_test(UWord op,
init.atype = GOODFIT;
init.init.util.name_prefix = (char *) a1;
init.init.util.ts = a2 ? 1 : 0;
- init.init.util.sbmbct = 0;
if ((char **) a3) {
char **argv = (char **) a3;
int i = 0;
while (argv[i]) {
if (argv[i][0] == '-' && argv[i][1] == 't')
- handle_au_arg(&init, &argv[i][2], argv, &i);
+ handle_au_arg(&init, &argv[i][2], argv, &i, 0);
else
return (UWord) NULL;
i++;
@@ -3318,6 +3535,11 @@ UWord erts_alc_test(UWord op,
ERTS_ALC_TEST_ABORT;
break;
#endif /* #ifdef USE_THREADS */
+#ifdef ERTS_SMP
+ case 0xf13: return (UWord) 1;
+#else
+ case 0xf13: return (UWord) 0;
+#endif
default:
break;
}
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index ba5ec9c367..d3109b9432 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -54,6 +54,16 @@ void erts_sys_alloc_init(void);
void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
void erts_sys_free(ErtsAlcType_t, void *, void *);
+#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+/*
+ * Note 'alignment' must remain the same in calls to
+ * 'erts_sys_aligned_realloc()' and 'erts_sys_aligned_free()'
+ * as in the initial call to 'erts_sys_aligned_alloc()'.
+ */
+void *erts_sys_aligned_alloc(UWord alignment, UWord size);
+void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size);
+void erts_sys_aligned_free(UWord alignment, void *ptr);
+#endif
Eterm erts_memory(int *, void *, void *, Eterm);
Eterm erts_allocated_areas(int *, void *, void *);
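
The note above is the whole contract of the aligned interface: the alignment is not passed back to the caller by the allocator, so the caller must repeat it, presumably because the implementation derives its bookkeeping from it. A usage sketch against the declared signatures:

    void example(void)
    {
        void *p = erts_sys_aligned_alloc(64, 4096);       /* 64-byte aligned */
        p = erts_sys_aligned_realloc(64, p, 8192, 4096);  /* same alignment  */
        erts_sys_aligned_free(64, p);                     /* same alignment  */
    }
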
@@ -65,7 +75,7 @@ Eterm erts_allocator_options(void *proc);
struct process;
int erts_request_alloc_info(struct process *c_p, Eterm ref, Eterm allocs,
- int only_sz);
+ int only_sz, int internal);
#define ERTS_ALLOC_INIT_DEF_OPTS_INITER {0}
typedef struct {
@@ -100,14 +110,6 @@ UWord erts_alc_test(UWord,
#define ERTS_ALC_MIN_LONG_LIVED_TIME (10*60*1000)
-#if HALFWORD_HEAP
-#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \
- ((NO) == ERTS_ALC_A_SBMBC || (NO) == ERTS_ALC_A_SBMBC_LOW)
-#else
-#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \
- ((NO) == ERTS_ALC_A_SBMBC)
-#endif
-
typedef struct {
int alloc_util;
int enabled;
@@ -135,6 +137,18 @@ typedef struct {
extern ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
+typedef struct ErtsAllocatorWrapper_t_ {
+ void (*lock)(void);
+ void (*unlock)(void);
+ struct ErtsAllocatorWrapper_t_* next;
+}ErtsAllocatorWrapper_t;
+extern ErtsAllocatorWrapper_t *erts_allctr_wrappers;
+extern int erts_allctr_wrapper_prelocked;
+extern erts_tsd_key_t erts_allctr_prelock_tsd_key;
+void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper);
+void erts_allctr_wrapper_pre_lock(void);
+void erts_allctr_wrapper_pre_unlock(void);
+
void erts_alloc_register_scheduler(void *vesdp);
#ifdef ERTS_SMP
void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
@@ -188,14 +202,15 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
void erts_free(ErtsAlcType_t type, void *ptr);
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
+int erts_is_allctr_wrapper_prelocked(void);
#endif /* #if !ERTS_ALC_DO_INLINE */
void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
#ifndef ERTS_CACHE_LINE_SIZE
-/* Assume a cache line size of 64 bytes */
-# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
+/* Assumed cache line size */
+# define ERTS_CACHE_LINE_SIZE ((UWord) ASSUMED_CACHE_LINE_SIZE)
# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
#endif
@@ -258,6 +273,13 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
size);
}
+ERTS_ALC_INLINE
+int erts_is_allctr_wrapper_prelocked(void)
+{
+ return erts_allctr_wrapper_prelocked /* locked */
+ && !!erts_tsd_get(erts_allctr_prelock_tsd_key); /* by me */
+}
+
#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
#define ERTS_ALC_GET_THR_IX() ((int) erts_get_scheduler_id())
@@ -470,7 +492,7 @@ static TYPE * \
NAME##_alloc(void) \
{ \
ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
- if (!esdp) \
+ if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp)) \
return NULL; \
return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
(int) esdp->no - 1); \
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 5e3615ccc2..21434eb117 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2013. All Rights Reserved.
+# Copyright Ericsson AB 2003-2014. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -76,11 +76,6 @@
allocator SYSTEM true sys_alloc
-allocator SBMBC true sbmbc_alloc
-+if halfword
-allocator SBMBC_LOW true sbmbc_low_alloc
-+endif
-
+if smp
allocator TEMPORARY true temp_alloc
@@ -146,7 +141,6 @@ class SYSTEM system_data
#
# <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>
-type SBMBC SBMBC SYSTEM small_block_mbc
type PROC FIXED_SIZE PROCESSES proc
type PORT DRIVER SYSTEM port
type ATOM LONG_LIVED ATOM atom_entry
@@ -156,6 +150,7 @@ type LINK_LH STANDARD PROCESSES link_lh
type SUSPEND_MON STANDARD PROCESSES suspend_monitor
type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
+type SAVED_ESTACK SHORT_LIVED PROCESSES saved_estack
type FUN_ENTRY LONG_LIVED CODE fun_entry
type ATOM_TXT LONG_LIVED ATOM atom_text
type BEAM_REGISTER EHEAP PROCESSES beam_register
@@ -272,6 +267,8 @@ type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
+type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
+type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
@@ -350,7 +347,6 @@ type SSB SHORT_LIVED PROCESSES ssb
+if halfword
-type SBMBC_LOW SBMBC_LOW SYSTEM small_block_mbc_low
type DDLL_PROCESS STANDARD_LOW SYSTEM ddll_processes
type MONITOR_LH STANDARD_LOW PROCESSES monitor_lh
type NLINK_LH STANDARD_LOW PROCESSES nlink_lh
@@ -360,11 +356,14 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD_LOW CODE nif_trap_export_entry
type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
+type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request
+type PORT_DATA_HEAP STANDARD_LOW SYSTEM port_data_heap
+else # "fullword"
@@ -377,11 +376,14 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
+type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
+type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+endif
@@ -396,11 +398,13 @@ type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
+type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
type POLL_RES_EVS LONG_LIVED SYSTEM poll_result_events
type FD_STATUS LONG_LIVED SYSTEM fd_status
+type SELECT_FDS LONG_LIVED SYSTEM select_fds
+if unix
@@ -414,6 +418,21 @@ type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+endif
++if ose
+
+type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
+type FD_TAB LONG_LIVED SYSTEM fd_tab
+type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
+type FD_SIG_LIST SHORT_LIVED SYSTEM fd_sig_list
+type DRV_EV STANDARD SYSTEM driver_event
+type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
+type ENVIRONMENT TEMPORARY SYSTEM environment
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+
++endif
+
+
+if win32
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
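
(A brief note on the file format, since several new types appear in this diff: each declaration follows the pattern quoted earlier in the file, type <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>, so a hypothetical "type FOO SHORT_LIVED SYSTEM foo" would bind the new type FOO to sl_alloc in the system class, with halfword-specific variants split across the +if halfword / +else blocks exactly as GC_INFO_REQ is above.)
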
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index ac7f420708..e3172dc4fb 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -71,31 +71,22 @@
#define ALLOC_ZERO_EQ_NULL 0
+#ifndef ERTS_MSEG_FLG_2POW
+# define ERTS_MSEG_FLG_2POW 0
+#endif
+#ifndef ERTS_MSEG_FLG_NONE
+# define ERTS_MSEG_FLG_NONE 0
+#endif
+
static int atoms_initialized = 0;
static int initialized = 0;
-int erts_have_sbmbc_alloc;
-
-#if HAVE_ERTS_MSEG
-
-#define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS
-#define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT)
-#define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT)
-
-#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
-#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK)
-
-#endif
-
#define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1))
#define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_CEILING(X) \
SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 0
/* Can be useful for debugging */
#define MBC_REALLOC_ALWAYS_MOVES
@@ -107,16 +98,16 @@ static Uint sys_alloc_carrier_size;
#if HAVE_ERTS_MSEG
static Uint max_mseg_carriers;
#endif
+static int allow_sys_alloc_carriers;
#define ONE_GIGA (1000000000)
-#define INC_CC(CC) ((CC).no == ONE_GIGA - 1 \
- ? ((CC).giga_no++, (CC).no = 0) \
- : (CC).no++)
+#define ERTS_ALC_CC_GIGA_VAL(CC) ((CC) / ONE_GIGA)
+#define ERTS_ALC_CC_VAL(CC) ((CC) % ONE_GIGA)
+
+#define INC_CC(CC) ((CC)++)
-#define DEC_CC(CC) ((CC).no == 0 \
- ? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \
- : (CC).no--)
+#define DEC_CC(CC) ((CC)--)
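
INC_CC/DEC_CC previously maintained a two-field {giga_no, no} pair on every bump; the counter is now a single word incremented directly and split into giga/remainder parts only when reported. An equivalent standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define ONE_GIGA 1000000000
    #define CC_GIGA_VAL(CC) ((CC) / ONE_GIGA)
    #define CC_VAL(CC)      ((CC) % ONE_GIGA)

    int main(void)
    {
        uint64_t calls = 3000000005ULL;          /* 3 giga-calls plus 5 */
        printf("%u:%u\n",
               (unsigned) CC_GIGA_VAL(calls),    /* 3 */
               (unsigned) CC_VAL(calls));        /* 5 */
        return 0;
    }
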
/* Multi block carrier (MBC) memory layout in R16:
@@ -173,14 +164,6 @@ MBC after deallocating first block:
#define SET_BLK_SZ_FTR(B, SZ) \
(((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
-#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
-#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
-#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
-
-/* Special flag combo for (allocated) SBC blocks
-*/
-#define SBC_BLK_HDR_FLG (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
-
#define SET_MBC_ABLK_SZ(B, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
(B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
@@ -208,9 +191,9 @@ MBC after deallocating first block:
#if MBC_ABLK_OFFSET_BITS
-# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS)
+# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << ERTS_SUPER_ALIGN_BITS)
-# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT)
+# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> ERTS_SACRR_UNIT_SHIFT)
# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
(ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
@@ -223,19 +206,8 @@ MBC after deallocating first block:
(B)->bhdr = ((Sz) | (F)), \
(B)->u.carrier = (C))
-# define ABLK_TO_MBC(B) \
- (ASSERT(IS_MBC_BLK(B) && IS_ALLOCED_BLK(B)), \
- (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \
- (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT))))
-
-# define FBLK_TO_MBC(B) \
- (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \
- (B)->u.carrier)
-
-# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
-
# define IS_MBC_FIRST_ABLK(AP,B) \
- ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
+ ((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
&& ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
# define IS_MBC_FIRST_FBLK(AP,B) \
@@ -272,10 +244,6 @@ MBC after deallocating first block:
(B)->bhdr = ((Sz) | (F)), \
(B)->carrier = (C))
-# define BLK_TO_MBC(B) ((B)->carrier)
-# define ABLK_TO_MBC(B) BLK_TO_MBC(B)
-# define FBLK_TO_MBC(B) BLK_TO_MBC(B)
-
# define IS_MBC_FIRST_BLK(AP,B) \
((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP))
# define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
@@ -300,8 +268,6 @@ MBC after deallocating first block:
((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define IS_PREV_BLK_ALLOCED(B) \
(!IS_PREV_BLK_FREE((B)))
-#define IS_FREE_BLK(B) \
- (ASSERT(!IS_SBC_BLK(B)), (B)->bhdr & THIS_FREE_BLK_HDR_FLG)
#define IS_ALLOCED_BLK(B) \
(!IS_FREE_BLK((B)))
#define IS_LAST_BLK(B) \
@@ -318,13 +284,6 @@ MBC after deallocating first block:
#define GET_BLK_HDR_FLGS(B) \
((B)->bhdr & FLG_MASK)
-#define IS_SBC_BLK(B) \
- (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
-#define IS_MBC_BLK(B) \
- (!IS_SBC_BLK((B)))
-
-#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B))
-
#define NXT_BLK(B) \
(ASSERT(IS_MBC_BLK(B)), \
(Block_t *) (((char *) (B)) + MBC_BLK_SZ((B))))
@@ -338,9 +297,97 @@ MBC after deallocating first block:
/* Carriers ... */
-#define SBC_HEADER_SIZE (UNIT_CEILING(sizeof(Carrier_t) + ABLK_HDR_SZ) \
- - ABLK_HDR_SZ)
-#define MBC_HEADER_SIZE(AP) SBC_HEADER_SIZE
+/* #define ERTS_ALC_CPOOL_DEBUG */
+
+#if defined(DEBUG) && !defined(ERTS_ALC_CPOOL_DEBUG)
+# define ERTS_ALC_CPOOL_DEBUG
+#endif
+
+#ifndef ERTS_SMP
+# undef ERTS_ALC_CPOOL_DEBUG
+#endif
+
+#ifdef ERTS_ALC_CPOOL_DEBUG
+# define ERTS_ALC_CPOOL_ASSERT(A) \
+ ((void) ((A) \
+ ? 1 \
+ : (erts_alcu_assert_failed(#A, \
+ (char *) __FILE__, \
+ __LINE__, \
+ (char *) __func__), \
+ 0)))
+#else
+# define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ERTS_SMP
+#define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit)
+#else
+#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0)
+#endif
+
+#ifdef ERTS_SMP
+
+#define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000
+#define ERTS_ALC_CPOOL_ALLOC_OP_INC 8
+#define ERTS_ALC_CPOOL_FREE_OP_DEC 10
+
+#define ERTS_ALC_CPOOL_ALLOC_OP(A) \
+do { \
+ if ((A)->cpool.disable_abandon < ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) { \
+ (A)->cpool.disable_abandon += ERTS_ALC_CPOOL_ALLOC_OP_INC; \
+ if ((A)->cpool.disable_abandon > ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) \
+ (A)->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; \
+ } \
+} while (0)
+
+
+#if ERTS_ALC_CPOOL_ALLOC_OP_INC >= ERTS_ALC_CPOOL_FREE_OP_DEC
+# error "Implementation assume ERTS_ALC_CPOOL_ALLOC_OP_INC < ERTS_ALC_CPOOL_FREE_OP_DEC"
+#endif
+
+#define ERTS_ALC_CPOOL_REALLOC_OP(A) \
+do { \
+ if ((A)->cpool.disable_abandon) { \
+ (A)->cpool.disable_abandon -= (ERTS_ALC_CPOOL_FREE_OP_DEC \
+ - ERTS_ALC_CPOOL_ALLOC_OP_INC); \
+ if ((A)->cpool.disable_abandon < 0) \
+ (A)->cpool.disable_abandon = 0; \
+ } \
+} while (0)
+
+#define ERTS_ALC_CPOOL_FREE_OP(A) \
+do { \
+ if ((A)->cpool.disable_abandon) { \
+ (A)->cpool.disable_abandon -= ERTS_ALC_CPOOL_FREE_OP_DEC; \
+ if ((A)->cpool.disable_abandon < 0) \
+ (A)->cpool.disable_abandon = 0; \
+ } \
+} while (0)
+
+#else
+#define ERTS_ALC_CPOOL_ALLOC_OP(A)
+#define ERTS_ALC_CPOOL_REALLOC_OP(A)
+#define ERTS_ALC_CPOOL_FREE_OP(A)
+#endif
+
+#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
+#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
+#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
+ ERTS_CRR_ALCTR_FLG_BUSY)
+
+#ifdef ERTS_SMP
+#define SBC_HEADER_SIZE \
+ (UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ + ABLK_HDR_SZ) \
+ - ABLK_HDR_SZ)
+#else
+#define SBC_HEADER_SIZE \
+ (UNIT_CEILING(sizeof(Carrier_t) \
+ + ABLK_HDR_SZ) \
+ - ABLK_HDR_SZ)
+#endif
+#define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
#define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0)
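
The disable_abandon counter driven by the ERTS_ALC_CPOOL_*_OP macros above implements a simple hysteresis: every allocation nudges it up by 8 (saturating at 1000) and every free pulls it down by 10, and abandonment is evidently considered only while the counter is at zero, so an allocator that is shrinking soon becomes willing to abandon carriers again. The same logic in isolation:

    #define MAX_DISABLE_ABANDON 1000
    #define ALLOC_OP_INC 8
    #define FREE_OP_DEC 10

    static int disable_abandon;

    static void on_alloc(void)
    {
        if (disable_abandon < MAX_DISABLE_ABANDON) {
            disable_abandon += ALLOC_OP_INC;
            if (disable_abandon > MAX_DISABLE_ABANDON)
                disable_abandon = MAX_DISABLE_ABANDON;
        }
    }

    static void on_free(void)
    {
        if (disable_abandon) {
            disable_abandon -= FREE_OP_DEC;
            if (disable_abandon < 0)
                disable_abandon = 0;
        }
    }
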
@@ -352,7 +399,8 @@ MBC after deallocating first block:
#define SCH_SBC SBC_CARRIER_HDR_FLAG
#define SET_CARRIER_HDR(C, Sz, F, AP) \
- (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), (C)->allctr = (AP))
+ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
+ erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
#define BLK_TO_SBC(B) \
((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
@@ -385,6 +433,7 @@ MBC after deallocating first block:
#define CFLG_FORCE_SYS_ALLOC (1 << 3)
#define CFLG_FORCE_SIZE (1 << 4)
#define CFLG_MAIN_CARRIER (1 << 5)
+#define CFLG_NO_CPOOL (1 << 6)
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void check_blk_carrier(Allctr_t *, Block_t *);
@@ -412,11 +461,7 @@ static void check_blk_carrier(Allctr_t *, Block_t *);
ASSERT(((AP)->mbcs.curr.norm.sys_alloc.no \
&& (AP)->mbcs.curr.norm.sys_alloc.size) \
|| (!(AP)->mbcs.curr.norm.sys_alloc.no \
- && !(AP)->mbcs.curr.norm.sys_alloc.size)); \
- ASSERT(((AP)->sbmbcs.curr.small_block.no \
- && (AP)->sbmbcs.curr.small_block.size) \
- || (!(AP)->sbmbcs.curr.small_block.no \
- && !(AP)->sbmbcs.curr.small_block.size))
+ && !(AP)->mbcs.curr.norm.sys_alloc.size));
#else
#define DEBUG_CHECK_CARRIER_NO_SZ(AP)
@@ -487,17 +532,6 @@ do { \
+ (AP)->mbcs.curr.norm.sys_alloc.size)
-#define STAT_SBMBC_ALLOC(AP, CSZ) \
-do { \
- (AP)->sbmbcs.curr.small_block.no++; \
- (AP)->sbmbcs.curr.small_block.size += (CSZ); \
- if ((AP)->sbmbcs.max.no < (AP)->sbmbcs.curr.small_block.no) \
- (AP)->sbmbcs.max.no = (AP)->sbmbcs.curr.small_block.no; \
- if ((AP)->sbmbcs.max.size < (AP)->sbmbcs.curr.small_block.size) \
- (AP)->sbmbcs.max.size = (AP)->sbmbcs.curr.small_block.size; \
- DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
-} while (0)
-
#define STAT_MSEG_MBC_ALLOC(AP, CSZ) \
do { \
(AP)->mbcs.curr.norm.mseg.no++; \
@@ -514,13 +548,19 @@ do { \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_SBMBC_FREE(AP, CSZ) \
+#define STAT_MBC_CPOOL_FETCH(AP, CRR) \
do { \
- ASSERT((AP)->sbmbcs.curr.small_block.no > 0); \
- (AP)->sbmbcs.curr.small_block.no--; \
- ASSERT((AP)->sbmbcs.curr.small_block.size >= (CSZ)); \
- (AP)->sbmbcs.curr.small_block.size -= (CSZ); \
- DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+ UWord csz__ = CARRIER_SZ((CRR)); \
+ if (IS_MSEG_CARRIER((CRR))) \
+ STAT_MSEG_MBC_ALLOC((AP), csz__); \
+ else \
+ STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__); \
+ (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks; \
+ if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \
+ (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \
+ (AP)->mbcs.blocks.curr.size += (CRR)->cpool.blocks_size; \
+ if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \
+ (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \
} while (0)
#define STAT_MSEG_MBC_FREE(AP, CSZ) \
@@ -541,28 +581,88 @@ do { \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_MBC_BLK_ALLOC(AP, BSZ, FLGS) \
+#define STAT_MBC_CPOOL_INSERT(AP, CRR) \
+do { \
+ UWord csz__ = CARRIER_SZ((CRR)); \
+ if (IS_MSEG_CARRIER((CRR))) \
+ STAT_MSEG_MBC_FREE((AP), csz__); \
+ else \
+ STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \
+ ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \
+ >= (CRR)->cpool.blocks); \
+ (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks; \
+ ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \
+ >= (CRR)->cpool.blocks_size); \
+ (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \
+} while (0)
+
+#ifdef ERTS_SMP
+#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \
do { \
- CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \
- ? &(AP)->sbmbcs \
- : &(AP)->mbcs); \
+ (CRR)->cpool.blocks++; \
+ (CRR)->cpool.blocks_size += (BSZ); \
+} while (0)
+#else
+#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */
+#endif
+
+#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
+do { \
+ CarriersStats_t *cstats__ = &(AP)->mbcs; \
cstats__->blocks.curr.no++; \
if (cstats__->blocks.max.no < cstats__->blocks.curr.no) \
cstats__->blocks.max.no = cstats__->blocks.curr.no; \
cstats__->blocks.curr.size += (BSZ); \
if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \
cstats__->blocks.max.size = cstats__->blocks.curr.size; \
+ STAT_MBC_BLK_ALLOC_CRR((CRR), (BSZ)); \
} while (0)
-#define STAT_MBC_BLK_FREE(AP, BSZ, FLGS) \
+static ERTS_INLINE int
+stat_cpool_mbc_blk_free(Allctr_t *allctr,
+ Carrier_t *crr,
+ Carrier_t **busy_pcrr_pp,
+ UWord blksz)
+{
+#ifdef ERTS_SMP
+
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0);
+ crr->cpool.blocks--;
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size >= blksz);
+ crr->cpool.blocks_size -= blksz;
+
+ if (!busy_pcrr_pp || !*busy_pcrr_pp)
+ return 0;
+
+ ERTS_ALC_CPOOL_ASSERT(crr == *busy_pcrr_pp);
+
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ERTS_ALC_CPOOL_ASSERT(
+ erts_atomic_dec_read_nob(&allctr->cpool.stat.no_blocks) >= 0);
+ ERTS_ALC_CPOOL_ASSERT(
+ erts_atomic_add_read_nob(&allctr->cpool.stat.blocks_size,
+ -((erts_aint_t) blksz)) >= 0);
+#else
+ erts_atomic_dec_nob(&allctr->cpool.stat.no_blocks);
+ erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
+ -((erts_aint_t) blksz));
+#endif
+
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \
do { \
- CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \
- ? &(AP)->sbmbcs \
- : &(AP)->mbcs); \
- ASSERT(cstats__->blocks.curr.no > 0); \
- cstats__->blocks.curr.no--; \
- ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
- cstats__->blocks.curr.size -= (BSZ); \
+ if (!stat_cpool_mbc_blk_free((AP), (CRR), (BPCRRPP), (BSZ))) { \
+ CarriersStats_t *cstats__ = &(AP)->mbcs; \
+ ASSERT(cstats__->blocks.curr.no > 0); \
+ cstats__->blocks.curr.no--; \
+ ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
+ cstats__->blocks.curr.size -= (BSZ); \
+ } \
} while (0)
/* Debug stuff... */
@@ -588,17 +688,21 @@ do { \
#ifdef DEBUG
#ifdef USE_THREADS
+# ifdef ERTS_SMP
+# define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
+# else
+# define IS_ACTUALLY_BLOCKING 0
+# endif
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
- if (!(A)->thread_safe) { \
- if (!(A)->debug.saved_tid) { \
+ if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
+ if (!(A)->debug.saved_tid) { \
(A)->debug.tid = erts_thr_self(); \
(A)->debug.saved_tid = 1; \
} \
else { \
ERTS_SMP_LC_ASSERT( \
- ethr_equal_tids((A)->debug.tid, erts_thr_self()) \
- || erts_thr_progress_is_blocking()); \
+ ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
} \
} \
} while (0)
@@ -609,13 +713,42 @@ do { \
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
-
static void make_name_atoms(Allctr_t *allctr);
static Block_t *create_carrier(Allctr_t *, Uint, UWord);
-static void destroy_carrier(Allctr_t *, Block_t *);
-static void mbc_free(Allctr_t *allctr, void *p);
+static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
+static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
+static void dealloc_block(Allctr_t *, void *, int);
+
+/* internal data... */
+
+#if 0
+
+static ERTS_INLINE void *
+internal_alloc(UWord size)
+{
+ void *res = erts_sys_alloc(0, NULL, size);
+ if (!res)
+ erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
+ return res;
+}
+
+static ERTS_INLINE void *
+internal_realloc(void *ptr, UWord size)
+{
+ void *res = erts_sys_realloc(0, NULL, ptr, size);
+ if (!res)
+ erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
+ return res;
+}
+
+static ERTS_INLINE void
+internal_free(void *ptr)
+{
+ erts_sys_free(0, NULL, ptr);
+}
+#endif
/* mseg ... */
@@ -625,8 +758,9 @@ static ERTS_INLINE void *
alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
-
- res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt);
+ UWord size = (UWord) *size_p;
+ res = erts_mseg_alloc_opt(allctr->alloc_no, &size, flags, &allctr->mseg_opt);
+ *size_p = (Uint) size;
INC_CC(allctr->calls.mseg_alloc);
return res;
}
@@ -635,9 +769,10 @@ static ERTS_INLINE void *
alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
{
void *res;
-
- res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p,
+ UWord new_size = (UWord) *new_size_p;
+ res = erts_mseg_realloc_opt(allctr->alloc_no, seg, (UWord) old_size, &new_size,
ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
+ *new_size_p = (Uint) new_size;
INC_CC(allctr->calls.mseg_realloc);
return res;
}
@@ -645,18 +780,22 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
static ERTS_INLINE void
alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
{
- erts_mseg_dealloc_opt(allctr->alloc_no, seg, size, flags, &allctr->mseg_opt);
+ erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_dealloc);
}
#endif
static ERTS_INLINE void *
-alcu_sys_alloc(Allctr_t *allctr, Uint size)
+alcu_sys_alloc(Allctr_t *allctr, Uint size, int superalign)
{
void *res;
-
- res = erts_sys_alloc(0, NULL, size);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size);
+ else
+#endif
+ res = erts_sys_alloc(0, NULL, size);
INC_CC(allctr->calls.sys_alloc);
if (erts_mtrace_enabled)
erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size);
@@ -664,11 +803,16 @@ alcu_sys_alloc(Allctr_t *allctr, Uint size)
}
static ERTS_INLINE void *
-alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size)
+alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size, Uint old_size, int superalign)
{
void *res;
- res = erts_sys_realloc(0, NULL, ptr, size);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ res = erts_sys_aligned_realloc(ERTS_SACRR_UNIT_SZ, ptr, size, old_size);
+ else
+#endif
+ res = erts_sys_realloc(0, NULL, ptr, size);
INC_CC(allctr->calls.sys_realloc);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(res,
@@ -680,9 +824,14 @@ alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size)
}
static ERTS_INLINE void
-alcu_sys_free(Allctr_t *allctr, void *ptr)
+alcu_sys_free(Allctr_t *allctr, void *ptr, int superalign)
{
- erts_sys_free(0, NULL, ptr);
+#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+ if (superalign)
+ erts_sys_aligned_free(ERTS_SACRR_UNIT_SZ, ptr);
+ else
+#endif
+ erts_sys_free(0, NULL, ptr);
INC_CC(allctr->calls.sys_free);
if (erts_mtrace_enabled)
erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
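The superalign path above places carriers on ERTS_SACRR_UNIT_SZ boundaries.
With power-of-two alignment, the enclosing carrier can be recovered from any
interior pointer by masking, which is what makes super-aligned multiblock
carriers cheap to resolve. A hedged sketch of the idea (posix_memalign stands
in for erts_sys_aligned_alloc, and UNIT_SZ is an illustrative constant):

    #include <stdint.h>
    #include <stdlib.h>

    #define UNIT_SZ ((uintptr_t) (256 * 1024))  /* illustrative power of two */

    /* Valid only because carriers start on UNIT_SZ boundaries. */
    static void *carrier_of(void *block)
    {
        return (void *) (((uintptr_t) block) & ~(UNIT_SZ - 1));
    }

    static void *alloc_superaligned(size_t size)
    {
        void *p = NULL;
        if (posix_memalign(&p, (size_t) UNIT_SZ, size) != 0)
            return NULL;  /* allocation failed */
        return p;
    }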
@@ -777,10 +926,117 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
}
}
-static Block_t *create_sbmbc(Allctr_t *allctr, Uint umem_sz);
-static void destroy_sbmbc(Allctr_t *allctr, Block_t *blk);
-static Block_t *create_carrier(Allctr_t *, Uint, UWord);
-static void destroy_carrier(Allctr_t *, Block_t *);
+#ifdef ERTS_SMP
+
+#ifdef DEBUG
+static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* p;
+
+ ASSERT(node != sentinel);
+ for (p = sentinel->next; p != sentinel; p = p->next) {
+ if (p == node)
+ return 1;
+ }
+ return 0;
+}
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* before_me = after_me->next;
+ ASSERT(node != after_me && node != before_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ ErtsDoubleLink_t* after_me = before_me->prev;
+ ASSERT(node != before_me && node != after_me);
+ node->next = before_me;
+ node->prev = after_me;
+ before_me->prev = node;
+ after_me->next = node;
+}
+
+static ERTS_INLINE void
+unlink_edl(ErtsDoubleLink_t* node)
+{
+ node->next->prev = node->prev;
+ node->prev->next = node->next;
+}
+
+static ERTS_INLINE void
+relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
+{
+ if (node != before_me && node != before_me->prev) {
+ unlink_edl(node);
+ link_edl_before(before_me, node);
+ }
+}
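The ErtsDoubleLink_t helpers above implement a circular doubly linked list
anchored by a sentinel node, so linking and unlinking never branch on NULL;
the pooled_list and traitor_list used below are both of this form. A
self-contained sketch of the same structure:

    typedef struct DL { struct DL *next, *prev; } DL;

    /* An empty list is a sentinel pointing at itself. */
    static void dl_init(DL *sentinel) { sentinel->next = sentinel->prev = sentinel; }

    static void dl_link_after(DL *after_me, DL *node)
    {
        node->next = after_me->next;
        node->prev = after_me;
        after_me->next->prev = node;
        after_me->next = node;
    }

    static void dl_unlink(DL *node)
    {
        node->next->prev = node->prev;
        node->prev->next = node->next;
    }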
+
+static ERTS_INLINE int is_abandoned(Carrier_t *crr)
+{
+ return crr->cpool.abandoned.next != NULL;
+}
+
+static ERTS_INLINE void
+link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
+{
+ ASSERT(!is_abandoned(crr));
+
+ link_edl_after(list, &crr->cpool.abandoned);
+
+ ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
+ ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+}
+
+static ERTS_INLINE void
+unlink_abandoned_carrier(Carrier_t *crr)
+{
+ ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
+ &crr->cpool.abandoned) ||
+ is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
+ &crr->cpool.abandoned));
+
+ unlink_edl(&crr->cpool.abandoned);
+
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
+}
+
+static ERTS_INLINE void
+clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ if (crr) {
+ erts_aint_t max_size;
+ erts_aint_t new_val;
+
+ max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
+ erts_atomic_set_nob(&crr->cpool.max_size, max_size);
+
+ new_val = (((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
+
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ {
+ erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY;
+
+ ERTS_ALC_CPOOL_ASSERT(old_val
+ == erts_smp_atomic_xchg_relb(&crr->allctr,
+ new_val));
+ }
+#else
+ erts_smp_atomic_set_relb(&crr->allctr, new_val);
+#endif
+ }
+}
+
+#endif /* ERTS_SMP */
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -802,13 +1058,154 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
#endif
-erts_aint32_t
-erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
+static void *mbc_alloc(Allctr_t *allctr, Uint size);
+
+#ifdef ERTS_SMP
+typedef struct {
+ ErtsAllctrDDBlock_t ddblock__; /* must be first */
+ ErtsAlcType_t fix_type;
+} ErtsAllctrFixDDBlock_t;
+#endif
+
+static ERTS_INLINE void
+dealloc_fix_block(Allctr_t *allctr,
+ ErtsAlcType_t type,
+ void *ptr,
+ int dec_cc_on_redirect)
+{
+#ifdef ERTS_SMP
+ /* May be redirected... */
+ ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
+#endif
+ dealloc_block(allctr, ptr, dec_cc_on_redirect);
+}
+
+static ERTS_INLINE void
+sched_fix_shrink(Allctr_t *allctr, int on)
+{
+ if (on && !allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 1;
+ erts_set_aux_work_timeout(allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 1);
+ }
+ else if (!on && allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 0;
+ erts_set_aux_work_timeout(allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 0);
+ }
+}
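sched_fix_shrink arms the periodic aux-work timeout only on an off-to-on
transition and disarms it on on-to-off, so repeated frees cost at most one
flag test. The shape of the idiom, with hypothetical timer_enable and
timer_disable helpers standing in for erts_set_aux_work_timeout:

    struct PeriodicJob { int scheduled; /* ...timer state... */ };

    void timer_enable(struct PeriodicJob *j);   /* hypothetical */
    void timer_disable(struct PeriodicJob *j);  /* hypothetical */

    /* Act only on state transitions. */
    static void set_periodic(struct PeriodicJob *j, int on)
    {
        if (on && !j->scheduled) {
            j->scheduled = 1;
            timer_enable(j);
        }
        else if (!on && j->scheduled) {
            j->scheduled = 0;
            timer_disable(j);
        }
    }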
+
+static ERTS_INLINE void
+fix_cpool_check_shrink(Allctr_t *allctr,
+ ErtsAlcType_t type,
+ ErtsAlcFixList_t *fix,
+ Carrier_t **busy_pcrr_pp)
+{
+ if (fix->u.cpool.shrink_list > 0) {
+ if (fix->list_size == 0)
+ fix->u.cpool.shrink_list = 0;
+ else {
+ void *p;
+#ifdef ERTS_SMP
+ if (busy_pcrr_pp) {
+ clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
+ *busy_pcrr_pp = NULL;
+ }
+#endif
+ fix->u.cpool.shrink_list--;
+ p = fix->list;
+ fix->list = *((void **) p);
+ fix->list_size--;
+ if (fix->u.cpool.min_list_size > fix->list_size)
+ fix->u.cpool.min_list_size = fix->list_size;
+
+ fix->u.cpool.allocated--;
+ dealloc_fix_block(allctr, type, p, 0);
+ }
+ }
+}
+
+static ERTS_INLINE void *
+fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
+{
+ void *res;
+ ErtsAlcFixList_t *fix;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
+ && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+
+ res = fix->list;
+ if (res) {
+ fix->list = *((void **) res);
+ fix->list_size--;
+ if (fix->u.cpool.min_list_size > fix->list_size)
+ fix->u.cpool.min_list_size = fix->list_size;
+ fix->u.cpool.used++;
+ fix_cpool_check_shrink(allctr, type, fix, NULL);
+ return res;
+ }
+ if (size < 2*sizeof(UWord))
+ size += sizeof(UWord);
+ if (size >= allctr->sbc_threshold) {
+ Block_t *blk;
+ blk = create_carrier(allctr, size, CFLG_SBC);
+ res = blk ? BLK2UMEM(blk) : NULL;
+ }
+ else
+ res = mbc_alloc(allctr, size);
+ if (res) {
+ fix->u.cpool.used++;
+ fix->u.cpool.allocated++;
+ }
+ return res;
+}
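The fix caches keep freed blocks on an intrusive singly linked list: the
first machine word of each cached block doubles as the next pointer, which is
what fix->list = *((void **) res) pops above (and why the allocation size is
padded to at least two words). A standalone sketch of the idiom:

    #include <stddef.h>

    typedef struct { void *head; size_t len; } FreeList;

    /* Push: store the old head in the block's own first word. */
    static void fl_push(FreeList *fl, void *block)
    {
        *((void **) block) = fl->head;
        fl->head = block;
        fl->len++;
    }

    /* Pop: read the next pointer back out of the block itself. */
    static void *fl_pop(FreeList *fl)
    {
        void *block = fl->head;
        if (block) {
            fl->head = *((void **) block);
            fl->len--;
        }
        return block;
    }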
+
+static ERTS_INLINE void
+fix_cpool_free(Allctr_t *allctr,
+ ErtsAlcType_t type,
+ void *p,
+ Carrier_t **busy_pcrr_pp)
+{
+ ErtsAlcFixList_t *fix;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
+ && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+
+ fix->u.cpool.used--;
+
+ if ((!busy_pcrr_pp || !*busy_pcrr_pp)
+ && !fix->u.cpool.shrink_list
+ && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) p) = fix->list;
+ fix->list = p;
+ fix->list_size++;
+ sched_fix_shrink(allctr, 1);
+ }
+ else {
+ Block_t *blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk, NULL);
+ else
+ mbc_free(allctr, p, busy_pcrr_pp);
+ fix->u.cpool.allocated--;
+ fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp);
+ }
+}
+
+static ERTS_INLINE erts_aint32_t
+fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
{
int all_empty = 1;
erts_aint32_t res = 0;
int ix, o;
- ErtsAlcFixList_t *fix = allctr->fix;
int flush = flgs == 0;
#ifdef USE_THREADS
@@ -817,56 +1214,204 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ErtsAlcFixList_t *fix = &allctr->fix[ix];
+ ErtsAlcType_t type;
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
- if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
- fix[ix].limit = fix[ix].max_used;
- if (fix[ix].limit < fix[ix].used)
- fix[ix].limit = fix[ix].used;
- fix[ix].max_used = fix[ix].used;
- ASSERT(fix[ix].limit >= 0);
-
- }
- if (flush) {
- fix[ix].limit = 0;
- fix[ix].max_used = fix[ix].used;
- ASSERT(fix[ix].limit >= 0);
+ if (flush)
+ fix->u.cpool.shrink_list = fix->list_size;
+ else if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
+ fix->u.cpool.shrink_list = fix->u.cpool.min_list_size;
+ fix->u.cpool.min_list_size = fix->list_size;
}
+ type = (ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE);
for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
- Block_t *blk;
void *ptr;
- if (!flush && fix[ix].limit >= fix[ix].allocated)
+ if (fix->u.cpool.shrink_list == 0)
break;
- if (fix[ix].list_size == 0)
+ if (fix->list_size == 0) {
+ fix->u.cpool.shrink_list = 0;
break;
- ptr = fix[ix].list;
- fix[ix].list = *((void **) ptr);
- fix[ix].list_size--;
+ }
+ ptr = fix->list;
+ fix->list = *((void **) ptr);
+ fix->list_size--;
+ fix->u.cpool.shrink_list--;
+ fix->u.cpool.allocated--;
+ dealloc_fix_block(allctr, type, ptr, 0);
+ }
+ if (fix->u.cpool.min_list_size > fix->list_size)
+ fix->u.cpool.min_list_size = fix->list_size;
+ if (fix->list_size != 0) {
+ if (fix->u.cpool.shrink_list > 0)
+ res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
+ all_empty = 0;
+ }
+ }
+
+ if (all_empty)
+ sched_fix_shrink(allctr, 0);
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
- blk = UMEM2BLK(ptr);
+ return res;
+}
+static ERTS_INLINE void *
+fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
+{
+ ErtsAlcFixList_t *fix;
+ void *res;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
+ && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix->u.nocpool.used++;
+ res = fix->list;
+ if (res) {
+ fix->list_size--;
+ fix->list = *((void **) res);
+ if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
+ Block_t *blk;
+ void *p = fix->list;
+ fix->list = *((void **) p);
+ fix->list_size--;
+ blk = UMEM2BLK(p);
if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
+ destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, ptr);
+ mbc_free(allctr, p, NULL);
+ fix->u.nocpool.allocated--;
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return res;
+ }
+ if (size < 2*sizeof(UWord))
+ size += sizeof(UWord);
+ if (fix->u.nocpool.limit < fix->u.nocpool.used)
+ fix->u.nocpool.limit = fix->u.nocpool.used;
+ if (fix->u.nocpool.max_used < fix->u.nocpool.used)
+ fix->u.nocpool.max_used = fix->u.nocpool.used;
+ fix->u.nocpool.allocated++;
- fix[ix].allocated--;
+ if (size >= allctr->sbc_threshold) {
+ Block_t *blk;
+ blk = create_carrier(allctr, size, CFLG_SBC);
+ res = blk ? BLK2UMEM(blk) : NULL;
+ }
+ else
+ res = mbc_alloc(allctr, size);
+
+ if (!res) {
+ fix->u.nocpool.allocated--;
+ fix->u.nocpool.used--;
+ }
+ return res;
+}
+
+static ERTS_INLINE void
+fix_nocpool_free(Allctr_t *allctr,
+ ErtsAlcType_t type,
+ void *p)
+{
+ Block_t *blk;
+ ErtsAlcFixList_t *fix;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
+ && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix->u.nocpool.used--;
+ if (fix->u.nocpool.allocated < fix->u.nocpool.limit
+ && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) p) = fix->list;
+ fix->list = p;
+ fix->list_size++;
+ sched_fix_shrink(allctr, 1);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return;
+ }
+ fix->u.nocpool.allocated--;
+ if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk, NULL);
+ else
+ mbc_free(allctr, p, NULL);
+ p = fix->list;
+ fix->list = *((void **) p);
+ fix->list_size--;
+ fix->u.nocpool.allocated--;
+ }
+
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk, NULL);
+ else
+ mbc_free(allctr, p, NULL);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+}
+
+static ERTS_INLINE erts_aint32_t
+fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
+{
+ int all_empty = 1;
+ erts_aint32_t res = 0;
+ int ix, o;
+ int flush = flgs == 0;
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ErtsAlcFixList_t *fix = &allctr->fix[ix];
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
+ fix->u.nocpool.limit = fix->u.nocpool.max_used;
+ if (fix->u.nocpool.limit < fix->u.nocpool.used)
+ fix->u.nocpool.limit = fix->u.nocpool.used;
+ fix->u.nocpool.max_used = fix->u.nocpool.used;
+ ASSERT(fix->u.nocpool.limit >= 0);
+
+ }
+ if (flush) {
+ fix->u.nocpool.limit = 0;
+ fix->u.nocpool.max_used = fix->u.nocpool.used;
+ ASSERT(fix->u.nocpool.limit >= 0);
+ }
+ for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
+ void *ptr;
+
+ if (!flush && fix->u.nocpool.limit >= fix->u.nocpool.allocated)
+ break;
+ if (fix->list_size == 0)
+ break;
+ ptr = fix->list;
+ fix->list = *((void **) ptr);
+ fix->list_size--;
+ dealloc_block(allctr, ptr, 0);
+ fix->u.nocpool.allocated--;
}
- if (fix[ix].list_size != 0) {
- if (fix[ix].limit < fix[ix].allocated)
+ if (fix->list_size != 0) {
+ if (fix->u.nocpool.limit < fix->u.nocpool.allocated)
res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
all_empty = 0;
}
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
}
- if (all_empty && allctr->fix_shrink_scheduled) {
- allctr->fix_shrink_scheduled = 0;
- erts_set_aux_work_timeout(allctr->ix,
- (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
- | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
- 0);
- }
+ if (all_empty)
+ sched_fix_shrink(allctr, 0);
#ifdef USE_THREADS
if (allctr->thread_safe)
@@ -876,11 +1421,18 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
return res;
}
-#ifdef ERTS_SMP
+erts_aint32_t
+erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
+{
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ return fix_cpool_alloc_shrink(allctr, flgs);
+ else
+ return fix_nocpool_alloc_shrink(allctr, flgs);
+}
-#define ERTS_ALCU_DD_FIX_TYPE_OFFS \
- ((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1)
+static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned);
+#ifdef ERTS_SMP
static ERTS_INLINE Allctr_t*
get_pref_allctr(void *extra)
@@ -896,6 +1448,9 @@ get_pref_allctr(void *extra)
return tspec->allctr[pref_ix];
}
+#define ERTS_ALC_TS_PREF_LOCK_IF_USED (1)
+#define ERTS_ALC_TS_PREF_LOCK_NO (0)
+
/* SMP note:
* get_used_allctr() must be safe WITHOUT locking the allocator while
* concurrent threads may be updating adjacent blocks.
@@ -904,22 +1459,85 @@ get_pref_allctr(void *extra)
* the "PREV_FREE" flag bit.
*/
static ERTS_INLINE Allctr_t*
-get_used_allctr(void *extra, void *p, UWord *sizep)
+get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
+ Carrier_t **busy_pcrr_pp)
{
Block_t* blk = UMEM2BLK(p);
- Carrier_t* crr;
+ Carrier_t *crr;
+ erts_aint_t iallctr;
+ Allctr_t *used_allctr;
+
+ *busy_pcrr_pp = NULL;
if (IS_SBC_BLK(blk)) {
crr = BLK_TO_SBC(blk);
if (sizep)
*sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
+ iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
}
else {
crr = ABLK_TO_MBC(blk);
+
if (sizep)
*sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr))
+ iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
+ else {
+ int locked_pref_allctr = 0;
+ iallctr = erts_smp_atomic_read_ddrb(&crr->allctr);
+
+ if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
+ && pref_allctr->thread_safe) {
+ used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
+ if (pref_allctr == used_allctr) {
+ erts_mtx_lock(&pref_allctr->mutex);
+ locked_pref_allctr = 1;
+ }
+ }
+
+ while ((iallctr & ((~ERTS_CRR_ALCTR_FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL))
+ == (((erts_aint_t) pref_allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL)) {
+ erts_aint_t act;
+
+ ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
+ act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr,
+ iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
+ iallctr);
+ if (act == iallctr) {
+ *busy_pcrr_pp = crr;
+ break;
+ }
+ iallctr = act;
+ }
+
+ used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
+
+ if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock) {
+ if (locked_pref_allctr && used_allctr != pref_allctr) {
+ /* Was taken out of pool; now owned by someone else */
+ erts_mtx_unlock(&pref_allctr->mutex);
+ }
+ }
+
+ ERTS_ALC_CPOOL_ASSERT(
+ (((iallctr & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t) pref_allctr)
+ ? (((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL)
+ || ((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == 0))
+ : 1));
+
+ return used_allctr;
+ }
}
- return crr->allctr;
+
+ used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
+
+ if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
+ && used_allctr == pref_allctr
+ && pref_allctr->thread_safe) {
+ erts_mtx_lock(&pref_allctr->mutex);
+ }
+
+ return used_allctr;
}
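get_used_allctr relies on the carrier's owner word being a pointer with
status flags packed into its low bits: carriers are aligned, so the bottom
bits of the Allctr_t pointer are free to carry ERTS_CRR_ALCTR_FLG_IN_POOL and
ERTS_CRR_ALCTR_FLG_BUSY, and a single atomic word then encodes both owner and
pool state. A sketch of the tagging arithmetic (the bit assignments here are
illustrative):

    #include <stdint.h>

    typedef struct Allctr Allctr;

    #define FLG_IN_POOL ((uintptr_t) 1)
    #define FLG_BUSY    ((uintptr_t) 2)
    #define FLG_MASK    (FLG_IN_POOL | FLG_BUSY)

    static Allctr *owner_of(uintptr_t word)
    {
        return (Allctr *) (word & ~FLG_MASK);  /* strip the flag bits */
    }

    static uintptr_t tag_in_pool(Allctr *a)
    {
        return ((uintptr_t) a) | FLG_IN_POOL;  /* owner plus status in one word */
    }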
static void
@@ -1014,7 +1632,7 @@ check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)
}
static ERTS_INLINE int
-ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
+ddq_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
{
int last_elem;
int um_refc_ix = 0;
@@ -1115,6 +1733,59 @@ store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsAllctrDDQueue_t *ddq)
}
}
+static void
+check_pending_dealloc_carrier(Allctr_t *allctr,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work);
+
+static void
+handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
+{
+ ErtsAlcType_t type;
+
+ type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
+ && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ fix_nocpool_free(allctr, type, ptr);
+ else {
+ Block_t *blk = UMEM2BLK(ptr);
+ Carrier_t *busy_pcrr_p;
+ Allctr_t *used_allctr;
+
+ if (IS_SBC_BLK(blk)) {
+ busy_pcrr_p = NULL;
+ goto doit;
+ }
+
+ used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
+ NULL, &busy_pcrr_p);
+ if (used_allctr == allctr) {
+ doit:
+ fix_cpool_free(allctr, type, ptr, &busy_pcrr_p);
+ clear_busy_pool_carrier(allctr, busy_pcrr_p);
+ }
+ else {
+ /* Carrier migrated; need to redirect block to new owner... */
+ int cinit = used_allctr->dd.ix - allctr->dd.ix;
+
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+
+ DEC_CC(allctr->calls.this_free);
+
+ ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
+ if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
+ erts_alloc_notify_delayed_dealloc(used_allctr->ix);
+ }
+ }
+}
+
+static void
+schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr);
+
static ERTS_INLINE int
handle_delayed_dealloc(Allctr_t *allctr,
int allctr_locked,
@@ -1146,7 +1817,6 @@ handle_delayed_dealloc(Allctr_t *allctr,
while (1) {
Block_t *blk;
void *ptr;
- int ix;
if (use_limit && ++ops > ops_limit) {
if (ddq->head.first != ddq->head.unref_end) {
@@ -1175,52 +1845,48 @@ handle_delayed_dealloc(Allctr_t *allctr,
res = 1;
- INC_CC(allctr->calls.this_free);
+ blk = UMEM2BLK(ptr);
+ if (IS_FREE_LAST_MBC_BLK(blk)) {
+ /*
+ * A multiblock carrier that previously has been migrated away
+ * from us and now is back to be deallocated. For more info
+ * see schedule_dealloc_carrier().
+ *
+	     * Note that we cannot use FBLK_TO_MBC(blk) since its
+	     * data has been overwritten by the queue.
+ */
+ Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk);
+
+	    /* Restore the word overwritten by the dd-queue, as it will be read
+ * if this carrier is pulled from dc_list by cpool_fetch()
+ */
+ ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
+ ERTS_ALC_CPOOL_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
+#ifdef MBC_ABLK_OFFSET_BITS
+ blk->u.carrier = crr;
+#else
+ blk->carrier = crr;
+#endif
- if (fix) {
- ErtsAlcType_t type;
-
- type = (ErtsAlcType_t) ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS];
- ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
- fix[ix].used--;
- if (fix[ix].allocated < fix[ix].limit
- && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
- *((void **) ptr) = fix[ix].list;
- fix[ix].list = ptr;
- fix[ix].list_size++;
- if (!allctr->fix_shrink_scheduled) {
- allctr->fix_shrink_scheduled = 1;
- erts_set_aux_work_timeout(
- allctr->ix,
- (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
- | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
- 1);
- }
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
- continue;
- }
- fix[ix].allocated--;
- if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
- blk = UMEM2BLK(ptr);
- if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
- else
- mbc_free(allctr, ptr);
- ptr = fix[ix].list;
- fix[ix].list = *((void **) ptr);
- fix[ix].list_size--;
- fix[ix].allocated--;
- }
+ ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+ ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
+ != (erts_smp_atomic_read_nob(&crr->allctr)
+ & ~ERTS_CRR_ALCTR_FLG_MASK));
+
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+
+ schedule_dealloc_carrier(allctr, crr);
}
+ else {
- blk = UMEM2BLK(ptr);
+ INC_CC(allctr->calls.this_free);
- if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
- else
- mbc_free(allctr, ptr);
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ if (fix)
+ handle_delayed_fix_dealloc(allctr, ptr);
+ else
+ dealloc_block(allctr, ptr, 1);
+ }
}
if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
@@ -1230,6 +1896,12 @@ handle_delayed_dealloc(Allctr_t *allctr,
store_earliest_thr_prgr(thr_prgr_p, ddq);
}
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ check_pending_dealloc_carrier(allctr,
+ need_thr_progress,
+ thr_prgr_p,
+ need_more_work);
+
if (allctr->thread_safe && !allctr_locked)
erts_mtx_unlock(&allctr->mutex);
return res;
@@ -1242,15 +1914,61 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
int cinit)
{
if (allctr->fix)
- ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS] = (UWord) type;
+ ((ErtsAllctrFixDDBlock_t*) ptr)->fix_type = type;
- if (ddq_enqueue(type, &allctr->dd.q, ptr, cinit))
+ if (ddq_enqueue(&allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
#endif
#ifdef ERTS_SMP
+static void
+set_new_allctr_abandon_limit(Allctr_t *allctr);
+static void
+abandon_carrier(Allctr_t *allctr, Carrier_t *crr);
+
+
+static ERTS_INLINE void
+check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
+{
+ Carrier_t *crr;
+
+ if (busy_pcrr_pp && *busy_pcrr_pp)
+ return;
+
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ return;
+
+ allctr->cpool.check_limit_count--;
+ if (--allctr->cpool.check_limit_count <= 0)
+ set_new_allctr_abandon_limit(allctr);
+
+ if (!erts_thr_progress_is_managed_thread())
+ return;
+
+ if (allctr->cpool.disable_abandon)
+ return;
+
+ if (allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit)
+ return;
+
+ crr = FBLK_TO_MBC(fblk);
+
+ if (allctr->main_carrier == crr)
+ return;
+
+ if (crr->cpool.blocks_size > crr->cpool.abandon_limit)
+ return;
+
+ if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID
+ && !erts_thr_progress_has_reached(crr->cpool.thr_prgr))
+ return;
+
+ abandon_carrier(allctr, crr);
+}
+
void
erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
int limit,
@@ -1268,78 +1986,88 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
}
#endif
-#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
- handle_delayed_dealloc((Allctr), (Locked), 1, \
+#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
+ handle_delayed_dealloc((Allctr), (Locked), 1, \
ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
+static void
+dealloc_block(Allctr_t *allctr, void *ptr, int dec_cc_on_redirect)
+{
+ Block_t *blk = UMEM2BLK(ptr);
+
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk, NULL);
+#ifndef ERTS_SMP
+ else
+ mbc_free(allctr, ptr, NULL);
+#else
+ else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ mbc_free(allctr, ptr, NULL);
+ else {
+ Carrier_t *busy_pcrr_p;
+ Allctr_t *used_allctr;
+ used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
+ NULL, &busy_pcrr_p);
+ if (used_allctr == allctr) {
+ mbc_free(allctr, ptr, &busy_pcrr_p);
+ clear_busy_pool_carrier(allctr, busy_pcrr_p);
+ }
+ else {
+ /* Carrier migrated; need to redirect block to new owner... */
+ int cinit = used_allctr->dd.ix - allctr->dd.ix;
+
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+
+ if (dec_cc_on_redirect)
+ DEC_CC(allctr->calls.this_free);
+ if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
+ erts_alloc_notify_delayed_dealloc(used_allctr->ix);
+ }
+ }
+#endif
+}
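The cpool branch of dealloc_block is the heart of carrier migration: a free
hitting a carrier we still own is handled locally, while a free hitting a
migrated carrier is redirected to the current owner's delayed-dealloc queue.
Schematically, under hypothetical helper names:

    typedef struct Allctr Allctr;               /* stand-in for the real type */

    Allctr *owner_of_block(void *p);            /* hypothetical: carrier header */
    void local_free(Allctr *self, void *p);     /* hypothetical */
    int enqueue_dd(Allctr *owner, void *p);     /* hypothetical MPSC enqueue */
    void notify_owner(Allctr *owner);           /* hypothetical wakeup */

    static void free_block(Allctr *self, void *p)
    {
        Allctr *owner = owner_of_block(p);
        if (owner == self)
            local_free(self, p);        /* fast path: we own the carrier */
        else if (enqueue_dd(owner, p))  /* migrated: redirect the free */
            notify_owner(owner);        /* enqueue reports "was empty" */
    }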
+
/* Multi block carrier alloc/realloc/free ... */
/* NOTE! mbc_alloc() may in case of memory shortage place the requested
* block in a sbc.
*/
static ERTS_INLINE void *
-mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp)
+mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
{
Block_t *blk;
Uint get_blk_sz;
- Uint sbmbct;
ASSERT(size);
ASSERT(size < allctr->sbc_threshold);
*blk_szp = get_blk_sz = UMEMSZ2BLKSZ(allctr, size);
- sbmbct = allctr->sbmbc_threshold;
- if (sbmbct) {
- if (get_blk_sz < sbmbct) {
- *alcu_flgsp |= ERTS_ALCU_FLG_SBMBC;
- if (get_blk_sz + allctr->min_block_size > sbmbct) {
- /* Since we use block size to determine if blocks are
- located in sbmbc or not... */
- get_blk_sz += allctr->min_block_size;
- }
- }
- }
-
-#ifdef ERTS_SMP
- if (allctr->dd.use)
- ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
-#endif
-
- blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0, *alcu_flgsp);
-
-#ifdef ERTS_SMP
- if (!blk && allctr->dd.use) {
- if (ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1))
- blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0,
- *alcu_flgsp);
- }
-#endif
+ blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0);
if (!blk) {
- if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC)
- blk = create_sbmbc(allctr, get_blk_sz);
- else {
- blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
-#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
- if (!blk) {
- /* Emergency! We couldn't create the carrier as we wanted.
- Try to place it in a sys_alloced sbc. */
- blk = create_carrier(allctr,
- size,
- (CFLG_SBC
- | CFLG_FORCE_SIZE
- | CFLG_FORCE_SYS_ALLOC));
- }
-#endif
+ blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
+#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
+ if (!blk) {
+ /* Emergency! We couldn't create the carrier as we wanted.
+ Try to place it in a sys_alloced sbc. */
+ blk = create_carrier(allctr,
+ size,
+ (CFLG_SBC
+ | CFLG_FORCE_SIZE
+ | CFLG_FORCE_SYS_ALLOC));
}
+#endif
}
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
if (IS_MBC_BLK(blk)) {
- (*allctr->link_free_block)(allctr, blk, *alcu_flgsp);
+ (*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk, *alcu_flgsp);
+ (*allctr->unlink_free_block)(allctr, blk);
}
#endif
@@ -1353,8 +2081,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
UWord flags,
Carrier_t *crr,
Uint want_blk_sz,
- int valid_blk_info,
- Uint32 alcu_flgs)
+ int valid_blk_info)
{
Uint blk_sz;
Uint nxt_blk_sz;
@@ -1386,7 +2113,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
}
}
- (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->link_free_block)(allctr, nxt_blk);
ASSERT(IS_NOT_LAST_BLK(blk));
ASSERT(IS_FREE_BLK(nxt_blk));
@@ -1427,7 +2154,8 @@ mbc_alloc_finalize(Allctr_t *allctr,
ASSERT(ABLK_TO_MBC(blk) == crr);
}
- STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
+ ERTS_ALC_CPOOL_ALLOC_OP(allctr);
+ STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
ASSERT(blk_sz == MBC_BLK_SZ(blk));
@@ -1447,8 +2175,7 @@ mbc_alloc(Allctr_t *allctr, Uint size)
{
Block_t *blk;
Uint blk_sz;
- Uint32 alcu_flgs = 0;
- blk = mbc_alloc_block(allctr, size, &blk_sz, &alcu_flgs);
+ blk = mbc_alloc_block(allctr, size, &blk_sz);
if (!blk)
return NULL;
if (IS_MBC_BLK(blk))
@@ -1458,35 +2185,34 @@ mbc_alloc(Allctr_t *allctr, Uint size)
GET_BLK_HDR_FLGS(blk),
FBLK_TO_MBC(blk),
blk_sz,
- 1,
- alcu_flgs);
+ 1);
return BLK2UMEM(blk);
}
static void
-mbc_free(Allctr_t *allctr, void *p)
+mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
{
Uint is_first_blk;
Uint is_last_blk;
- Uint32 alcu_flgs = 0;
Uint blk_sz;
Block_t *blk;
Block_t *nxt_blk;
-
+ Carrier_t *crr;
ASSERT(p);
blk = UMEM2BLK(p);
blk_sz = MBC_ABLK_SZ(blk);
- if (blk_sz < allctr->sbmbc_threshold)
- alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
ASSERT(IS_MBC_BLK(blk));
ASSERT(blk_sz >= allctr->min_block_size);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs);
+ crr = ABLK_TO_MBC(blk);
+
+ ERTS_ALC_CPOOL_FREE_OP(allctr);
+ STAT_MBC_BLK_FREE(allctr, crr, busy_pcrr_pp, blk_sz, alcu_flgs);
is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
is_last_blk = IS_LAST_BLK(blk);
@@ -1495,7 +2221,7 @@ mbc_free(Allctr_t *allctr, void *p)
ASSERT(!is_first_blk);
/* Coalesce with previous block... */
blk = PREV_BLK(blk);
- (*allctr->unlink_free_block)(allctr, blk, alcu_flgs);
+ (*allctr->unlink_free_block)(allctr, blk);
blk_sz += MBC_FBLK_SZ(blk);
is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
@@ -1511,7 +2237,7 @@ mbc_free(Allctr_t *allctr, void *p)
nxt_blk = BLK_AFTER(blk, blk_sz);
if (IS_FREE_BLK(nxt_blk)) {
/* Coalesce with next block... */
- (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
blk_sz += MBC_FBLK_SZ(nxt_blk);
SET_MBC_FBLK_SZ(blk, blk_sz);
@@ -1544,19 +2270,20 @@ mbc_free(Allctr_t *allctr, void *p)
if (is_first_blk
&& is_last_blk
&& allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) {
- if (alcu_flgs & ERTS_ALCU_FLG_SBMBC)
- destroy_sbmbc(allctr, blk);
- else
- destroy_carrier(allctr, blk);
+ destroy_carrier(allctr, blk, busy_pcrr_pp);
}
else {
- (*allctr->link_free_block)(allctr, blk, alcu_flgs);
+ (*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
+#ifdef ERTS_SMP
+ check_abandon_carrier(allctr, blk, busy_pcrr_pp);
+#endif
}
}
static void *
-mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
+mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
+ Carrier_t **busy_pcrr_pp)
{
void *new_p;
Uint old_blk_sz;
@@ -1570,11 +2297,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
Uint is_last_blk;
#endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
-#ifdef ERTS_SMP
- if (allctr->dd.use)
- ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
-#endif
-
ASSERT(p);
ASSERT(size);
ASSERT(size < allctr->sbc_threshold);
@@ -1588,13 +2310,13 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
#else /* !MBC_REALLOC_ALWAYS_MOVES */
+
+#ifdef ERTS_SMP
+ if (busy_pcrr_pp && *busy_pcrr_pp)
+ goto realloc_move; /* Don't want to use carrier in pool */
+#endif
+
get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
- if ((alcu_flgs & ERTS_ALCU_FLG_SBMBC)
- && (blk_sz + allctr->min_block_size > allctr->sbmbc_threshold)) {
- /* Since we use block size to determine if blocks are
- located in sbmbc or not... */
- get_blk_sz = blk_sz + allctr->min_block_size;
- }
ASSERT(IS_ALLOCED_BLK(blk));
ASSERT(IS_MBC_BLK(blk));
@@ -1642,8 +2364,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_blk = (*allctr->get_free_block)(allctr,
get_blk_sz,
cand_blk,
- cand_blk_sz,
- alcu_flgs);
+ cand_blk_sz);
if (new_blk || cand_blk != blk)
goto move_into_new_blk;
}
@@ -1665,8 +2386,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
nxt_blk = BLK_AFTER(blk, blk_sz);
- STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
- STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
+ crr = ABLK_TO_MBC(blk);
+
+ ERTS_ALC_CPOOL_REALLOC_OP(allctr);
+ STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
@@ -1674,7 +2398,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (IS_FREE_BLK(nxt_nxt_blk)) {
/* Coalesce with next free block... */
nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk);
- (*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs);
+ (*allctr->unlink_free_block)(allctr, nxt_nxt_blk);
is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk);
}
@@ -1684,12 +2408,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
}
- crr = ABLK_TO_MBC(blk);
SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0),
crr);
- (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->link_free_block)(allctr, nxt_blk);
ASSERT(IS_ALLOCED_BLK(blk));
@@ -1712,6 +2435,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
HARD_CHECK_BLK_CARRIER(allctr, blk);
+#ifdef ERTS_SMP
+ check_abandon_carrier(allctr, nxt_blk, NULL);
+#endif
+
return p;
}
@@ -1721,11 +2448,12 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
nxt_blk = BLK_AFTER(blk, old_blk_sz);
nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
+ Carrier_t* crr = ABLK_TO_MBC(blk);
/* Grow into next block... */
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
nxt_blk_sz -= blk_sz - old_blk_sz;
is_last_blk = IS_LAST_BLK(nxt_blk);
@@ -1750,7 +2478,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
}
}
else {
- Carrier_t* crr = ABLK_TO_MBC(blk);
SET_MBC_ABLK_SZ(blk, blk_sz);
nxt_blk = BLK_AFTER(blk, blk_sz);
@@ -1761,15 +2488,15 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
else
SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
- (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->link_free_block)(allctr, nxt_blk);
ASSERT(IS_FREE_BLK(nxt_blk));
ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
}
- STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
- STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
-
+ ERTS_ALC_CPOOL_REALLOC_OP(allctr);
+ STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
ASSERT(blk_sz == MBC_BLK_SZ(blk));
@@ -1822,13 +2549,16 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (cand_blk_sz < get_blk_sz) {
	    /* We won't fit in cand_blk; get a new one */
+#ifdef ERTS_SMP
+ realloc_move:
+#endif
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
new_p = mbc_alloc(allctr, size);
if (!new_p)
return NULL;
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
- mbc_free(allctr, p);
+ mbc_free(allctr, p, busy_pcrr_pp);
return new_p;
@@ -1841,8 +2571,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_blk = (*allctr->get_free_block)(allctr,
get_blk_sz,
cand_blk,
- cand_blk_sz,
- alcu_flgs);
+ cand_blk_sz);
move_into_new_blk:
/*
* new_blk, and cand_blk have to be correctly set
@@ -1856,11 +2585,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
GET_BLK_HDR_FLGS(new_blk),
FBLK_TO_MBC(new_blk),
blk_sz,
- 1,
- alcu_flgs);
+ 1);
new_p = BLK2UMEM(new_blk);
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
- mbc_free(allctr, p);
+ mbc_free(allctr, p, NULL);
return new_p;
}
else {
@@ -1880,7 +2608,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, new_blk, alcu_flgs); /* prev */
+ (*allctr->unlink_free_block)(allctr, new_blk); /* prev */
if (is_last_blk)
new_blk_flgs |= LAST_BLK_HDR_FLG;
@@ -1889,7 +2617,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (IS_FREE_BLK(nxt_blk)) {
new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
new_blk_sz += MBC_FBLK_SZ(nxt_blk);
- (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
}
}
@@ -1914,10 +2642,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_blk_flgs,
crr,
blk_sz,
- 0,
- alcu_flgs);
+ 0);
- STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
+ ERTS_ALC_CPOOL_FREE_OP(allctr);
+ STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
return new_p;
}
@@ -1925,123 +2653,826 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
}
-#ifdef DEBUG
+#ifdef ERTS_SMP
-#if HAVE_ERTS_MSEG
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0)
+#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
+#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
+#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
+#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
+
+#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
+#define ERTS_ALC_CPOOL_PTR_DEL_MRK (((erts_aint_t) 1) << 1)
+
+#define ERTS_ALC_CPOOL_PTR_MRKS \
+ (ERTS_ALC_CPOOL_PTR_MOD_MRK | ERTS_ALC_CPOOL_PTR_DEL_MRK)
+
+/*
+ * When setting multiple mod markers, we always set them
+ * in pointer order, and always on next pointers before
+ * prev pointers.
+ */
+
+typedef union {
+ ErtsAlcCPoolData_t sentinel;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAlcCPoolData_t))];
+} ErtsAlcCrrPool_t;
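The union pads each pool sentinel to a whole number of cache lines, so
sentinels that are updated concurrently from many schedulers never share a
line with their neighbours. The general trick:

    #define CACHE_LINE 64  /* illustrative; ERTS computes this per platform */

    typedef struct { long next; long prev; } Node;

    /* Round the payload up to a cache-line multiple; an array of PaddedNode
     * then gives each element its own line(s), avoiding false sharing. */
    typedef union {
        Node node;
        char pad[((sizeof(Node) + CACHE_LINE - 1) / CACHE_LINE) * CACHE_LINE];
    } PaddedNode;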
+
+#if ERTS_ALC_A_INVALID != 0
+# error "Carrier pool implementation assumes ERTS_ALC_A_INVALID == 0"
+#endif
+#if ERTS_ALC_A_MIN <= ERTS_ALC_A_INVALID
+# error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID"
+#endif
+
+/*
+ * The pool is only allowed to be manipulated by managed
+ * threads except in the alloc_SUITE:cpool case. In this
+ * test case carrier_pool[ERTS_ALC_A_INVALID] will be
+ * used.
+ */
+
+static ErtsAlcCrrPool_t carrier_pool[ERTS_ALC_A_MAX+1] erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+#define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8)
+
+static int
+backoff(int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ERTS_SPIN_BODY;
+
+ if (n >= ERTS_ALC_CPOOL_MAX_BACKOFF)
+ return ERTS_ALC_CPOOL_MAX_BACKOFF;
+ else
+ return n << 1;
+}
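backoff() is bounded exponential backoff: spin n pause iterations, then
double n up to ERTS_ALC_CPOOL_MAX_BACKOFF. Wrapped around a failing
compare-and-swap it spreads contending threads apart in time. A C11 sketch
(spin_pause stands in for ERTS_SPIN_BODY):

    #include <stdatomic.h>

    static void spin_pause(void) { /* e.g. a pause/yield instruction */ }

    static int backoff_sketch(int n)
    {
        int i;
        for (i = 0; i < n; i++)
            spin_pause();
        return n >= 256 ? 256 : n << 1;  /* double, with an upper bound */
    }

    /* Set a flag bit with CAS, backing off harder after each failure. */
    static void set_flag(atomic_long *a, long flag)
    {
        long exp = atomic_load(a);
        int b = 1;
        while (!atomic_compare_exchange_weak(a, &exp, exp | flag))
            b = backoff_sketch(b);  /* exp was reloaded by the failed CAS */
    }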
+
+static int
+cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)
+{
+ ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *cpdp = sentinel;
+ Carrier_t *tmp_crr;
+
+ while (1) {
+ cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~FLG_MASK);
+ if (cpdp == sentinel)
+ return 0;
+ tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
+ if (tmp_crr == crr)
+ return 1;
+ }
+}
+
+static int
+cpool_is_empty(Allctr_t *allctr)
+{
+ ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel)
+ && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel));
+}
+
+static ERTS_INLINE ErtsAlcCPoolData_t *
+cpool_aint2cpd(erts_aint_t aint)
+{
+ return (ErtsAlcCPoolData_t *) (aint & ~ERTS_ALC_CPOOL_PTR_MRKS);
+}
+
+static ERTS_INLINE erts_aint_t
+cpool_read(erts_atomic_t *aptr)
+{
+ return erts_atomic_read_acqb(aptr);
+}
+
+static ERTS_INLINE void
+cpool_init(erts_atomic_t *aptr, erts_aint_t val)
+{
+ erts_atomic_set_nob(aptr, val);
+}
+
+static ERTS_INLINE void
+cpool_set_mod_marked(erts_atomic_t *aptr, erts_aint_t new, erts_aint_t old)
+{
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ erts_aint_t act = erts_atomic_xchg_relb(aptr, new);
+ ERTS_ALC_CPOOL_ASSERT(act == (old | ERTS_ALC_CPOOL_PTR_MOD_MRK));
#else
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ)
+ erts_atomic_set_relb(aptr, new);
#endif
+}
-static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
- UWord CSZ, Block_t* B, UWord BSZ)
+
+static ERTS_INLINE erts_aint_t
+cpool_try_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
{
- ASSERT(IS_LAST_BLK((B)));
- ASSERT((CSZ) == CARRIER_SZ((C)));
- ASSERT((BSZ) % sizeof(Unit_t) == 0);
- if ((SBC)) {
- ASSERT((BSZ) == SBC_BLK_SZ((B)));
- ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
- ASSERT(IS_SBC_BLK((B)));
- ASSERT(IS_SB_CARRIER((C)));
+ ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
+ return erts_atomic_cmpxchg_nob(aptr, exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, exp);
+}
+
+static ERTS_INLINE erts_aint_t
+cpool_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
+{
+ int b;
+ erts_aint_t act;
+ ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
+ while (1) {
+ act = erts_atomic_cmpxchg_nob(aptr,
+ exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
+ exp);
+ if (act == exp)
+ return exp;
+ b = 1;
+ do {
+ if ((act & ~ERTS_ALC_CPOOL_PTR_MOD_MRK) != exp)
+ return act;
+ b = backoff(b);
+ act = erts_atomic_read_nob(aptr);
+ } while (act != exp);
}
- else {
- ASSERT(IS_FREE_BLK(B));
- ASSERT((BSZ) == MBC_FBLK_SZ((B)));
- ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
- ASSERT(IS_MBC_BLK((B)));
- ASSERT(IS_MB_CARRIER((C)));
- ASSERT(FBLK_TO_MBC(B) == (C));
+}
+
+static ERTS_INLINE erts_aint_t
+cpool_mod_mark(erts_atomic_t *aptr)
+{
+ int b;
+ erts_aint_t act, exp;
+ act = cpool_read(aptr);
+ while (1) {
+ b = 1;
+ while (act & ERTS_ALC_CPOOL_PTR_MOD_MRK) {
+ b = backoff(b);
+ act = erts_atomic_read_nob(aptr);
+ }
+ exp = act;
+ act = erts_atomic_cmpxchg_acqb(aptr,
+ exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
+ exp);
+ if (act == exp)
+ return exp;
}
- if ((MSEGED)) {
- ASSERT(IS_MSEG_CARRIER((C)));
- ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ));
+}
+
+static void
+cpool_insert(Allctr_t *allctr, Carrier_t *crr)
+{
+ ErtsAlcCPoolData_t *cpd1p, *cpd2p;
+ erts_aint_t val;
+ ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ || erts_thr_progress_is_managed_thread());
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ == (erts_aint_t) allctr);
+
+ erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
+ (erts_aint_t) crr->cpool.blocks_size);
+ erts_atomic_add_nob(&allctr->cpool.stat.no_blocks,
+ (erts_aint_t) crr->cpool.blocks);
+ erts_atomic_add_nob(&allctr->cpool.stat.carriers_size,
+ (erts_aint_t) CARRIER_SZ(crr));
+ erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
+
+ /*
+ * We search in 'next' direction and begin by passing
+     * one element before trying to insert. This is in order to
+ * avoid contention with threads fetching elements.
+ */
+
+ val = cpool_read(&sentinel->next);
+
+    /* Find the predecessor-to-be, and set the mod marker on its next ptr */
+
+ while (1) {
+ cpd1p = cpool_aint2cpd(val);
+ if (cpd1p == sentinel) {
+ val = cpool_mod_mark(&cpd1p->next);
+ break;
+ }
+ val = cpool_read(&cpd1p->next);
+ if (!(val & ERTS_ALC_CPOOL_PTR_MRKS)) {
+ erts_aint_t tmp = cpool_try_mod_mark_exp(&cpd1p->next, val);
+ if (tmp == val) {
+ val = tmp;
+ break;
+ }
+ val = tmp;
+ }
}
- else {
- ASSERT(IS_SYS_ALLOC_CARRIER((C)));
- ASSERT((CSZ) % sizeof(Unit_t) == 0);
+
+    /* Set the mod marker on the prev ptr of the successor-to-be */
+
+ cpd2p = cpool_aint2cpd(val);
+
+ cpool_init(&crr->cpool.next, (erts_aint_t) cpd2p);
+ cpool_init(&crr->cpool.prev, (erts_aint_t) cpd1p);
+
+ val = (erts_aint_t) cpd1p;
+
+ while (1) {
+ int b;
+ erts_aint_t tmp;
+
+ tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
+ if (tmp == val)
+ break;
+ b = 1;
+ do {
+ b = backoff(b);
+ tmp = cpool_read(&cpd2p->prev);
+ } while (tmp != val);
}
+
+ /* Write pointers to this element in successor and predecessor */
+
+ cpool_set_mod_marked(&cpd1p->next,
+ (erts_aint_t) &crr->cpool,
+ (erts_aint_t) cpd2p);
+ cpool_set_mod_marked(&cpd2p->prev,
+ (erts_aint_t) &crr->cpool,
+ (erts_aint_t) cpd1p);
+
+ erts_smp_atomic_set_wb(&crr->allctr,
+ ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
}
-#else
-#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
+static void
+cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
+{
+ ErtsAlcCPoolData_t *cpd1p, *cpd2p;
+ erts_aint_t val;
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
#endif
-static Block_t *
-create_sbmbc(Allctr_t *allctr, Uint umem_sz)
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ || erts_thr_progress_is_managed_thread());
+ ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool);
+
+ /* Set mod marker on next ptr of our predecessor */
+
+ val = (erts_aint_t) &crr->cpool;
+ while (1) {
+ erts_aint_t tmp;
+ cpd1p = cpool_aint2cpd(cpool_read(&crr->cpool.prev));
+ tmp = cpool_mod_mark_exp(&cpd1p->next, val);
+ if (tmp == val)
+ break;
+ }
+
+ /* Set mod marker on our next ptr */
+
+ val = cpool_mod_mark(&crr->cpool.next);
+
+ /* Set mod marker on the prev ptr of our successor */
+
+ cpd2p = cpool_aint2cpd(val);
+
+ val = (erts_aint_t) &crr->cpool;
+
+ while (1) {
+ int b;
+ erts_aint_t tmp;
+
+ tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
+ if (tmp == val)
+ break;
+ b = 1;
+ do {
+ b = backoff(b);
+ tmp = cpool_read(&cpd2p->prev);
+ } while (tmp != val);
+ }
+
+ /* Set mod marker on our prev ptr */
+
+ val = (erts_aint_t) cpd1p;
+
+ while (1) {
+ int b;
+ erts_aint_t tmp;
+
+ tmp = cpool_mod_mark_exp(&crr->cpool.prev, val);
+ if (tmp == val)
+ break;
+ b = 1;
+ do {
+ b = backoff(b);
+ tmp = cpool_read(&cpd2p->prev);
+ } while (tmp != val);
+ }
+
+ /* Write pointers past this element in predecessor and successor */
+
+ cpool_set_mod_marked(&cpd1p->next,
+ (erts_aint_t) cpd2p,
+ (erts_aint_t) &crr->cpool);
+ cpool_set_mod_marked(&cpd2p->prev,
+ (erts_aint_t) cpd1p,
+ (erts_aint_t) &crr->cpool);
+
+    /* Replace mod markers with delete markers on this element */
+ cpool_set_mod_marked(&crr->cpool.next,
+ ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
+ ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
+ cpool_set_mod_marked(&crr->cpool.prev,
+ ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
+ ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
+
+ crr->cpool.thr_prgr = erts_thr_progress_later(NULL);
+
+ erts_atomic_add_nob(&prev_allctr->cpool.stat.blocks_size,
+ -((erts_aint_t) crr->cpool.blocks_size));
+ erts_atomic_add_nob(&prev_allctr->cpool.stat.no_blocks,
+ -((erts_aint_t) crr->cpool.blocks));
+ erts_atomic_add_nob(&prev_allctr->cpool.stat.carriers_size,
+ -((erts_aint_t) CARRIER_SZ(crr)));
+ erts_atomic_dec_wb(&prev_allctr->cpool.stat.no_carriers);
+
+}
+
+static Carrier_t *
+cpool_fetch(Allctr_t *allctr, UWord size)
{
- Block_t *blk;
- Uint blk_sz;
- Uint crr_sz = allctr->sbmbc_size;
+ int i, i_stop, has_passed_sentinel;
Carrier_t *crr;
+ ErtsAlcCPoolData_t *cpdp;
+ ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *sentinel;
+ ErtsDoubleLink_t* dl;
+ ErtsDoubleLink_t* first_old_traitor;
+
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ || erts_thr_progress_is_managed_thread());
+
+ i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
+ first_old_traitor = allctr->cpool.traitor_list.next;
+ cpool_entrance = NULL;
+
+ /*
+ * Search my own pooled_list,
+     * i.e. my abandoned carriers that were in the pool last time I checked.
+ */
+
+ dl = allctr->cpool.pooled_list.next;
+ while(dl != &allctr->cpool.pooled_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+
+ ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+ }
+ else { /* Not in pool, move to traitor_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
+ }
+ if (--i <= 0) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.pooled_list);
+ return NULL;
+ }
+ }
-#if HALFWORD_HEAP
- if (allctr->mseg_opt.low_mem)
- crr = erts_alloc(ERTS_ALC_T_SBMBC_LOW, crr_sz);
- else
+ /* Now search traitor_list.
+     * i.e. carriers employed by other allocators last time I checked.
+ * They might have been abandoned since then.
+ */
+
+ i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
+ 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
+ dl = first_old_traitor;
+ while(dl != &allctr->cpool.traitor_list) {
+ erts_aint_t exp, act;
+ crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
+ ASSERT(dl != &allctr->cpool.pooled_list);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+ dl = dl->next;
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ unlink_abandoned_carrier(crr);
+
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ return crr;
+ }
+ exp = act;
+ }
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ if (!cpool_entrance)
+ cpool_entrance = &crr->cpool;
+
+ /* Move to pooled_list */
+ unlink_abandoned_carrier(crr);
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
+ }
+ if (--i <= i_stop) {
+ /* Move sentinel to continue next search from here */
+ relink_edl_before(dl, &allctr->cpool.traitor_list);
+ if (i > 0)
+ break;
+ else
+ return NULL;
+ }
+ }
+
+ /*
+     * Finally, search the shared pool and try to employ foreign carriers
+ */
+
+ sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ if (cpool_entrance) {
+	/* We saw a pooled carrier above; use it as the entrance into the pool
+ */
+ cpdp = cpool_entrance;
+ }
+ else {
+	/* No pooled carrier seen above. Start the search at the cpool sentinel,
+	 * but begin by passing one element before trying to fetch.
+	 * This is in order to avoid contention with threads inserting elements.
+ */
+ cpool_entrance = sentinel;
+ cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
+
+ has_passed_sentinel = 0;
+ while (1) {
+ erts_aint_t exp;
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == cpool_entrance) {
+ if (cpool_entrance == sentinel) {
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ }
+ i = 0; /* Last one to inspect */
+ }
+ else if (cpdp == sentinel) {
+ if (has_passed_sentinel) {
+	    /* We have been here before; cpool_entrance must have been removed */
+ return NULL;
+ }
+ cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
+ if (cpdp == sentinel)
+ return NULL;
+ has_passed_sentinel = 1;
+ }
+ crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
+ exp = erts_smp_atomic_read_rb(&crr->allctr);
+ if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
+ && (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
+ erts_aint_t act;
+ /* Try to fetch it... */
+ act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
+ (erts_aint_t) allctr,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ unlink_abandoned_carrier(crr);
+ }
+ return crr;
+ }
+ }
+ if (--i <= 0)
+ return NULL;
+ }
+
+    /* Lastly, check our own pending dealloc carrier list... */
+ crr = allctr->cpool.dc_list.last;
+ while (crr) {
+ if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
+ unlink_carrier(&allctr->cpool.dc_list, crr);
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
+ ((erts_aint_t) allctr))
+ == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
+#else
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
#endif
- crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz);
+ return crr;
+ }
+ crr = crr->prev;
+ if (--i <= 0)
+ return NULL;
+ }
- INC_CC(allctr->calls.sbmbc_alloc);
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
+ return NULL;
+}
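All three search stages of cpool_fetch end the same way: a single
compare-and-swap on the carrier's tagged owner word, expecting the IN_POOL
flag to be set, simultaneously clears the pool state and transfers ownership,
so two allocators can never claim the same carrier. Reduced to its essentials
(C11; the flag value is illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_IN_POOL ((uintptr_t) 1)

    /* Returns nonzero if we won the carrier; on success the owner word now
     * holds our untagged pointer, which takes the carrier out of the pool. */
    static int try_claim(atomic_uintptr_t *owner_word, uintptr_t expected,
                         uintptr_t self)
    {
        /* 'expected' carries the old owner plus FLG_IN_POOL; 'self' must
         * have no flag bits set. */
        return atomic_compare_exchange_strong(owner_word, &expected, self);
    }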
- blk = MBC_TO_FIRST_BLK(allctr, crr);
+static void
+check_pending_dealloc_carrier(Allctr_t *allctr,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ Carrier_t *crr = allctr->cpool.dc_list.first;
- blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
+ if (crr) {
+ ErtsThrPrgrVal current = erts_thr_progress_current();
+ int i = 0;
- SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
+ do {
+ Carrier_t *dcrr;
- link_carrier(&allctr->sbmbc_list, crr);
+ if (!erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
+ break;
- STAT_SBMBC_ALLOC(allctr, crr_sz);
- CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz);
- if (allctr->creating_mbc)
- (*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
+ dcrr = crr;
+ crr = crr->next;
+ dealloc_carrier(allctr, dcrr, 1);
+ i++;
+ } while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER);
- DEBUG_SAVE_ALIGNMENT(crr);
- return blk;
+ allctr->cpool.dc_list.first = crr;
+ if (!crr)
+ allctr->cpool.dc_list.last = NULL;
+ else {
+ crr->prev = NULL;
+
+ if (need_more_work) {
+ ERTS_ALC_CPOOL_ASSERT(need_thr_progress && thr_prgr_p);
+ if (erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
+ *need_more_work = 1;
+ else {
+ *need_thr_progress = 1;
+ if (*thr_prgr_p == ERTS_THR_PRGR_INVALID
+ || erts_thr_progress_cmp(crr->cpool.thr_prgr,
+ *thr_prgr_p) < 0) {
+ *thr_prgr_p = crr->cpool.thr_prgr;
+ }
+ }
+ }
+ }
+ }
}
static void
-destroy_sbmbc(Allctr_t *allctr, Block_t *blk)
+schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
{
- Uint crr_sz;
- Carrier_t *crr;
+ Allctr_t *orig_allctr;
+ int check_pending_dealloc;
+ erts_aint_t max_size;
- ASSERT(IS_MBC_BLK(blk));
- ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ dealloc_carrier(allctr, crr, 1);
+ return;
+ }
- crr = FIRST_BLK_TO_MBC(allctr, blk);
- crr_sz = CARRIER_SZ(crr);
+ orig_allctr = crr->cpool.orig_allctr;
-#ifdef DEBUG
- if (!allctr->stopped) {
- ASSERT(IS_LAST_BLK(blk));
+ if (allctr != orig_allctr) {
+ Block_t *blk = MBC_TO_FIRST_BLK(allctr, crr);
+ int cinit = orig_allctr->dd.ix - allctr->dd.ix;
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- (*allctr->link_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC);
- HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC);
-#endif
+ /*
+ * We send the carrier to its origin for deallocation.
+ * This is in order:
+ * - not to complicate things for the thread-specific
+ * instances of mseg_alloc, and
+ * - to ensure that we only ever reuse empty carriers
+ * originating from our own thread-specific mseg_alloc
+ * instance, which is beneficial on NUMA systems.
+ *
+ * The receiver will recognize that this is a carrier to
+ * deallocate (and not a block, which is the common case)
+ * since the block is an mbc block that is free and last
+ * in the carrier.
+ */
+ ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk));
+
+ ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk));
+ ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
+ == (erts_smp_atomic_read_nob(&crr->allctr)
+ & ~ERTS_CRR_ALCTR_FLG_MASK));
+
+ if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit))
+ erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
+ return;
}
-#endif
- STAT_SBMBC_FREE(allctr, crr_sz);
+ if (is_abandoned(crr))
+ unlink_abandoned_carrier(crr);
- unlink_carrier(&allctr->sbmbc_list, crr);
- if (allctr->destroying_mbc)
- (*allctr->destroying_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
+ if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
+ || erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
+ dealloc_carrier(allctr, crr, 1);
+ return;
+ }
- INC_CC(allctr->calls.sbmbc_free);
+ max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
+ erts_atomic_set_nob(&crr->cpool.max_size, max_size);
-#if HALFWORD_HEAP
- if (allctr->mseg_opt.low_mem)
- erts_free(ERTS_ALC_T_SBMBC_LOW, crr);
+ crr->next = NULL;
+ crr->prev = allctr->cpool.dc_list.last;
+ if (allctr->cpool.dc_list.last) {
+ check_pending_dealloc = 1;
+ allctr->cpool.dc_list.last->next = crr;
+ }
+ else {
+ check_pending_dealloc = 0;
+ allctr->cpool.dc_list.first = crr;
+ }
+ allctr->cpool.dc_list.last = crr;
+ if (check_pending_dealloc)
+ check_pending_dealloc_carrier(allctr, NULL, NULL, NULL);
+ erts_alloc_ensure_handle_delayed_dealloc_call(allctr->ix);
+}
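+
+/* Why dc_list exists: a carrier that has been visible in the pool may
+ * still be under inspection by other threads, so it cannot be handed
+ * back to mseg/sys_alloc immediately. It is therefore parked on
+ * dc_list together with a thread-progress value (cpool.thr_prgr), and
+ * dealloc_carrier() runs only once erts_thr_progress_has_reached()
+ * reports that all managed threads have passed that point; see
+ * check_pending_dealloc_carrier() above. */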
+
+static ERTS_INLINE void
+cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
+{
+ erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL);
+ crr->cpool.orig_allctr = allctr;
+ crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID;
+ erts_atomic_init_nob(&crr->cpool.max_size, 0);
+ crr->cpool.blocks = 0;
+ crr->cpool.blocks_size = 0;
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ crr->cpool.abandon_limit = 0;
+ else {
+ UWord csz = CARRIER_SZ(crr);
+ UWord limit = csz*allctr->cpool.util_limit;
+ if (limit > csz)
+ limit /= 100;
+ else
+ limit = (csz/100)*allctr->cpool.util_limit;
+ crr->cpool.abandon_limit = limit;
+ }
+ crr->cpool.abandoned.next = NULL;
+ crr->cpool.abandoned.prev = NULL;
+}
+
+static void
+set_new_allctr_abandon_limit(Allctr_t *allctr)
+{
+ UWord limit;
+ UWord csz;
+
+ allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
+
+ csz = allctr->mbcs.curr.norm.mseg.size;
+ csz += allctr->mbcs.curr.norm.sys_alloc.size;
+
+ limit = csz*allctr->cpool.util_limit;
+ if (limit > csz)
+ limit /= 100;
else
+ limit = (csz/100)*allctr->cpool.util_limit;
+
+ allctr->cpool.abandon_limit = limit;
+}
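+
+/* The limit arithmetic above computes csz * util_limit / 100 without
+ * letting the product wrap a UWord: "limit > csz" cheaply detects the
+ * common no-overflow case; otherwise the less precise
+ * (csz/100) * util_limit form is used. For example, csz = 1048576
+ * (1 MB) with util_limit = 60 gives 1048576 * 60 / 100 = 629145. */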
+
+static void
+abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ erts_aint_t max_size;
+
+ STAT_MBC_CPOOL_INSERT(allctr, crr);
+
+ unlink_carrier(&allctr->mbc_list, crr);
+ if (crr->cpool.orig_allctr == allctr) {
+ link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ }
+
+ allctr->remove_mbc(allctr, crr);
+
+ max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
+ erts_atomic_set_nob(&crr->cpool.max_size, max_size);
+
+ cpool_insert(allctr, crr);
+
+ set_new_allctr_abandon_limit(allctr);
+}
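+
+/* Ordering note: abandon_carrier() publishes the carrier's max_size
+ * before cpool_insert() makes it visible to other allocators, so a
+ * concurrent cpool_fetch() that finds the carrier always sees a
+ * usable free-block size to match against. */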
+
+static void
+cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)
+{
+ int i;
+ UWord noc = 0, csz = 0, nob = 0, bsz = 0;
+
+ /*
+ * We try to get consistent values, but after
+ * ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS failed
+ * tries we give up and present what we got...
+ */
+ for (i = 0; i <= ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS; i++) {
+ UWord tnoc, tcsz, tnob, tbsz;
+
+ tnoc = (UWord) (nocp
+ ? erts_atomic_read_nob(&allctr->cpool.stat.no_carriers)
+ : 0);
+ tcsz = (UWord) (cszp
+ ? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size)
+ : 0);
+ tnob = (UWord) (nobp
+ ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks)
+ : 0);
+ tbsz = (UWord) (bszp
+ ? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size)
+ : 0);
+ if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz)
+ break;
+ noc = tnoc;
+ csz = tcsz;
+ nob = tnob;
+ bsz = tbsz;
+ ERTS_THR_READ_MEMORY_BARRIER;
+ }
+
+ if (nocp)
+ *nocp = noc;
+ if (cszp)
+ *cszp = csz;
+ if (nobp)
+ *nobp = nob;
+ if (bszp)
+ *bszp = bsz;
+}
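+
+/* The loop above is an optimistic snapshot: the requested counters are
+ * read and then re-read until two consecutive rounds agree, or the
+ * retry budget (ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS) is spent. No
+ * lock is held, so the result is only approximately consistent, which
+ * is acceptable for statistics reporting. */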
+
+
+#endif /* ERTS_SMP */
+
+#ifdef DEBUG
+
+#if ERTS_SA_MB_CARRIERS
+#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % ERTS_SACRR_UNIT_SZ == 0)
+#else
+#define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ)
#endif
- erts_free(ERTS_ALC_T_SBMBC, crr);
+
+static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
+ UWord CSZ, Block_t* B, UWord BSZ)
+{
+ ASSERT(IS_LAST_BLK((B)));
+ ASSERT((CSZ) == CARRIER_SZ((C)));
+ ASSERT((BSZ) % sizeof(Unit_t) == 0);
+ if ((SBC)) {
+ ASSERT((BSZ) == SBC_BLK_SZ((B)));
+ ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
+ ASSERT(IS_SBC_BLK((B)));
+ ASSERT(IS_SB_CARRIER((C)));
+ }
+ else {
+ ASSERT(IS_FREE_BLK(B));
+ ASSERT((BSZ) == MBC_FBLK_SZ((B)));
+ ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
+ ASSERT(IS_MBC_BLK((B)));
+ ASSERT(IS_MB_CARRIER((C)));
+ ASSERT(FBLK_TO_MBC(B) == (C));
+ if ((MSEGED)) {
+ ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE((CSZ));
+ }
+ }
+ if ((MSEGED)) {
+ ASSERT(IS_MSEG_CARRIER((C)));
+ }
+ else {
+ ASSERT(IS_SYS_ALLOC_CARRIER((C)));
+ ASSERT((CSZ) % sizeof(Unit_t) == 0);
+ }
}
+#else
+#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
+#endif
+
static Block_t *
create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
{
@@ -2056,21 +3487,50 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
int is_mseg = 0;
#endif
-#if HALFWORD_HEAP
- flags |= CFLG_FORCE_MSEG;
-#elif HAVE_SUPER_ALIGNED_MB_CARRIERS
- if (flags & CFLG_MBC) {
+ if (HALFWORD_HEAP
+ || (ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC))
+ || !allow_sys_alloc_carriers) {
flags |= CFLG_FORCE_MSEG;
- }
+ flags &= ~CFLG_FORCE_SYS_ALLOC;
+#if !HAVE_ERTS_MSEG
+ return NULL;
#endif
+ }
ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
|| (flags & CFLG_MBC && !(flags & CFLG_SBC)));
ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
+ if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
+ /* Do an overly conservative _overflow_ check here so we don't
+ * have to deal with it from here on. I guess we could be more accurate
+ * but I don't think the need to allocate over 99% of the address space
+ * will ever arise on any machine, whether 32- or 64-bit.
+ */
+ return NULL;
+ }
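+ /* For a feel of the bound above: on a 32-bit machine,
+ * ERTS_UINT_MAX - ERTS_UINT_MAX/100 = 4294967295 - 42949672
+ * = 4252017623, i.e. requests above ~99% of the address range are
+ * rejected before any size arithmetic can wrap. */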
+
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
+#ifdef ERTS_SMP
+ allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
+
+ if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
+ && ERTS_ALC_IS_CPOOL_ENABLED(allctr)
+ && erts_thr_progress_is_managed_thread()) {
+ crr = cpool_fetch(allctr, blk_sz);
+ if (crr) {
+ STAT_MBC_CPOOL_FETCH(allctr, crr);
+ link_carrier(&allctr->mbc_list, crr);
+ (*allctr->add_mbc)(allctr, crr);
+ blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0);
+ ASSERT(blk);
+ return blk;
+ }
+ }
+#endif
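+
+ /* Note on the fast path above: carriers are only fetched when their
+ * advertised max free block size covers blk_sz, so get_free_block()
+ * is expected to succeed without falling back to a fresh allocation;
+ * the ASSERT(blk) documents that expectation. */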
+
#if HAVE_ERTS_MSEG
if (flags & CFLG_FORCE_SYS_ALLOC)
@@ -2083,7 +3543,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
goto try_sys_alloc;
}
-#if !HAVE_SUPER_ALIGNED_MB_CARRIERS
+#if !ERTS_SUPER_ALIGNED_MSEG_ONLY
else {
if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
goto try_sys_alloc;
@@ -2146,12 +3606,12 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
? UNIT_CEILING(bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
if (!crr) {
if (crr_sz > UNIT_CEILING(bcrr_sz)) {
crr_sz = UNIT_CEILING(bcrr_sz);
- crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz, flags & CFLG_MBC);
}
if (!crr) {
#if HAVE_ERTS_MSEG
@@ -2198,11 +3658,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
allctr->main_carrier = crr;
}
+#ifdef ERTS_SMP
+ cpool_init_carrier_data(allctr, crr);
+#endif
+
link_carrier(&allctr->mbc_list, crr);
CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
if (allctr->creating_mbc)
- (*allctr->creating_mbc)(allctr, crr, 0);
+ (*allctr->creating_mbc)(allctr, crr);
}
@@ -2245,7 +3709,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
- new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz);
+ new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
old_crr,
old_crr_sz,
@@ -2295,7 +3759,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz);
+ new_crr_sz,
+ old_crr_sz,
+ 0);
if (new_crr) {
sys_realloc_success:
SET_CARRIER_SZ(new_crr, new_crr_sz);
@@ -2314,7 +3780,9 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
new_crr_sz = UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
- new_crr_sz);
+ new_crr_sz,
+ old_crr_sz,
+ 0);
if (new_crr)
goto sys_realloc_success;
}
@@ -2333,27 +3801,36 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
(void *) BLK2UMEM(old_blk),
MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
unlink_carrier(&allctr->sbc_list, old_crr);
- alcu_sys_free(allctr, old_crr);
+ alcu_sys_free(allctr, old_crr, 0);
}
else {
/* Old carrier unchanged; restore... */
STAT_SYS_ALLOC_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
}
- DEBUG_SAVE_ALIGNMENT(new_crr);
return new_blk;
}
#endif
}
static void
-destroy_carrier(Allctr_t *allctr, Block_t *blk)
+dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned)
{
- Uint crr_sz;
- Carrier_t *crr;
#if HAVE_ERTS_MSEG
- Uint is_mseg = 0;
- Uint mseg_flags = ERTS_MSEG_FLG_NONE;
+ if (IS_MSEG_CARRIER(crr))
+ alcu_mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
+ (superaligned
+ ? ERTS_MSEG_FLG_2POW
+ : ERTS_MSEG_FLG_NONE));
+ else
#endif
+ alcu_sys_free(allctr, crr, superaligned);
+}
+
+static void
+destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
+{
+ Uint crr_sz;
+ Carrier_t *crr;
if (IS_SBC_BLK(blk)) {
Uint blk_sz = SBC_BLK_SZ(blk);
@@ -2366,8 +3843,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
- is_mseg++;
- ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
}
else
@@ -2376,6 +3851,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
unlink_carrier(&allctr->sbc_list, crr);
+ dealloc_carrier(allctr, crr, 0);
}
else {
ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
@@ -2394,30 +3870,40 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
}
#endif
-#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(crr)) {
- is_mseg++;
- ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
- STAT_MSEG_MBC_FREE(allctr, crr_sz);
- mseg_flags = ERTS_MSEG_FLG_2POW;
+ if (allctr->destroying_mbc)
+ (*allctr->destroying_mbc)(allctr, crr);
+
+#ifdef ERTS_SMP
+ if (busy_pcrr_pp && *busy_pcrr_pp) {
+ ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
+ *busy_pcrr_pp = NULL;
+ ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+ cpool_delete(allctr, allctr, crr);
}
else
#endif
- STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz);
-
- unlink_carrier(&allctr->mbc_list, crr);
- if (allctr->destroying_mbc)
- (*allctr->destroying_mbc)(allctr, crr, 0);
- }
-
-
+ {
+ unlink_carrier(&allctr->mbc_list, crr);
#if HAVE_ERTS_MSEG
- if (is_mseg) {
- alcu_mseg_dealloc(allctr, crr, crr_sz, mseg_flags);
- }
- else
+ if (IS_MSEG_CARRIER(crr)) {
+ ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0);
+ STAT_MSEG_MBC_FREE(allctr, crr_sz);
+ }
+ else
+#endif
+ STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz);
+ }
+
+#ifdef ERTS_SMP
+ schedule_dealloc_carrier(allctr, crr);
+#else
+ dealloc_carrier(allctr, crr, 1);
#endif
- alcu_sys_free(allctr, crr);
+ }
}
@@ -2451,19 +3937,20 @@ static struct {
Eterm lmbcs;
Eterm smbcs;
Eterm mbcgs;
- Eterm sbmbcs;
- Eterm sbmbct;
+ Eterm acul;
#if HAVE_ERTS_MSEG
Eterm mmc;
#endif
Eterm ycs;
-
- /* Eterm sbmbcs; */
+ Eterm sac;
Eterm fix_types;
Eterm mbcs;
+#ifdef ERTS_SMP
+ Eterm mbcs_pool;
+#endif
Eterm sbcs;
Eterm sys_alloc_carriers_size;
@@ -2488,8 +3975,6 @@ static struct {
Eterm mseg_dealloc;
Eterm mseg_realloc;
#endif
- Eterm sbmbc_alloc;
- Eterm sbmbc_free;
#ifdef DEBUG
Eterm end_of_atoms;
#endif
@@ -2508,12 +3993,6 @@ static erts_mtx_t init_atoms_mtx;
static void
init_atoms(Allctr_t *allctr)
{
-
-#ifdef USE_THREADS
- if (allctr && allctr->thread_safe)
- erts_mtx_unlock(&allctr->mutex);
-#endif
-
erts_mtx_lock(&init_atoms_mtx);
if (!atoms_initialized) {
@@ -2551,19 +4030,20 @@ init_atoms(Allctr_t *allctr)
AM_INIT(lmbcs);
AM_INIT(smbcs);
AM_INIT(mbcgs);
- AM_INIT(sbmbcs);
- AM_INIT(sbmbct);
+ AM_INIT(acul);
#if HAVE_ERTS_MSEG
AM_INIT(mmc);
#endif
AM_INIT(ycs);
-
- /*AM_INIT(sbmbcs);*/
+ AM_INIT(sac);
AM_INIT(fix_types);
AM_INIT(mbcs);
+#ifdef ERTS_SMP
+ AM_INIT(mbcs_pool);
+#endif
AM_INIT(sbcs);
AM_INIT(sys_alloc_carriers_size);
@@ -2588,8 +4068,6 @@ init_atoms(Allctr_t *allctr)
AM_INIT(mseg_dealloc);
AM_INIT(mseg_realloc);
#endif
- AM_INIT(sbmbc_free);
- AM_INIT(sbmbc_alloc);
#ifdef DEBUG
for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
@@ -2604,18 +4082,13 @@ init_atoms(Allctr_t *allctr)
fix_type_atoms[ix] = am_atom_put(name, len);
}
}
-
- if (allctr) {
+ if (allctr && !allctr->atoms_initialized) {
make_name_atoms(allctr);
(*allctr->init_atoms)();
-#ifdef USE_THREADS
- if (allctr->thread_safe)
- erts_mtx_lock(&allctr->mutex);
-#endif
allctr->atoms_initialized = 1;
}
@@ -2642,19 +4115,22 @@ ensure_atoms_initialized(Allctr_t *allctr)
* that would fit a small when size check is done may need to be built
* as a big when the actual build is performed. Caller is required to
* HRelease after build.
+ *
+ * Note, bld_unstable_uint() should have been called bld_unstable_uword()
+ * but we do not want to rename it...
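+ *
+ * Concretely: the size pass (szp) always reserves the worst case,
+ * BIG_UWORD_HEAP_SIZE(~((UWord) 0)), while the build pass (hpp)
+ * advances by only what the now-current value needs, so a value that
+ * grew between the two passes still fits.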
*/
static ERTS_INLINE Eterm
-bld_unstable_uint(Uint **hpp, Uint *szp, Uint ui)
+bld_unstable_uint(Uint **hpp, Uint *szp, UWord ui)
{
Eterm res = THE_NON_VALUE;
if (szp)
- *szp += BIG_UINT_HEAP_SIZE;
+ *szp += BIG_UWORD_HEAP_SIZE(~((UWord) 0));
if (hpp) {
if (IS_USMALL(0, ui))
res = make_small(ui);
else {
- res = uint_to_big(ui, *hpp);
- *hpp += BIG_UINT_HEAP_SIZE;
+ res = uword_to_big(ui, *hpp);
+ *hpp += BIG_UWORD_HEAP_SIZE(ui);
}
}
return res;
@@ -2680,8 +4156,24 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
}
+static ERTS_INLINE void
+add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp,
+ Eterm *lp, Eterm fix)
+{
+ if (allctr->fix) {
+ if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ add_2tup(hpp, szp, lp, am.fix_types, fix);
+ else if (internal)
+ add_3tup(hpp, szp, lp,
+ am.fix_types,
+ erts_bld_uword(hpp, szp, ~((UWord) 0)),
+ fix);
+ }
+}
+
static Eterm
sz_info_fix(Allctr_t *allctr,
+ int internal,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
@@ -2689,36 +4181,67 @@ sz_info_fix(Allctr_t *allctr,
{
Eterm res;
int ix;
- ErtsAlcFixList_t *fix = allctr->fix;
- ASSERT(fix);
+ ASSERT(allctr->fix);
res = NIL;
- for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
- ErtsAlcType_t n = ix + ERTS_ALC_N_MIN_A_FIXED_SIZE;
- Uint alloced = (fix[ix].type_size * fix[ix].allocated);
- Uint used = fix[ix].type_size*fix[ix].used;
-
- if (print_to_p) {
- int to = *print_to_p;
- void *arg = print_to_arg;
- erts_print(to,
- arg,
- "fix type: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(n),
- alloced,
- used);
- }
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+
+ if (internal) {
+ for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
+ ErtsAlcFixList_t *fix = &allctr->fix[ix];
+ UWord alloced = fix->type_size * fix->u.cpool.allocated;
+ UWord used = fix->type_size * fix->u.cpool.used;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "fix type internal: %s %bpu %bpu\n",
+ (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
+ + ix),
+ alloced,
+ used);
+ }
- if (hpp || szp) {
- add_3tup(hpp, szp, &res,
- fix_type_atoms[ix],
- bld_unstable_uint(hpp, szp, alloced),
- bld_unstable_uint(hpp, szp, used));
+ if (hpp || szp) {
+ add_3tup(hpp, szp, &res,
+ fix_type_atoms[ix],
+ bld_unstable_uint(hpp, szp, alloced),
+ bld_unstable_uint(hpp, szp, used));
+ }
+ }
}
}
+ else {
+
+ for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
+ ErtsAlcFixList_t *fix = &allctr->fix[ix];
+ UWord alloced = fix->type_size * fix->u.nocpool.allocated;
+ UWord used = fix->type_size*fix->u.nocpool.used;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "fix type: %s %bpu %bpu\n",
+ (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
+ + ix),
+ alloced,
+ used);
+ }
+ if (hpp || szp) {
+ add_3tup(hpp, szp, &res,
+ fix_type_atoms[ix],
+ bld_unstable_uint(hpp, szp, alloced),
+ bld_unstable_uint(hpp, szp, used));
+ }
+ }
+ }
return res;
}
@@ -2732,9 +4255,7 @@ sz_info_carriers(Allctr_t *allctr,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
- Uint curr_size = (cs == &allctr->sbmbcs
- ? cs->curr.small_block.size
- : cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size);
+ UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
int to = *print_to_p;
@@ -2748,7 +4269,7 @@ sz_info_carriers(Allctr_t *allctr,
cs->blocks.max_ever.size);
erts_print(to,
arg,
- "%scarriers size: %beu %bpu %bpu\n",
+ "%scarriers size: %bpu %bpu %bpu\n",
prefix,
curr_size,
cs->max.size,
@@ -2772,6 +4293,62 @@ sz_info_carriers(Allctr_t *allctr,
return res;
}
+#ifdef ERTS_SMP
+
+static Eterm
+info_cpool(Allctr_t *allctr,
+ int sz_only,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ UWord noc, csz, nob, bsz;
+
+ noc = csz = nob = bsz = ~0;
+ if (print_to_p || hpp) {
+ if (sz_only)
+ cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
+ else
+ cpool_read_stat(allctr, &noc, &csz, &nob, &bsz);
+ }
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ if (!sz_only)
+ erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob);
+ erts_print(to, arg, "%sblocks size: %bpu\n", prefix, bsz);
+ if (!sz_only)
+ erts_print(to, arg, "%scarriers: %bpu\n", prefix, noc);
+ erts_print(to, arg, "%scarriers size: %bpu\n", prefix, csz);
+ }
+
+ if (hpp || szp) {
+ res = NIL;
+ add_2tup(hpp, szp, &res,
+ am.carriers_size,
+ bld_unstable_uint(hpp, szp, csz));
+ if (!sz_only)
+ add_2tup(hpp, szp, &res,
+ am.carriers,
+ bld_unstable_uint(hpp, szp, noc));
+ add_2tup(hpp, szp, &res,
+ am.blocks_size,
+ bld_unstable_uint(hpp, szp, bsz));
+ if (!sz_only)
+ add_2tup(hpp, szp, &res,
+ am.blocks,
+ bld_unstable_uint(hpp, szp, nob));
+ }
+
+ return res;
+}
+
+#endif /* ERTS_SMP */
+
static Eterm
info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
@@ -2782,17 +4359,10 @@ info_carriers(Allctr_t *allctr,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
- Uint curr_no, curr_size;
- int small_block = cs == &allctr->sbmbcs;
-
- if (small_block) {
- curr_no = cs->curr.small_block.no;
- curr_size = cs->curr.small_block.size;
- }
- else {
- curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
- curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
- }
+ UWord curr_no, curr_size;
+
+ curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
+ curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
int to = *print_to_p;
@@ -2813,75 +4383,67 @@ info_carriers(Allctr_t *allctr,
cs->blocks.max_ever.size);
erts_print(to,
arg,
- "%scarriers: %beu %bpu %bpu\n",
+ "%scarriers: %bpu %bpu %bpu\n",
prefix,
curr_no,
cs->max.no,
cs->max_ever.no);
- if (!small_block) {
#if HAVE_ERTS_MSEG
- erts_print(to,
- arg,
- "%smseg carriers: %bpu\n",
- prefix,
- cs->curr.norm.mseg.no);
-#endif
- erts_print(to,
- arg,
- "%ssys_alloc carriers: %bpu\n",
- prefix,
- cs->curr.norm.sys_alloc.no);
- }
erts_print(to,
arg,
- "%scarriers size: %beu %bpu %bpu\n",
+ "%smseg carriers: %bpu\n",
+ prefix,
+ cs->curr.norm.mseg.no);
+#endif
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers: %bpu\n",
+ prefix,
+ cs->curr.norm.sys_alloc.no);
+ erts_print(to,
+ arg,
+ "%scarriers size: %bpu %bpu %bpu\n",
prefix,
curr_size,
cs->max.size,
cs->max_ever.size);
- if (!small_block) {
#if HAVE_ERTS_MSEG
- erts_print(to,
- arg,
- "%smseg carriers size: %bpu\n",
- prefix,
- cs->curr.norm.mseg.size);
-#endif
- erts_print(to,
- arg,
- "%ssys_alloc carriers size: %bpu\n",
- prefix,
- cs->curr.norm.sys_alloc.size);
- }
+ erts_print(to,
+ arg,
+ "%smseg carriers size: %bpu\n",
+ prefix,
+ cs->curr.norm.mseg.size);
+#endif
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers size: %bpu\n",
+ prefix,
+ cs->curr.norm.sys_alloc.size);
}
if (hpp || szp) {
res = NIL;
- if (!small_block) {
- add_2tup(hpp, szp, &res,
- am.sys_alloc_carriers_size,
- bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size));
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size));
#if HAVE_ERTS_MSEG
- add_2tup(hpp, szp, &res,
- am.mseg_alloc_carriers_size,
- bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size));
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size));
#endif
- }
add_4tup(hpp, szp, &res,
am.carriers_size,
bld_unstable_uint(hpp, szp, curr_size),
bld_unstable_uint(hpp, szp, cs->max.size),
bld_unstable_uint(hpp, szp, cs->max_ever.size));
- if (!small_block) {
- add_2tup(hpp, szp, &res,
- am.sys_alloc_carriers,
- bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no));
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no));
#if HAVE_ERTS_MSEG
- add_2tup(hpp, szp, &res,
- am.mseg_alloc_carriers,
- bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no));
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no));
#endif
- }
add_4tup(hpp, szp, &res,
am.carriers,
bld_unstable_uint(hpp, szp, curr_no),
@@ -2940,16 +4502,10 @@ info_calls(Allctr_t *allctr,
if (print_to_p) {
#define PRINT_CC_4(TO, TOA, NAME, CC) \
- if ((CC).giga_no == 0) \
- erts_print(TO, TOA, "%s calls: %b32u\n", NAME, CC.no); \
- else \
- erts_print(TO, TOA, "%s calls: %b32u%09lu\n", NAME, CC.giga_no, CC.no)
+ erts_print(TO, TOA, "%s calls: %b64u\n", NAME, CC)
#define PRINT_CC_5(TO, TOA, PRFX, NAME, CC) \
- if ((CC).giga_no == 0) \
- erts_print(TO, TOA, "%s%s calls: %b32u\n",PRFX,NAME,CC.no); \
- else \
- erts_print(TO, TOA, "%s%s calls: %b32u%09lu\n",PRFX,NAME,CC.giga_no,CC.no)
+ erts_print(TO, TOA, "%s%s calls: %b64u\n",PRFX,NAME,CC)
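+
+/* With the old {giga_no, no} pair, a counter of 3 giga + 7 calls
+ * printed via "%b32u%09lu" as "3" followed by "000000007", i.e.
+ * "3000000007"; the counters are now plain 64-bit values printed
+ * directly with %b64u. */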
char *prefix = allctr->name_prefix;
int to = *print_to_p;
@@ -2959,9 +4515,6 @@ info_calls(Allctr_t *allctr,
PRINT_CC_5(to, arg, prefix, "free", allctr->calls.this_free);
PRINT_CC_5(to, arg, prefix, "realloc", allctr->calls.this_realloc);
- PRINT_CC_4(to, arg, "sbmbc_alloc", allctr->calls.sbmbc_alloc);
- PRINT_CC_4(to, arg, "sbmbc_free", allctr->calls.sbmbc_free);
-
#if HAVE_ERTS_MSEG
PRINT_CC_4(to, arg, "mseg_alloc", allctr->calls.mseg_alloc);
PRINT_CC_4(to, arg, "mseg_dealloc", allctr->calls.mseg_dealloc);
@@ -2988,50 +4541,42 @@ info_calls(Allctr_t *allctr,
add_3tup(hpp, szp, &res,
am.sys_realloc,
- bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_realloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_realloc)));
add_3tup(hpp, szp, &res,
am.sys_free,
- bld_unstable_uint(hpp, szp, allctr->calls.sys_free.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.sys_free.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_free)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_free)));
add_3tup(hpp, szp, &res,
am.sys_alloc,
- bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_alloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_alloc)));
#if HAVE_ERTS_MSEG
add_3tup(hpp, szp, &res,
am.mseg_realloc,
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_realloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_realloc)));
add_3tup(hpp, szp, &res,
am.mseg_dealloc,
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_dealloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_dealloc)));
add_3tup(hpp, szp, &res,
am.mseg_alloc,
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_alloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_alloc)));
#endif
add_3tup(hpp, szp, &res,
- am.sbmbc_free,
- bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.no));
- add_3tup(hpp, szp, &res,
- am.sbmbc_alloc,
- bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.no));
- add_3tup(hpp, szp, &res,
allctr->name.realloc,
- bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_realloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_realloc)));
add_3tup(hpp, szp, &res,
allctr->name.free,
- bld_unstable_uint(hpp, szp, allctr->calls.this_free.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.this_free.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_free)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_free)));
add_3tup(hpp, szp, &res,
allctr->name.alloc,
- bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.giga_no),
- bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.no));
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_alloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_alloc)));
}
return res;
@@ -3045,6 +4590,7 @@ info_options(Allctr_t *allctr,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
+ int acul;
if (!allctr) {
if (print_to_p)
@@ -3056,6 +4602,12 @@ info_options(Allctr_t *allctr,
return res;
}
+#ifdef ERTS_SMP
+ acul = allctr->cpool.util_limit;
+#else
+ acul = 0;
+#endif
+
if (print_to_p) {
char topt[21]; /* Enough for any 64-bit integer */
if (allctr->t)
@@ -3085,8 +4637,7 @@ info_options(Allctr_t *allctr,
"option lmbcs: %beu\n"
"option smbcs: %beu\n"
"option mbcgs: %beu\n"
- "option sbmbcs: %beu\n"
- "option sbmbct: %beu\n",
+ "option acul: %d\n",
topt,
allctr->ramv ? "true" : "false",
#if HALFWORD_HEAP
@@ -3107,8 +4658,7 @@ info_options(Allctr_t *allctr,
allctr->largest_mbc_size,
allctr->smallest_mbc_size,
allctr->mbc_growth_stages,
- allctr->sbmbc_size,
- allctr->sbmbc_threshold);
+ acul);
}
res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg,
@@ -3116,11 +4666,8 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
add_2tup(hpp, szp, &res,
- am.sbmbct,
- bld_uint(hpp, szp, allctr->sbmbc_threshold));
- add_2tup(hpp, szp, &res,
- am.sbmbcs,
- bld_uint(hpp, szp, allctr->sbmbc_size));
+ am.acul,
+ bld_uint(hpp, szp, (UWord) acul));
add_2tup(hpp, szp, &res,
am.mbcgs,
bld_uint(hpp, szp, allctr->mbc_growth_stages));
@@ -3210,17 +4757,22 @@ erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
#if HAVE_ERTS_MSEG
"option mmc: %beu\n"
#endif
- "option ycs: %beu\n",
+ "option ycs: %beu\n"
+ "option sac: %s\n",
#if HAVE_ERTS_MSEG
max_mseg_carriers,
#endif
- sys_alloc_carrier_size);
+ sys_alloc_carrier_size,
+ allow_sys_alloc_carriers ? "true" : "false");
}
if (hpp || szp) {
res = NIL;
ensure_atoms_initialized(NULL);
add_2tup(hpp, szp, &res,
+ am.sac,
+ allow_sys_alloc_carriers ? am_true : am_false);
+ add_2tup(hpp, szp, &res,
am.ycs,
bld_uint(hpp, szp, sys_alloc_carrier_size));
#if HAVE_ERTS_MSEG
@@ -3243,17 +4795,21 @@ erts_alcu_info_options(Allctr_t *allctr,
{
Eterm res;
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
+ erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
+ }
#endif
- if (hpp || szp)
- ensure_atoms_initialized(allctr);
res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
+ erts_allctr_wrapper_pre_unlock();
+ }
#endif
return res;
}
@@ -3262,13 +4818,17 @@ erts_alcu_info_options(Allctr_t *allctr,
Eterm
erts_alcu_sz_info(Allctr_t *allctr,
+ int internal,
int begin_max_period,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
- Eterm res, sbmbcs, mbcs, sbcs, fix = THE_NON_VALUE;
+ Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
+#ifdef ERTS_SMP
+ Eterm mbcs_pool;
+#endif
res = THE_NON_VALUE;
@@ -3280,67 +4840,81 @@ erts_alcu_sz_info(Allctr_t *allctr,
return am_false;
}
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
+
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
+ erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
+ }
#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- if (hpp || szp)
- ensure_atoms_initialized(allctr);
-
/* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
- update_max_ever_values(&allctr->sbmbcs);
update_max_ever_values(&allctr->mbcs);
update_max_ever_values(&allctr->sbcs);
if (allctr->fix)
- fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
- sbmbcs = sz_info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
- print_to_arg, hpp, szp);
+ fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
+#ifdef ERTS_SMP
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
+ print_to_arg, hpp, szp);
+ else
+ mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
+#endif
sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
+#ifdef ERTS_SMP
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
+#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
- add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
- if (allctr->fix)
- add_2tup(hpp, szp, &res, am.fix_types, fix);
+ add_fix_types(allctr, internal, hpp, szp, &res, fix);
}
if (begin_max_period) {
- reset_max_values(&allctr->sbmbcs);
reset_max_values(&allctr->mbcs);
reset_max_values(&allctr->sbcs);
}
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
+ erts_allctr_wrapper_pre_unlock();
+ }
#endif
return res;
}
+
Eterm
erts_alcu_info(Allctr_t *allctr,
+ int internal,
int begin_max_period,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
- Eterm res, sett, sbmbcs, mbcs, sbcs, calls, fix = THE_NON_VALUE;
+ Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
+#ifdef ERTS_SMP
+ Eterm mbcs_pool;
+#endif
res = THE_NON_VALUE;
@@ -3352,22 +4926,23 @@ erts_alcu_info(Allctr_t *allctr,
return am_false;
}
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
+
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
+ erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
+ }
#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- if (hpp || szp)
- ensure_atoms_initialized(allctr);
-
/* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
- update_max_ever_values(&allctr->sbmbcs);
update_max_ever_values(&allctr->mbcs);
update_max_ever_values(&allctr->sbcs);
@@ -3381,11 +4956,16 @@ erts_alcu_info(Allctr_t *allctr,
sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
if (allctr->fix)
- fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
- sbmbcs = info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
- print_to_arg, hpp, szp);
+ fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
+#ifdef ERTS_SMP
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
+ print_to_arg, hpp, szp);
+ else
+ mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
+#endif
sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
@@ -3395,10 +4975,12 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.calls, calls);
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
+#ifdef ERTS_SMP
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
+#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
- add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
- if (allctr->fix)
- add_2tup(hpp, szp, &res, am.fix_types, fix);
+ add_fix_types(allctr, internal, hpp, szp, &res, fix);
add_2tup(hpp, szp, &res, am.options, sett);
add_3tup(hpp, szp, &res,
am.versions,
@@ -3407,15 +4989,16 @@ erts_alcu_info(Allctr_t *allctr,
}
if (begin_max_period) {
- reset_max_values(&allctr->sbmbcs);
reset_max_values(&allctr->mbcs);
reset_max_values(&allctr->sbcs);
}
#ifdef USE_THREADS
- if (allctr->thread_safe)
+ if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
+ erts_allctr_wrapper_pre_unlock();
+ }
#endif
return res;
@@ -3433,22 +5016,37 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
size->carriers = allctr->mbcs.curr.norm.mseg.size;
size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
- size->carriers += allctr->sbmbcs.curr.small_block.size;
size->carriers += allctr->sbcs.curr.norm.mseg.size;
size->carriers += allctr->sbcs.curr.norm.sys_alloc.size;
size->blocks = allctr->mbcs.blocks.curr.size;
- size->blocks += allctr->sbmbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
+#ifdef ERTS_SMP
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ UWord csz, bsz;
+ cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
+ size->blocks += bsz;
+ size->carriers += csz;
+ }
+#endif
+
if (fi) {
int ix;
for (ix = 0; ix < fisz; ix++) {
if (allctr->fix) {
- fi[ix].allocated += (allctr->fix[ix].type_size
- * allctr->fix[ix].allocated);
- fi[ix].used += (allctr->fix[ix].type_size
- * allctr->fix[ix].used);
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ fi[ix].allocated += (allctr->fix[ix].type_size
+ * allctr->fix[ix].u.cpool.allocated);
+ fi[ix].used += (allctr->fix[ix].type_size
+ * allctr->fix[ix].u.cpool.used);
+ }
+ else {
+ fi[ix].allocated += (allctr->fix[ix].type_size
+ * allctr->fix[ix].u.nocpool.allocated);
+ fi[ix].used += (allctr->fix[ix].type_size
+ * allctr->fix[ix].u.nocpool.used);
+ }
}
}
}
@@ -3466,7 +5064,6 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
void *res;
- ErtsAlcFixList_t *fix;
ASSERT(initialized);
@@ -3484,56 +5081,21 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
INC_CC(allctr->calls.this_alloc);
- fix = allctr->fix;
- if (fix) {
- int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
- fix[ix].used++;
- res = fix[ix].list;
- if (res) {
- fix[ix].list_size--;
- fix[ix].list = *((void **) res);
- if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
- void *p = fix[ix].list;
- Block_t *blk;
- fix[ix].list = *((void **) p);
- fix[ix].list_size--;
- blk = UMEM2BLK(p);
- if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
- else
- mbc_free(allctr, p);
- fix[ix].allocated--;
- }
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
- return res;
- }
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
- if (fix[ix].limit < fix[ix].used)
- fix[ix].limit = fix[ix].used;
- if (fix[ix].max_used < fix[ix].used)
- fix[ix].max_used = fix[ix].used;
- fix[ix].allocated++;
+ if (allctr->fix) {
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ return fix_cpool_alloc(allctr, type, size);
+ else
+ return fix_nocpool_alloc(allctr, type, size);
}
if (size >= allctr->sbc_threshold) {
Block_t *blk;
-#ifdef ERTS_SMP
- if (allctr->dd.use)
- ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
-#endif
blk = create_carrier(allctr, size, CFLG_SBC);
res = blk ? BLK2UMEM(blk) : NULL;
}
else
res = mbc_alloc(allctr, size);
- if (!res && fix) {
- int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
- fix[ix].allocated--;
- fix[ix].used--;
- }
return res;
}
@@ -3602,9 +5164,22 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
+#ifdef ERTS_SMP
+ ASSERT(pref_allctr->dd.use);
+ ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
+#endif
+
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
res = do_erts_alcu_alloc(type, pref_allctr, size);
+
+#ifdef ERTS_SMP
+ if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
+ /* Cleaned up a bit more; try one more time... */
+ res = do_erts_alcu_alloc(type, pref_allctr, size);
+ }
+#endif
+
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
@@ -3621,9 +5196,9 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void
-do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
+do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
+ Carrier_t **busy_pcrr_pp)
{
- int ix;
Allctr_t *allctr = (Allctr_t *) extra;
ASSERT(initialized);
@@ -3635,57 +5210,28 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (p) {
- ErtsAlcFixList_t *fix = allctr->fix;
- Block_t *blk;
INC_CC(allctr->calls.this_free);
- if (fix) {
- ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
- fix[ix].used--;
- if (fix[ix].allocated < fix[ix].limit
- && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
- *((void **) p) = fix[ix].list;
- fix[ix].list = p;
- fix[ix].list_size++;
- if (!allctr->fix_shrink_scheduled) {
- allctr->fix_shrink_scheduled = 1;
- erts_set_aux_work_timeout(
- allctr->ix,
- (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
- | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
- 1);
- }
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
- return;
- }
- fix[ix].allocated--;
- if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
- blk = UMEM2BLK(p);
- if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
- else
- mbc_free(allctr, p);
- p = fix[ix].list;
- fix[ix].list = *((void **) p);
- fix[ix].list_size--;
- fix[ix].allocated--;
- }
+ if (allctr->fix) {
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
+ fix_cpool_free(allctr, type, p, busy_pcrr_pp);
+ else
+ fix_nocpool_free(allctr, type, p);
+ }
+ else {
+ Block_t *blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk, NULL);
+ else
+ mbc_free(allctr, p, busy_pcrr_pp);
}
-
- blk = UMEM2BLK(p);
- if (IS_SBC_BLK(blk))
- destroy_carrier(allctr, blk);
- else
- mbc_free(allctr, p);
- ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
}
}
void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
- do_erts_alcu_free(type, extra, p);
+ do_erts_alcu_free(type, extra, p, NULL);
}
#ifdef USE_THREADS
@@ -3695,7 +5241,7 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
{
Allctr_t *allctr = (Allctr_t *) extra;
erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, extra, p);
+ do_erts_alcu_free(type, extra, p, NULL);
erts_mtx_unlock(&allctr->mutex);
}
@@ -3717,7 +5263,7 @@ erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, allctr, p);
+ do_erts_alcu_free(type, allctr, p, NULL);
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -3727,10 +5273,12 @@ void
erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
if (p) {
+ Carrier_t *busy_pcrr_p;
Allctr_t *pref_allctr, *used_allctr;
pref_allctr = get_pref_allctr(extra);
- used_allctr = get_used_allctr(extra, p, NULL);
+ used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED,
+ p, NULL, &busy_pcrr_p);
if (pref_allctr != used_allctr)
enqueue_dealloc_other_instance(type,
used_allctr,
@@ -3738,12 +5286,11 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
(used_allctr->dd.ix
- pref_allctr->dd.ix));
else {
- if (used_allctr->thread_safe)
- erts_mtx_lock(&used_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
- do_erts_alcu_free(type, used_allctr, p);
- if (used_allctr->thread_safe)
- erts_mtx_unlock(&used_allctr->mutex);
+ do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
+ clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
}
}
}
@@ -3759,7 +5306,8 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
void *extra,
void *p,
Uint size,
- Uint32 alcu_flgs)
+ Uint32 alcu_flgs,
+ Carrier_t **busy_pcrr_pp)
{
Allctr_t *allctr = (Allctr_t *) extra;
Block_t *blk;
@@ -3784,7 +5332,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
#if ALLOC_ZERO_EQ_NULL
if (!size) {
ASSERT(p);
- do_erts_alcu_free(type, extra, p);
+ do_erts_alcu_free(type, extra, p, busy_pcrr_pp);
INC_CC(allctr->calls.this_realloc);
DEC_CC(allctr->calls.this_free);
return NULL;
@@ -3795,32 +5343,9 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
blk = UMEM2BLK(p);
- if (allctr->sbmbc_threshold > 0) {
- Uint old_sz, new_sz, lim;
- lim = allctr->sbmbc_threshold;
- old_sz = BLK_SZ(blk);
- new_sz = UMEMSZ2BLKSZ(allctr, size);
- if ((old_sz < lim && lim <= new_sz)
- || (new_sz < lim && lim <= old_sz)) {
- /* *Need* to move it... */
-
- INC_CC(allctr->calls.this_realloc);
- res = do_erts_alcu_alloc(type, extra, size);
- DEC_CC(allctr->calls.this_alloc);
-
- sys_memcpy(res, p, MIN(size, old_sz - ABLK_HDR_SZ));
-
- do_erts_alcu_free(type, extra, p);
- DEC_CC(allctr->calls.this_free);
- return res;
- }
- if (old_sz < lim)
- alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
- }
-
if (size < allctr->sbc_threshold) {
if (IS_MBC_BLK(blk))
- res = mbc_realloc(allctr, p, size, alcu_flgs);
+ res = mbc_realloc(allctr, p, size, alcu_flgs, busy_pcrr_pp);
else {
Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
Uint crr_sz;
@@ -3833,7 +5358,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
#if HAVE_ERTS_MSEG
else
- crr_sz = MSEG_UNIT_CEILING(used_sz);
+ crr_sz = ERTS_SACRR_UNIT_CEILING(used_sz);
#endif
diff_sz_val = crr_sz - used_sz;
if (diff_sz_val < (~((Uint) 0) / 100))
@@ -3859,16 +5384,12 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
sys_memcpy((void*) res,
(void*) p,
MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size));
- destroy_carrier(allctr, blk);
+ destroy_carrier(allctr, blk, NULL);
}
}
}
else {
Block_t *new_blk;
-#ifdef ERTS_SMP
- if (allctr->dd.use)
- ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
-#endif
if(IS_SBC_BLK(blk)) {
do_carrier_resize:
#if HALFWORD_HEAP
@@ -3887,7 +5408,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
sys_memcpy((void *) res,
(void *) p,
MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
- mbc_free(allctr, p);
+ mbc_free(allctr, p, busy_pcrr_pp);
}
else
res = NULL;
@@ -3901,7 +5422,7 @@ void *
erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
void *res;
- res = do_erts_alcu_realloc(type, extra, p, size, 0);
+ res = do_erts_alcu_realloc(type, extra, p, size, 0, NULL);
DEBUG_CHECK_ALIGNMENT(res);
return res;
}
@@ -3922,7 +5443,7 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p);
+ do_erts_alcu_free(type, extra, p, NULL);
}
DEBUG_CHECK_ALIGNMENT(res);
return res;
@@ -3937,7 +5458,7 @@ erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
Allctr_t *allctr = (Allctr_t *) extra;
void *res;
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_realloc(type, extra, ptr, size, 0);
+ res = do_erts_alcu_realloc(type, extra, ptr, size, 0, NULL);
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
return res;
@@ -3961,7 +5482,7 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p);
+ do_erts_alcu_free(type, extra, p, NULL);
}
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -3988,7 +5509,7 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_realloc(type, allctr, ptr, size, 0);
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -4031,7 +5552,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, ptr, cpy_size);
- do_erts_alcu_free(type, allctr, ptr);
+ do_erts_alcu_free(type, allctr, ptr, NULL);
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
}
@@ -4048,40 +5569,64 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
void *res;
Allctr_t *pref_allctr, *used_allctr;
UWord old_user_size;
+ Carrier_t *busy_pcrr_p;
+#ifdef ERTS_SMP
+ int retried;
+#endif
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
pref_allctr = get_pref_allctr(extra);
- used_allctr = get_used_allctr(extra, p, &old_user_size);
+
+ if (pref_allctr->thread_safe)
+ erts_mtx_lock(&pref_allctr->mutex);
+
+#ifdef ERTS_SMP
+ ASSERT(pref_allctr->dd.use);
+ ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
+ retried = 0;
+restart:
+#endif
+
+ used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
+ p, &old_user_size, &busy_pcrr_p);
ASSERT(used_allctr && pref_allctr);
if (!force_move && used_allctr == pref_allctr) {
- if (used_allctr->thread_safe)
- erts_mtx_lock(&used_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
res = do_erts_alcu_realloc(type,
used_allctr,
p,
size,
- 0);
- if (used_allctr->thread_safe)
- erts_mtx_unlock(&used_allctr->mutex);
+ 0,
+ &busy_pcrr_p);
+ clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
+#ifdef ERTS_SMP
+ if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
+ /* Cleaned up a bit more; try one more time... */
+ retried = 1;
+ goto restart;
+ }
+#endif
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
}
else {
- if (pref_allctr->thread_safe)
- erts_mtx_lock(&pref_allctr->mutex);
res = do_erts_alcu_alloc(type, pref_allctr, size);
- if (pref_allctr->thread_safe && used_allctr != pref_allctr) {
- erts_mtx_unlock(&pref_allctr->mutex);
- }
- if (res) {
- DEBUG_CHECK_ALIGNMENT(res);
+ if (!res)
+ goto unlock_ts_return;
+ else {
- sys_memcpy(res, p, MIN(size,old_user_size));
+ DEBUG_CHECK_ALIGNMENT(res);
if (used_allctr != pref_allctr) {
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
+
+ sys_memcpy(res, p, MIN(size, old_user_size));
+
enqueue_dealloc_other_instance(type,
used_allctr,
p,
@@ -4089,8 +5634,14 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
- pref_allctr->dd.ix));
}
else {
- do_erts_alcu_free(type, used_allctr, p);
+
+ sys_memcpy(res, p, MIN(size, old_user_size));
+
+ do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
ASSERT(pref_allctr == used_allctr);
+ clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
+
+ unlock_ts_return:
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
}
@@ -4124,6 +5675,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
{
/* erts_alcu_start assumes that allctr has been zeroed */
+ if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
+ __FILE__, __LINE__);
+ }
+
if (!initialized)
goto error;
@@ -4175,7 +5731,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mbc_move_threshold = init->rmbcmt;
#if HAVE_ERTS_MSEG
allctr->max_mseg_sbcs = init->mmsbc;
-# if HAVE_SUPER_ALIGNED_MB_CARRIERS
+# if ERTS_SUPER_ALIGNED_MSEG_ONLY
allctr->max_mseg_mbcs = ~(Uint)0;
# else
allctr->max_mseg_mbcs = init->mmmbc;
@@ -4198,13 +5754,27 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
#if ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
- sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord);
- if (init->fix)
- sz += sizeof(UWord);
+ sz += (init->fix ?
+ sizeof(ErtsAllctrFixDDBlock_t) : sizeof(ErtsAllctrDDBlock_t));
sz = UNIT_CEILING(sz);
if (sz > allctr->min_block_size)
allctr->min_block_size = sz;
}
+
+ allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
+ allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
+ allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
+ allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
+ allctr->cpool.dc_list.first = NULL;
+ allctr->cpool.dc_list.last = NULL;
+ allctr->cpool.abandon_limit = 0;
+ allctr->cpool.disable_abandon = 0;
+ erts_atomic_init_nob(&allctr->cpool.stat.blocks_size, 0);
+ erts_atomic_init_nob(&allctr->cpool.stat.no_blocks, 0);
+ erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);
+ erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
+ allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
+ allctr->cpool.util_limit = init->ts ? 0 : init->acul;
#endif
allctr->sbc_threshold = init->sbct;
@@ -4224,27 +5794,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
}
#endif
-
-
- allctr->sbmbc_threshold = init->sbmbct;
-
- if (!erts_have_sbmbc_alloc
- || ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no))
- allctr->sbmbc_threshold = 0;
-
- if (!allctr->sbmbc_threshold)
- allctr->sbmbc_size = 0;
- else {
- Uint min_size;
- allctr->sbmbc_size = init->sbmbcs;
- min_size = allctr->sbmbc_threshold;
- min_size += allctr->min_block_size;
- min_size += MBC_HEADER_SIZE(allctr);
- if (allctr->sbmbc_size < min_size)
- allctr->sbmbc_size = min_size;
- }
-
-
#if HAVE_ERTS_MSEG
if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
@@ -4256,17 +5805,13 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x_opt(&allctr->mutex,
- ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no)
- ? "sbmbc_alloc"
- : "alcu_allocator",
+ "alcu_allocator",
make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
+ ERTS_LCNT_LT_ALLOC,1);
#else
erts_mtx_init_x(&allctr->mutex,
- ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no)
- ? "sbmbc_alloc"
- : "alcu_allocator",
- make_small(allctr->alloc_no));
+ "alcu_allocator",
+ make_small(allctr->alloc_no),1);
#endif /*ERTS_ENABLE_LOCK_COUNT*/
#ifdef DEBUG
@@ -4284,6 +5829,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!allctr->get_next_mbc_size)
allctr->get_next_mbc_size = get_next_mbc_size;
+ if (allctr->mbc_header_size < sizeof(Carrier_t))
+ goto error;
#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
@@ -4292,6 +5839,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->dd.ix = init->ix;
}
#endif
+ allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ + ABLK_HDR_SZ)
+ - ABLK_HDR_SZ);
if (allctr->main_carrier_size) {
Block_t *blk;
@@ -4300,14 +5850,22 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->main_carrier_size,
CFLG_MBC
| CFLG_FORCE_SIZE
-#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
+ | CFLG_NO_CPOOL
+#if !HALFWORD_HEAP && !ERTS_SUPER_ALIGNED_MSEG_ONLY
| CFLG_FORCE_SYS_ALLOC
#endif
| CFLG_MAIN_CARRIER);
- if (!blk)
- goto error;
+ if (!blk) {
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_destroy(&allctr->mutex);
+#endif
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to create main carrier for %salloc\n",
+ init->name_prefix);
+ }
- (*allctr->link_free_block)(allctr, blk, 0);
+ (*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
@@ -4318,13 +5876,24 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->fix = init->fix;
allctr->fix_shrink_scheduled = 0;
for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
- allctr->fix[i].max_used = 0;
- allctr->fix[i].limit = 0;
allctr->fix[i].type_size = init->fix_type_size[i];
allctr->fix[i].list_size = 0;
allctr->fix[i].list = NULL;
- allctr->fix[i].allocated = 0;
- allctr->fix[i].used = 0;
+#ifdef ERTS_SMP
+ ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));
+#endif
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ allctr->fix[i].u.cpool.min_list_size = 0;
+ allctr->fix[i].u.cpool.shrink_list = 0;
+ allctr->fix[i].u.cpool.allocated = 0;
+ allctr->fix[i].u.cpool.used = 0;
+ }
+ else {
+ allctr->fix[i].u.nocpool.max_used = 0;
+ allctr->fix[i].u.nocpool.limit = 0;
+ allctr->fix[i].u.nocpool.allocated = 0;
+ allctr->fix[i].u.nocpool.used = 0;
+ }
}
}
@@ -4349,11 +5918,9 @@ erts_alcu_stop(Allctr_t *allctr)
allctr->stopped = 1;
while (allctr->sbc_list.first)
- destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first));
+ destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first), NULL);
while (allctr->mbc_list.first)
- destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first));
- while (allctr->sbmbc_list.first)
- destroy_sbmbc(allctr, MBC_TO_FIRST_BLK(allctr, allctr->sbmbc_list.first));
+ destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
#ifdef USE_THREADS
if (allctr->thread_safe)
@@ -4367,14 +5934,23 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
+#ifdef ERTS_SMP
+ int i;
+ for (i = 0; i <= ERTS_ALC_A_MAX; i++) {
+ ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel;
+ erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
+ erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
+ }
+#endif
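+ /* Each sentinel links to itself, so an empty pool is simply
+ * sentinel.prev == sentinel; cpool_fetch() relies on exactly that
+ * test to bail out early. */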
ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
- ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ);
+ ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
max_mseg_carriers = init->mmc;
- sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs);
+ sys_alloc_carrier_size = ERTS_SACRR_UNIT_CEILING(init->ycs);
#else /* #if HAVE_ERTS_MSEG */
sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096;
#endif
+ allow_sys_alloc_carriers = init->sac;
#ifdef DEBUG
carrier_alignment = sizeof(Unit_t);
@@ -4394,43 +5970,70 @@ erts_alcu_init(AlcUInit_t *init)
* to erts_alcu_test() *
\* */
-unsigned long
-erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
+UWord
+erts_alcu_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- case 0x000: return (unsigned long) BLK_SZ((Block_t *) a1);
- case 0x001: return (unsigned long) BLK_UMEM_SZ((Block_t *) a1);
- case 0x002: return (unsigned long) IS_PREV_BLK_FREE((Block_t *) a1);
- case 0x003: return (unsigned long) IS_FREE_BLK((Block_t *) a1);
- case 0x004: return (unsigned long) IS_LAST_BLK((Block_t *) a1);
- case 0x005: return (unsigned long) UMEM2BLK((void *) a1);
- case 0x006: return (unsigned long) BLK2UMEM((Block_t *) a1);
- case 0x007: return (unsigned long) IS_SB_CARRIER((Carrier_t *) a1);
- case 0x008: return (unsigned long) IS_SBC_BLK((Block_t *) a1);
- case 0x009: return (unsigned long) IS_MB_CARRIER((Carrier_t *) a1);
- case 0x00a: return (unsigned long) IS_MSEG_CARRIER((Carrier_t *) a1);
- case 0x00b: return (unsigned long) CARRIER_SZ((Carrier_t *) a1);
- case 0x00c: return (unsigned long) SBC2BLK((Allctr_t *) a1,
+ case 0x000: return (UWord) BLK_SZ((Block_t *) a1);
+ case 0x001: return (UWord) BLK_UMEM_SZ((Block_t *) a1);
+ case 0x002: return (UWord) IS_PREV_BLK_FREE((Block_t *) a1);
+ case 0x003: return (UWord) IS_FREE_BLK((Block_t *) a1);
+ case 0x004: return (UWord) IS_LAST_BLK((Block_t *) a1);
+ case 0x005: return (UWord) UMEM2BLK((void *) a1);
+ case 0x006: return (UWord) BLK2UMEM((Block_t *) a1);
+ case 0x007: return (UWord) IS_SB_CARRIER((Carrier_t *) a1);
+ case 0x008: return (UWord) IS_SBC_BLK((Block_t *) a1);
+ case 0x009: return (UWord) IS_MB_CARRIER((Carrier_t *) a1);
+ case 0x00a: return (UWord) IS_MSEG_CARRIER((Carrier_t *) a1);
+ case 0x00b: return (UWord) CARRIER_SZ((Carrier_t *) a1);
+ case 0x00c: return (UWord) SBC2BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00d: return (unsigned long) BLK_TO_SBC((Block_t *) a2);
- case 0x00e: return (unsigned long) MBC_TO_FIRST_BLK((Allctr_t *) a1,
+ case 0x00d: return (UWord) BLK_TO_SBC((Block_t *) a2);
+ case 0x00e: return (UWord) MBC_TO_FIRST_BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00f: return (unsigned long) FIRST_BLK_TO_MBC((Allctr_t *) a1,
+ case 0x00f: return (UWord) FIRST_BLK_TO_MBC((Allctr_t *) a1,
(Block_t *) a2);
- case 0x010: return (unsigned long) ((Allctr_t *) a1)->mbc_list.first;
- case 0x011: return (unsigned long) ((Allctr_t *) a1)->mbc_list.last;
- case 0x012: return (unsigned long) ((Allctr_t *) a1)->sbc_list.first;
- case 0x013: return (unsigned long) ((Allctr_t *) a1)->sbc_list.last;
- case 0x014: return (unsigned long) ((Carrier_t *) a1)->next;
- case 0x015: return (unsigned long) ((Carrier_t *) a1)->prev;
- case 0x016: return (unsigned long) ABLK_HDR_SZ;
- case 0x017: return (unsigned long) ((Allctr_t *) a1)->min_block_size;
- case 0x018: return (unsigned long) NXT_BLK((Block_t *) a1);
- case 0x019: return (unsigned long) PREV_BLK((Block_t *) a1);
- case 0x01a: return (unsigned long) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
- case 0x01b: return (unsigned long) sizeof(Unit_t);
- default: ASSERT(0); return ~((unsigned long) 0);
+ case 0x010: return (UWord) ((Allctr_t *) a1)->mbc_list.first;
+ case 0x011: return (UWord) ((Allctr_t *) a1)->mbc_list.last;
+ case 0x012: return (UWord) ((Allctr_t *) a1)->sbc_list.first;
+ case 0x013: return (UWord) ((Allctr_t *) a1)->sbc_list.last;
+ case 0x014: return (UWord) ((Carrier_t *) a1)->next;
+ case 0x015: return (UWord) ((Carrier_t *) a1)->prev;
+ case 0x016: return (UWord) ABLK_HDR_SZ;
+ case 0x017: return (UWord) ((Allctr_t *) a1)->min_block_size;
+ case 0x018: return (UWord) NXT_BLK((Block_t *) a1);
+ case 0x019: return (UWord) PREV_BLK((Block_t *) a1);
+ case 0x01a: return (UWord) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
+ case 0x01b: return (UWord) sizeof(Unit_t);
+ case 0x01c: return (unsigned long) BLK_TO_MBC((Block_t*) a1);
+ case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
+ case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
+#ifdef ERTS_SMP
+ case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
+ case 0x020:
+ SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
+ cpool_init_carrier_data((Allctr_t *) a1, (Carrier_t *) a2);
+ return (UWord) a2;
+ case 0x021:
+ cpool_insert((Allctr_t *) a1, (Carrier_t *) a2);
+ return (UWord) a2;
+ case 0x022:
+ cpool_delete((Allctr_t *) a1, (Allctr_t *) a1, (Carrier_t *) a2);
+ return (UWord) a2;
+ case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
+ case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
+#else
+ case 0x01f: return (UWord) 0;
+ case 0x020: return (UWord) 0;
+ case 0x021: return (UWord) 0;
+ case 0x022: return (UWord) 0;
+ case 0x023: return (UWord) 0;
+ case 0x024: return (UWord) 0;
+#endif
+
+ default: ASSERT(0); return ~((UWord) 0);
}
+ return 0;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -4438,6 +6041,20 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
\* */
void
+erts_alcu_assert_failed(char* expr, char* file, int line, char *func)
+{
+ fflush(stdout);
+ fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
+ file, line, func, expr);
+ fflush(stderr);
+#if defined(__WIN__) || defined(__WIN32__)
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
+void
erts_alcu_verify_unused(Allctr_t *allctr)
{
UWord no;
@@ -4445,12 +6062,10 @@ erts_alcu_verify_unused(Allctr_t *allctr)
no = allctr->sbcs.curr.norm.mseg.no;
no += allctr->sbcs.curr.norm.sys_alloc.no;
no += allctr->mbcs.blocks.curr.no;
- no += allctr->sbmbcs.blocks.curr.no;
if (no) {
UWord sz = allctr->sbcs.blocks.curr.size;
sz += allctr->mbcs.blocks.curr.size;
- sz += allctr->sbmbcs.blocks.curr.size;
erl_exit(ERTS_ABORT_EXIT,
"%salloc() used when expected to be unused!\n"
"Total amount of blocks allocated: %bpu\n"
@@ -4493,7 +6108,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0);
+ ASSERT(CARRIER_SZ(sbc) % ERTS_SACRR_UNIT_SZ == 0);
}
#endif
crr = sbc;
@@ -4578,13 +6193,13 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
- ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0);
+ ASSERT(CARRIER_SZ(crr) % ERTS_SACRR_UNIT_SZ == 0);
}
#endif
cl = &allctr->mbc_list;
}
-#if 0 /* FIXIT sbmbc */
+#ifdef DEBUG
if (cl->first == crr) {
ASSERT(!crr->prev);
}
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index e0754e7f69..eee920e66c 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,6 +32,7 @@ typedef struct Allctr_t_ Allctr_t;
typedef struct {
UWord ycs;
UWord mmc;
+ int sac;
} AlcUInit_t;
typedef struct {
@@ -55,8 +56,7 @@ typedef struct {
UWord lmbcs;
UWord smbcs;
UWord mbcgs;
- UWord sbmbct;
- UWord sbmbcs;
+ int acul;
void *fix;
size_t *fix_type_size;
@@ -76,7 +76,8 @@ typedef struct {
#define ERTS_DEFAULT_ALCU_INIT { \
1024*1024, /* (bytes) ycs: sys_alloc carrier size */\
- 1024 /* (amount) mmc: max mseg carriers */\
+ ~((UWord) 0), /* (amount) mmc: max mseg carriers */\
+ 1 /* (bool) sac: sys_alloc carriers */\
}
#define ERTS_DEFAULT_ALLCTR_INIT { \
@@ -96,12 +97,11 @@ typedef struct {
50, /* (%) rmbcmt: rel mbc move threshold */\
1024*1024, /* (bytes) mmbcs: main multiblock carrier size */\
256, /* (amount) mmsbc: max mseg sbcs */\
- 10, /* (amount) mmmbc: max mseg mbcs */\
+ ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \
10*1024*1024, /* (bytes) lmbcs: largest mbc size */\
1024*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
- 256, /* (bytes) sbmbct: small block mbc threshold */\
- 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ 0, /* (%) acul: abandon carrier utilization limit */\
/* --- Data not options -------------------------------------------- */\
NULL, /* (ptr) fix */\
NULL /* (ptr) fix_type_size */\
@@ -111,7 +111,8 @@ typedef struct {
#define ERTS_DEFAULT_ALCU_INIT { \
128*1024, /* (bytes) ycs: sys_alloc carrier size */\
- 1024 /* (amount) mmc: max mseg carriers */\
+ 1024, /* (amount) mmc: max mseg carriers */\
+ 1 /* (bool) sac: sys_alloc carriers */\
}
#define ERTS_DEFAULT_ALLCTR_INIT { \
@@ -130,12 +131,11 @@ typedef struct {
80, /* (%) rsbcmt: rel sbc move threshold */\
128*1024, /* (bytes) mmbcs: main multiblock carrier size */\
256, /* (amount) mmsbc: max mseg sbcs */\
- 10, /* (amount) mmmbc: max mseg mbcs */\
+ ~((UWord) 0), /* (amount) mmmbc: max mseg mbcs */ \
1024*1024, /* (bytes) lmbcs: largest mbc size */\
128*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
- 256, /* (bytes) sbmbct: small block mbc threshold */\
- 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ 0, /* (%) acul: abandon carrier utilization limit */\
/* --- Data not options -------------------------------------------- */\
NULL, /* (ptr) fix */\
NULL /* (ptr) fix_type_size */\
@@ -165,8 +165,8 @@ void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
#endif
Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_sz_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_sz_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
ErtsAlcUFixInfo_t *, int);
@@ -181,7 +181,6 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#define ERL_ALLOC_UTIL_IMPL__
#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE (((Uint32) 1) << 0)
-#define ERTS_ALCU_FLG_SBMBC (((Uint32) 1) << 1)
#ifdef USE_THREADS
#define ERL_THREADS_EMU_INTERNAL__
@@ -221,48 +220,87 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#define MBC_FBLK_SZ_MASK UNIT_MASK
#define CARRIER_SZ_MASK UNIT_MASK
-
-#if HAVE_ERTS_MSEG
+#if ERTS_HAVE_MSEG_SUPER_ALIGNED \
+ || (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC)
+# ifdef MSEG_ALIGN_BITS
+# define ERTS_SUPER_ALIGN_BITS MSEG_ALIGN_BITS
+# else
+# define ERTS_SUPER_ALIGN_BITS 18
+# endif
# ifdef ARCH_64
# define MBC_ABLK_OFFSET_BITS 24
-# elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+# else
# define MBC_ABLK_OFFSET_BITS 9
/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
# endif
-#endif
-#ifndef MBC_ABLK_OFFSET_BITS
+# define ERTS_SACRR_UNIT_SHIFT ERTS_SUPER_ALIGN_BITS
+# define ERTS_SACRR_UNIT_SZ (1 << ERTS_SACRR_UNIT_SHIFT)
+# define ERTS_SACRR_UNIT_MASK ((~(UWord)0) << ERTS_SACRR_UNIT_SHIFT)
+# define ERTS_SACRR_UNIT_FLOOR(X) ((X) & ERTS_SACRR_UNIT_MASK)
+# define ERTS_SACRR_UNIT_CEILING(X) ERTS_SACRR_UNIT_FLOOR((X) + ~ERTS_SACRR_UNIT_MASK)
+# define ERTS_SA_MB_CARRIERS 1
+#else
+# define ERTS_SA_MB_CARRIERS 0
# define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */
#endif
+#if ERTS_HAVE_MSEG_SUPER_ALIGNED && !ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+# define ERTS_SUPER_ALIGNED_MSEG_ONLY 1
+#else
+# define ERTS_SUPER_ALIGNED_MSEG_ONLY 0
+#endif
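Reviewer note: the ERTS_SACRR_UNIT_* macros above are ordinary power-of-two rounding. A small sketch of their behavior, assuming the fallback ERTS_SUPER_ALIGN_BITS of 18 (256 KB units):

#include <stdio.h>
#include <stdint.h>

typedef uintptr_t UWord;

#define SHIFT      18                        /* assumed ERTS_SUPER_ALIGN_BITS */
#define MASK       ((~(UWord) 0) << SHIFT)   /* ERTS_SACRR_UNIT_MASK */
#define FLOOR(X)   ((X) & MASK)              /* round down to a unit boundary */
#define CEILING(X) FLOOR((X) + ~MASK)        /* round up to a unit boundary */

int main(void)
{
    UWord unit = (UWord) 1 << SHIFT;                     /* 262144 */
    printf("%lu\n", (unsigned long) FLOOR(unit + 1));    /* 262144 */
    printf("%lu\n", (unsigned long) CEILING(unit + 1));  /* 524288 */
    printf("%lu\n", (unsigned long) CEILING(unit));      /* 262144: already aligned */
    return 0;
}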
#if MBC_ABLK_OFFSET_BITS
# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT)
# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK)
-# define HAVE_ERTS_SBMBC 0
#else
# define MBC_ABLK_SZ_MASK (~FLG_MASK)
-# define HAVE_ERTS_SBMBC 1
#endif
-#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
-#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
-#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
+#define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
+#define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
+#define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
#define CARRIER_SZ(C) \
((C)->chdr & CARRIER_SZ_MASK)
-extern int erts_have_sbmbc_alloc;
-
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
+#ifdef ERTS_SMP
+
+typedef struct ErtsDoubleLink_t_ {
+ struct ErtsDoubleLink_t_ *next;
+ struct ErtsDoubleLink_t_ *prev;
+}ErtsDoubleLink_t;
+
+typedef struct {
+ erts_atomic_t next;
+ erts_atomic_t prev;
+ Allctr_t *orig_allctr;
+ ErtsThrPrgrVal thr_prgr;
+ erts_atomic_t max_size;
+ UWord abandon_limit;
+ UWord blocks;
+ UWord blocks_size;
+ ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
+} ErtsAlcCPoolData_t;
+
+#endif
+
typedef struct Carrier_t_ Carrier_t;
struct Carrier_t_ {
UWord chdr;
Carrier_t *next;
Carrier_t *prev;
- Allctr_t *allctr;
+ erts_smp_atomic_t allctr;
+#ifdef ERTS_SMP
+ ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
+#endif
};
+#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
+ ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
+
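Reviewer note: ERTS_ALC_CARRIER_TO_ALLCTR relies on the allocator pointer being aligned, so its low bits can double as flag storage and be masked off on read. An illustrative round trip of that low-bit tagging (the 0x7 mask and the 0x3 flag value are assumptions for the sketch, not taken from the patch):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define FLG_MASK ((uintptr_t) 0x7)   /* assumed low flag bits */

int main(void)
{
    void *allctr = malloc(64);                       /* malloc alignment keeps low bits clear */
    uintptr_t tagged = (uintptr_t) allctr | 0x3;     /* store pointer plus flags */
    assert((void *) (tagged & ~FLG_MASK) == allctr); /* masked read recovers the pointer */
    free(allctr);
    return 0;
}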
typedef struct {
Carrier_t *first;
Carrier_t *last;
@@ -280,12 +318,46 @@ typedef struct {
#endif
} Block_t;
+#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
+#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
+#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
+
+#define SBC_BLK_HDR_FLG /* Special flag combo for (allocated) SBC blocks */\
+ (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+
+/*
+ * FREE_LAST_MBC_BLK_HDR_FLGS is a special flag combo used for
+ * distinguishing empty mbc's from allocated blocks in
+ * handle_delayed_dealloc().
+ */
+#define FREE_LAST_MBC_BLK_HDR_FLGS (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+
+#define IS_FREE_LAST_MBC_BLK(B) \
+ (((B)->bhdr & FLG_MASK) == FREE_LAST_MBC_BLK_HDR_FLGS)
+
+#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
+#define IS_MBC_BLK(B) (!IS_SBC_BLK((B)))
+#define IS_FREE_BLK(B) (ASSERT(IS_MBC_BLK(B)), \
+ (B)->bhdr & THIS_FREE_BLK_HDR_FLG)
+
+#if MBC_ABLK_OFFSET_BITS
+# define FBLK_TO_MBC(B) (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \
+ (B)->u.carrier)
+# define ABLK_TO_MBC(B) \
+ (ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \
+ (Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \
+ (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT))))
+# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
+#else
+# define FBLK_TO_MBC(B) ((B)->carrier)
+# define ABLK_TO_MBC(B) ((B)->carrier)
+# define BLK_TO_MBC(B) ((B)->carrier)
+#endif
+#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B))
+
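Reviewer note: ABLK_TO_MBC works because an allocated MBC block's header keeps, in its top MBC_ABLK_OFFSET_BITS bits, the number of super-aligned units between the block's unit and its carrier. A hypothetical encode/decode round trip under the constants above (the names and values here are illustrative, not the patch's):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t UWord;

#define SHIFT         18                               /* assumed super-align bits */
#define OFFSET_BITS   24                               /* assumed 64-bit layout */
#define OFFSET_SHIFT  (sizeof(UWord) * 8 - OFFSET_BITS)
#define UNIT_FLOOR(X) ((X) & ((~(UWord) 0) << SHIFT))

int main(void)
{
    UWord crr = (UWord) 1 << 30;                        /* fake aligned carrier base */
    UWord blk = crr + 5 * ((UWord) 1 << SHIFT) + 4096;  /* fake block inside it */
    /* encode: units between the block's super-aligned unit and the carrier */
    UWord bhdr = ((UNIT_FLOOR(blk) - crr) >> SHIFT) << OFFSET_SHIFT;
    /* decode, the ABLK_TO_MBC way */
    UWord back = UNIT_FLOOR(blk) - ((bhdr >> OFFSET_SHIFT) << SHIFT);
    assert(back == crr);
    return 0;
}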
typedef UWord FreeBlkFtr_t; /* Footer of a free block */
-typedef struct {
- UWord giga_no;
- UWord no;
-} CallCounter_t;
+typedef Uint64 CallCounter_t;
typedef struct {
UWord no;
@@ -298,7 +370,6 @@ typedef struct {
StatValues_t mseg;
StatValues_t sys_alloc;
} norm;
- StatValues_t small_block;
} curr;
StatValues_t max;
StatValues_t max_ever;
@@ -358,10 +429,20 @@ typedef struct {
size_t type_size;
SWord list_size;
void *list;
- SWord max_used;
- SWord limit;
- SWord allocated;
- SWord used;
+ union {
+ struct {
+ SWord max_used;
+ SWord limit;
+ SWord allocated;
+ SWord used;
+ } nocpool;
+ struct {
+ int min_list_size;
+ int shrink_list;
+ UWord allocated;
+ UWord used;
+ } cpool;
+ } u;
} ErtsAlcFixList_t;
struct Allctr_t_ {
@@ -409,37 +490,61 @@ struct Allctr_t_ {
Uint largest_mbc_size;
Uint smallest_mbc_size;
Uint mbc_growth_stages;
- Uint sbmbc_threshold;
- Uint sbmbc_size;
#if HAVE_ERTS_MSEG
ErtsMsegOpt_t mseg_opt;
#endif
/* */
+ Uint mbc_header_size;
Uint min_mbc_size;
Uint min_mbc_first_free_size;
Uint min_block_size;
/* Carriers */
- CarrierList_t sbmbc_list;
CarrierList_t mbc_list;
CarrierList_t sbc_list;
+#ifdef ERTS_SMP
+ struct {
+ /* pooled_list, traitor_list and dc_list contain only
+ carriers _created_ by this allocator */
+ ErtsDoubleLink_t pooled_list;
+ ErtsDoubleLink_t traitor_list;
+ CarrierList_t dc_list;
+
+ UWord abandon_limit;
+ int disable_abandon;
+ int check_limit_count;
+ int util_limit;
+ struct {
+ erts_atomic_t blocks_size;
+ erts_atomic_t no_blocks;
+ erts_atomic_t carriers_size;
+ erts_atomic_t no_carriers;
+ } stat;
+ } cpool;
+#endif
/* Main carrier (if there is one) */
Carrier_t * main_carrier;
/* Callback functions (first 4 are mandatory) */
Block_t * (*get_free_block) (Allctr_t *, Uint,
- Block_t *, Uint, Uint32);
- void (*link_free_block) (Allctr_t *, Block_t *, Uint32);
- void (*unlink_free_block) (Allctr_t *, Block_t *, Uint32);
+ Block_t *, Uint);
+ void (*link_free_block) (Allctr_t *, Block_t *);
+ void (*unlink_free_block) (Allctr_t *, Block_t *);
Eterm (*info_options) (Allctr_t *, char *, int *,
void *, Uint **, Uint *);
Uint (*get_next_mbc_size) (Allctr_t *);
- void (*creating_mbc) (Allctr_t *, Carrier_t *, Uint32);
- void (*destroying_mbc) (Allctr_t *, Carrier_t *, Uint32);
+ void (*creating_mbc) (Allctr_t *, Carrier_t *);
+ void (*destroying_mbc) (Allctr_t *, Carrier_t *);
+
+ /* The three callbacks below are needed to support carrier migration */
+ void (*add_mbc) (Allctr_t *, Carrier_t *);
+ void (*remove_mbc) (Allctr_t *, Carrier_t *);
+ UWord (*largest_fblk_in_mbc) (Allctr_t *, Carrier_t *);
+
void (*init_atoms) (void);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -471,8 +576,6 @@ struct Allctr_t_ {
CallCounter_t this_alloc;
CallCounter_t this_free;
CallCounter_t this_realloc;
- CallCounter_t sbmbc_alloc;
- CallCounter_t sbmbc_free;
CallCounter_t mseg_alloc;
CallCounter_t mseg_dealloc;
CallCounter_t mseg_realloc;
@@ -483,8 +586,7 @@ struct Allctr_t_ {
CarriersStats_t sbcs;
CarriersStats_t mbcs;
- CarriersStats_t sbmbcs;
-
+
#ifdef DEBUG
#ifdef USE_THREADS
struct {
@@ -501,7 +603,9 @@ void erts_alcu_stop(Allctr_t *);
void erts_alcu_verify_unused(Allctr_t *);
void erts_alcu_verify_unused_ts(Allctr_t *allctr);
-unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long);
+UWord erts_alcu_test(UWord, UWord, UWord);
+
+void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
#ifdef DEBUG
int is_sbc_blk(Block_t*);
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 6ce209085c..396aa88e0b 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -28,11 +28,17 @@
*
* This module is a callback-module for erl_alloc_util.c
*
- * Algorithm: The tree nodes are free-blocks ordered in address order.
+ * AOFF Algorithm:
+ * The tree nodes are ordered in address order.
* Every node also keeps the size of the largest block in its
- * sub-tree ('max_size'). By that we can start from root and keep
+ * sub-tree ('max_sz'). By that we can start from root and keep
* left (for low addresses) while dismissing entire sub-trees with
* too small blocks.
+ * Bestfit within carrier:
+ * The only difference for "bestfit within carrier" is the tree
+ * sorting order. Blocks within the same carrier are sorted
+ * wrt size instead of address. The 'max_sz' field is maintained
+ * in order to dismiss entire carriers with too small blocks.
*
* Authors: Rickard Green/Sverker Eriksson
*/
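Reviewer note: a minimal sketch of the pruned search that the comment above describes (simplified types, not the patch's AOFF_RBTree_t): descend left while the left subtree's max_sz can still satisfy the request, otherwise take the current block, or give up on the whole left side and go right. This is the same shape as the rbt_search() introduced further down.

struct node {
    struct node *left, *right;
    unsigned max_sz;  /* largest free block size in this subtree */
    unsigned blk_sz;  /* size of this free block */
};

static struct node *first_fit(struct node *x, unsigned size)
{
    while (x) {
        if (x->left && x->left->max_sz >= size)
            x = x->left;          /* a lower address can still fit */
        else if (x->blk_sz >= size)
            return x;             /* lowest fitting address found */
        else
            x = x->right;         /* only higher addresses remain */
    }
    return 0;
}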
@@ -62,6 +68,15 @@
# define LEFT_VISITED_FLG (((Uint) 1) << 2)
# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
#endif
+#ifdef DEBUG
+# define IS_BF_FLG (((Uint) 1) << 4)
+#endif
+
+#define IS_TREE_NODE(N) (((AOFF_RBTree_t *) (N))->flags & TREE_NODE_FLG)
+#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((AOFF_RBTree_t *) (N))))
+
+#define SET_TREE_NODE(N) (((AOFF_RBTree_t *) (N))->flags |= TREE_NODE_FLG)
+#define SET_LIST_ELEM(N) (((AOFF_RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
#define IS_RED(N) (((AOFF_RBTree_t *) (N)) \
&& ((AOFF_RBTree_t *) (N))->flags & RED_FLG)
@@ -70,9 +85,6 @@
#define SET_RED(N) (((AOFF_RBTree_t *) (N))->flags |= RED_FLG)
#define SET_BLACK(N) (((AOFF_RBTree_t *) (N))->flags &= ~RED_FLG)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 1
#define RBT_ASSERT ASSERT
#else
@@ -85,21 +97,55 @@ typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
struct AOFF_RBTree_t_ {
Block_t hdr;
- Uint flags;
AOFF_RBTree_t *parent;
AOFF_RBTree_t *left;
AOFF_RBTree_t *right;
- Uint max_sz; /* of all blocks in this sub-tree */
+ Uint32 flags;
+ Uint32 max_sz; /* of all blocks in this sub-tree */
};
#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
+/* BF block nodes keep a list of all blocks of equal size
+ */
+typedef struct {
+ AOFF_RBTree_t t;
+ AOFF_RBTree_t *next;
+}AOFF_RBTreeList_t;
+
+#define LIST_NEXT(N) (((AOFF_RBTreeList_t*) (N))->next)
+#define LIST_PREV(N) (((AOFF_RBTreeList_t*) (N))->t.parent)
+
+typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;
+
+struct AOFF_Carrier_t_ {
+ Carrier_t crr;
+ AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
+ AOFF_RBTree_t* root; /* Root of my block tree */
+};
+#define RBT_NODE_TO_MBC(PTR) ((AOFF_Carrier_t*)((char*)(PTR) - offsetof(AOFF_Carrier_t, rbt_node)))
+
+/*
+ To support carrier migration we keep two kinds of rb-trees:
+ 1. One tree of carriers for each allocator instance.
+ 2. One tree of free blocks for each carrier.
+ Both trees use the same node structure AOFF_RBTree_t and implementation.
+ Carrier nodes thus contain a phony Block_t header 'rbt_node.hdr'.
+ The size value of such a phony block is the size of the largest free block in
+ that carrier, i.e. the same as 'max_sz' of the root node of its block tree.
+*/
+
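Reviewer note: the phony-header scheme works because each carrier embeds its carrier-tree node, so RBT_NODE_TO_MBC (defined above) can map a node pointer back to its carrier with offsetof. A standalone sketch of that container-of trick, with illustrative type names:

#include <assert.h>
#include <stddef.h>

typedef struct { int dummy; } TreeNode;
typedef struct {
    int hdr;
    TreeNode rbt_node;   /* node in the per-allocator carrier tree */
} Carrier;

#define NODE_TO_CARRIER(P) \
    ((Carrier *)((char *)(P) - offsetof(Carrier, rbt_node)))

int main(void)
{
    Carrier c;
    assert(NODE_TO_CARRIER(&c.rbt_node) == &c);
    return 0;
}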
#ifdef HARD_DEBUG
-static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE)
+# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ) check_tree(CRR, FLV, ROOT, SZ)
+static AOFF_RBTree_t * check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint);
+#else
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE)
+# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ)
#endif
-/* Calculate 'max_size' of tree node x by only looking at the direct children
- * of x and x itself.
+/* Calculate 'max_sz' of tree node x by only looking at 'max_sz' of the
+ * direct children of x and the size of x itself.
*/
static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
{
@@ -113,7 +159,7 @@ static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
return sz;
}
-/* Set new possibly lower 'max_size' of node and propagate change toward root
+/* Set new possibly lower 'max_sz' of node and propagate change toward root
*/
static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
AOFF_RBTree_t* stop_at)
@@ -132,32 +178,53 @@ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
else ASSERT(new_max == old_max);
}
+static ERTS_INLINE SWord cmp_blocks(enum AOFF_Flavor flavor,
+ AOFF_RBTree_t* lhs, AOFF_RBTree_t* rhs)
+{
+ ASSERT(lhs != rhs);
+ ASSERT(flavor == AOFF_AOFF || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr));
+ if (flavor != AOFF_AOFF) {
+ SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs);
+ if (diff || flavor == AOFF_BF) return diff;
+ }
+ return (char*)lhs - (char*)rhs;
+}
+
+static ERTS_INLINE SWord cmp_cand_blk(enum AOFF_Flavor flavor,
+ Block_t* cand_blk, AOFF_RBTree_t* rhs)
+{
+ if (flavor != AOFF_AOFF) {
+ if (BLK_TO_MBC(cand_blk) == FBLK_TO_MBC(&rhs->hdr)) {
+ SWord diff = (SWord)MBC_BLK_SZ(cand_blk) - (SWord)MBC_FBLK_SZ(&rhs->hdr);
+ if (diff || flavor == AOFF_BF) return diff;
+ }
+ }
+ return (char*)cand_blk - (char*)rhs;
+}
+
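Reviewer note: the two comparison helpers above encode the flavor-dependent ordering in one place. A simplified model of the decision structure (integer stand-ins for block addresses and sizes; the AOFF_* names mirror the enum introduced in erl_ao_firstfit_alloc.h below):

typedef enum { AOFF_AOFF, AOFF_AOBF, AOFF_BF } flavor_t;

/* size first for the in-carrier best-fit flavors; address breaks ties,
 * except plain BF reports equal sizes as equal so such blocks can be
 * chained in a list beside the tree node instead of inserted into it */
static long cmp(flavor_t f, long lhs_addr, long lhs_sz,
                            long rhs_addr, long rhs_sz)
{
    if (f != AOFF_AOFF) {
        long diff = lhs_sz - rhs_sz;
        if (diff || f == AOFF_BF)
            return diff;
    }
    return lhs_addr - rhs_addr;
}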
/* Prototypes of callback functions */
-static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint, Uint32 flags);
-static void aoff_link_free_block(Allctr_t *, Block_t*, Uint32 flags);
-static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags);
+static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint);
+static void aoff_link_free_block(Allctr_t *, Block_t*);
+static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del);
+static void aoff_creating_mbc(Allctr_t*, Carrier_t*);
+static void aoff_destroying_mbc(Allctr_t*, Carrier_t*);
+static void aoff_add_mbc(Allctr_t*, Carrier_t*);
+static void aoff_remove_mbc(Allctr_t*, Carrier_t*);
+static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*);
+
+/* Generic tree functions used by both carrier and block trees. */
+static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del);
+static void rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk);
+static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size);
+#ifdef HARD_DEBUG
+static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node);
+#endif
static Eterm info_options(Allctr_t *, char *, int *, void *, Uint **, Uint *);
static void init_atoms(void);
-
-#ifdef DEBUG
-
-/* Destroy all tree fields */
-#define DESTROY_TREE_NODE(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(AOFF_RBTree_t) - sizeof(Block_t)))
-
-#else
-
-#define DESTROY_TREE_NODE(N)
-
-#endif
-
-
static int atoms_initialized = 0;
void
@@ -184,9 +251,12 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
+ alc->flavor = aoffinit->flavor;
+ allctr->mbc_header_size = sizeof(AOFF_Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
- allctr->min_block_size = sizeof(AOFF_RBTree_t);
+ allctr->min_block_size = (aoffinit->flavor == AOFF_BF ?
+ sizeof(AOFF_RBTreeList_t):sizeof(AOFF_RBTree_t));
allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR;
@@ -195,12 +265,15 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
allctr->get_free_block = aoff_get_free_block;
allctr->link_free_block = aoff_link_free_block;
- allctr->unlink_free_block = aoff_unlink_free_block;
+ allctr->unlink_free_block = aoff_unlink_free_block;
allctr->info_options = info_options;
allctr->get_next_mbc_size = NULL;
- allctr->creating_mbc = NULL;
- allctr->destroying_mbc = NULL;
+ allctr->creating_mbc = aoff_creating_mbc;
+ allctr->destroying_mbc = aoff_destroying_mbc;
+ allctr->add_mbc = aoff_add_mbc;
+ allctr->remove_mbc = aoff_remove_mbc;
+ allctr->largest_fblk_in_mbc = aoff_largest_fblk_in_mbc;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -303,10 +376,7 @@ replace(AOFF_RBTree_t **root, AOFF_RBTree_t *x, AOFF_RBTree_t *y)
y->parent = x->parent;
y->right = x->right;
y->left = x->left;
-
- y->max_sz = x->max_sz;
- lower_max_size(y, NULL);
- DESTROY_TREE_NODE(x);
+ y->max_sz = x->max_sz;
}
static void
@@ -403,21 +473,72 @@ tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk)
}
static void
-aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags)
+aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
+{
+ AOFFAllctr_t* alc = (AOFFAllctr_t*)allctr;
+ AOFF_RBTree_t* del = (AOFF_RBTree_t*)blk;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr);
+
+ ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz);
+ HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+
+ if (alc->flavor == AOFF_BF) {
+ ASSERT(del->flags & IS_BF_FLG);
+ if (IS_LIST_ELEM(del)) {
+ /* Remove from list */
+ ASSERT(LIST_PREV(del));
+ ASSERT(LIST_PREV(del)->flags & IS_BF_FLG);
+ LIST_NEXT(LIST_PREV(del)) = LIST_NEXT(del);
+ if (LIST_NEXT(del)) {
+ ASSERT(LIST_NEXT(del)->flags & IS_BF_FLG);
+ LIST_PREV(LIST_NEXT(del)) = LIST_PREV(del);
+ }
+ return;
+ }
+ else if (LIST_NEXT(del)) {
+ /* Replace tree node by next element in list... */
+
+ ASSERT(AOFF_BLK_SZ(LIST_NEXT(del)) == AOFF_BLK_SZ(del));
+ ASSERT(IS_LIST_ELEM(LIST_NEXT(del)));
+
+ replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del));
+
+ HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+ return;
+ }
+ }
+
+ rbt_delete(&crr->root, (AOFF_RBTree_t*)del);
+
+ HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+
+ /* Update the carrier tree with a potentially new (lower) max_sz
+ */
+ if (crr->root) {
+ if (crr->rbt_node.hdr.bhdr == crr->root->max_sz) {
+ return;
+ }
+ ASSERT(crr->rbt_node.hdr.bhdr > crr->root->max_sz);
+ crr->rbt_node.hdr.bhdr = crr->root->max_sz;
+ }
+ else {
+ crr->rbt_node.hdr.bhdr = 0;
+ }
+ lower_max_size(&crr->rbt_node, NULL);
+}
+
+
+static void
+rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del)
{
- AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
- AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &alc->sbmbc_root : &alc->mbc_root);
Uint spliced_is_black;
- AOFF_RBTree_t *x, *y, *z = (AOFF_RBTree_t *) del;
+ AOFF_RBTree_t *x, *y, *z = del;
AOFF_RBTree_t null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
- null_x.parent = NULL;
+ HARD_CHECK_IS_MEMBER(*root, del);
-#ifdef HARD_DEBUG
- check_tree(*root, 0);
-#endif
+ null_x.parent = NULL;
/* Remove node from tree... */
@@ -461,7 +582,9 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags)
}
if (y != z) {
/* We spliced out the successor of z; replace z by the successor */
+ ASSERT(z != &null_x);
replace(root, z, y);
+ lower_max_size(y, NULL);
}
if (spliced_is_black) {
@@ -572,28 +695,48 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags)
RBT_ASSERT(!null_x.right);
}
}
+}
- DESTROY_TREE_NODE(del);
+static void
+aoff_link_free_block(Allctr_t *allctr, Block_t *block)
+{
+ AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr;
+ AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
+ AOFF_RBTree_t *crr_node;
+ AOFF_Carrier_t *blk_crr = (AOFF_Carrier_t*) FBLK_TO_MBC(block);
+ Uint blk_sz = AOFF_BLK_SZ(blk);
-#ifdef HARD_DEBUG
- check_tree(*root, 0);
-#endif
+ ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr));
+ ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? blk_crr->root->max_sz : 0));
+ HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0);
+
+ rbt_insert(alc->flavor, &blk_crr->root, blk);
+
+ /* Update the carrier tree with a potentially new (larger) max_sz
+ */
+ crr_node = &blk_crr->rbt_node;
+ if (blk_sz > crr_node->hdr.bhdr) {
+ ASSERT(blk_sz == blk_crr->root->max_sz);
+ crr_node->hdr.bhdr = blk_sz;
+ while (blk_sz > crr_node->max_sz) {
+ crr_node->max_sz = blk_sz;
+ crr_node = crr_node->parent;
+ if (!crr_node) break;
+ }
+ }
+ HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0);
}
static void
-aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
{
- AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
- AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
- AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &alc->sbmbc_root : &alc->mbc_root);
Uint blk_sz = AOFF_BLK_SZ(blk);
-#ifdef HARD_DEBUG
- check_tree(*root, 0);
+#ifdef DEBUG
+ blk->flags = (flavor == AOFF_BF) ? IS_BF_FLG : 0;
+#else
+ blk->flags = 0;
#endif
-
- blk->flags = 0;
blk->left = NULL;
blk->right = NULL;
blk->max_sz = blk_sz;
@@ -606,10 +749,12 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
else {
AOFF_RBTree_t *x = *root;
while (1) {
+ SWord diff;
if (x->max_sz < blk_sz) {
x->max_sz = blk_sz;
}
- if (blk < x) {
+ diff = cmp_blocks(flavor, blk, x);
+ if (diff < 0) {
if (!x->left) {
blk->parent = x;
x->left = blk;
@@ -617,7 +762,7 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
x = x->left;
}
- else {
+ else if (diff > 0) {
if (!x->right) {
blk->parent = x;
x->right = blk;
@@ -625,7 +770,18 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
x = x->right;
}
-
+ else {
+ ASSERT(flavor == AOFF_BF);
+ ASSERT(blk->flags & IS_BF_FLG);
+ ASSERT(x->flags & IS_BF_FLG);
+ SET_LIST_ELEM(blk);
+ LIST_NEXT(blk) = LIST_NEXT(x);
+ LIST_PREV(blk) = x;
+ if (LIST_NEXT(x))
+ LIST_PREV(LIST_NEXT(x)) = blk;
+ LIST_NEXT(x) = blk;
+ return;
+ }
}
/* Insert block into size tree */
@@ -635,38 +791,63 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
if (IS_RED(blk->parent))
tree_insert_fixup(root, blk);
}
-
-#ifdef HARD_DEBUG
- check_tree(*root, 0);
-#endif
+ if (flavor == AOFF_BF) {
+ SET_TREE_NODE(blk);
+ LIST_NEXT(blk) = NULL;
+ }
}
-static Block_t *
-aoff_get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size, Uint32 flags)
+static AOFF_RBTree_t*
+rbt_search(AOFF_RBTree_t* root, Uint size)
{
- AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
- AOFF_RBTree_t *x = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? alc->sbmbc_root : alc->mbc_root);
- AOFF_RBTree_t *blk = NULL;
-#ifdef HARD_DEBUG
- AOFF_RBTree_t* dbg_blk = check_tree(x, size);
-#endif
+ AOFF_RBTree_t* x = root;
- ASSERT(!cand_blk || cand_size >= size);
-
- while (x) {
+ ASSERT(x);
+ for (;;) {
if (x->left && x->left->max_sz >= size) {
x = x->left;
}
else if (AOFF_BLK_SZ(x) >= size) {
- blk = x;
- break;
+ return x;
}
else {
x = x->right;
+ if (!x) {
+ return NULL;
+ }
}
}
+}
+
+static Block_t *
+aoff_get_free_block(Allctr_t *allctr, Uint size,
+ Block_t *cand_blk, Uint cand_size)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_RBTree_t *crr_node = alc->mbc_root;
+ AOFF_Carrier_t* crr;
+ AOFF_RBTree_t *blk = NULL;
+#ifdef HARD_DEBUG
+ AOFF_RBTree_t* dbg_blk;
+#endif
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ /* Get first-fit carrier
+ */
+ if (!crr_node || !(blk=rbt_search(crr_node, size))) {
+ return NULL;
+ }
+ crr = RBT_NODE_TO_MBC(blk);
+
+ /* Get block within carrier tree
+ */
+#ifdef HARD_DEBUG
+ dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, size);
+#endif
+
+ blk = rbt_search(crr->root, size);
+ ASSERT(blk);
#ifdef HARD_DEBUG
ASSERT(blk == dbg_blk);
@@ -675,15 +856,87 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
if (!blk)
return NULL;
- if (cand_blk && cand_blk < &blk->hdr) {
+ if (cand_blk && cmp_cand_blk(alc->flavor, cand_blk, blk) < 0) {
return NULL; /* cand_blk was better */
}
- aoff_unlink_free_block(allctr, (Block_t *) blk, flags);
+ aoff_unlink_free_block(allctr, (Block_t *) blk);
return (Block_t *) blk;
}
+static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
+ AOFF_RBTree_t **root = &alc->mbc_root;
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+
+ /* Link carrier in address order tree
+ */
+ crr->rbt_node.hdr.bhdr = 0;
+ rbt_insert(AOFF_AOFF, root, &crr->rbt_node);
+
+ /* aoff_link_free_block will add free block later */
+ crr->root = NULL;
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+}
+
+static void aoff_destroying_mbc(Allctr_t *allctr, Carrier_t *carrier)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
+ AOFF_RBTree_t *root = alc->mbc_root;
+
+ if (crr->rbt_node.parent || &crr->rbt_node == root) {
+ aoff_remove_mbc(allctr, carrier);
+ }
+ /*else already removed */
+}
+
+static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
+ AOFF_RBTree_t **root = &alc->mbc_root;
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+
+ /* Link carrier in address order tree
+ */
+ rbt_insert(AOFF_AOFF, root, &crr->rbt_node);
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+}
+
+static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
+ AOFF_RBTree_t **root = &alc->mbc_root;
+
+ ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier));
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+
+ rbt_delete(root, &crr->rbt_node);
+ crr->rbt_node.parent = NULL;
+ crr->rbt_node.left = NULL;
+ crr->rbt_node.right = NULL;
+ crr->rbt_node.max_sz = crr->rbt_node.hdr.bhdr;
+
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
+}
+
+static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier)
+{
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
+
+ ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier));
+ ASSERT(crr->rbt_node.hdr.bhdr == (crr->root ? crr->root->max_sz : 0));
+ return crr->rbt_node.hdr.bhdr;
+}
/*
* info_options()
@@ -692,6 +945,8 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
static struct {
Eterm as;
Eterm aoff;
+ Eterm aoffcaobf;
+ Eterm aoffcbf;
#ifdef DEBUG
Eterm end_of_atoms;
#endif
@@ -720,6 +975,8 @@ init_atoms(void)
#endif
AM_INIT(as);
AM_INIT(aoff);
+ AM_INIT(aoffcaobf);
+ AM_INIT(aoffcbf);
#ifdef DEBUG
for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
@@ -749,14 +1006,17 @@ info_options(Allctr_t *allctr,
Uint **hpp,
Uint *szp)
{
+ AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr;
Eterm res = THE_NON_VALUE;
+ const char* flavor_str[3] = {"aoff", "aoffcaobf", "aoffcbf"};
+ Eterm flavor_atom[3] = {am.aoff, am.aoffcaobf, am.aoffcbf};
if (print_to_p) {
erts_print(*print_to_p,
print_to_arg,
"%sas: %s\n",
prefix,
- "aoff");
+ flavor_str[alc->flavor]);
}
if (hpp || szp) {
@@ -766,7 +1026,7 @@ info_options(Allctr_t *allctr,
__FILE__, __LINE__);;
res = NIL;
- add_2tup(hpp, szp, &res, am.as, am.aoff);
+ add_2tup(hpp, szp, &res, am.as, flavor_atom[alc->flavor]);
}
return res;
@@ -780,19 +1040,28 @@ info_options(Allctr_t *allctr,
* to erts_aoffalc_test() *
\* */
-unsigned long
-erts_aoffalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+UWord
+erts_aoffalc_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- case 0x500: return (unsigned long) 0; /* IS_AOBF */
- case 0x501: return (unsigned long) ((AOFFAllctr_t *) a1)->mbc_root;
- case 0x502: return (unsigned long) ((AOFF_RBTree_t *) a1)->parent;
- case 0x503: return (unsigned long) ((AOFF_RBTree_t *) a1)->left;
- case 0x504: return (unsigned long) ((AOFF_RBTree_t *) a1)->right;
- case 0x506: return (unsigned long) IS_BLACK((AOFF_RBTree_t *) a1);
- case 0x508: return (unsigned long) 1; /* IS_AOFF */
- case 0x509: return (unsigned long) ((AOFF_RBTree_t *) a1)->max_sz;
- default: ASSERT(0); return ~((unsigned long) 0);
+ case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_AOBF;
+ case 0x501: {
+ AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root;
+ Uint size = (Uint) a2;
+ node = node ? rbt_search(node, size) : NULL;
+ return (UWord) (node ? RBT_NODE_TO_MBC(node)->root : NULL);
+ }
+ case 0x502: return (UWord) ((AOFF_RBTree_t *) a1)->parent;
+ case 0x503: return (UWord) ((AOFF_RBTree_t *) a1)->left;
+ case 0x504: return (UWord) ((AOFF_RBTree_t *) a1)->right;
+ case 0x505: return (UWord) LIST_NEXT(a1);
+ case 0x506: return (UWord) IS_BLACK((AOFF_RBTree_t *) a1);
+ case 0x507: return (UWord) IS_TREE_NODE((AOFF_RBTree_t *) a1);
+ case 0x508: return (UWord) 0; /* IS_BF_ALGO */
+ case 0x509: return (UWord) ((AOFF_RBTree_t *) a1)->max_sz;
+ case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_BF;
+ case 0x50b: return (UWord) LIST_PREV(a1);
+ default: ASSERT(0); return ~((UWord) 0);
}
}
@@ -804,6 +1073,16 @@ erts_aoffalc_test(unsigned long op, unsigned long a1, unsigned long a2)
#ifdef HARD_DEBUG
+static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
+{
+ while (node != root) {
+ ASSERT(node->parent);
+ ASSERT(node->parent->left == node || node->parent->right == node);
+ node = node->parent;
+ }
+ return 1;
+}
+
#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
@@ -840,16 +1119,19 @@ static void print_tree(AOFF_RBTree_t*);
*/
static AOFF_RBTree_t *
-check_tree(AOFF_RBTree_t* root, Uint size)
+check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint size)
{
AOFF_RBTree_t *res = NULL;
Sint blacks;
Sint curr_blacks;
AOFF_RBTree_t *x;
+ Carrier_t* crr;
+ Uint depth, max_depth, node_cnt;
#ifdef PRINT_TREE
print_tree(root);
#endif
+ ASSERT(within_crr || flavor == AOFF_AOFF);
if (!root)
return res;
@@ -859,12 +1141,16 @@ check_tree(AOFF_RBTree_t* root, Uint size)
ASSERT(!x->parent);
curr_blacks = 1;
blacks = -1;
+ depth = 1;
+ max_depth = 0;
+ node_cnt = 0;
while (x) {
if (!IS_LEFT_VISITED(x)) {
SET_LEFT_VISITED(x);
if (x->left) {
x = x->left;
+ ++depth;
if (IS_BLACK(x))
curr_blacks++;
continue;
@@ -880,6 +1166,7 @@ check_tree(AOFF_RBTree_t* root, Uint size)
SET_RIGHT_VISITED(x);
if (x->right) {
x = x->right;
+ ++depth;
if (IS_BLACK(x))
curr_blacks++;
continue;
@@ -891,6 +1178,30 @@ check_tree(AOFF_RBTree_t* root, Uint size)
}
}
+ ++node_cnt;
+ if (depth > max_depth)
+ max_depth = depth;
+
+ if (within_crr) {
+ crr = FBLK_TO_MBC(&x->hdr);
+ ASSERT(crr == within_crr);
+ ASSERT((char*)x > (char*)crr);
+ ASSERT(((char*)x + AOFF_BLK_SZ(x)) <= ((char*)crr + CARRIER_SZ(crr)));
+
+ }
+ if (flavor == AOFF_BF) {
+ AOFF_RBTree_t* y = x;
+ AOFF_RBTree_t* nxt = LIST_NEXT(y);
+ ASSERT(IS_TREE_NODE(x));
+ while (nxt) {
+ ASSERT(IS_LIST_ELEM(nxt));
+ ASSERT(AOFF_BLK_SZ(nxt) == AOFF_BLK_SZ(x));
+ ASSERT(FBLK_TO_MBC(&nxt->hdr) == within_crr);
+ ASSERT(LIST_PREV(nxt) == y);
+ y = nxt;
+ nxt = LIST_NEXT(nxt);
+ }
+ }
if (IS_RED(x)) {
ASSERT(IS_BLACK(x->right));
@@ -901,13 +1212,13 @@ check_tree(AOFF_RBTree_t* root, Uint size)
if (x->left) {
ASSERT(x->left->parent == x);
- ASSERT(x->left < x);
+ ASSERT(cmp_blocks(flavor, x->left, x) < 0);
ASSERT(x->left->max_sz <= x->max_sz);
}
if (x->right) {
ASSERT(x->right->parent == x);
- ASSERT(x->right > x);
+ ASSERT(cmp_blocks(flavor, x->right, x) > 0);
ASSERT(x->right->max_sz <= x->max_sz);
}
ASSERT(x->max_sz >= AOFF_BLK_SZ(x));
@@ -916,7 +1227,7 @@ check_tree(AOFF_RBTree_t* root, Uint size)
|| x->max_sz == (x->right ? x->right->max_sz : 0));
if (size && AOFF_BLK_SZ(x) >= size) {
- if (!res || x < res) {
+ if (!res || cmp_blocks(flavor, x, res) < 0) {
res = x;
}
}
@@ -926,10 +1237,11 @@ check_tree(AOFF_RBTree_t* root, Uint size)
if (IS_BLACK(x))
curr_blacks--;
x = x->parent;
-
+ --depth;
}
-
+ ASSERT(depth == 0 || (!root && depth==1));
ASSERT(curr_blacks == 0);
+ ASSERT((1 << (max_depth/2)) <= node_cnt);
UNSET_LEFT_VISITED(root);
UNSET_RIGHT_VISITED(root);
@@ -954,9 +1266,9 @@ print_tree_aux(AOFF_RBTree_t *x, int indent)
for (i = 0; i < indent; i++) {
putc(' ', stderr);
}
- fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n",
+ fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%u\r\n",
IS_BLACK(x) ? "BLACK" : "RED",
- AOFF_BLK_SZ(x), (Uint)x, x->max_sz);
+ AOFF_BLK_SZ(x), (Uint)x, (unsigned)x->max_sz);
print_tree_aux(x->left, indent + INDENT_STEP);
}
}
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 6fa626f723..25b344c6a8 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -27,8 +27,14 @@
typedef struct AOFFAllctr_t_ AOFFAllctr_t;
+enum AOFF_Flavor {
+ AOFF_AOFF = 0,
+ AOFF_AOBF = 1,
+ AOFF_BF = 2
+};
+
typedef struct {
- int dummy;
+ enum AOFF_Flavor flavor;
} AOFFAllctrInit_t;
#define ERTS_DEFAULT_AOFF_ALLCTR_INIT {0/*dummy*/}
@@ -51,10 +57,10 @@ struct AOFFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
struct AOFF_RBTree_t_* mbc_root;
- struct AOFF_RBTree_t_* sbmbc_root;
+ enum AOFF_Flavor flavor;
};
-unsigned long erts_aoffalc_test(unsigned long, unsigned long, unsigned long);
+UWord erts_aoffalc_test(UWord, UWord, UWord);
#endif /* #if defined(GET_ERL_AOFF_ALLOC_IMPL)
&& !defined(ERL_AOFF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 054d1a48f6..decae6b2ca 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -166,6 +166,7 @@ async_ready_q(Uint sched_id)
#endif
+
void
erts_init_async(void)
{
@@ -226,11 +227,23 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
+#endif
+
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(thr_opts.name, "async_%d", i+1);
+#endif
+
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(thr_opts.name);
+#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
@@ -279,7 +292,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
if (DTRACE_ENABLED(aio_pool_add)) {
DTRACE_CHARBUF(port_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", a->port);
/* DTRACE TODO: Get the queue length from erts_thr_q_enqueue() ? */
len = -1;
DTRACE2(aio_pool_add, port_str, len);
@@ -314,7 +328,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
if (DTRACE_ENABLED(aio_pool_get)) {
DTRACE_CHARBUF(port_str, 16);
- erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", a->port);
/* DTRACE TODO: Get the length from erts_thr_q_dequeue() ? */
len = -1;
DTRACE2(aio_pool_get, port_str, len);
@@ -583,12 +598,26 @@ int erts_async_ready_clean(void *varq, void *val)
#endif
/*
+** Generate a fair async key from an ErlDrvPort
+** The port data gives a fair distribution from port pointer
+** to unsigned integer - to be used as the key for driver_async below.
+*/
+unsigned int driver_async_port_key(ErlDrvPort port)
+{
+ ErlDrvTermData td = driver_mk_port(port);
+ if (td == (ErlDrvTermData) NIL) {
+ return 0;
+ }
+ return (unsigned int) (UWord) internal_port_data(td);
+}
+
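Reviewer note: a hypothetical use of the new key function from a driver callback (queue_job, do_work and free_work are illustrative names, not part of the patch): deriving the key from the port makes every job for that port land in the same async queue.

#include "erl_driver.h"

static void do_work(void *data)   { /* runs on an async worker thread */ }
static void free_work(void *data) { /* reclaims data if never invoked */ }

static long queue_job(ErlDrvPort port, void *data)
{
    unsigned int key = driver_async_port_key(port);
    return driver_async(port, &key, do_work, data, free_work);
}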
+/*
** Schedule async_invoke on a worker thread
** NOTE will be synchronous when threads are unsupported
** return values:
** 0 completed
** -1 error
-** N handle value (used with async_cancel)
+** N handle value
** arguments:
** ix driver index
** key pointer to schedule queue (NULL means round robin)
@@ -673,23 +702,3 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
return id;
}
-
-int driver_async_cancel(unsigned int id)
-{
- /*
- * Not supported anymore. Always fail (which is backward
- * compatible).
- *
- * This functionality could be implemented again. However,
- * it is (and always has been) completely useless since
- * it doesn't give you any guarantees whatsoever. The user
- * needs to (and always have had to) synchronize in his/her
- * own code in order to get any guarantees.
- */
- return 0;
-}
-
-
-
-
-
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 5625622e3f..59c14899a2 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -75,9 +75,6 @@
#define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
#if 1
#define RBT_ASSERT ASSERT
#else
@@ -89,21 +86,21 @@
static RBTree_t * check_tree(RBTree_t *, int, Uint);
#endif
-static void tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags);
+static void tree_delete(Allctr_t *allctr, Block_t *del);
/* Prototypes of callback functions */
/* "address order best fit" specific callback functions */
static Block_t * aobf_get_free_block (Allctr_t *, Uint,
- Block_t *, Uint, Uint32);
-static void aobf_link_free_block (Allctr_t *, Block_t *, Uint32);
+ Block_t *, Uint);
+static void aobf_link_free_block (Allctr_t *, Block_t *);
#define aobf_unlink_free_block tree_delete
/* "best fit" specific callback functions */
static Block_t * bf_get_free_block (Allctr_t *, Uint,
- Block_t *, Uint, Uint32);
-static void bf_link_free_block (Allctr_t *, Block_t *, Uint32);
-static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *, Uint32);
+ Block_t *, Uint);
+static void bf_link_free_block (Allctr_t *, Block_t *);
+static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *);
static Eterm info_options (Allctr_t *, char *, int *,
@@ -179,6 +176,7 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
bfallctr->address_order = bfinit->ao;
+ allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = (bfinit->ao
@@ -207,6 +205,9 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
allctr->get_next_mbc_size = NULL;
allctr->creating_mbc = NULL;
allctr->destroying_mbc = NULL;
+ allctr->add_mbc = NULL;
+ allctr->remove_mbc = NULL;
+ allctr->largest_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -407,13 +408,11 @@ tree_insert_fixup(RBTree_t **root, RBTree_t *blk)
* callback function in the address order case.
*/
static void
-tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags)
+tree_delete(Allctr_t *allctr, Block_t *del)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
Uint spliced_is_black;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *x, *y, *z = (RBTree_t *) del;
RBTree_t null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
@@ -586,12 +585,10 @@ tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags)
\* */
static void
-aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+aobf_link_free_block(Allctr_t *allctr, Block_t *block)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *blk = (RBTree_t *) block;
Uint blk_sz = BF_BLK_SZ(blk);
@@ -647,21 +644,18 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
#if 0 /* tree_delete() is directly used instead */
static void
-aobf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+aobf_unlink_free_block(Allctr_t *allctr, Block_t *block)
{
- tree_delete(allctr, block, flags);
+ tree_delete(allctr, block);
}
#endif
static Block_t *
aobf_get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size,
- Uint32 flags)
+ Block_t *cand_blk, Uint cand_size)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *x = *root;
RBTree_t *blk = NULL;
Uint blk_sz;
@@ -694,7 +688,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
return NULL; /* cand_blk was better */
}
- aobf_unlink_free_block(allctr, (Block_t *) blk, flags);
+ aobf_unlink_free_block(allctr, (Block_t *) blk);
return (Block_t *) blk;
}
@@ -705,12 +699,10 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
\* */
static void
-bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+bf_link_free_block(Allctr_t *allctr, Block_t *block)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *blk = (RBTree_t *) block;
Uint blk_sz = BF_BLK_SZ(blk);
@@ -779,12 +771,10 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
static ERTS_INLINE void
-bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *x = (RBTree_t *) block;
if (IS_LIST_ELEM(x)) {
@@ -812,7 +802,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
else {
/* Remove from tree */
- tree_delete(allctr, block, flags);
+ tree_delete(allctr, block);
}
DESTROY_LIST_ELEM(x);
@@ -821,13 +811,10 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
static Block_t *
bf_get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size,
- Uint32 flags)
+ Block_t *cand_blk, Uint cand_size)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
- ? &bfallctr->sbmbc_root
- : &bfallctr->mbc_root);
+ RBTree_t **root = &bfallctr->mbc_root;
RBTree_t *x = *root;
RBTree_t *blk = NULL;
Uint blk_sz;
@@ -867,7 +854,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
the tree node */
blk = LIST_NEXT(blk) ? LIST_NEXT(blk) : blk;
- bf_unlink_free_block(allctr, (Block_t *) blk, flags);
+ bf_unlink_free_block(allctr, (Block_t *) blk);
return (Block_t *) blk;
}
@@ -972,20 +959,22 @@ info_options(Allctr_t *allctr,
* to erts_bfalc_test() *
\* */
-unsigned long
-erts_bfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+UWord
+erts_bfalc_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- case 0x200: return (unsigned long) ((BFAllctr_t *) a1)->address_order;
- case 0x201: return (unsigned long) ((BFAllctr_t *) a1)->mbc_root;
- case 0x202: return (unsigned long) ((RBTree_t *) a1)->parent;
- case 0x203: return (unsigned long) ((RBTree_t *) a1)->left;
- case 0x204: return (unsigned long) ((RBTree_t *) a1)->right;
- case 0x205: return (unsigned long) ((RBTreeList_t *) a1)->next;
- case 0x206: return (unsigned long) IS_BLACK((RBTree_t *) a1);
- case 0x207: return (unsigned long) IS_TREE_NODE((RBTree_t *) a1);
- case 0x208: return (unsigned long) 0; /* IS_AOFF */
- default: ASSERT(0); return ~((unsigned long) 0);
+ case 0x200: return (UWord) ((BFAllctr_t *) a1)->address_order; /* IS_AOBF */
+ case 0x201: return (UWord) ((BFAllctr_t *) a1)->mbc_root;
+ case 0x202: return (UWord) ((RBTree_t *) a1)->parent;
+ case 0x203: return (UWord) ((RBTree_t *) a1)->left;
+ case 0x204: return (UWord) ((RBTree_t *) a1)->right;
+ case 0x205: return (UWord) LIST_NEXT(a1);
+ case 0x206: return (UWord) IS_BLACK((RBTree_t *) a1);
+ case 0x207: return (UWord) IS_TREE_NODE((RBTree_t *) a1);
+ case 0x208: return (UWord) 1; /* IS_BF_ALGO */
+ case 0x20a: return (UWord) !((BFAllctr_t *) a1)->address_order; /* IS_BF */
+ case 0x20b: return (UWord) LIST_PREV(a1);
+ default: ASSERT(0); return ~((UWord) 0);
}
}
diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h
index 0c29662852..870439e886 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.h
+++ b/erts/emulator/beam/erl_bestfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -55,11 +55,10 @@ struct BFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
RBTree_t * mbc_root;
- RBTree_t * sbmbc_root;
int address_order;
};
-unsigned long erts_bfalc_test(unsigned long, unsigned long, unsigned long);
+UWord erts_bfalc_test(UWord, UWord, UWord);
#endif /* #if defined(GET_ERL_BF_ALLOC_IMPL)
&& !defined(ERL_BF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 0db19a1ee6..3bf78adce7 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -927,6 +927,9 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
if (binary_bitsize(b) != 0) {
goto badarg;
}
+ if (binary_size(b) == 0) {
+ goto badarg;
+ }
++words;
characters += binary_size(b);
}
@@ -1321,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
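Reviewer note: the replacement negates through the unsigned type because -len is undefined behavior for the most negative Sint, while -(Uint)len is well defined; casting the result back then catches the single value that still has no positive Sint representation. A standalone demonstration, with intptr_t standing in for Sint (an assumption about the platform's Sint width):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    intptr_t len = INTPTR_MIN;              /* the problematic input */
    uintptr_t tmp = -(uintptr_t)len;        /* well-defined unsigned negation */
    if ((intptr_t)tmp < 0)
        puts("badarg: |len| not representable");  /* taken for INTPTR_MIN */
    else
        printf("len = %ju\n", (uintmax_t)tmp);
    return 0;
}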
@@ -1552,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1641,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2210,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2291,18 +2294,11 @@ BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
BIF_ERROR(BIF_P,BADARG);
}
-/*
- * Ok, erlang:list_to_binary does not interrupt, and we really don't want
- * an alternative implementation for the exact same thing, why we
- * have decided to use the old non-restarting implementation for now.
- * In reality, there are seldom many iterations involved in doing this, so the
- * problem of long-running bifs is not really that big in this case.
- * So, for now we use the old implementation also in the module binary.
- */
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
{
- return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_binary_list_to_bin_1]);
}
typedef struct {
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 1c3e955f47..56cd2ba04f 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -182,7 +182,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
Eterm name_term = BIF_ARG_2;
Eterm options = BIF_ARG_3;
char *path = NULL;
- ErlDrvSizeT path_len;
+ Sint path_len;
char *name = NULL;
DE_Handle *dh;
erts_driver_t *drv;
@@ -198,6 +198,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
int kill_ports = 0;
int do_build_load_error = 0;
int build_this_load_error = 0;
+ int encoding;
for(l = options; is_list(l); l = CDR(list_val(l))) {
Eterm opt = CAR(list_val(l));
@@ -257,18 +258,23 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
goto error;
}
- if (erts_iolist_size(path_term, &path_len)) {
- goto error;
+ encoding = erts_get_native_filename_encoding();
+ if (encoding == ERL_FILENAME_WIN_WCHAR) {
+ /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
+ /* since lib_name is used in error messages */
+ encoding = ERL_FILENAME_UTF8;
}
- path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1);
- if (erts_iolist_to_buf(path_term, path, path_len) != 0) {
+ path = erts_convert_filename_to_encoding(path_term, NULL, 0,
+ ERTS_ALC_T_DDLL_TMP_BUF, 1, 0,
+ encoding, &path_len,
+ sys_strlen(name) + 2); /* might need path separator */
+ if (!path) {
goto error;
}
- while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) {
- --path_len;
- }
+ ASSERT(path_len > 0 && path[path_len-1] == 0);
+ while (--path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/'))
+ ;
path[path_len++] = '/';
- /*path[path_len] = '\0';*/
sys_strcpy(path+path_len,name);
#if DDLL_SMP
@@ -1524,7 +1530,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
assert_drv_list_rwlocked();
- if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) {
+ if ((res = erts_sys_ddll_open(path, &(dh->handle), NULL)) != ERL_DE_NO_ERROR) {
return res;
}
@@ -1542,8 +1548,10 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
switch (dp->extended_marker) {
case ERL_DRV_EXTENDED_MARKER:
- if (ERL_DRV_EXTENDED_MAJOR_VERSION != dp->major_version
- || ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version) {
+ if (dp->major_version < ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ || (ERL_DRV_EXTENDED_MAJOR_VERSION < dp->major_version
+ || (ERL_DRV_EXTENDED_MAJOR_VERSION == dp->major_version
+ && ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version))) {
/* Incompatible driver version */
res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
goto error;
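
The new predicate rejects drivers that are either too old (major version below the required minimum) or built against a newer driver API than the loading emulator provides. A sketch of the acceptance rule with made-up version numbers (a 3.1 emulator requiring at least major 3; the real values come from the ERL_DRV_* macros):

    #define MIN_REQUIRED_MAJOR 3  /* ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD */
    #define EMU_MAJOR          3  /* ERL_DRV_EXTENDED_MAJOR_VERSION */
    #define EMU_MINOR          1  /* ERL_DRV_EXTENDED_MINOR_VERSION */

    static int driver_version_ok(int major, int minor)
    {
        if (major < MIN_REQUIRED_MAJOR)
            return 0;                   /* too old to load at all */
        if (EMU_MAJOR < major)
            return 0;                   /* built for a newer emulator */
        if (EMU_MAJOR == major && EMU_MINOR < minor)
            return 0;                   /* newer minor than we provide */
        return 1;  /* e.g. 3.0 and 3.1 pass; 2.9, 3.2 and 4.0 fail */
    }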
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index a715756c15..bbd8aa31d9 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -33,6 +33,7 @@
#include "bif.h"
#include "big.h"
#include "erl_binary.h"
+#include "erl_map.h"
static Eterm gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live);
@@ -455,6 +456,28 @@ Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live)
}
}
+Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg = reg[live];
+ if (is_map(arg)) {
+ map_t *mp = (map_t*)map_val(arg);
+ Uint size = map_get_size(mp);
+ if (IS_USMALL(0, size)) {
+ return make_small(size);
+ } else {
+ Eterm* hp;
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(size, hp);
+ }
+ } else {
+ BIF_ERROR(p, BADARG);
+ }
+}
+
Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
{
Eterm arg;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
old mode 100755
new mode 100644
index 8582a8954b..b90362d82c
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -27,9 +27,11 @@
#include "erl_process.h"
#include "error.h"
#include "erl_driver.h"
+#include "erl_nif.h"
#include "bif.h"
#include "big.h"
#include "erl_version.h"
+#include "erl_compile_flags.h"
#include "erl_db_util.h"
#include "erl_message.h"
#include "erl_binary.h"
@@ -59,12 +61,15 @@ static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;
static Export *gather_sched_wall_time_res_trap;
+static Export *gather_gc_info_res_trap;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+static char otp_version[] = ERLANG_OTP_VERSION;
/* Keep erts_system_version as a global variable for easy access from a core */
-static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
- " (erts-" ERLANG_VERSION ")"
+static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
+ "%s"
+ " [erts-" ERLANG_VERSION "]"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
" [no-c-stack-objects]"
#endif
@@ -86,6 +91,9 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
" [smp:%beu:%beu]"
#endif
#ifdef USE_THREADS
+#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
+ " [ds:%beu:%beu:%beu]"
+#endif
" [async-threads:%d]"
#endif
#ifdef HIPE
@@ -114,6 +122,9 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef VALGRIND
" [valgrind-compiled]"
#endif
+#ifdef ERTS_FRMPTR
+ " [frame-pointer]"
+#endif
#ifdef USE_DTRACE
" [dtrace]"
#endif
@@ -299,13 +310,39 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail)
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
+ int i, rc = -1;
+ char *rc_str = "";
+ char rc_buf[100];
+ char *ov = otp_version;
#ifdef ERTS_SMP
Uint total, online, active;
- (void) erts_schedulers_state(&total, &online, &active, 0);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Uint dirty_cpu, dirty_cpu_onln, dirty_io;
+
+ (void) erts_schedulers_state(&total, &online, &active, &dirty_cpu, &dirty_cpu_onln, &dirty_io, 0);
+#else
+ (void) erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 0);
#endif
- return erts_print(to, arg, erts_system_version
+#endif
+ for (i = 0; i < sizeof(otp_version)-4; i++) {
+ if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
+ rc = atoi(&ov[i+3]);
+ }
+ if (rc >= 0) {
+ if (rc == 0)
+ rc_str = " [DEVELOPMENT]";
+ else {
+ erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
+ rc_str = rc_buf;
+ }
+ }
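
The scan added above looks for a "-rcN" suffix in the OTP version string so that release candidates mark themselves in the system version banner. A standalone run of the same scan, with a made-up version string:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        static char otp_version[] = "17.0-rc2";   /* hypothetical value */
        int i, rc = -1;

        for (i = 0; i < (int) sizeof(otp_version) - 4; i++) {
            if (otp_version[i] == '-' && otp_version[i+1] == 'r'
                && otp_version[i+2] == 'c')
                rc = atoi(&otp_version[i+3]);
        }
        /* rc == 2 here; rc == 0 would mean "[DEVELOPMENT]" and
         * rc == -1 a plain release with no suffix. */
        printf("rc = %d\n", rc);
        return 0;
    }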
+ return erts_print(to, arg, erts_system_version,
+ rc_str
#ifdef ERTS_SMP
, total, online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , dirty_cpu, dirty_cpu_onln, dirty_io
+#endif
#endif
#ifdef USE_THREADS
, erts_async_max_threads
@@ -486,27 +523,6 @@ collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
}
}
-
-static void one_link_size(ErtsLink *lnk, void *vpu)
-{
- Uint *pu = vpu;
- *pu += ERTS_LINK_SIZE*sizeof(Uint);
- if(!IS_CONST(lnk->pid))
- *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
- if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
- }
-}
-static void one_mon_size(ErtsMonitor *mon, void *vpu)
-{
- Uint *pu = vpu;
- *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
- if(!IS_CONST(mon->pid))
- *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
- if(!IS_CONST(mon->ref))
- *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
-}
-
/*
* process_info/[1,2]
*/
@@ -1420,41 +1436,8 @@ process_info_aux(Process *BIF_P,
}
case am_memory: { /* Memory consumed in bytes */
- ErlMessage *mp;
- Uint size = 0;
Uint hsz = 3;
- struct saved_calls *scb;
- size += sizeof(Process);
-
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
-
- erts_doforall_links(ERTS_P_LINKS(rp), &one_link_size, &size);
- erts_doforall_monitors(ERTS_P_MONITORS(rp), &one_mon_size, &size);
- size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
- if (rp->old_hend && rp->old_heap)
- size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
-
- size += rp->msg.len * sizeof(ErlMessage);
-
- for (mp = rp->msg.first; mp; mp = mp->next)
- if (mp->data.attached)
- size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
-
- if (rp->arg_reg != rp->def_arg_reg) {
- size += rp->arity * sizeof(rp->arg_reg[0]);
- }
-
- if (rp->psd)
- size += sizeof(ErtsPSD);
-
- scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
- if (scb) {
- size += (sizeof(struct saved_calls)
- + (scb->len-1) * sizeof(scb->ct[0]));
- }
-
- size += erts_dicts_mem_size(rp);
-
+ Uint size = erts_process_memory(rp);
(void) erts_bld_uint(NULL, &hsz, size);
hp = HAlloc(BIF_P, hsz);
res = erts_bld_uint(&hp, NULL, size);
@@ -1721,13 +1704,22 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
sel = *tp++;
- if (sel == am_allocator_sizes) {
+ if (sel == am_memory_internal) {
+ switch (arity) {
+ case 3:
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1))
+ return am_true;
+ default:
+ goto badarg;
+ }
+ }
+ else if (sel == am_allocator_sizes) {
switch (arity) {
case 2:
ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
return ret;
case 3:
- if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1))
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0))
return am_true;
default:
goto badarg;
@@ -1786,7 +1778,7 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
return ret;
case 3:
- if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0))
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0))
return am_true;
default:
goto badarg;
@@ -1811,7 +1803,11 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
#if defined(PURIFY)
BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
+# ifdef VALGRIND_DO_ADDED_LEAK_CHECK
+ VALGRIND_DO_ADDED_LEAK_CHECK;
+# else
VALGRIND_DO_LEAK_CHECK;
+# endif
BIF_RET(make_small(0));
#endif
} else if (*tp == am_fd) {
@@ -2096,6 +2092,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#elif defined(ERTS_ENABLE_LOCK_COUNT)
ERTS_DECL_AM(lcnt);
BIF_RET(AM_lcnt);
+#elif defined(ERTS_FRMPTR)
+ ERTS_DECL_AM(frmptr);
+ BIF_RET(AM_frmptr);
#else
BIF_RET(am_opt);
#endif
@@ -2128,7 +2127,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
val = erts_get_system_seq_tracer();
- ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
+ ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
@@ -2461,6 +2460,13 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ERL_DRV_EXTENDED_MINOR_VERSION);
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
+ } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
+ char buf[42];
+ int n = erts_snprintf(buf, 42, "%d.%d",
+ ERL_NIF_MAJOR_VERSION,
+ ERL_NIF_MINOR_VERSION);
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
#ifdef ERTS_SMP
BIF_RET(am_true);
@@ -2487,6 +2493,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
switch (erts_schedulers_state(&total,
&online,
&active,
+ NULL,
+ NULL,
+ NULL,
1)) {
case ERTS_SCHDLR_SSPND_DONE: {
Eterm *hp = HAlloc(BIF_P, 4);
@@ -2510,7 +2519,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(1));
#else
Uint total, online, active;
- switch (erts_schedulers_state(&total, &online, &active, 1)) {
+ switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(online));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -2527,7 +2536,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(1));
#else
Uint total, online, active;
- switch (erts_schedulers_state(&total, &online, &active, 1)) {
+ switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(active));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -2539,6 +2548,20 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
}
#endif
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
+ Uint dirty_cpu;
+ erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, 1);
+ BIF_RET(make_small(dirty_cpu));
+ } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
+ Uint dirty_cpu_onln;
+ erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, 1);
+ BIF_RET(make_small(dirty_cpu_onln));
+ } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
+ Uint dirty_io;
+ erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, &dirty_io, 1);
+ BIF_RET(make_small(dirty_io));
+#endif
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2615,74 +2638,14 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
BIF_RET(res);
- } else if (ERTS_IS_ATOM_STR("print_ethread_info", BIF_ARG_1)) {
-#if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
- || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
- || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
- int i;
- char **str;
-#endif
-#ifdef ETHR_NATIVE_ATOMIC32_IMPL
- erts_printf("32-bit native atomics: %s\n",
- ETHR_NATIVE_ATOMIC32_IMPL);
- str = ethr_native_atomic32_ops();
- for (i = 0; str[i]; i++)
- erts_printf("ethr_native_atomic32_%s()\n", str[i]);
-#endif
-#ifdef ETHR_NATIVE_ATOMIC64_IMPL
- erts_printf("64-bit native atomics: %s\n",
- ETHR_NATIVE_ATOMIC64_IMPL);
- str = ethr_native_atomic64_ops();
- for (i = 0; str[i]; i++)
- erts_printf("ethr_native_atomic64_%s()\n", str[i]);
-#endif
-#ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
- if (ethr_have_native_dw_atomic()) {
- erts_printf("Double word native atomics: %s\n",
- ETHR_NATIVE_DW_ATOMIC_IMPL);
- str = ethr_native_dw_atomic_ops();
- for (i = 0; str[i]; i++)
- erts_printf("ethr_native_dw_atomic_%s()\n", str[i]);
- str = ethr_native_su_dw_atomic_ops();
- for (i = 0; str[i]; i++)
- erts_printf("ethr_native_su_dw_atomic_%s()\n", str[i]);
- }
-#endif
-#ifdef ETHR_NATIVE_SPINLOCK_IMPL
- erts_printf("Native spin-locks: %s\n", ETHR_NATIVE_SPINLOCK_IMPL);
-#endif
-#ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
- erts_printf("Native rwspin-locks: %s\n", ETHR_NATIVE_RWSPINLOCK_IMPL);
-#endif
-#ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
- erts_printf("SSE2 support: %s\n", (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
- ? "yes" : "no"));
-#endif
-#ifdef ETHR_X86_OUT_OF_ORDER
- erts_printf("x86"
-#ifdef ARCH_64
- "_64"
-#endif
- " out of order\n");
-#endif
-#ifdef ETHR_SPARC_TSO
- erts_printf("Sparc TSO\n");
-#endif
-#ifdef ETHR_SPARC_PSO
- erts_printf("Sparc PSO\n");
-#endif
-#ifdef ETHR_SPARC_RMO
- erts_printf("Sparc RMO\n");
-#endif
-#if defined(ETHR_PPC_HAVE_LWSYNC)
- erts_printf("Have lwsync instruction: yes\n");
-#elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
- erts_printf("Have lwsync instruction: no\n");
-#elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
- erts_printf("Have lwsync instruction: %s (runtime test)\n",
- ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no");
-#endif
- BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
+ BIF_RET(erts_get_ethread_info(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("emu_args", BIF_ARG_1)) {
+ BIF_RET(erts_get_emu_args(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("beam_jump_table", BIF_ARG_1)) {
+ BIF_RET(erts_beam_jump_table() ? am_true : am_false);
}
else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
#if defined(USE_DTRACE)
@@ -2708,6 +2671,42 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_true);
}
#endif
+ else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
+ Uint sz;
+ Eterm res = NIL, tup, text;
+ Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
+ 2*(strlen(erts_build_flags_CONFIG_H) +
+ strlen(erts_build_flags_CFLAGS) +
+ strlen(erts_build_flags_LDFLAGS)));
+
+ sz = strlen(erts_build_flags_CONFIG_H);
+ text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
+ tup = TUPLE2(hp, am_config_h, text); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
+ sz = strlen(erts_build_flags_CFLAGS);
+ text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
+ tup = TUPLE2(hp, am_cflags, text); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
+ sz = strlen(erts_build_flags_LDFLAGS);
+ text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
+ tup = TUPLE2(hp, am_ldflags, text); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_db_get_max_tabs()));
+ }
+ else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
+ BIF_RET(erts_disable_tolerant_timeofday
+ ? am_disabled
+ : am_enabled);
+ }
+ else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
+ BIF_RET(erts_eager_check_io ? am_true : am_false);
+ }
BIF_ERROR(BIF_P, BADARG);
}
@@ -2861,12 +2860,10 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
included though).
*/
Uint size = 0;
- ErlHeapFragment* bp;
- erts_doforall_links(ERTS_P_LINKS(prt), &one_link_size, &size);
+ erts_doforall_links(ERTS_P_LINKS(prt), &erts_one_link_size, &size);
- for (bp = prt->bp; bp; bp = bp->next)
- size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
+ size += erts_port_data_size(prt);
if (prt->linebuf)
size += sizeof(LineBuf) + prt->linebuf->ovsiz;
@@ -3069,6 +3066,25 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ }
+ BIF_ERROR(p, BADARG);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
@@ -3077,7 +3093,7 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
if (BIF_ARG_1 == BIF_P->common.id)
BIF_RET(am_true);
- rp = erts_proc_lookup(BIF_ARG_1);
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
if (!rp) {
BIF_RET(am_false);
}
@@ -3153,18 +3169,10 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, cs, SMALL_ZERO);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection) {
- Uint hsz = 4;
- ErtsGCInfo gc_info;
- Eterm gcs;
- Eterm recl;
- erts_gc_info(&gc_info);
- (void) erts_bld_uint(NULL, &hsz, gc_info.garbage_collections);
- (void) erts_bld_uint(NULL, &hsz, gc_info.reclaimed);
- hp = HAlloc(BIF_P, hsz);
- gcs = erts_bld_uint(&hp, NULL, gc_info.garbage_collections);
- recl = erts_bld_uint(&hp, NULL, gc_info.reclaimed);
- res = TUPLE3(hp, gcs, recl, SMALL_ZERO);
- BIF_RET(res);
+ res = erts_gc_info_request(BIF_P);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res);
} else if (BIF_ARG_1 == am_reductions) {
Uint reds;
Uint diff;
@@ -3307,17 +3315,38 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(make_small((Uint) words));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
- /* Used by (emulator) */
- int res;
+ /* Used by driver_SUITE (emulator) */
+ Uint sz, *szp;
+ Eterm res, *hp, **hpp;
+ int no_errors;
+ ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
- res = erts_check_io_debug();
+ no_errors = erts_check_io_debug(&ciodi);
erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
- res = 0;
+ no_errors = 0;
#endif
- ASSERT(res >= 0);
- BIF_RET(erts_make_integer((Uint) res, BIF_P));
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ res = erts_bld_tuple(hpp, szp, 4,
+ erts_bld_uint(hpp, szp,
+ (Uint) no_errors),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_used_fds),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_driver_select_structs),
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_driver_event_structs));
+ if (hpp)
+ break;
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
+ BIF_RET(res);
}
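
The while (1) construction above is the standard two-pass erts_bld_* idiom: on the first pass hpp is NULL and szp points at a size accumulator, so the builder calls only count heap words; the second pass allocates exactly that much and runs the same expressions again to actually build the term. A minimal sketch of the shape (build_pair is illustrative, not part of the patch):

    static Eterm build_pair(Process *p, Uint a, Uint b)
    {
        Uint sz = 0, *szp = &sz;
        Eterm res = NIL, *hp = NULL, **hpp = NULL;

        while (1) {
            res = erts_bld_tuple(hpp, szp, 2,
                                 erts_bld_uint(hpp, szp, a),
                                 erts_bld_uint(hpp, szp, b));
            if (hpp)
                break;              /* second pass: term is built */
            hp = HAlloc(p, sz);     /* allocate what pass one counted */
            szp = NULL;
            hpp = &hp;
        }
        return res;
    }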
else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
@@ -3368,6 +3397,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
erts_smp_thr_progress_unblock();
BIF_RET(res);
}
+ else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
+ BIF_RET(erts_mmap_debug_info(BIF_P));
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3675,6 +3707,20 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_true);
}
}
+ else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) {
+ /* Used by process_SUITE (emulator) */
+ int res, enable;
+
+ switch (BIF_ARG_2) {
+ case am_true: enable = 1; break;
+ case am_false: enable = 0; break;
+ default: BIF_ERROR(BIF_P, BADARG); break;
+ }
+
+ res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true;
+ erts_set_gc_state(BIF_P, enable);
+ BIF_RET(res);
+ }
else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
/* Used by signal_SUITE (emulator) */
@@ -3861,16 +3907,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
+ unsigned int i;
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
+ Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
+ * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }]
*/
-
+
tries = (Uint) ethr_atomic_read(&stats->tries);
colls = (Uint) ethr_atomic_read(&stats->colls);
@@ -3879,23 +3928,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
timer_ns = stats->timer.ns;
timer_n = stats->timer_n;
- af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
+ af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
uil = erts_bld_uint( hpp, szp, line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
-
+ uit = erts_bld_uint( hpp, szp, tries);
+ uic = erts_bld_uint( hpp, szp, colls);
+
uits = erts_bld_uint( hpp, szp, timer_s);
uitns = erts_bld_uint( hpp, szp, timer_ns);
uitn = erts_bld_uint( hpp, szp, timer_n);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
-
- t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
+ vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ }
+ thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
+
+ t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3916,13 +3969,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
ASSERT(ltype);
- type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
+ type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
if (lock->flag & ERTS_LCNT_LT_ALLOC) {
/* use allocator types names as id's for allocator locks */
ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
@@ -3933,16 +3986,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
id = lock->id;
}
} else {
- id = lock->id;
+ id = lock->id;
}
-
+
for (i = 0; i < lock->n_stats; i++) {
stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
}
-
- t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3962,12 +4014,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da
dtns = erts_bld_uint( hpp, szp, data->duration.ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
- adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+ adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
- aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+ aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
@@ -4132,6 +4184,8 @@ erts_bif_info_init(void)
alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
gather_sched_wall_time_res_trap
= erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
+ gather_gc_info_res_trap
+ = erts_export_put(am_erlang, am_gather_gc_info_result, 1);
process_info_init();
os_info_init();
}
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 1805366cfe..820ed2385d 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -43,7 +43,7 @@ static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
Eterm* hp;
int i;
- if ((i = list_length(A)) < 0) {
+ if ((i = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
if (i == 0) {
@@ -102,10 +102,10 @@ static Eterm subtract(Process* p, Eterm A, Eterm B)
int n;
int m;
- if ((n = list_length(A)) < 0) {
+ if ((n = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
- if ((m = list_length(B)) < 0) {
+ if ((m = erts_list_length(B)) < 0) {
BIF_ERROR(p, BADARG);
}
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index adac0052d6..37dd6457db 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -36,6 +36,7 @@
#include "dist.h"
#include "erl_version.h"
#include "erl_binary.h"
+#include "erl_map.h"
BIF_RETTYPE and_2(BIF_ALIST_2)
{
@@ -321,7 +322,10 @@ BIF_RETTYPE is_record_3(BIF_ALIST_3)
BIF_RET(am_false);
}
-
-
-
-
+BIF_RETTYPE is_map_1(BIF_ALIST_1)
+{
+ if (is_map(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 1062d4379b..e07c622928 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -180,3 +180,25 @@ BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
BIF_RET(am_true);
}
+BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
+{
+ char *key_buf;
+ char buf[STATIC_BUF_SIZE];
+
+ key_buf = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
+ ERTS_ALC_T_TMP,0,0,NULL);
+ if (!key_buf) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (erts_sys_unsetenv(key_buf)) {
+ if (key_buf != buf) {
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (key_buf != buf) {
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ }
+ BIF_RET(am_true);
+}
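
The BIF above uses a common ERTS buffering pattern: the conversion helper fills the caller's stack buffer when the result fits and otherwise returns a freshly allocated copy, so the free is guarded by a pointer comparison. A standalone sketch of the same shape (to_buf is illustrative, not an ERTS API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define STATIC_BUF_SIZE 16

    static char *to_buf(const char *src, char *buf, size_t bufsz)
    {
        size_t need = strlen(src) + 1;
        char *out = (need <= bufsz) ? buf : malloc(need);
        if (out)
            memcpy(out, src, need);
        return out;
    }

    int main(void)
    {
        char buf[STATIC_BUF_SIZE];
        char *key = to_buf("A_RATHER_LONG_VARIABLE_NAME", buf, sizeof buf);

        if (!key)
            return 1;
        printf("unsetenv(%s)\n", key);
        if (key != buf)              /* free only the heap fallback */
            free(key);
        return 0;
    }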
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 44fa41c7b6..64bd598ba6 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -84,7 +84,7 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2)
}
static ERTS_INLINE Port *
-lookup_port(Process *c_p, Eterm id_or_name)
+lookup_port(Process *c_p, Eterm id_or_name, Uint32 invalid_flags)
{
/* TODO: Implement nicer lookup in register... */
Eterm id;
@@ -92,7 +92,19 @@ lookup_port(Process *c_p, Eterm id_or_name)
id = erts_whereis_name_to_id(c_p, id_or_name);
else
id = id_or_name;
- return erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ return erts_port_lookup(id, invalid_flags);
+}
+
+static ERTS_INLINE Port *
+sig_lookup_port(Process *c_p, Eterm id_or_name)
+{
+ return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+}
+
+static ERTS_INLINE Port *
+data_lookup_port(Process *c_p, Eterm id_or_name)
+{
+ return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_LOOKUP);
}
/*
@@ -125,7 +137,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
BIF_RET(am_badarg);
}
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_badarg);
@@ -185,7 +197,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
unsigned int op;
erts_aint32_t state;
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_badarg);
@@ -235,7 +247,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
unsigned int op;
erts_aint32_t state;
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_badarg);
@@ -290,7 +302,7 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
ref = NIL;
#endif
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_badarg);
@@ -320,7 +332,7 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
Eterm ref;
Port* prt;
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_badarg);
@@ -352,7 +364,7 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
Port* prt;
if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_undefined);
}
@@ -391,7 +403,7 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
Port* prt;
if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = sig_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
BIF_RET(am_undefined);
}
@@ -423,57 +435,173 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
}
}
+/*
+ * The erlang:port_set_data()/erlang:port_get_data() operations should
+ * be viewed as operations on a table (inet_db) with data values
+ * associated with port identifier keys. That is, these operations are
+ * *not* signals to/from ports.
+ */
-BIF_RETTYPE erts_internal_port_set_data_2(BIF_ALIST_2)
+#if (TAG_PRIMARY_IMMED1 & 0x3) == 0
+# error "erlang:port_set_data()/erlang:port_get_data() needs to be rewritten!"
+#endif
+
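+/*
+ * The #error guard above pins the invariant this scheme rests on: an
+ * immediate Eterm always has a non-zero tag in its two lowest bits,
+ * while an ErtsPortDataHeap pointer is word aligned and therefore has
+ * them clear, so both can share one atomic word and be told apart
+ * with (data & 0x3). A plain-C illustration of the alignment half of
+ * that claim (the immediate value is made up):
+ *
+ *     #include <stdio.h>
+ *     #include <stdint.h>
+ *     #include <stdlib.h>
+ *
+ *     int main(void)
+ *     {
+ *         void *p = malloc(64);    // stand-in for an ErtsPortDataHeap*
+ *         uintptr_t imm = 0x3b;    // stand-in for a tagged immediate
+ *
+ *         printf("pointer low bits:   %lu\n",
+ *                (unsigned long) ((uintptr_t) p & 0x3));   // 0
+ *         printf("immediate low bits: %lu\n",
+ *                (unsigned long) (imm & 0x3));             // 3
+ *         free(p);
+ *         return 0;
+ *     }
+ */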
+typedef struct {
+ ErtsThrPrgrLaterOp later_op;
+ Uint hsize;
+ Eterm data;
+ ErlOffHeap off_heap;
+ Eterm heap[1];
+} ErtsPortDataHeap;
+
+static void
+free_port_data_heap(void *vpdhp)
{
- Eterm ref;
+ erts_cleanup_offheap(&((ErtsPortDataHeap *) vpdhp)->off_heap);
+ erts_free(ERTS_ALC_T_PORT_DATA_HEAP, vpdhp);
+}
+
+static ERTS_INLINE void
+cleanup_old_port_data(erts_aint_t data)
+{
+ if ((data & 0x3) != 0) {
+ ASSERT(is_immed((Eterm) data));
+ }
+ else {
+#ifdef ERTS_SMP
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ size_t size;
+ ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ size = sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm) - 1);
+ erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
+ (void *) pdhp,
+ &pdhp->later_op,
+ size);
+#else
+ free_port_data_heap((void *) data);
+#endif
+ }
+}
+
+void
+erts_init_port_data(Port *prt)
+{
+ erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined);
+}
+
+void
+erts_cleanup_port_data(Port *prt)
+{
+ ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data,
+ (erts_aint_t) NULL));
+}
+
+Uint
+erts_port_data_size(Port *prt)
+{
+ erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+
+ if ((data & 0x3) != 0) {
+ ASSERT(is_immed((Eterm) (UWord) data));
+ return (Uint) 0;
+ }
+ else {
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ return (Uint) sizeof(ErtsPortDataHeap) + pdhp->hsize*(sizeof(Eterm)-1);
+ }
+}
+
+ErlOffHeap *
+erts_port_data_offheap(Port *prt)
+{
+ erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+
+ if ((data & 0x3) != 0) {
+ ASSERT(is_immed((Eterm) (UWord) data));
+ return NULL;
+ }
+ else {
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ return &pdhp->off_heap;
+ }
+}
+
+BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
+{
+ /*
+ * This is not a signal. See comment above.
+ */
+ erts_aint_t data;
Port* prt;
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = data_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
- BIF_RET(am_badarg);
+ BIF_ERROR(BIF_P, BADARG);
- switch (erts_port_set_data(BIF_P, prt, BIF_ARG_2, &ref)) {
- case ERTS_PORT_OP_CALLER_EXIT:
- case ERTS_PORT_OP_BADARG:
- case ERTS_PORT_OP_DROPPED:
- BIF_RET(am_badarg);
- case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
- BIF_RET(ref);
- case ERTS_PORT_OP_DONE:
- BIF_RET(am_true);
- default:
- ERTS_INTERNAL_ERROR("Unexpected erts_port_set_data() result");
- BIF_RET(am_internal_error);
+ if (is_immed(BIF_ARG_2)) {
+ data = (erts_aint_t) BIF_ARG_2;
+ ASSERT((data & 0x3) != 0);
}
+ else {
+ ErtsPortDataHeap *pdhp;
+ Uint hsize;
+ Eterm *hp;
+
+ hsize = size_object(BIF_ARG_2);
+ pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
+ sizeof(ErtsPortDataHeap) + hsize*(sizeof(Eterm)-1));
+ hp = &pdhp->heap[0];
+ pdhp->off_heap.first = NULL;
+ pdhp->off_heap.overhead = 0;
+ pdhp->hsize = hsize;
+ pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap);
+ data = (erts_aint_t) pdhp;
+ ASSERT((data & 0x3) == 0);
+ }
+
+ data = erts_smp_atomic_xchg_wb(&prt->data, data);
+
+ if (data == (erts_aint_t)NULL) {
+ /* Port terminated by racing thread */
+ data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ ASSERT(data != (erts_aint_t)NULL);
+ cleanup_old_port_data(data);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ cleanup_old_port_data(data);
+ BIF_RET(am_true);
}
-BIF_RETTYPE erts_internal_port_get_data_1(BIF_ALIST_1)
+BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
{
- Eterm retval;
+ /*
+ * This is not a signal. See comment above.
+ */
+ Eterm res;
+ erts_aint_t data;
Port* prt;
- prt = lookup_port(BIF_P, BIF_ARG_1);
+ prt = data_lookup_port(BIF_P, BIF_ARG_1);
if (!prt)
- BIF_RET(am_badarg);
+ BIF_ERROR(BIF_P, BADARG);
- switch (erts_port_get_data(BIF_P, prt, &retval)) {
- case ERTS_PORT_OP_CALLER_EXIT:
- case ERTS_PORT_OP_BADARG:
- case ERTS_PORT_OP_DROPPED:
- BIF_RET(am_badarg);
- case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
- BIF_RET(retval);
- case ERTS_PORT_OP_DONE:
- ASSERT(is_not_internal_ref(retval));
- BIF_RET(retval);
- default:
- ERTS_INTERNAL_ERROR("Unexpected erts_port_get_data() result");
- BIF_RET(am_internal_error);
+ data = erts_smp_atomic_read_ddrb(&prt->data);
+ if (data == (erts_aint_t)NULL)
+ BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */
+
+ if ((data & 0x3) != 0) {
+ res = (Eterm) (UWord) data;
+ ASSERT(is_immed(res));
}
+ else {
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ Eterm *hp = HAlloc(BIF_P, pdhp->hsize);
+ res = copy_struct(pdhp->data, pdhp->hsize, &hp, &MSO(BIF_P));
+ }
+
+ BIF_RET(res);
}
/*
@@ -677,43 +805,29 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
goto badarg;
}
- if (*tp == am_spawn || *tp == am_spawn_driver) { /* A process port */
+ if (*tp == am_spawn || *tp == am_spawn_driver || *tp == am_spawn_executable) { /* A process port */
+ int encoding;
if (arity != make_arityval(2)) {
goto badarg;
}
name = tp[1];
- if (is_atom(name)) {
- name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
- atom_tab(atom_val(name))->len+1);
- sys_memcpy((void *) name_buf,
- (void *) atom_tab(atom_val(name))->name,
- atom_tab(atom_val(name))->len);
- name_buf[atom_tab(atom_val(name))->len] = '\0';
- } else if ((i = is_string(name))) {
- name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
- if (intlist_to_buf(name, name_buf, i) != i)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- name_buf[i] = '\0';
- } else {
+ encoding = erts_get_native_filename_encoding();
+ /* Do not convert the command to utf-16le yet, do that in win32 specific code */
+ /* since the cmd is used for comparison with driver names and copied to port info */
+ if (encoding == ERL_FILENAME_WIN_WCHAR) {
+ encoding = ERL_FILENAME_UTF8;
+ }
+ if ((name_buf = erts_convert_filename_to_encoding(name, NULL, 0, ERTS_ALC_T_TMP,0,1, encoding, NULL, 0))
+ == NULL) {
goto badarg;
}
+
if (*tp == am_spawn_driver) {
opts.spawn_type = ERTS_SPAWN_DRIVER;
+ } else if (*tp == am_spawn_executable) {
+ opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
}
- driver = &spawn_driver;
- } else if (*tp == am_spawn_executable) { /* A program */
- /*
- * {spawn_executable,Progname}
- */
-
- if (arity != make_arityval(2)) {
- goto badarg;
- }
- name = tp[1];
- if ((name_buf = erts_convert_filename_to_native(name, NULL, 0, ERTS_ALC_T_TMP,0,1, NULL)) == NULL) {
- goto badarg;
- }
- opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
+
driver = &spawn_driver;
} else if (*tp == am_fd) { /* An fd port */
int n;
@@ -754,29 +868,8 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
if (edir != NIL) {
- /* A working directory is expressed differently if spawn_executable, i.e. Unicode is handled
- for spawn_executable... */
- if (opts.spawn_type != ERTS_SPAWN_EXECUTABLE) {
- Eterm iolist;
- DeclareTmpHeap(heap,4,p);
- int r;
-
- UseTmpHeap(4,p);
- heap[0] = edir;
- heap[1] = make_list(heap+2);
- heap[2] = make_small(0);
- heap[3] = NIL;
- iolist = make_list(heap);
- r = erts_iolist_to_buf(iolist, (char*) dir, MAXPATHLEN);
- UnUseTmpHeap(4,p);
- if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
- goto badarg;
- }
- opts.wd = (char *) dir;
- } else {
- if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) {
- goto badarg;
- }
+ if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) {
+ goto badarg;
}
}
@@ -798,7 +891,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
dtrace_proc_str(p, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", port->common.id);
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
@@ -854,11 +947,12 @@ static char **convert_args(Eterm l)
int n;
int i = 0;
Eterm str;
- /* We require at least one element in list (argv[0]) */
if (is_not_list(l) && is_not_nil(l)) {
return NULL;
}
- n = list_length(l);
+
+ n = erts_list_length(l);
+ /* Room for argv[0] first, the list elements, and a terminating NULL */
pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **));
pp[i++] = erts_default_arg0;
while (is_list(l)) {
@@ -902,7 +996,7 @@ static byte* convert_environment(Process* p, Eterm env)
byte* bytes;
int encoding = erts_get_native_filename_encoding();
- if ((n = list_length(env)) < 0) {
+ if ((n = erts_list_length(env)) < 0) {
return NULL;
}
heap_size = 2*(5*n+1);
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index 3d34c2a77f..448c6f6f6d 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -180,6 +180,9 @@ static Eterm make_signed_integer(int x, Process *p)
#define PARSE_FLAG_STARTOFFSET 8
#define PARSE_FLAG_CAPTURE_OPT 16
#define PARSE_FLAG_GLOBAL 32
+#define PARSE_FLAG_REPORT_ERRORS 64
+#define PARSE_FLAG_MATCH_LIMIT 128
+#define PARSE_FLAG_MATCH_LIMIT_RECURSION 256
#define CAPSPEC_VALUES 0
#define CAPSPEC_TYPE 1
@@ -192,7 +195,9 @@ parse_options(Eterm listp, /* in */
int *exec_options, /* out */
int *flags,/* out */
int *startoffset, /* out */
- Eterm *capture_spec) /* capture_spec[CAPSPEC_SIZE] */ /* out */
+ Eterm *capture_spec, /* capture_spec[CAPSPEC_SIZE] */ /* out */
+ int *match_limit, /* out */
+ int *match_limit_recursion) /* out */
{
int copt,eopt,fl;
Eterm item;
@@ -234,7 +239,7 @@ parse_options(Eterm listp, /* in */
case am_offset:
{
int tmp;
- if (!term_to_int(tp[2],&tmp)) {
+ if (!term_to_int(tp[2],&tmp) || tmp < 0) {
return -1;
}
if (startoffset != NULL) {
@@ -243,6 +248,31 @@ parse_options(Eterm listp, /* in */
}
fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|PARSE_FLAG_STARTOFFSET);
break;
+ case am_match_limit:
+ {
+ int tmp;
+ if (!term_to_int(tp[2],&tmp) || tmp < 0) {
+ return -1;
+ }
+ if (match_limit != NULL) {
+ *match_limit = tmp;
+ }
+ }
+ fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|PARSE_FLAG_MATCH_LIMIT);
+ break;
+ case am_match_limit_recursion:
+ {
+ int tmp;
+ if (!term_to_int(tp[2],&tmp) || tmp < 0) {
+ return -1;
+ }
+ if (match_limit_recursion != NULL) {
+ *match_limit_recursion = tmp;
+ }
+ }
+ fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|
+ PARSE_FLAG_MATCH_LIMIT_RECURSION);
+ break;
case am_newline:
if (!is_atom(tp[2])) {
return -1;
@@ -276,7 +306,7 @@ parse_options(Eterm listp, /* in */
default:
return -1;
}
- }else if (is_not_atom(item)) {
+ } else if (is_not_atom(item)) {
return -1;
} else {
switch(item) {
@@ -288,6 +318,10 @@ parse_options(Eterm listp, /* in */
eopt |= PCRE_NOTEMPTY;
fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
break;
+ case am_notempty_atstart:
+ eopt |= PCRE_NOTEMPTY_ATSTART;
+ fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
+ break;
case am_notbol:
eopt |= PCRE_NOTBOL;
fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
@@ -296,6 +330,10 @@ parse_options(Eterm listp, /* in */
eopt |= PCRE_NOTEOL;
fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
break;
+ case am_no_start_optimize:
+ copt |= PCRE_NO_START_OPTIMIZE;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
case am_caseless:
copt |= PCRE_CASELESS;
fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
@@ -332,6 +370,18 @@ parse_options(Eterm listp, /* in */
copt |= PCRE_UNGREEDY;
fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
break;
+ case am_ucp:
+ copt |= PCRE_UCP;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_never_utf:
+ copt |= PCRE_NEVER_UTF;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_report_errors:
+ fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT |
+ PARSE_FLAG_REPORT_ERRORS);
+ break;
case am_unicode:
copt |= PCRE_UTF8;
fl |= (PARSE_FLAG_UNIQUE_COMPILE_OPT | PARSE_FLAG_UNICODE);
@@ -359,7 +409,7 @@ parse_options(Eterm listp, /* in */
if (compile_options != NULL) {
*compile_options = copt;
}
- if (exec_options != NULL) {
+ if (exec_options != NULL) {
*exec_options = eopt;
}
if (flags != NULL) {
@@ -373,34 +423,49 @@ parse_options(Eterm listp, /* in */
*/
static Eterm
-build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, const char *errstr, int errofset, int unicode, int with_ok)
+build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, const char *errstr, int errofset, int unicode, int with_ok, Eterm extra_err_tag)
{
Eterm *hp;
Eterm ret;
size_t pattern_size;
int capture_count;
+ int use_crlf;
+ unsigned long options;
if (!result) {
/* Return {error_tag, {Code, String, Offset}} */
int elen = sys_strlen(errstr);
int need = 3 /* tuple of 2 */ +
3 /* tuple of 2 */ +
- (2 * elen) /* The error string list */;
+ (2 * elen) /* The error string list */ +
+ ((extra_err_tag != NIL) ? 3 : 0);
hp = HAlloc(p, need);
ret = buf_to_intlist(&hp, (char *) errstr, elen, NIL);
ret = TUPLE2(hp, ret, make_small(errofset));
hp += 3;
+ if (extra_err_tag != NIL) {
+ /* Return {error_tag, {extra_tag,
+ {Code, String, Offset}}} instead */
+ ret = TUPLE2(hp, extra_err_tag, ret);
+ hp += 3;
+ }
ret = TUPLE2(hp, error_tag, ret);
} else {
erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &pattern_size);
erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count);
+ erts_pcre_fullinfo(result, NULL, PCRE_INFO_OPTIONS, &options);
+ options &= PCRE_NEWLINE_CR|PCRE_NEWLINE_LF | PCRE_NEWLINE_CRLF |
+ PCRE_NEWLINE_ANY | PCRE_NEWLINE_ANYCRLF;
+ use_crlf = (options == PCRE_NEWLINE_ANY ||
+ options == PCRE_NEWLINE_CRLF ||
+ options == PCRE_NEWLINE_ANYCRLF);
/* XXX: Optimize - keep in offheap binary to allow this to
be kept across traps w/o need of copying */
ret = new_binary(p, (byte *) result, pattern_size);
erts_pcre_free(result);
- hp = HAlloc(p, (with_ok) ? (3+5) : 5);
- ret = TUPLE4(hp,am_re_pattern, make_small(capture_count), make_small(unicode),ret);
+ hp = HAlloc(p, (with_ok) ? (3+6) : 6);
+ ret = TUPLE5(hp,am_re_pattern, make_small(capture_count), make_small(unicode),make_small(use_crlf),ret);
if (with_ok) {
- hp += 5;
+ hp += 6;
ret = TUPLE2(hp,am_ok,ret);
}
}
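
The compiled pattern term grows from a 4-tuple to a 5-tuple here:

    {re_pattern, CaptureCount, Unicode, UseCRLF, CodeBinary}

where the new UseCRLF flag records whether the pattern's compile-time newline convention treats CRLF as a newline (crlf, any or anycrlf). That shape change is why the re_run and re_inspect hunks further down now check for arity 5 and read the code binary from element 5 instead of element 4.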
@@ -424,9 +489,12 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
int options = 0;
int pflags = 0;
int unicode = 0;
+#ifdef DEBUG
+ int buffres;
+#endif
- if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL)
+ if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL,NULL,NULL)
< 0) {
BIF_ERROR(p,BADARG);
}
@@ -445,16 +513,19 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (erts_iolist_to_buf(arg1, expr, slen) != 0) {
- erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(p,BADARG);
- }
+#ifdef DEBUG
+ buffres =
+#endif
+ erts_iolist_to_buf(arg1, expr, slen);
+
+ ASSERT(buffres >= 0);
+
expr[slen]='\0';
result = erts_pcre_compile2(expr, options, &errcode,
&errstr, &errofset, default_table);
ret = build_compile_result(p, am_error, result, errcode,
- errstr, errofset, unicode, 1);
+ errstr, errofset, unicode, 1, NIL);
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_RET(ret);
}
@@ -492,7 +563,7 @@ typedef struct _return_info {
} ReturnInfo;
typedef struct _restart_context {
- pcre_extra extra;
+ erts_pcre_extra extra;
void *restart_data;
Uint32 flags;
char *subject; /* to be able to free it when done */
@@ -502,6 +573,7 @@ typedef struct _restart_context {
} RestartContext;
#define RESTART_FLAG_SUBJECT_IN_BINARY 0x1
+#define RESTART_FLAG_REPORT_MATCH_LIMIT 0x2
static void cleanup_restart_context(RestartContext *rc)
{
@@ -542,13 +614,29 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
Eterm res;
Eterm *hp;
if (rc <= 0) {
- res = am_nomatch;
+ if (restartp->flags & RESTART_FLAG_REPORT_MATCH_LIMIT) {
+ if (rc == PCRE_ERROR_MATCHLIMIT) {
+ hp = HAlloc(p,3);
+ res = TUPLE2(hp,am_error,am_match_limit);
+ } else if (rc == PCRE_ERROR_RECURSIONLIMIT) {
+ hp = HAlloc(p,3);
+ res = TUPLE2(hp,am_error,am_match_limit_recursion);
+ } else {
+ res = am_nomatch;
+ }
+ } else {
+ res = am_nomatch;
+ }
} else {
- ReturnInfo *ri = restartp->ret_info;
+ ReturnInfo *ri;
ReturnInfo defri = {RetIndex,0,{0}};
- if (ri == NULL) {
+
+ if (restartp->ret_info == NULL) {
ri = &defri;
+ } else {
+ ri = restartp->ret_info;
}
+
if (ri->type == RetNone) {
res = am_match;
} else if (ri->type == RetIndex){
@@ -577,6 +665,17 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
ri->num_spec * 2 * sizeof(Eterm));
for (i = 0; i < ri->num_spec; ++i) {
x = ri->v[i];
+ if (x < -1) {
+ int n = i-x+1;
+ int j;
+ for (j = i+1; j < ri->num_spec && j < n; ++j) {
+ if (restartp->ovector[(ri->v[j])*2] >= 0) {
+ x = ri->v[j];
+ break;
+ }
+ }
+ i = n-1;
+ }
if (x < rc && x >= 0) {
tmp_vect[n*2] = make_signed_integer(restartp->ovector[x*2],p);
tmp_vect[n*2+1] = make_signed_integer(restartp->ovector[x*2+1]-restartp->ovector[x*2],p);
@@ -658,6 +757,17 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
ri->num_spec * sizeof(Eterm));
for (i = 0; i < ri->num_spec; ++i) {
x = ri->v[i];
+ if (x < -1) {
+ int n = i-x+1;
+ int j;
+ for (j = i+1; j < ri->num_spec && j < n; ++j) {
+ if (restartp->ovector[(ri->v[j])*2] >= 0) {
+ x = ri->v[j];
+ break;
+ }
+ }
+ i = n-1;
+ }
if (x < rc && x >= 0) {
char *cp;
int len;
@@ -722,6 +832,49 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
*/
#define RINFO_SIZ(Num) (sizeof(ReturnInfo) + (sizeof(int) * (Num - 1)))
+#define PICK_INDEX(NameEntry) \
+ ((int) ((((unsigned) ((unsigned char *) (NameEntry))[0]) << 8) + \
+ ((unsigned) ((unsigned char *) (NameEntry))[1])))
+
+
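+/*
+ * PICK_INDEX decodes a PCRE name-table entry, which begins with the
+ * capture index as a big-endian 16-bit value followed by the
+ * NUL-terminated group name. A quick standalone check:
+ *
+ *     #include <stdio.h>
+ *
+ *     #define PICK_INDEX(NameEntry) \
+ *       ((int) ((((unsigned) ((unsigned char *) (NameEntry))[0]) << 8) + \
+ *               ((unsigned) ((unsigned char *) (NameEntry))[1])))
+ *
+ *     int main(void)
+ *     {
+ *         unsigned char entry[] = { 0x01, 0x2c, 'y', 'e', 'a', 'r', 0 };
+ *         printf("%d\n", PICK_INDEX(entry));  // 300 == 0x012c
+ *         return 0;
+ *     }
+ */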
+static void build_one_capture(const pcre *code, ReturnInfo **ri, int *sallocated, int has_dupnames, char *name)
+{
+ ReturnInfo *r = (*ri);
+ if (has_dupnames) {
+ /* Build a sequence of positions, starting with -size if
+ more than one, otherwise just put the index there... */
+ char *first,*last;
+ int esize = erts_pcre_get_stringtable_entries(code,name,&first,&last);
+ if (esize == PCRE_ERROR_NOSUBSTRING) {
+ r->v[r->num_spec - 1] = -1;
+ } else if(last == first) {
+ r->v[r->num_spec - 1] = PICK_INDEX(first);
+ } else {
+ int num = ((last - first) / esize) + 1;
+ int i;
+ ASSERT(num > 1);
+ r->v[r->num_spec - 1] = -num; /* A value less than -1 means
+ multiple indexes for same name */
+ for (i = 0; i < num; ++i) {
+ ++(r->num_spec);
+ if(r->num_spec > (*sallocated)) {
+ (*sallocated) += 10;
+ r = erts_realloc(ERTS_ALC_T_RE_SUBJECT, r,
+ RINFO_SIZ((*sallocated)));
+ }
+ r->v[r->num_spec - 1] = PICK_INDEX(first);
+ first += esize;
+ }
+ }
+ } else {
+ /* Use the faster binary search if no duplicate names are present */
+ if ((r->v[r->num_spec - 1] = erts_pcre_get_stringnumber(code,name)) ==
+ PCRE_ERROR_NOSUBSTRING) {
+ r->v[r->num_spec - 1] = -1;
+ }
+ }
+ *ri = r;
+}
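
For duplicate group names, build_one_capture stores a run in the return vector: a sentinel -N followed by the N candidate capture indexes for that name; the x < -1 branches in build_exec_return (earlier in this file) then pick the first candidate whose ovector slot is filled. A standalone decode of a made-up vector showing the same walk:

    #include <stdio.h>

    int main(void)
    {
        int v[] = { 1, -3, 2, 5, 7, 4 };   /* "-3" introduces 3 indexes */
        int num_spec = sizeof v / sizeof v[0];
        int i;

        for (i = 0; i < num_spec; ++i) {
            int x = v[i];
            if (x < -1) {
                int n = i - x + 1;      /* one past the run's last slot */
                int j;
                printf("dup-name run:");
                for (j = i + 1; j < num_spec && j < n; ++j)
                    printf(" %d", v[j]);
                printf("\n");
                i = n - 1;              /* skip past the run */
            } else {
                printf("single index: %d\n", x);
            }
        }
        return 0;
    }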
static ReturnInfo *
build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
@@ -770,13 +923,58 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
}
ri->v[ri->num_spec - 1] = 0;
break;
+ case am_all_names:
+ {
+ int rc,i,top;
+ int entrysize;
+ unsigned char *nametable, *last = NULL;
+ int has_dupnames;
+ unsigned long options;
+
+ if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0)
+ goto error;
+ if ((rc = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top)) != 0)
+ goto error;
+ if (top <= 0) {
+ ri->num_spec = 0;
+ ri->type = RetNone;
+ break;
+ }
+ if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize) != 0)
+ goto error;
+ if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable) != 0)
+ goto error;
+
+ has_dupnames = ((options & PCRE_DUPNAMES) != 0);
+
+ for(i=0;i<top;++i) {
+ if (last == NULL || !has_dupnames || strcmp((char *) last+2,(char *) nametable+2)) {
+ ASSERT(ri->num_spec >= 0);
+ ++(ri->num_spec);
+ if(ri->num_spec > sallocated) {
+ sallocated += 10;
+ ri = erts_realloc(ERTS_ALC_T_RE_SUBJECT, ri, RINFO_SIZ(sallocated));
+ }
+ if (has_dupnames) {
+ /* This could be more efficient: we already have
+ the names and could fill in the vector
+ immediately. As it is, we look up the name again. */
+ build_one_capture(code,&ri,&sallocated,has_dupnames,(char *) nametable+2);
+ } else {
+ ri->v[ri->num_spec - 1] = PICK_INDEX(nametable);
+ }
+ }
+ last = nametable;
+ nametable += entrysize;
+ }
+ break;
+ }
default:
if (is_list(capture_spec[CAPSPEC_VALUES])) {
for(l=capture_spec[CAPSPEC_VALUES];is_list(l);l = CDR(list_val(l))) {
int x;
Eterm val = CAR(list_val(l));
- if (ri->num_spec < 0)
- ri->num_spec = 0;
+ ASSERT(ri->num_spec >= 0);
++(ri->num_spec);
if(ri->num_spec > sallocated) {
sallocated += 10;
@@ -785,6 +983,11 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
if (term_to_int(val,&x)) {
ri->v[ri->num_spec - 1] = x;
} else if (is_atom(val) || is_binary(val) || is_list(val)) {
+ int has_dupnames;
+ unsigned long options;
+ if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0)
+ goto error;
+ has_dupnames = ((options & PCRE_DUPNAMES) != 0);
if (is_atom(val)) {
Atom *ap = atom_tab(atom_val(val));
if ((ap->len + 1) > tmpbsiz) {
@@ -799,6 +1002,10 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
tmpb[ap->len] = '\0';
} else {
ErlDrvSizeT slen;
+#ifdef DEBUG
+ int buffres;
+#endif
+
if (erts_iolist_size(val, &slen)) {
goto error;
}
@@ -810,15 +1017,15 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
(tmpbsiz = slen + 1));
}
}
- if (erts_iolist_to_buf(val, tmpb, slen) != 0) {
- goto error;
- }
+
+#ifdef DEBUG
+ buffres =
+#endif
+ erts_iolist_to_buf(val, tmpb, slen);
+ ASSERT(buffres >= 0);
tmpb[slen] = '\0';
}
- if ((ri->v[ri->num_spec - 1] = erts_pcre_get_stringnumber(code,tmpb)) ==
- PCRE_ERROR_NOSUBSTRING) {
- ri->v[ri->num_spec - 1] = -1;
- }
+ build_one_capture(code,&ri,&sallocated,has_dupnames,tmpb);
} else {
goto error;
}
@@ -867,15 +1074,18 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
unsigned long loop_count;
Eterm capture[CAPSPEC_SIZE] = CAPSPEC_INIT;
int is_list_cap;
+ int match_limit = 0;
+ int match_limit_recursion = 0;
- if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture)
+ if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture,
+ &match_limit,&match_limit_recursion)
< 0) {
BIF_ERROR(p,BADARG);
}
is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) &&
(capture[CAPSPEC_TYPE] == am_list));
- if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) {
+ if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 5)) {
if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) {
/* Compile from textual RE */
ErlDrvSizeT slen;
@@ -885,6 +1095,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
const char *errstr = "";
int errofset = 0;
int capture_count;
+#ifdef DEBUG
+ int buffres;
+#endif
if (pflags & PARSE_FLAG_UNICODE &&
(!is_binary(arg2) || !is_binary(arg1) ||
@@ -897,18 +1110,32 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (erts_iolist_to_buf(arg2, expr, slen) != 0) {
- erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(p,BADARG);
- }
+
+#ifdef DEBUG
+ buffres =
+#endif
+ erts_iolist_to_buf(arg2, expr, slen);
+
+ ASSERT(buffres >= 0);
+
expr[slen]='\0';
result = erts_pcre_compile2(expr, comp_options, &errcode,
&errstr, &errofset, default_table);
if (!result) {
- erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/* Compilation error gives badarg except in the compile
- function */
- BIF_ERROR(p,BADARG);
+ function or if we have PARSE_FLAG_REPORT_ERRORS */
+ if (pflags & PARSE_FLAG_REPORT_ERRORS) {
+ res = build_compile_result(p, am_error, result, errcode,
+ errstr, errofset,
+ (pflags &
+ PARSE_FLAG_UNICODE) ? 1 : 0,
+ 1, am_compile);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ BIF_RET(res);
+ } else {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ BIF_ERROR(p,BADARG);
+ }
}
if (pflags & PARSE_FLAG_GLOBAL) {
Eterm precompiled =
@@ -917,7 +1144,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
errstr, errofset,
(pflags &
PARSE_FLAG_UNICODE) ? 1 : 0,
- 0);
+ 0, NIL);
Eterm *hp,r;
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
hp = HAlloc(p,4);
@@ -947,7 +1174,8 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
tp = tuple_val(arg2);
if (tp[1] != am_re_pattern || is_not_small(tp[2]) ||
- is_not_small(tp[3]) || is_not_binary(tp[4])) {
+ is_not_small(tp[3]) || is_not_small(tp[4]) ||
+ is_not_binary(tp[5])) {
BIF_ERROR(p,BADARG);
}
@@ -967,9 +1195,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
}
ovsize = 3*(unsigned_val(tp[2])+1);
- code_size = binary_size(tp[4]);
- if ((code_tmp = (const pcre *)
- erts_get_aligned_binary_bytes(tp[4], &temp_alloc)) == NULL) {
+ code_size = binary_size(tp[5]);
+ code_tmp = (const pcre *) erts_get_aligned_binary_bytes(tp[5], &temp_alloc);
+ if (code_tmp == NULL || code_size < 4) {
erts_free_aligned_binary_bytes(temp_alloc);
BIF_ERROR(p, BADARG);
}
@@ -994,6 +1222,16 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
restart.extra.restart_flags = 0;
restart.extra.loop_counter_return = &loop_count;
restart.ret_info = NULL;
+
+ if (pflags & PARSE_FLAG_MATCH_LIMIT) {
+ restart.extra.flags |= PCRE_EXTRA_MATCH_LIMIT;
+ restart.extra.match_limit = match_limit;
+ }
+
+ if (pflags & PARSE_FLAG_MATCH_LIMIT_RECURSION) {
+ restart.extra.flags |= PCRE_EXTRA_MATCH_LIMIT_RECURSION;
+ restart.extra.match_limit_recursion = match_limit_recursion;
+ }
if (pflags & PARSE_FLAG_CAPTURE_OPT) {
if ((restart.ret_info = build_capture(capture,restart.code)) == NULL) {
@@ -1002,7 +1240,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
BIF_ERROR(p,BADARG);
}
}
-
+
/* Optimized - if already in binary off heap, keep that and avoid
copying, also binary returns can be sub binaries in that case */
@@ -1029,6 +1267,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
restart.subject = (char *) (pb->bytes+offset);
restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY;
} else {
+#ifdef DEBUG
+ int buffres;
+#endif
handle_iolist:
if (erts_iolist_size(arg1, &slength)) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
@@ -1040,24 +1281,30 @@ handle_iolist:
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
- if (erts_iolist_to_buf(arg1, restart.subject, slength) != 0) {
- erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
- erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
- erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
- if (restart.ret_info != NULL) {
- erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
- }
- BIF_ERROR(p,BADARG);
- }
+#ifdef DEBUG
+ buffres =
+#endif
+ erts_iolist_to_buf(arg1, restart.subject, slength);
+ ASSERT(buffres >= 0);
}
+ if (pflags & PARSE_FLAG_REPORT_ERRORS) {
+ restart.flags |= RESTART_FLAG_REPORT_MATCH_LIMIT;
+ }
#ifdef DEBUG
loop_count = 0xFFFFFFFF;
#endif
- rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, slength, startoffset,
- options, restart.ovector, ovsize);
+
+ rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject,
+ slength, startoffset,
+ options, restart.ovector, ovsize);
+
+ if (rc == PCRE_ERROR_BADENDIANNESS || rc == PCRE_ERROR_BADMAGIC) {
+ cleanup_restart_context(&restart);
+ BIF_ERROR(p,BADARG);
+ }
ASSERT(loop_count != 0xFFFFFFFF);
BUMP_REDS(p, loop_count / LOOP_FACTOR);
if (rc == PCRE_ERROR_LOOP_LIMIT) {
@@ -1077,7 +1324,7 @@ handle_iolist:
arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
magic_bin);
}
-
+
res = build_exec_return(p, rc, &restart, arg1);
cleanup_restart_context(&restart);
@@ -1149,6 +1396,120 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
BIF_RET(res);
}
+BIF_RETTYPE
+re_inspect_2(BIF_ALIST_2)
+{
+ Eterm *tp,*tmp_vec,*hp;
+ int i,top,j;
+ int entrysize;
+ unsigned char *nametable, *last,*name;
+ int has_dupnames;
+ unsigned long options;
+ int num_names;
+ Eterm res;
+ const pcre *code;
+ byte *temp_alloc = NULL;
+#ifdef DEBUG
+ int infores;
+#endif
+
+
+ if (is_not_tuple(BIF_ARG_1) || (arityval(*tuple_val(BIF_ARG_1)) != 5)) {
+ goto error;
+ }
+ tp = tuple_val(BIF_ARG_1);
+ if (tp[1] != am_re_pattern || is_not_small(tp[2]) ||
+ is_not_small(tp[3]) || is_not_small(tp[4]) ||
+ is_not_binary(tp[5])) {
+ goto error;
+ }
+ if (BIF_ARG_2 != am_namelist) {
+ goto error;
+ }
+ if ((code = (const pcre *)
+ erts_get_aligned_binary_bytes(tp[5], &temp_alloc)) == NULL) {
+ goto error;
+ }
+
+ /* OK, so let's try to get some info */
+
+ if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0)
+ goto error;
+
+#ifdef DEBUG
+ infores =
+#endif
+ erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top);
+
+ ASSERT(infores == 0);
+
+ if (top <= 0) {
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp,am_namelist,NIL);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(res);
+ }
+#ifdef DEBUG
+ infores =
+#endif
+ erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize);
+
+ ASSERT(infores == 0);
+
+#ifdef DEBUG
+ infores =
+#endif
+ erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable);
+
+ ASSERT(infores == 0);
+
+ has_dupnames = ((options & PCRE_DUPNAMES) != 0);
+ /* First, count the names */
+ num_names = 0;
+ last = NULL;
+ name = nametable;
+ for(i=0;i<top;++i) {
+ if (last == NULL || !has_dupnames || strcmp((char *) last+2,
+ (char *) name+2)) {
+ ++num_names;
+ }
+ last = name;
+ name += entrysize;
+ }
+ tmp_vec = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,
+ num_names * sizeof(Eterm));
+ /* Re-iterate and fill tmp_vec */
+ last = NULL;
+ name = nametable;
+ j = 0;
+ for(i=0;i<top;++i) {
+ if (last == NULL || !has_dupnames || strcmp((char *) last+2,
+ (char *) name+2)) {
+ tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, strlen((char *) name+2));
+ }
+ last = name;
+ name += entrysize;
+ }
+ ASSERT(j == num_names);
+ hp = HAlloc(BIF_P, 3+2*j);
+ res = NIL;
+ for(i = j-1 ;i >= 0; --i) {
+ res = CONS(hp,tmp_vec[i],res);
+ hp += 2;
+ }
+ res = TUPLE2(hp,am_namelist,res);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, tmp_vec);
+ BIF_RET(res);
+
+ error:
+ /* tmp_vec never allocated when we reach here */
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+
+
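re_inspect_2 walks the PCRE name table directly: each entry is PCRE_INFO_NAMEENTRYSIZE bytes, starting with a two-byte (most significant byte first) capture-group number followed by a NUL-terminated name, which is why the code reads from name+2. The table is sorted by name, so duplicates under PCRE_DUPNAMES sit adjacent and comparing against only the previous entry is enough to deduplicate. A minimal standalone sketch of the same walk against plain libpcre; list_names and its output are illustrative, not part of the patch:

#include <stdio.h>
#include <pcre.h>

/* Sketch: mirrors the name-table walk in re_inspect_2. */
static void list_names(const pcre *code)
{
    int top = 0, entrysize = 0, i;
    unsigned char *nametable = NULL, *name;

    pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top);
    pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize);
    pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable);

    for (i = 0, name = nametable; i < top; i++, name += entrysize) {
        int group = (name[0] << 8) | name[1]; /* MSB-first group number */
        printf("group %d is named %s\n", group, (char *) name + 2);
    }
}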
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index d67695e533..03ac97283c 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -616,7 +616,7 @@ erts_print_bif_timer_info(int to, void *to_arg)
: btm->receiver.proc.ess->common.id);
erts_print(to, to_arg, "=timer:%T\n", receiver);
erts_print(to, to_arg, "Message: %T\n", btm->message);
- erts_print(to, to_arg, "Time left: %u ms\n",
+ erts_print(to, to_arg, "Time left: %u\n",
erts_time_left(&btm->tm));
}
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 41c98bbffb..06fbbea123 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1224,9 +1224,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
match_spec_meta = NIL;
}
if (r & FUNC_TRACE_COUNT_TRACE) {
- c = count < 0 ?
- erts_make_integer(-count-1, p) :
- erts_make_integer(count, p);
+ c = erts_make_integer(count, p);
}
if (r & FUNC_TRACE_TIME_TRACE) {
ct = call_time;
@@ -2014,6 +2012,7 @@ void erts_system_monitor_clear(Process *c_p) {
#endif
erts_set_system_monitor(NIL);
erts_system_monitor_long_gc = 0;
+ erts_system_monitor_long_schedule = 0;
erts_system_monitor_large_heap = 0;
erts_system_monitor_flags.busy_port = 0;
erts_system_monitor_flags.busy_dist_port = 0;
@@ -2038,12 +2037,17 @@ static Eterm system_monitor_get(Process *p)
Uint hsz = 3 + (erts_system_monitor_flags.busy_dist_port ? 2 : 0) +
(erts_system_monitor_flags.busy_port ? 2 : 0);
Eterm long_gc = NIL;
+ Eterm long_schedule = NIL;
Eterm large_heap = NIL;
if (erts_system_monitor_long_gc != 0) {
hsz += 2+3;
(void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_gc);
}
+ if (erts_system_monitor_long_schedule != 0) {
+ hsz += 2+3;
+ (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_schedule);
+ }
if (erts_system_monitor_large_heap != 0) {
hsz += 2+3;
(void) erts_bld_uint(NULL, &hsz, erts_system_monitor_large_heap);
@@ -2053,6 +2057,10 @@ static Eterm system_monitor_get(Process *p)
if (erts_system_monitor_long_gc != 0) {
long_gc = erts_bld_uint(&hp, NULL, erts_system_monitor_long_gc);
}
+ if (erts_system_monitor_long_schedule != 0) {
+ long_schedule = erts_bld_uint(&hp, NULL,
+ erts_system_monitor_long_schedule);
+ }
if (erts_system_monitor_large_heap != 0) {
large_heap = erts_bld_uint(&hp, NULL, erts_system_monitor_large_heap);
}
@@ -2061,6 +2069,10 @@ static Eterm system_monitor_get(Process *p)
Eterm t = TUPLE2(hp, am_long_gc, long_gc); hp += 3;
res = CONS(hp, t, res); hp += 2;
}
+ if (long_schedule != NIL) {
+ Eterm t = TUPLE2(hp, am_long_schedule, long_schedule); hp += 3;
+ res = CONS(hp, t, res); hp += 2;
+ }
if (large_heap != NIL) {
Eterm t = TUPLE2(hp, am_large_heap, large_heap); hp += 3;
res = CONS(hp, t, res); hp += 2;
@@ -2115,7 +2127,7 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
}
if (is_not_list(list)) goto error;
else {
- Uint long_gc, large_heap;
+ Uint long_gc, long_schedule, large_heap;
int busy_port, busy_dist_port;
system_blocked = 1;
@@ -2125,7 +2137,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
goto error;
- for (long_gc = 0, large_heap = 0, busy_port = 0, busy_dist_port = 0;
+ for (long_gc = 0, long_schedule = 0, large_heap = 0,
+ busy_port = 0, busy_dist_port = 0;
is_list(list);
list = CDR(list_val(list))) {
Eterm t = CAR(list_val(list));
@@ -2135,6 +2148,9 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
if (tp[1] == am_long_gc) {
if (! term_to_Uint(tp[2], &long_gc)) goto error;
if (long_gc < 1) long_gc = 1;
+ } else if (tp[1] == am_long_schedule) {
+ if (! term_to_Uint(tp[2], &long_schedule)) goto error;
+ if (long_schedule < 1) long_schedule = 1;
} else if (tp[1] == am_large_heap) {
if (! term_to_Uint(tp[2], &large_heap)) goto error;
if (large_heap < 16384) large_heap = 16384;
@@ -2150,6 +2166,7 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
prev = system_monitor_get(p);
erts_set_system_monitor(monitor_pid);
erts_system_monitor_long_gc = long_gc;
+ erts_system_monitor_long_schedule = long_schedule;
erts_system_monitor_large_heap = large_heap;
erts_system_monitor_flags.busy_port = !!busy_port;
erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
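system_monitor_get sizes its reply before building it: every option that is set costs a 2-word cons cell plus a 3-word 2-tuple (the repeated hsz += 2+3), and erts_bld_uint called with a NULL heap pointer only accumulates the integer's word count. A compressed sketch of that two-pass idiom for the new long_schedule entry; this is a fragment reusing names from the patch, not compilable on its own:

/* Pass 1: hpp == NULL, so only *szp is advanced. */
Uint hsz = 3;                       /* outer 2-tuple, as in the patch */
if (erts_system_monitor_long_schedule != 0) {
    hsz += 2 + 3;                   /* cons cell + {Tag, Value} tuple */
    (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_schedule);
}

/* Pass 2: allocate exactly hsz words, then build with szp == NULL. */
hp = HAlloc(p, hsz);
if (erts_system_monitor_long_schedule != 0) {
    Eterm v = erts_bld_uint(&hp, NULL, erts_system_monitor_long_schedule);
    Eterm t = TUPLE2(hp, am_long_schedule, v); hp += 3;
    res = CONS(hp, t, res); hp += 2;
}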
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 506c4813fa..06dfeb1260 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -153,7 +153,7 @@ do { \
#define binary_bytes(Bin) \
(*binary_val(Bin) == HEADER_PROC_BIN ? \
((ProcBin *) binary_val(Bin))->bytes : \
- (ASSERT_EXPR(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \
+ (ASSERT(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \
(byte *)(&(((ErlHeapBin *) binary_val(Bin))->data))))
void erts_init_binary(void);
@@ -166,7 +166,7 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size,
* Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1
*/
-BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg);
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif);
BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
@@ -183,7 +183,7 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
#endif
#define ERTS_CHK_BIN_ALIGNMENT(B) \
- do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)) } while(0)
+ do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)); } while(0)
ERTS_GLB_INLINE byte* erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr);
ERTS_GLB_INLINE void erts_free_aligned_binary_bytes(byte* buf);
@@ -225,7 +225,7 @@ erts_free_aligned_binary_bytes(byte* buf)
** These extra bytes were earlier (< R13B04) added by an alignment-bug
** in this code. Do we dare remove this in some major release (R14?) maybe?
*/
-#ifdef DEBUG
+#if defined(DEBUG) || defined(VALGRIND)
# define CHICKEN_PAD 0
#else
# define CHICKEN_PAD (sizeof(void*) - 1)
@@ -236,6 +236,8 @@ erts_bin_drv_alloc_fnf(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ return NULL;
res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -246,6 +248,8 @@ erts_bin_drv_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size);
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -257,6 +261,8 @@ erts_bin_nrml_alloc(Uint size)
{
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
void *res;
+ if (bsize < size) /* overflow */
+ erts_alloc_enomem(ERTS_ALC_T_BINARY, size);
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
return (Binary *) res;
@@ -267,11 +273,12 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ return NULL;
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -281,17 +288,14 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
+ ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY;
ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
- if (bp->flags & BIN_FLAG_DRV)
- nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
- else
- nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (bsize < size) /* overflow */
+ erts_realloc_enomem(type, bp, size);
+ nbp = erts_realloc_fnf(type, (void *) bp, bsize);
if (!nbp)
- erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV
- ? ERTS_ALC_T_DRV_BINARY
- : ERTS_ALC_T_BINARY),
- bp,
- bsize);
+ erts_realloc_enomem(type, bp, bsize);
ERTS_CHK_BIN_ALIGNMENT(nbp);
return nbp;
}
@@ -312,6 +316,7 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
{
Uint bsize = ERTS_MAGIC_BIN_SIZE(size);
Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ ASSERT(bsize > size);
if (!bptr)
erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
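The binary allocators now reject requests whose header-adjusted size wraps around: ERTS_SIZEOF_Binary(size) adds a constant header (plus CHICKEN_PAD), so on overflow the sum is strictly smaller than the requested payload and bsize < size is a sufficient test. The idiom in isolation, a minimal sketch with HEADER_SZ standing in for the real header size:

#include <stddef.h>
#include <stdlib.h>

#define HEADER_SZ 32u /* stand-in for ERTS_SIZEOF_Binary(0) + CHICKEN_PAD */

/* Adding a positive constant either leaves the sum >= size or wraps
 * to a value strictly smaller than size, so one comparison suffices. */
static void *alloc_with_header(size_t size)
{
    size_t total = size + HEADER_SZ;
    if (total < size)   /* overflow */
        return NULL;
    return malloc(total);
}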
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 3753b618e1..73765772c8 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -484,8 +484,16 @@ erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer
ERTS_FP_ERROR_THOROUGH(p, f32, return THE_NON_VALUE);
f.fd = f32;
} else {
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ FloatDef ftmp;
+ ftmp.fd = f64;
+ f.fw[0] = ftmp.fw[1];
+ f.fw[1] = ftmp.fw[0];
+ ERTS_FP_ERROR_THOROUGH(p, f.fd, return THE_NON_VALUE);
+#else
ERTS_FP_ERROR_THOROUGH(p, f64, return THE_NON_VALUE);
f.fd = f64;
+#endif
}
mb->offset += num_bits;
hp = HeapOnlyAlloc(p, FLOAT_SIZE_OBJECT);
@@ -1014,8 +1022,13 @@ erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags)
#endif
} else if (is_small(arg)) {
u.f64 = (double) signed_val(arg);
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ a = u.i32[1];
+ b = u.i32[0];
+#else
a = u.i32[0];
b = u.i32[1];
+#endif
} else if (is_big(arg)) {
if (big_to_double(arg, &u.f64) < 0) {
return 0;
@@ -1118,21 +1131,42 @@ erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags)
byte *bptr;
double f64;
float f32;
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ FloatDef fbuf, ftmp;
+#endif
if (num_bits == 64) {
if (is_float(arg)) {
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ FloatDef *fdp = (FloatDef*)(float_val(arg) + 1);
+ ftmp = *fdp;
+#else
bptr = (byte *) (float_val(arg) + 1);
+#endif
} else if (is_small(arg)) {
f64 = (double) signed_val(arg);
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ ftmp.fd = f64;
+#else
bptr = (byte *) &f64;
+#endif
} else if (is_big(arg)) {
if (big_to_double(arg, &f64) < 0) {
return 0;
}
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ ftmp.fd = f64;
+#else
bptr = (byte *) &f64;
+#endif
} else {
return 0;
}
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ fbuf.fw[0] = ftmp.fw[1];
+ fbuf.fw[1] = ftmp.fw[0];
+ bptr = fbuf.fb;
+#endif
} else if (num_bits == 32) {
if (is_float(arg)) {
FloatDef f;
@@ -1457,7 +1491,7 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
bptr->flags = 0;
bptr->orig_size = new_size;
erts_refc_init(&bptr->refc, 1);
- sys_memcpy(bptr->orig_bytes, binp->orig_bytes, pb->size);
+ sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
pb->val = bptr;
pb->bytes = (byte *) bptr->orig_bytes;
@@ -1776,6 +1810,11 @@ erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t siz
Uint rshift;
int cmp;
+ ASSERT(a_offs < 8 && b_offs < 8);
+
+ if (size == 0)
+ return 0;
+
if (((a_offs | b_offs | size) & 7) == 0) {
int byte_size = size >> 3;
return sys_memcmp(a_ptr, b_ptr, byte_size);
@@ -1784,58 +1823,72 @@ erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t siz
/* Compare bit by bit until a_ptr is aligned on byte boundary */
a = *a_ptr++;
b = *b_ptr++;
- while (size > 0) {
- a_bit = get_bit(a, a_offs);
- b_bit = get_bit(b, b_offs);
- if ((cmp = (a_bit-b_bit)) != 0) {
- return cmp;
- }
- size--;
- b_offs++;
- if (b_offs == 8) {
- b_offs = 0;
- b = *b_ptr++;
- }
- a_offs++;
- if (a_offs == 8) {
- a_offs = 0;
- a = *a_ptr++;
- break;
+ if (a_offs) {
+ for (;;) {
+ a_bit = get_bit(a, a_offs);
+ b_bit = get_bit(b, b_offs);
+ if ((cmp = (a_bit-b_bit)) != 0) {
+ return cmp;
+ }
+ if (--size == 0)
+ return 0;
+
+ b_offs++;
+ if (b_offs == 8) {
+ b_offs = 0;
+ b = *b_ptr++;
+ }
+ a_offs++;
+ if (a_offs == 8) {
+ a_offs = 0;
+ a = *a_ptr++;
+ break;
+ }
}
}
/* Compare byte by byte as long as at least 8 bits remain */
- lshift = b_offs;
- rshift = 8 - lshift;
- while (size >= 8) {
- byte b_cmp = (b << lshift);
- b = *b_ptr++;
- b_cmp |= b >> rshift;
- if ((cmp = (a - b_cmp)) != 0) {
- return cmp;
- }
+ if (size >= 8) {
+ lshift = b_offs;
+ rshift = 8 - lshift;
+ for (;;) {
+ byte b_cmp = (b << lshift);
+ b = *b_ptr++;
+ b_cmp |= b >> rshift;
+ if ((cmp = (a - b_cmp)) != 0) {
+ return cmp;
+ }
+ size -= 8;
+ if (size < 8)
+ break;
+ a = *a_ptr++;
+ }
+
+ if (size == 0)
+ return 0;
a = *a_ptr++;
- size -= 8;
}
/* Compare the remaining bits bit by bit */
- while (size > 0) {
- a_bit = get_bit(a, a_offs);
- b_bit = get_bit(b, b_offs);
- if ((cmp = (a_bit-b_bit)) != 0) {
- return cmp;
- }
- a_offs++;
- if (a_offs == 8) {
- a_offs = 0;
- a = *a_ptr++;
- }
- b_offs++;
- if (b_offs == 8) {
- b_offs = 0;
- b = *b_ptr++;
- }
- size--;
+ if (size > 0) {
+ for (;;) {
+ a_bit = get_bit(a, a_offs);
+ b_bit = get_bit(b, b_offs);
+ if ((cmp = (a_bit-b_bit)) != 0) {
+ return cmp;
+ }
+ if (--size == 0)
+ return 0;
+
+ a_offs++;
+ ASSERT(a_offs < 8);
+
+ b_offs++;
+ if (b_offs == 8) {
+ b_offs = 0;
+ b = *b_ptr++;
+ }
+ }
}
return 0;
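The DOUBLE_MIDDLE_ENDIAN paths cover targets (classically the old ARM FPA format) where a 64-bit double is stored as two 32-bit words in swapped order while the bytes inside each word keep their native order; the fix routes every float get/put through a FloatDef word swap. A standalone sketch of that swap, with a local union standing in for erts' FloatDef (the uint32_t word type is an assumption):

#include <stdint.h>

/* Stand-in for FloatDef: a double overlaid with its two 32-bit words. */
typedef union {
    double   fd;
    uint32_t fw[2];
} float_def;

/* Swap the two 32-bit halves to convert between native and
 * middle-endian word order; bytes within each word are untouched. */
static double swap_double_words(double d)
{
    float_def in, out;
    in.fd = d;
    out.fw[0] = in.fw[1];
    out.fw[1] = in.fw[0];
    return out.fd;
}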
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 88c6c34881..f594cb9392 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1699,7 +1699,7 @@ erts_early_init_cpu_topology(int no_schedulers,
}
max_main_threads = erts_get_cpu_configured(cpuinfo);
- if (max_main_threads > no_schedulers)
+ if (max_main_threads > no_schedulers || max_main_threads < 0)
max_main_threads = no_schedulers;
*max_main_threads_p = max_main_threads;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index d0afc8ed8d..8f246ffa07 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -125,6 +125,7 @@ get_meta_main_tab_lock(unsigned slot)
static erts_smp_spinlock_t meta_main_tab_main_lock;
static Uint meta_main_tab_first_free; /* Index of first free slot */
static int meta_main_tab_cnt; /* Number of active tables */
+static int meta_main_tab_top; /* Highest ever used slot + 1 */
static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
static Uint meta_main_tab_seq_incr;
static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
@@ -258,15 +259,17 @@ static void schedule_free_dbtable(DbTable* tb)
/*
* NON-SMP case: Caller is *not* allowed to access the *tb
* structure after this function has returned!
- * SMP case: Caller is allowed to access the *tb structure
- * until the bif has returned (we typically
- * need to unlock the table lock after this
- * function has returned).
+ * SMP case: Caller is allowed to access the *common* part of the *tb
+ * structure until the bif has returned (we typically need to
+ * unlock the table lock after this function has returned).
+ * Caller is *not* allowed to access the specialized part
+ * (hash or tree) of *tb after this function has returned.
*/
ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
- erts_schedule_thr_prgr_later_op(free_dbtable,
- (void *) tb,
- &tb->release.data);
+ erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
+ (void *) tb,
+ &tb->release.data,
+ sizeof(DbTable));
}
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
@@ -1468,6 +1471,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
ASSERT(slot>=0 && slot<db_max_tabs);
meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
meta_main_tab_cnt++;
+ if (slot >= meta_main_tab_top) {
+ ASSERT(slot == meta_main_tab_top);
+ meta_main_tab_top = slot + 1;
+ }
if (is_named) {
ret = BIF_ARG_1;
@@ -2057,27 +2064,31 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
{
DbTable* tb;
Eterm previous;
- int i, j;
+ int i;
Eterm* hp;
Eterm* hendp;
int t_tabs_cnt;
- int t_max_tabs;
+ int t_top;
erts_smp_spin_lock(&meta_main_tab_main_lock);
t_tabs_cnt = meta_main_tab_cnt;
- t_max_tabs = db_max_tabs;
+ t_top = meta_main_tab_top;
erts_smp_spin_unlock(&meta_main_tab_main_lock);
hp = HAlloc(BIF_P, 2*t_tabs_cnt);
hendp = hp + 2*t_tabs_cnt;
previous = NIL;
- j = 0;
- for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
+ for(i = 0; i < t_top; i++) {
erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
- j++;
+ if (hp == hendp) {
+ /* Racing table creator, grab some more heap space */
+ t_tabs_cnt = 10;
+ hp = HAlloc(BIF_P, 2*t_tabs_cnt);
+ hendp = hp + 2*t_tabs_cnt;
+ }
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
@@ -2235,7 +2246,7 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
CHECK_TABLES();
tptr = tuple_val(a1);
- ASSERT(arityval(*tptr) >= 1)
+ ASSERT(arityval(*tptr) >= 1);
if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
BIF_ERROR(p, BADARG);
@@ -2402,7 +2413,7 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
CHECK_TABLES();
tptr = tuple_val(a1);
- ASSERT(arityval(*tptr) >= 1)
+ ASSERT(arityval(*tptr) >= 1);
if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
BIF_ERROR(p, BADARG);
}
@@ -2848,6 +2859,7 @@ void init_db(void)
ERTS_ETS_MISC_MEM_ADD(size);
meta_main_tab_cnt = 0;
+ meta_main_tab_top = 0;
for (i=1; i<db_max_tabs; i++) {
SET_NEXT_FREE_SLOT(i-1,i);
}
@@ -3268,34 +3280,37 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
}
erts_smp_rwmtx_runlock(mmtl);
if (tb) {
- int reds;
- DbFixation** pp;
+ int reds = 0;
db_lock(tb, LCK_WRITE_REC);
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
+ if (!(tb->common.status & DB_DELETE)) {
+ DbFixation** pp;
+
+ #ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+ #endif
+ reds = 10;
+
+ for (pp = &tb->common.fixations; *pp != NULL;
+ pp = &(*pp)->next) {
+ if ((*pp)->pid == pid) {
+ DbFixation* fix = *pp;
+ erts_aint_t diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.ref,diff,0);
+ *pp = fix->next;
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ tb, fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ break;
+ }
+ }
+ #ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+ #endif
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ db_unfix_table_hash(&(tb->hash));
+ reds += 40;
}
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
}
db_unlock(tb, LCK_WRITE_REC);
BUMP_REDS(c_p, reds);
@@ -3810,6 +3825,13 @@ erts_db_foreach_offheap(DbTable *tb,
tb->common.meth->db_foreach_offheap(tb, func, arg);
}
+/* retrieve max number of ets tables */
+Uint
+erts_db_get_max_tabs()
+{
+ return db_max_tabs;
+}
+
/*
* For testing of meta tables only.
*
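ets_all_0 used to scan all db_max_tabs slots; meta_main_tab_top is a high-water mark (highest slot ever used, plus one) that only grows, so the scan can stop there instead. The pattern in isolation; this sketch assumes slots are handed out from a free list, so a freshly taken slot is either below the mark or exactly at it:

#define CAPACITY 1024

static unsigned char alive[CAPACITY];
static int top = 0;               /* highest ever used slot + 1 */

static void take_slot(int slot)   /* slot comes from a free list */
{
    alive[slot] = 1;
    if (slot >= top)
        top = slot + 1;           /* the mark only grows */
}

static void visit_alive(void (*f)(int))
{
    int i;
    for (i = 0; i < top; i++)     /* stop at the mark, not CAPACITY */
        if (alive[i])
            f(i);
}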
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 6b62e10eb7..5b4681fc90 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -79,6 +79,8 @@ extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
+Uint erts_db_get_max_tabs(void);
+
#endif
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 2fea4671e1..06dac8f161 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -2106,7 +2106,7 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
DbTableHash *tb = &tbl->hash;
int i;
- erts_print(to, to_arg, "Buckets: %d \n", NACTIVE(tb));
+ erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
if (show) {
for (i = 0; i < NACTIVE(tb); i++) {
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index d17bd9f693..908cec11d4 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -33,7 +33,12 @@ typedef struct hash_db_term {
DbTerm dbterm; /* The actual term */
} HashDbTerm;
+#ifdef ERTS_DB_HASH_LOCK_CNT
+#define DB_HASH_LOCK_CNT ERTS_DB_HASH_LOCK_CNT
+#else
#define DB_HASH_LOCK_CNT 64
+#endif
+
typedef struct db_table_hash_fine_locks {
union {
erts_smp_rwmtx_t lck;
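DB_HASH_LOCK_CNT is the number of rwlocks striped across a hash table's buckets, now overridable at build time with -DERTS_DB_HASH_LOCK_CNT=<n>. A generic sketch of lock striping; the mask-based bucket-to-lock mapping, and hence the power-of-two note, is an assumption about the hash code, and pthread stands in for the erts lock types:

#include <pthread.h>

#define LOCK_CNT 64 /* keep it a power of two for the mask below */

static pthread_rwlock_t locks[LOCK_CNT];

/* More stripes mean fewer writers colliding on hot tables, at the
 * cost of memory and slower whole-table lock sweeps. */
static pthread_rwlock_t *lock_for(unsigned long hval)
{
    return &locks[hval & (LOCK_CNT - 1)];
}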
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 25029ba90f..a62a83a928 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -485,7 +485,7 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
*ret = am_EOT;
return DB_ERROR_NONE;
}
- /* Walk down to the tree to the left */
+ /* Walk down the tree to the left */
if ((stack = get_static_stack(tb)) != NULL) {
stack->pos = stack->slot = 0;
}
@@ -531,7 +531,7 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
*ret = am_EOT;
return DB_ERROR_NONE;
}
- /* Walk down to the tree to the left */
+ /* Walk down the tree to the right */
if ((stack = get_static_stack(tb)) != NULL) {
stack->pos = stack->slot = 0;
}
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 713ac0ba18..3927615e04 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include "bif.h"
#include "big.h"
#include "erl_binary.h"
+#include "erl_map.h"
#include "erl_thr_progress.h"
#include "erl_db_util.h"
@@ -482,7 +483,8 @@ void
match_pseudo_process_init(void)
{
#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&match_pseudo_process_key);
+ erts_smp_tsd_key_create(&match_pseudo_process_key,
+ "erts_match_pseudo_process_key");
erts_smp_install_exit_handler(destroy_match_pseudo_process);
#else
match_pseudo_process = create_match_pseudo_process();
@@ -565,6 +567,12 @@ static DMCGuardBif guard_tab[] =
DBIF_ALL
},
{
+ am_is_map,
+ &is_map_1,
+ 1,
+ DBIF_ALL
+ },
+ {
am_is_binary,
&is_binary_1,
1,
@@ -631,6 +639,12 @@ static DMCGuardBif guard_tab[] =
DBIF_ALL
},
{
+ am_map_size,
+ &map_size_1,
+ 1,
+ DBIF_ALL
+ },
+ {
am_bit_size,
&bit_size_1,
1,
@@ -1838,7 +1852,7 @@ restart:
ep = termp;
break;
case matchArrayBind: /* When the array size is unknown. */
- ASSERT(termp);
+ ASSERT(termp || arity==0);
n = *pc++;
variables[n].term = dpm_array_to_list(psp, termp, arity);
break;
@@ -2319,6 +2333,8 @@ restart:
break;
case matchSilent:
--esp;
+ if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT)
+ break;
if (*esp == am_true) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
@@ -4971,7 +4987,8 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
save_cp = p->cp;
p->cp = NULL;
res = erts_match_set_run(p, mps, arr, n,
- ERTS_PAM_COPY_RESULT, &ret_flags);
+ ERTS_PAM_COPY_RESULT|ERTS_PAM_IGNORE_TRACE_SILENT,
+ &ret_flags);
p->cp = save_cp;
} else {
n = 0;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 90b79e6044..328b19dfc9 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -457,7 +457,7 @@ int erts_db_is_compiled_ms(Eterm term);
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
- (ASSERT_EXPR(IsMatchProgBinary((BP))), \
+ (ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
/*
** Debugging
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index b90d00f236..50bdc79506 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -29,6 +29,7 @@
#include "bif.h"
#include "beam_catches.h"
#include "erl_debug.h"
+#include "erl_map.h"
#define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y))
@@ -299,6 +300,9 @@ void erts_check_for_holes(Process* p)
ErlHeapFragment* hf;
Eterm* start;
+ if (p->flags & F_DISABLE_GC)
+ return;
+
start = p->last_htop ? p->last_htop : HEAP_START(p);
check_memory(start, HEAP_TOP(p));
p->last_htop = HEAP_TOP(p);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index e280563de1..f9938fc66c 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -132,10 +132,26 @@ typedef struct {
#define DO_WRITE ERL_DRV_WRITE
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
-#define ERL_DRV_EXTENDED_MAJOR_VERSION 2
+#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
#define ERL_DRV_EXTENDED_MINOR_VERSION 1
/*
+ * The emulator will refuse to load a driver with a major version
+ * lower than ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD. The load
+ * may, however, fail if the user has not removed use of deprecated
+ * symbols.
+ *
+ * The ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD has to allow
+ * loading of drivers built at least two major OTP releases
+ * ago.
+ *
+ * Bump of major version to 3 happened in OTP 17. That is, in
+ * OTP 19 we can increase ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ * to 3.
+ */
+#define ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD 2
+
+/*
* The emulator will refuse to load a driver with different major
* version than the one used by the emulator.
*/
@@ -182,7 +198,7 @@ typedef long long ErlDrvSInt64;
#error No 64-bit integer type
#endif
-#if defined(__WIN32__)
+#if defined(__WIN32__) || defined(_WIN32)
typedef ErlDrvUInt ErlDrvSizeT;
typedef ErlDrvSInt ErlDrvSSizeT;
#else
@@ -271,7 +287,6 @@ typedef struct ErlDrvCond_ ErlDrvCond;
typedef struct ErlDrvRWLock_ ErlDrvRWLock;
typedef int ErlDrvTSDKey;
-
/*
*
*/
@@ -365,17 +380,23 @@ typedef struct erl_drv_entry {
* It must initialize a ErlDrvEntry structure and return a pointer to it.
*/
+#ifdef STATIC_ERLANG_DRIVER
+# define ERLANG_DRIVER_NAME(NAME) NAME ## _driver_init
+#else
+# define ERLANG_DRIVER_NAME(NAME) driver_init
+#endif
+
/* For windows dynamic drivers */
#ifndef ERL_DRIVER_TYPES_ONLY
#if defined(__WIN32__)
# define DRIVER_INIT(DRIVER_NAME) \
- __declspec(dllexport) ErlDrvEntry* driver_init(void); \
- __declspec(dllexport) ErlDrvEntry* driver_init(void)
+ __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \
+ __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void)
#else
# define DRIVER_INIT(DRIVER_NAME) \
- ErlDrvEntry* driver_init(void); \
- ErlDrvEntry* driver_init(void)
+ ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \
+ ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void)
#endif
#define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0))
@@ -546,6 +567,11 @@ EXTERN int erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2);
EXTERN void erl_drv_thread_exit(void *resp);
EXTERN int erl_drv_thread_join(ErlDrvTid, void **respp);
+EXTERN char* erl_drv_mutex_name(ErlDrvMutex *mtx);
+EXTERN char* erl_drv_cond_name(ErlDrvCond *cnd);
+EXTERN char* erl_drv_rwlock_name(ErlDrvRWLock *rwlck);
+EXTERN char* erl_drv_thread_name(ErlDrvTid tid);
+
/*
* Misc.
*/
@@ -594,6 +620,8 @@ EXTERN int null_func(void);
#define ERL_DRV_INT64 ((ErlDrvTermData) 15) /* ErlDrvSInt64 * */
#define ERL_DRV_UINT64 ((ErlDrvTermData) 16) /* ErlDrvUInt64 * */
+#define ERL_DRV_MAP ((ErlDrvTermData) 17) /* ErlDrvUInt */
+
#ifndef ERL_DRIVER_TYPES_ONLY
/* make terms for driver_output_term and driver_send_term */
@@ -638,18 +666,14 @@ EXTERN int erl_drv_send_term(ErlDrvTermData port,
int len);
/* Async IO functions */
+EXTERN unsigned int driver_async_port_key(ErlDrvPort port);
+
EXTERN long driver_async(ErlDrvPort ix,
unsigned int* key,
void (*async_invoke)(void*),
void* async_data,
void (*async_free)(void*));
-/*
- * driver_async_cancel() is deprecated. It is scheduled for removal
- * in OTP-R16. For more information see the erl_driver(3) documentation.
- */
-EXTERN int driver_async_cancel(unsigned int key) ERL_DRV_DEPRECATED_FUNC;
-
/* Locks the driver in the machine "forever", there is
no unlock function. Note that this is almost never useful, as an open
port towards the driver locks it until the port is closed, why unexpected
@@ -671,6 +695,16 @@ EXTERN char *driver_dl_error(void);
EXTERN int erl_drv_putenv(char *key, char *value);
EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
+#ifdef __OSE__
+typedef ErlDrvUInt ErlDrvOseEventId;
+EXTERN union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent ev);
+EXTERN ErlDrvEvent erl_drv_ose_event_alloc(SIGSELECT sig, ErlDrvOseEventId handle,
+ ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra);
+EXTERN void erl_drv_ose_event_free(ErlDrvEvent ev);
+EXTERN void erl_drv_ose_event_fetch(ErlDrvEvent ev, SIGSELECT *sig,
+ ErlDrvOseEventId *handle, void **extra);
+#endif
+
#endif /* !ERL_DRIVER_TYPES_ONLY */
#ifdef WIN32_DYNAMIC_ERL_DRIVER
@@ -681,6 +715,3 @@ EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
/* also in global.h, but driver's can't include global.h */
void dtrace_drvport_str(ErlDrvPort port, char *port_buf);
-
-
-
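With ERLANG_DRIVER_NAME, a driver built with -DSTATIC_ERLANG_DRIVER gets a per-driver init symbol instead of the shared driver_init, so several statically linked drivers can coexist in one image. A minimal sketch of what that looks like from the driver side; my_drv and the empty entry are illustrative:

#include "erl_driver.h"

static ErlDrvEntry my_drv_entry; /* callbacks omitted in this sketch */

/* Under STATIC_ERLANG_DRIVER this expands to
 * ErlDrvEntry* my_drv_driver_init(void); dynamically loaded builds
 * keep the old driver_init name. */
DRIVER_INIT(my_drv)
{
    return &my_drv_entry;
}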
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index ea013a49a3..4e8c6dc68b 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -35,12 +35,20 @@ typedef struct {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
+#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+typedef enum {
+ ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DRV_DIRTY_JOB_IO_BOUND = 2
+} ErlDrvDirtyJobFlags;
+#endif
+
#endif /* __ERL_DRV_NIF_H__ */
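The new dirty_scheduler_support field lets a driver probe at runtime whether the emulator was built with dirty schedulers. A minimal sketch using the existing driver_system_info() call; the emulator fills only as much of the struct as it knows about, so the sketch zeroes it first so the field reads as absent on older emulators (an assumption about that contract):

#include "erl_driver.h"

static int have_dirty_schedulers(void)
{
    ErlDrvSysInfo si = {0};
    /* Fills at most sizeof(ErlDrvSysInfo) bytes of the struct. */
    driver_system_info(&si, sizeof(ErlDrvSysInfo));
    return si.dirty_scheduler_support;
}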
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index a49a155701..147249f751 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -78,8 +78,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-static ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
-
#else /* USE_THREADS */
static Uint tsd_len;
static void **tsd;
@@ -123,7 +121,7 @@ void erl_drv_thr_init(void)
{
int i;
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(&tid_key);
+ int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
@@ -188,6 +186,17 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
#endif
}
+
+char *
+erl_drv_mutex_name(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ return dmtx ? dmtx->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
@@ -258,6 +267,15 @@ erl_drv_cond_destroy(ErlDrvCond *dcnd)
#endif
}
+char *
+erl_drv_cond_name(ErlDrvCond *dcnd)
+{
+#ifdef USE_THREADS
+ return dcnd ? dcnd->name : NULL;
+#else
+ return NULL;
+#endif
+}
void
erl_drv_cond_signal(ErlDrvCond *dcnd)
@@ -331,6 +349,16 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
#endif
}
+char *
+erl_drv_rwlock_name(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ return drwlck ? drwlck->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
@@ -575,6 +603,7 @@ erl_drv_thread_create(char *name,
struct ErlDrvTid_ *dtid;
ethr_thr_opts ethr_opts;
ethr_thr_opts *use_opts;
+ ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
if (!opts)
use_opts = NULL;
@@ -617,6 +646,18 @@ erl_drv_thread_create(char *name,
#endif
}
+char *
+erl_drv_thread_name(ErlDrvTid tid)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
+ return dtid ? dtid->name : NULL;
+#else
+ return NULL;
+#endif
+}
+
+
ErlDrvTid
erl_drv_thread_self(void)
{
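The new *_name calls expose the name given at creation time, mainly for debug and lock-count output; they return NULL in a runtime without thread support, so callers must guard. A small illustrative use from driver code (the message text is mine):

#include <stdio.h>
#include "erl_driver.h"

static void debug_lock(ErlDrvMutex *mtx)
{
    char *name = erl_drv_mutex_name(mtx);
    /* NULL both for a NULL mutex and for non-threaded builds */
    fprintf(stderr, "locking %s\n", name ? name : "(unnamed)");
    erl_drv_mutex_lock(mtx);
}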
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 298909c921..0db42d4325 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -28,6 +28,7 @@
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_map.h"
#include "error.h"
#include "big.h"
#include "erl_gc.h"
@@ -47,10 +48,6 @@
*/
#define ALENGTH(a) (sizeof(a)/sizeof(a[0]))
-static erts_smp_spinlock_t info_lck;
-static Uint garbage_cols; /* no of garbage collections */
-static Uint reclaimed; /* no of words reclaimed in GCs */
-
# define STACK_SZ_ON_HEAP(p) ((p)->hend - (p)->stop)
# define OverRunCheck(P) \
if ((P)->stop < (P)->htop) { \
@@ -120,6 +117,8 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size);
+static void init_gc_info(ErtsGCInfo *gcip);
+
#ifdef HARDDEBUG
static void disallow_heap_frag_ref_in_heap(Process* p);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
@@ -137,13 +136,41 @@ static int num_heap_sizes; /* Number of heap sizes. */
Uint erts_test_long_gc_sleep; /* Only used for testing... */
+typedef struct {
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ Uint req_sched;
+ erts_smp_atomic32_t refc;
+} ErtsGCInfoReq;
+
+#if !HALFWORD_HEAP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
+ ErtsGCInfoReq,
+ 5,
+ ERTS_ALC_T_GC_INFO_REQ)
+#else
+static ERTS_INLINE ErtsGCInfoReq *
+gcireq_alloc(void)
+{
+ return erts_alloc(ERTS_ALC_T_GC_INFO_REQ,
+ sizeof(ErtsGCInfoReq));
+}
+
+static ERTS_INLINE void
+gcireq_free(ErtsGCInfoReq *ptr)
+{
+ erts_free(ERTS_ALC_T_GC_INFO_REQ, ptr);
+}
+#endif
+
/*
* Initialize GC global data.
*/
void
erts_init_gc(void)
{
- int i = 0;
+ int i = 0, ix;
Sint max_heap_size = 0;
ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
@@ -156,9 +183,6 @@ erts_init_gc(void)
ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
- erts_smp_spinlock_init(&info_lck, "gc_info");
- garbage_cols = 0;
- reclaimed = 0;
erts_test_long_gc_sleep = 0;
/*
@@ -199,6 +223,16 @@ erts_init_gc(void)
}
}
num_heap_sizes = i;
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ init_gc_info(&esdp->gc_info);
+ }
+
+#if !HALFWORD_HEAP
+ init_gcireq_alloc();
+#endif
+
}
/*
@@ -287,17 +321,6 @@ erts_heap_sizes(Process* p)
return res;
}
-void
-erts_gc_info(ErtsGCInfo *gcip)
-{
- if (gcip) {
- erts_smp_spin_lock(&info_lck);
- gcip->garbage_collections = garbage_cols;
- gcip->reclaimed = reclaimed;
- erts_smp_spin_unlock(&info_lck);
- }
-}
-
void
erts_offset_heap(Eterm* hp, Uint sz, Sint offs, Eterm* low, Eterm* high)
{
@@ -378,9 +401,18 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
Uint reclaimed_now = 0;
int done = 0;
Uint ms1, s1, us1;
+ ErtsSchedulerData *esdp;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
+
+ if (p->flags & F_DISABLE_GC) {
+ ASSERT(need == 0);
+ return 1;
+ }
+
+ esdp = erts_get_scheduler_data();
+
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_start);
}
@@ -455,11 +487,9 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
monitor_large_heap(p);
}
- erts_smp_spin_lock(&info_lck);
- garbage_cols++;
- reclaimed += reclaimed_now;
- erts_smp_spin_unlock(&info_lck);
-
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+
FLAGS(p) &= ~F_FORCE_GC;
#ifdef CHECK_FOR_HOLES
@@ -511,6 +541,9 @@ erts_garbage_collect_hibernate(Process* p)
Uint area_size;
Sint offs;
+ if (p->flags & F_DISABLE_GC)
+ ERTS_INTERNAL_ERROR("GC disabled");
+
/*
* Preliminaries.
*/
@@ -646,6 +679,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint n;
struct erl_off_heap_header** prev;
+ if (p->flags & F_DISABLE_GC)
+ return;
/*
* Set GC state.
*/
@@ -1125,7 +1160,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
old_htop = sweep_one_area(OLD_HTOP(p), old_htop, heap, heap_size);
}
OLD_HTOP(p) = old_htop;
- HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
+ HIGH_WATER(p) = n_htop;
if (MSO(p).first) {
sweep_off_heap(p, 0);
@@ -1983,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF has saved arguments, they need to be added
+ */
+ if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
+ Eterm* argv;
+ int argc;
+ if (erts_setup_nif_gc(p, &argv, &argc)) {
+ roots[n].v = argv;
+ roots[n].sz = argc;
+ n++;
+ }
+ }
+
ASSERT(n <= rootset->size);
mp = p->msg.first;
@@ -2520,6 +2569,7 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
p->dictionary->used,
offs, area, area_size);
}
+
offset_heap_ptr(&p->fvalue, 1, offs, area, area_size);
offset_heap_ptr(&p->ftrace, 1, offs, area, area_size);
offset_heap_ptr(&p->seq_trace_token, 1, offs, area, area_size);
@@ -2543,6 +2593,110 @@ offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_one_rootset(p, offs, area, area_size, objv, nobj);
}
+static void
+init_gc_info(ErtsGCInfo *gcip)
+{
+ gcip->reclaimed = 0;
+ gcip->garbage_cols = 0;
+}
+
+static void
+reply_gc_info(void *vgcirp)
+{
+ Uint64 reclaimed = 0, garbage_cols = 0;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsGCInfoReq *gcirp = (ErtsGCInfoReq *) vgcirp;
+ ErtsProcLocks rp_locks = (gcirp->req_sched == esdp->no
+ ? ERTS_PROC_LOCK_MAIN
+ : 0);
+ Process *rp = gcirp->proc;
+ Eterm ref_copy = NIL, msg;
+ Eterm *hp = NULL;
+ Eterm **hpp;
+ Uint sz, *szp;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+
+ ASSERT(esdp);
+
+ reclaimed = esdp->gc_info.reclaimed;
+ garbage_cols = esdp->gc_info.garbage_cols;
+
+ sz = 0;
+ hpp = NULL;
+ szp = &sz;
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, gcirp->ref);
+ else
+ *szp += REF_THING_SIZE;
+
+ msg = erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, garbage_cols),
+ erts_bld_uint64(hpp, szp, reclaimed));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ if (hpp)
+ break;
+
+ hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ szp = NULL;
+ hpp = &hp;
+ }
+
+ erts_queue_message(rp, &rp_locks, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+
+ if (gcirp->req_sched == esdp->no)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ erts_smp_proc_dec_refc(rp);
+
+ if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0)
+ gcireq_free(vgcirp);
+}
+
+Eterm
+erts_gc_info_request(Process *c_p)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ Eterm ref;
+ ErtsGCInfoReq *gcirp;
+ Eterm *hp;
+
+ gcirp = gcireq_alloc();
+ ref = erts_make_ref(c_p);
+ hp = &gcirp->ref_heap[0];
+
+ gcirp->proc = c_p;
+ gcirp->ref = STORE_NC(&hp, NULL, ref);
+ gcirp->req_sched = esdp->no;
+ erts_smp_atomic32_init_nob(&gcirp->refc,
+ (erts_aint32_t) erts_no_schedulers);
+
+ erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_gc_info,
+ (void *) gcirp);
+#endif
+
+ reply_gc_info((void *) gcirp);
+
+ return ref;
+}
+
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
static int
@@ -2628,7 +2782,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
refc = erts_refc_read(&u.ext->node->refc, 1);
break;
default:
- ASSERT(!!"erts_check_off_heap2: Invalid thing_word");
+ ASSERT(!"erts_check_off_heap2: Invalid thing_word");
}
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
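The per-scheduler gc_info replaces the globally locked counters, and erts_gc_info_request() gathers them by multicasting a request whose refcount is preset to the number of schedulers; each scheduler replies with its own {SchedNo, GarbageCols, Reclaimed} and the last decrement frees the request. A reduced sketch of that fan-out, using C11 atomics in place of erts_smp_atomic32:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refc;  /* preset to the number of repliers */
    /* ... requesting process, ref, etc. ... */
} gc_info_req;

/* Run once on every scheduler; whoever brings refc to zero cleans up. */
static void reply_gc_info_sketch(void *vreq)
{
    gc_info_req *req = vreq;
    /* ... build and send this scheduler's reply ... */
    if (atomic_fetch_sub(&req->refc, 1) == 1)
        free(req);
}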
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 1801df359a..5203dda263 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -20,6 +20,8 @@
#ifndef __ERL_GC_H__
#define __ERL_GC_H__
+#include "erl_map.h"
+
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -42,23 +44,24 @@ do { \
HTOP += 2; /* update tospace htop */ \
} while(0)
-#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- Sint nelts; \
- \
- ASSERT(is_header(HDR)); \
- gval = make_boxed(HTOP); \
- *ORIG = gval; \
- *HTOP++ = HDR; \
- *PTR++ = gval; \
- nelts = header_arity(HDR); \
- switch ((HDR) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR-1))->num_free+1; break; \
- } \
- while (nelts--) \
- *HTOP++ = *PTR++; \
+#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
+do { \
+ Eterm gval; \
+ Sint nelts; \
+ \
+ ASSERT(is_header(HDR)); \
+ nelts = header_arity(HDR); \
+ switch ((HDR) & _HEADER_SUBTAG_MASK) { \
+ case SUB_BINARY_SUBTAG: nelts++; break; \
+ case MAP_SUBTAG: nelts+=map_get_size(PTR) + 1; break; \
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
+ } \
+ gval = make_boxed(HTOP); \
+ *ORIG = gval; \
+ *HTOP++ = HDR; \
+ *PTR++ = gval; \
+ while (nelts--) *HTOP++ = *PTR++; \
+ \
} while(0)
#define in_area(ptr,start,nbytes) \
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index c2088929e9..e9d8249ee1 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -163,10 +163,10 @@ BKT_MIN_SZ(GFAllctr_t *gfallctr, int ix)
/* Prototypes of callback functions */
static Block_t * get_free_block (Allctr_t *, Uint,
- Block_t *, Uint, Uint32);
-static void link_free_block (Allctr_t *, Block_t *, Uint32);
-static void unlink_free_block (Allctr_t *, Block_t *, Uint32);
-static void update_last_aux_mbc (Allctr_t *, Carrier_t *, Uint32);
+ Block_t *, Uint);
+static void link_free_block (Allctr_t *, Block_t *);
+static void unlink_free_block (Allctr_t *, Block_t *);
+static void update_last_aux_mbc (Allctr_t *, Carrier_t *);
static Eterm info_options (Allctr_t *, char *, int *,
void *, Uint **, Uint *);
static void init_atoms (void);
@@ -203,8 +203,7 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
sys_memcpy((void *) gfallctr, (void *) &zero.allctr, sizeof(GFAllctr_t));
- init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
-
+ allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(GFFreeBlock_t);
@@ -222,7 +221,9 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
allctr->get_next_mbc_size = NULL;
allctr->creating_mbc = update_last_aux_mbc;
allctr->destroying_mbc = update_last_aux_mbc;
-
+ allctr->add_mbc = NULL;
+ allctr->remove_mbc = NULL;
+ allctr->largest_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -384,7 +385,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size)
static Block_t *
get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size, Uint32 flags)
+ Block_t *cand_blk, Uint cand_size)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
int unsafe_bi, min_bi;
@@ -403,7 +404,7 @@ get_free_block(Allctr_t *allctr, Uint size,
if (blk) {
if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
- unlink_free_block(allctr, blk, flags);
+ unlink_free_block(allctr, blk);
return blk;
}
if (min_bi < NO_OF_BKTS - 1) {
@@ -423,14 +424,14 @@ get_free_block(Allctr_t *allctr, Uint size,
ASSERT(blk);
if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
- unlink_free_block(allctr, blk, flags);
+ unlink_free_block(allctr, blk);
return blk;
}
static void
-link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+link_free_block(Allctr_t *allctr, Block_t *block)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
@@ -451,7 +452,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
static void
-unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+unlink_free_block(Allctr_t *allctr, Block_t *block)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
@@ -472,7 +473,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
}
static void
-update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc, Uint32 flags)
+update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
@@ -589,16 +590,16 @@ info_options(Allctr_t *allctr,
* to erts_gfalc_test() *
\* */
-unsigned long
-erts_gfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+UWord
+erts_gfalc_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- case 0x100: return (unsigned long) BKT_IX((GFAllctr_t *) a1, (Uint) a2);
- case 0x101: return (unsigned long) BKT_MIN_SZ((GFAllctr_t *) a1, (int) a2);
- case 0x102: return (unsigned long) NO_OF_BKTS;
- case 0x103: return (unsigned long)
+ case 0x100: return (UWord) BKT_IX((GFAllctr_t *) a1, (Uint) a2);
+ case 0x101: return (UWord) BKT_MIN_SZ((GFAllctr_t *) a1, (int) a2);
+ case 0x102: return (UWord) NO_OF_BKTS;
+ case 0x103: return (UWord)
find_bucket(&((GFAllctr_t *) a1)->bucket_mask, (int) a2);
- default: ASSERT(0); return ~((unsigned long) 0);
+ default: ASSERT(0); return ~((UWord) 0);
}
}
diff --git a/erts/emulator/beam/erl_goodfit_alloc.h b/erts/emulator/beam/erl_goodfit_alloc.h
index a554a6f466..385de0da23 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.h
+++ b/erts/emulator/beam/erl_goodfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -82,7 +82,7 @@ struct GFAllctr_t_ {
};
-unsigned long erts_gfalc_test(unsigned long, unsigned long, unsigned long);
+UWord erts_gfalc_test(UWord, UWord, UWord);
#endif /* #if defined(GET_ERL_GF_ALLOC_IMPL)
&& !defined(ERL_GF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index e7e4030900..61f8385efc 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -131,8 +131,10 @@ extern void ConWaitForExit(void);
static void erl_init(int ncpu,
int proc_tab_sz,
+ int legacy_proc_tab,
int port_tab_sz,
- int port_tab_sz_ignore_files);
+ int port_tab_sz_ignore_files,
+ int legacy_port_tab);
static erts_atomic_t exiting;
@@ -176,6 +178,11 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static int no_dirty_cpu_schedulers;
+static int no_dirty_cpu_schedulers_online;
+static int no_dirty_io_schedulers;
+#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -280,7 +287,9 @@ erts_short_init(void)
int ncpu = early_init(NULL, NULL);
erl_init(ncpu,
ERTS_DEFAULT_MAX_PROCESSES,
+ 0,
ERTS_DEFAULT_MAX_PORTS,
+ 0,
0);
erts_initialized = 1;
}
@@ -288,26 +297,33 @@ erts_short_init(void)
static void
erl_init(int ncpu,
int proc_tab_sz,
+ int legacy_proc_tab,
int port_tab_sz,
- int port_tab_sz_ignore_files)
+ int port_tab_sz_ignore_files,
+ int legacy_port_tab)
{
init_benchmarking();
erts_init_monitors();
- erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
- erts_init_process(ncpu, proc_tab_sz);
+ erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online);
+ no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , no_dirty_cpu_schedulers,
+ no_dirty_cpu_schedulers_online,
+ no_dirty_io_schedulers
+#endif
+ );
erts_init_cpu_topology(); /* Must be after init_scheduling */
+ erts_init_gc(); /* Must be after init_scheduling */
erts_alloc_late_init();
H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
erts_init_trace();
- erts_init_binary();
erts_init_bits();
erts_code_ix_init();
erts_init_fun_table();
@@ -320,6 +336,7 @@ erl_init(int ncpu,
erts_ddll_init();
init_emulator();
erts_ptab_init(); /* Must be after init_emulator() */
+ erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -327,13 +344,14 @@ erl_init(int ncpu,
init_dist();
erl_drv_thr_init();
erts_init_async();
- erts_init_io(port_tab_sz, port_tab_sz_ignore_files);
+ erts_init_io(port_tab_sz, port_tab_sz_ignore_files, legacy_port_tab);
init_load();
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
+ erts_init_external();
erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
erts_late_init_process();
#if HAVE_ERTS_MSEG
@@ -477,87 +495,109 @@ void erts_usage(void)
/* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */
- erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n");
- erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n",
+ erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n");
+ erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n",
ERTS_ASYNC_THREAD_MIN_STACK_SIZE,
ERTS_ASYNC_THREAD_MAX_STACK_SIZE);
- erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n");
- erts_fprintf(stderr, " valid range is [0-%d]\n",
+ erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n");
+ erts_fprintf(stderr, " valid range is [0-%d]\n",
ERTS_MAX_NO_OF_ASYNC_THREADS);
- erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n");
- erts_fprintf(stderr, " d (or no extra option) to disable the break\n");
- erts_fprintf(stderr, " handler, i to ignore break signals\n");
+ erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n");
+ erts_fprintf(stderr, " d (or no extra option) to disable the break\n");
+ erts_fprintf(stderr, " handler, i to ignore break signals\n");
/* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
- erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
- erts_fprintf(stderr, " respect to uptime\n");
+ erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
+ erts_fprintf(stderr, " respect to uptime\n");
- erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
- erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
- erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n");
- erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n",
+ erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
+ erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
+ erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n");
+ erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n",
H_DEFAULT_SIZE);
- erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
+ erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
VH_DEFAULT_SIZE);
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
- erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
- erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
- erts_fprintf(stderr, " Note that this flag is deprecated!\n");
- erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
- erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
- erts_fprintf(stderr, "-pc <set> Control what characters are considered printable (default latin1)\n");
- erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
+ erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
+ erts_fprintf(stderr, " Note that this flag is deprecated!\n");
+ erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
+ erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
+ erts_fprintf(stderr, "-pc <set> Control what characters are considered printable (default latin1)\n");
+ erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
- erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
ERTS_MIN_PORTS, ERTS_MAX_PORTS);
- erts_fprintf(stderr, "-R number set compatibility release number,\n");
- erts_fprintf(stderr, " valid range [%d-%d]\n",
+ erts_fprintf(stderr, "-R number set compatibility release number,\n");
+ erts_fprintf(stderr, " valid range [%d-%d]\n",
this_rel-2, this_rel);
- erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
- erts_fprintf(stderr, "-rg amount set reader groups limit\n");
- erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
- erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
- erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n");
- erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
- erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
- erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
- erts_fprintf(stderr, "-sct cput set cpu topology,\n");
- erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
- erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n");
- erts_fprintf(stderr, " default|legacy.\n");
- erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
- erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
- erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
+ erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
+ erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
+ erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n");
+ erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
+ erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-sct cput set cpu topology,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-secio bool enable/disable eager check I/O scheduling,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+ erts_fprintf(stderr, "-sub bool enable/disable scheduler utilization balancing,\n");
+#else
+ erts_fprintf(stderr, "-sub false disable scheduler utilization balancing,\n");
+#endif
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n");
+ erts_fprintf(stderr, " default|legacy.\n");
+ erts_fprintf(stderr, "-swct val set scheduler wake cleanup threshold, valid values are:\n");
+ erts_fprintf(stderr, " very_lazy|lazy|medium|eager|very_eager.\n");
+ erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
+ erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
+ erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
ERTS_SCHED_THREAD_MAX_STACK_SIZE);
- erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
- erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
- erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
- erts_fprintf(stderr, " numbers are [1-%d]\n",
+ erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
+ erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
+ erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
+ erts_fprintf(stderr, " numbers is %d\n",
ERTS_MAX_NO_OF_SCHEDULERS);
- erts_fprintf(stderr, "-t size set the maximum number of atoms the "
- "emulator can handle\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
+ erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
+ erts_fprintf(stderr, " processors available, respectively\n");
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n");
+ erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n");
+ erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n",
+ ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS);
+ erts_fprintf(stderr, "-SDPcpu p1:p2 specify dirty CPU schedulers (p1) and dirty CPU schedulers\n");
+ erts_fprintf(stderr, " online (p2) as percentages of logical processors configured\n");
+ erts_fprintf(stderr, " and logical processors available, respectively\n");
+ erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n",
+ ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
+#endif
+ erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE);
- erts_fprintf(stderr, "-T number set modified timing level,\n");
- erts_fprintf(stderr, " valid range is [0-%d]\n",
+ erts_fprintf(stderr, "-T number set modified timing level, valid range is [0-%d]\n",
ERTS_MODIFIED_TIMING_LEVELS-1);
- erts_fprintf(stderr, "-V print Erlang version\n");
+ erts_fprintf(stderr, "-V print Erlang version\n");
- erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
+ erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
- erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n");
- erts_fprintf(stderr, " see error_logger documentation for details\n");
- erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
- erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024);
+ erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n");
+ erts_fprintf(stderr, " see error_logger documentation for details\n");
+ erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
+ erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024);
erts_fprintf(stderr, "\n");
erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
@@ -622,12 +662,23 @@ early_init(int *argc, char **argv) /*
int ncpuavail;
int schdlrs;
int schdlrs_onln;
+ int schdlrs_percentage = 100;
+ int schdlrs_onln_percentage = 100;
int max_main_threads;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_cpu_scheds;
+ int dirty_cpu_scheds_online;
+ int dirty_cpu_scheds_pctg = 100;
+ int dirty_cpu_scheds_onln_pctg = 100;
+ int dirty_io_scheds;
+#endif
int max_reader_groups;
int reader_groups;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
+ erts_save_emu_args(*argc, argv);
+
erts_sched_compact_load = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
@@ -671,7 +722,7 @@ early_init(int *argc, char **argv) /*
#endif
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
- erts_tsd_key_create(&erts_is_crash_dumping_key);
+ erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
#else
erts_writing_erl_crash_dump = 0;
#endif
@@ -696,6 +747,12 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ dirty_cpu_scheds = no_schedulers;
+ dirty_cpu_scheds_online = no_schedulers_online;
+ dirty_io_scheds = 10;
+#endif
+
envbufsz = sizeof(envbuf);
/* erts_sys_getenv(_raw)() not initialized yet; need erts_sys_getenv__() */
@@ -736,7 +793,7 @@ early_init(int *argc, char **argv) /*
case 'A': {
/* set number of threads in thread pool */
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ if (((erts_async_max_threads = atoi(arg)) < ERTS_MIN_NO_OF_ASYNC_THREADS) ||
(erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
erts_fprintf(stderr,
"bad number of async threads %s\n",
@@ -747,63 +804,257 @@ early_init(int *argc, char **argv) /*
}
break;
}
- case 'S' : {
- int tot, onln;
- char *arg = get_arg(argv[i]+2, argv[i+1], &i);
- switch (sscanf(arg, "%d:%d", &tot, &onln)) {
- case 0:
- switch (sscanf(arg, ":%d", &onln)) {
+ case 'S' :
+ if (argv[i][2] == 'P') {
+ int ptot, ponln;
+ char *arg = get_arg(argv[i]+3, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &ponln)) {
+ case 1:
+ if (ponln < 0)
+ goto bad_SP;
+ ptot = 100;
+ goto chk_SP;
+ default:
+ goto bad_SP;
+ }
case 1:
- tot = no_schedulers;
- goto chk_S;
+ if (ptot < 0)
+ goto bad_SP;
+ ponln = ptot < 100 ? ptot : 100;
+ goto chk_SP;
+ case 2:
+ if (ptot < 0 || ponln < 0)
+ goto bad_SP;
+ chk_SP:
+ schdlrs_percentage = ptot;
+ schdlrs_onln_percentage = ponln;
+ break;
default:
- goto bad_S;
- }
- case 1:
- onln = tot < schdlrs_onln ? tot : schdlrs_onln;
- case 2:
- chk_S:
- if (tot > 0)
- schdlrs = tot;
- else
- schdlrs = no_schedulers + tot;
- if (onln > 0)
- schdlrs_onln = onln;
- else
- schdlrs_onln = no_schedulers_online + onln;
- if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ bad_SP:
+ erts_fprintf(stderr,
+ "bad schedulers percentage specifier %s\n",
+ arg);
+ erts_usage();
+ break;
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d scheduler percentages\n",
+ schdlrs_percentage, schdlrs_onln_percentage));
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (argv[i][2] == 'D') {
+ char *arg;
+ char *type = argv[i]+3;
+ if (strncmp(type, "Pcpu", 4) == 0) {
+ int ptot, ponln;
+ arg = get_arg(argv[i]+7, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &ponln)) {
+ case 1:
+ if (ponln < 0)
+ goto bad_SDPcpu;
+ ptot = 100;
+ goto chk_SDPcpu;
+ default:
+ goto bad_SDPcpu;
+ }
+ case 1:
+ if (ptot < 0)
+ goto bad_SDPcpu;
+ ponln = ptot < 100 ? ptot : 100;
+ goto chk_SDPcpu;
+ case 2:
+ if (ptot < 0 || ponln < 0)
+ goto bad_SDPcpu;
+ chk_SDPcpu:
+ dirty_cpu_scheds_pctg = ptot;
+ dirty_cpu_scheds_onln_pctg = ponln;
+ break;
+ default:
+ bad_SDPcpu:
+ erts_fprintf(stderr,
+ "bad dirty CPU schedulers percentage specifier %s\n",
+ arg);
+ erts_usage();
+ break;
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d dirty CPU scheduler percentages\n",
+ dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg));
+ } else if (strncmp(type, "cpu", 3) == 0) {
+ int tot, onln;
+ arg = get_arg(argv[i]+6, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &tot, &onln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &onln)) {
+ case 1:
+ tot = no_schedulers;
+ goto chk_SDcpu;
+ default:
+ goto bad_SDcpu;
+ }
+ case 1:
+ onln = tot < dirty_cpu_scheds_online ?
+ tot : dirty_cpu_scheds_online;
+ case 2:
+ chk_SDcpu:
+ if (tot > 0)
+ dirty_cpu_scheds = tot;
+ else
+ dirty_cpu_scheds = no_schedulers + tot;
+ if (onln > 0)
+ dirty_cpu_scheds_online = onln;
+ else
+ dirty_cpu_scheds_online = no_schedulers_online + onln;
+ if (dirty_cpu_scheds < 1 ||
+ ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS < dirty_cpu_scheds) {
+ erts_fprintf(stderr,
+ "bad amount of dirty CPU schedulers %d\n",
+ tot);
+ erts_usage();
+ }
+ if (dirty_cpu_scheds_online < 1 ||
+ dirty_cpu_scheds < dirty_cpu_scheds_online) {
+ erts_fprintf(stderr,
+ "bad amount of dirty CPU schedulers online %d "
+ "(total amount of dirty CPU schedulers %d)\n",
+ dirty_cpu_scheds_online, dirty_cpu_scheds);
+ erts_usage();
+ }
+ break;
+ default:
+ bad_SDcpu:
+ erts_fprintf(stderr,
+ "bad amount of dirty CPU schedulers %s\n",
+ arg);
+ erts_usage();
+ break;
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d dirty CPU scheduler(s)\n", tot, onln));
+ } else if (strncmp(type, "io", 2) == 0) {
+ arg = get_arg(argv[i]+5, argv[i+1], &i);
+ dirty_io_scheds = atoi(arg);
+ if (dirty_io_scheds < 0 ||
+ dirty_io_scheds > ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS) {
+ erts_fprintf(stderr,
+ "bad number of dirty I/O schedulers %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d dirty I/O scheduler(s)\n", dirty_io_scheds));
+ } else {
erts_fprintf(stderr,
- "bad amount of schedulers %d\n",
- tot);
+ "bad or missing dirty scheduler specifier: %s\n",
+ argv[i]);
erts_usage();
+ break;
}
- if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
+ }
+#endif
+ else {
+ int tot, onln;
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
+ switch (sscanf(arg, "%d:%d", &tot, &onln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &onln)) {
+ case 1:
+ tot = no_schedulers;
+ goto chk_S;
+ default:
+ goto bad_S;
+ }
+ case 1:
+ onln = tot < schdlrs_onln ? tot : schdlrs_onln;
+ case 2:
+ chk_S:
+ if (tot > 0)
+ schdlrs = tot;
+ else
+ schdlrs = no_schedulers + tot;
+ if (onln > 0)
+ schdlrs_onln = onln;
+ else
+ schdlrs_onln = no_schedulers_online + onln;
+ if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers %d\n",
+ tot);
+ erts_usage();
+ }
+ if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers online %d "
+ "(total amount of schedulers %d)\n",
+ schdlrs_onln, schdlrs);
+ erts_usage();
+ }
+ break;
+ default:
+ bad_S:
erts_fprintf(stderr,
- "bad amount of schedulers online %d "
- "(total amount of schedulers %d)\n",
- schdlrs_onln, schdlrs);
+ "bad amount of schedulers %s\n",
+ arg);
erts_usage();
+ break;
}
- break;
- default:
- bad_S:
- erts_fprintf(stderr,
- "bad amount of schedulers %s\n",
- arg);
- erts_usage();
- break;
- }
- VERBOSE(DEBUG_SYSTEM,
- ("using %d:%d scheduler(s)\n", tot, onln));
- break;
- }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d scheduler(s)\n", tot, onln));
+ }
+ break;
default:
break;
}
}
i++;
}
+
+#ifdef ERTS_SMP
+ /* apply any scheduler percentages */
+ if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) {
+ schdlrs = schdlrs * schdlrs_percentage / 100;
+ schdlrs_onln = schdlrs_onln * schdlrs_onln_percentage / 100;
+ if (schdlrs < 1)
+ schdlrs = 1;
+ if (ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ erts_fprintf(stderr,
+ "bad schedulers percentage %d "
+ "(total amount of schedulers %d)\n",
+ schdlrs_percentage, schdlrs);
+ erts_usage();
+ }
+ if (schdlrs_onln < 1)
+ schdlrs_onln = 1;
+ if (schdlrs < schdlrs_onln) {
+ erts_fprintf(stderr,
+ "bad schedulers online percentage %d "
+ "(total amount of schedulers %d, online %d)\n",
+ schdlrs_onln_percentage, schdlrs, schdlrs_onln);
+ erts_usage();
+ }
+ }
+#else
+ /* Silence gcc warnings */
+ (void)schdlrs_percentage;
+ (void)schdlrs_onln_percentage;
+#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /* apply any dirty scheduler percentages */
+ if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) {
+ dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100;
+ dirty_cpu_scheds_online = dirty_cpu_scheds_online * dirty_cpu_scheds_onln_pctg / 100;
+ }
+ if (dirty_cpu_scheds > schdlrs)
+ dirty_cpu_scheds = schdlrs;
+ if (dirty_cpu_scheds_online > schdlrs_onln)
+ dirty_cpu_scheds_online = schdlrs_onln;
+#endif
}
#ifndef USE_THREADS
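[Note: a worked example of the percentage arithmetic above: with 16 schedulers configured and 16 online, +SP 50:25 yields schdlrs = 16*50/100 = 8 and schdlrs_onln = 16*25/100 = 4. Integer division truncates and a zero result is bumped back to 1, so +SP 10 on a 6-core machine still keeps one scheduler (6*10/100 = 0, clamped to 1). The dirty CPU counts are scaled the same way and finally capped so they never exceed the regular totals.]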
@@ -816,6 +1067,11 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
+ no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
+#endif
erts_early_init_scheduling(no_schedulers);
alloc_opts.ncpu = ncpu;
@@ -833,10 +1089,18 @@ early_init(int *argc, char **argv) /*
*
* * Unmanaged threads that need to register:
* ** Async threads (see erl_async.c)
+ * ** Dirty scheduler threads
*/
erts_thr_progress_init(no_schedulers,
no_schedulers+2,
- erts_async_max_threads);
+#ifndef ERTS_DIRTY_SCHEDULERS
+ erts_async_max_threads
+#else
+ erts_async_max_threads +
+ erts_no_dirty_cpu_schedulers +
+ erts_no_dirty_io_schedulers
+#endif
+ );
#endif
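[Note: in concrete terms, a 16-scheduler SMP build with +A 10 and the defaults above (dirty CPU schedulers defaulting to the scheduler count, 10 dirty I/O schedulers) now registers 18 managed threads (the 16 schedulers plus two other system threads) and 10 + 16 + 10 = 36 unmanaged threads with the thread-progress subsystem.]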
erts_thr_q_init();
erts_init_utils();
@@ -921,6 +1185,9 @@ erl_start(int argc, char **argv)
int proc_tab_sz = ERTS_DEFAULT_MAX_PROCESSES;
int port_tab_sz = ERTS_DEFAULT_MAX_PORTS;
int port_tab_sz_ignore_files = 0;
+ int legacy_proc_tab = 0;
+ int legacy_port_tab = 0;
+
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -1266,31 +1533,50 @@ erl_start(int argc, char **argv)
case 'P': /* set maximum number of processes */
arg = get_arg(argv[i]+2, argv[i+1], &i);
- errno = 0;
- proc_tab_sz = strtol(arg, NULL, 10);
- if (errno != 0
- || proc_tab_sz < ERTS_MIN_PROCESSES
- || ERTS_MAX_PROCESSES < proc_tab_sz) {
- erts_fprintf(stderr, "bad number of processes %s\n", arg);
- erts_usage();
+ if (strcmp(arg, "legacy") == 0)
+ legacy_proc_tab = 1;
+ else {
+ errno = 0;
+ proc_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || proc_tab_sz < ERTS_MIN_PROCESSES
+ || ERTS_MAX_PROCESSES < proc_tab_sz) {
+ erts_fprintf(stderr, "bad number of processes %s\n", arg);
+ erts_usage();
+ }
}
break;
case 'Q': /* set maximum number of ports */
arg = get_arg(argv[i]+2, argv[i+1], &i);
- errno = 0;
- port_tab_sz = strtol(arg, NULL, 10);
- if (errno != 0
- || port_tab_sz < ERTS_MIN_PROCESSES
- || ERTS_MAX_PROCESSES < port_tab_sz) {
- erts_fprintf(stderr, "bad number of ports %s\n", arg);
- erts_usage();
+ if (strcmp(arg, "legacy") == 0)
+ legacy_port_tab = 1;
+ else {
+ errno = 0;
+ port_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || port_tab_sz < ERTS_MIN_PORTS
+ || ERTS_MAX_PORTS < port_tab_sz) {
+ erts_fprintf(stderr, "bad number of ports %s\n", arg);
+ erts_usage();
+ }
+ port_tab_sz_ignore_files = 1;
}
- port_tab_sz_ignore_files = 1;
break;
case 'S' : /* Was handled in early_init() just read past it */
- (void) get_arg(argv[i]+2, argv[i+1], &i);
+ if (argv[i][2] == 'D') {
+ char* type = argv[i]+3;
+ if (strcmp(type, "Pcpu") == 0)
+ (void) get_arg(argv[i]+7, argv[i+1], &i);
+ if (strcmp(type, "cpu") == 0)
+ (void) get_arg(argv[i]+6, argv[i+1], &i);
+ else if (strcmp(type, "io") == 0)
+ (void) get_arg(argv[i]+5, argv[i+1], &i);
+ } else if (argv[i][2] == 'P')
+ (void) get_arg(argv[i]+3, argv[i+1], &i);
+ else
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
break;
case 's' : {
@@ -1334,8 +1620,10 @@ erl_start(int argc, char **argv)
}
else if (has_prefix("cl", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
- if (sys_strcmp("true", arg) == 0)
+ if (sys_strcmp("true", arg) == 0) {
erts_sched_compact_load = 1;
+ erts_sched_balance_util = 0;
+ }
else if (sys_strcmp("false", arg) == 0)
erts_sched_compact_load = 0;
else {
@@ -1388,6 +1676,22 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("ecio", sub_param)) {
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+#ifndef __OSE__
+ if (sys_strcmp("true", arg) == 0)
+ erts_eager_check_io = 1;
+ else
+#endif
+ if (sys_strcmp("false", arg) == 0)
+ erts_eager_check_io = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad schedule eager check I/O value '%s'\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp(arg, "true") == 0)
@@ -1413,7 +1717,37 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("wt", sub_param) == 0) {
+ else if (has_prefix("ub", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp("true", arg) == 0) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+ erts_sched_balance_util = 1;
+#else
+ erts_fprintf(stderr,
+ "scheduler utilization balancing not "
+ "supported on this system\n");
+ erts_usage();
+#endif
+ }
+ else if (sys_strcmp("false", arg) == 0)
+ erts_sched_balance_util = 0;
+ else {
+ erts_fprintf(stderr, "bad scheduler utilization balancing "
+ " value '%s'\n", arg);
+ erts_usage();
+ }
+ }
+ else if (has_prefix("wct", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if (erts_sched_set_wake_cleanup_threshold(arg) != 0) {
+ erts_fprintf(stderr, "scheduler wake cleanup threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("scheduler wake cleanup threshold: %s\n", arg));
+ }
+ else if (has_prefix("wt", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_other_thresold(arg) != 0) {
erts_fprintf(stderr, "scheduler wakeup threshold: %s\n",
@@ -1423,7 +1757,7 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
- else if (sys_strcmp("ws", sub_param) == 0) {
+ else if (has_prefix("ws", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_other_type(arg) != 0) {
erts_fprintf(stderr, "scheduler wakeup strategy: %s\n",
@@ -1450,6 +1784,22 @@ erl_start(int argc, char **argv)
("suggested scheduler thread stack size %d kilo words\n",
erts_sched_thread_suggested_stack_size));
}
+ else if (has_prefix("fwi", sub_param)) {
+ long val;
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ errno = 0;
+ val = strtol(arg, NULL, 10);
+ if (errno != 0 || val < 0) {
+ erts_fprintf(stderr,
+ "bad scheduler forced wakeup "
+ "interval %s\n",
+ arg);
+ erts_usage();
+ }
+#ifdef ERTS_SMP
+ erts_runq_supervision_interval = val;
+#endif
+ }
else {
erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
erts_usage();
@@ -1630,8 +1980,10 @@ erl_start(int argc, char **argv)
erl_init(ncpu,
proc_tab_sz,
+ legacy_proc_tab,
port_tab_sz,
- port_tab_sz_ignore_files);
+ port_tab_sz_ignore_files,
+ legacy_port_tab);
load_preloaded();
erts_end_staging_code_ix();
@@ -1732,8 +2084,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
system_cleanup(flush_async);
save_statistics();
-
- an = abs(n);
+ if (n < 0)
+ an = -(unsigned int)n;
+ else
+ an = n;
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
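[Note: replacing abs(n) here is more than style: abs(INT_MIN) is undefined behavior in C, while converting to unsigned before negating is well defined, so the exit-code handling now survives the full int range.]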
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index b5b245288b..df7c443387 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -271,6 +271,18 @@ stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size)
* stat instrumentation callback functions
*/
+static void stat_pre_lock(void)
+{
+ erts_mtx_lock(&instr_mutex);
+}
+
+static void stat_pre_unlock(void)
+{
+ erts_mtx_unlock(&instr_mutex);
+}
+
+static ErtsAllocatorWrapper_t instr_wrapper;
+
static void *
stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
@@ -278,7 +290,9 @@ stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint ssize;
void *res;
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_mutex);
+ }
ssize = size + STAT_BLOCK_HEADER_SIZE;
res = (*real_af->alloc)(n, real_af->extra, ssize);
@@ -293,7 +307,9 @@ stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
res = (void *) ((StatBlock_t *) res)->mem;
}
- erts_mtx_unlock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ }
return res;
}
@@ -307,7 +323,9 @@ stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
void *sptr;
void *res;
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_mutex);
+ }
if (ptr) {
sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
@@ -329,7 +347,9 @@ stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
res = (void *) ((StatBlock_t *) res)->mem;
}
- erts_mtx_unlock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ }
return res;
}
@@ -340,7 +360,9 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *sptr;
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_mutex);
+ }
if (ptr) {
sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
@@ -352,7 +374,9 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
(*real_af->free)(n, real_af->extra, sptr);
- erts_mtx_unlock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ }
}
@@ -360,6 +384,18 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
* map stat instrumentation callback functions
*/
+static void map_stat_pre_lock(void)
+{
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+}
+
+static void map_stat_pre_unlock(void)
+{
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+}
+
static void *
map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
@@ -367,7 +403,9 @@ map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint msize;
void *res;
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_mutex);
+ }
msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
res = (*real_af->alloc)(n, real_af->extra, msize);
@@ -388,7 +426,9 @@ map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
res = (void *) mb->mem;
}
- erts_mtx_unlock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ }
return res;
}
@@ -402,8 +442,10 @@ map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
void *mptr;
void *res;
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+ }
if (ptr) {
mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
@@ -449,9 +491,10 @@ map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
res = (void *) mb->mem;
}
-
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+ }
return res;
}
@@ -462,8 +505,10 @@ map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *mptr;
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+ }
if (ptr) {
MapStatBlock_t *mb;
@@ -486,8 +531,10 @@ map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
(*real_af->free)(n, real_af->extra, mptr);
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+ }
}
@@ -496,8 +543,10 @@ static void dump_memory_map_to_stream(FILE *fp)
ErtsAlcType_t n;
MapStatBlock_t *bp;
int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock)
+ if (lock) {
+ ASSERT(!erts_is_allctr_wrapper_prelocked());
erts_mtx_lock(&instr_mutex);
+ }
/* Write header */
@@ -1155,6 +1204,7 @@ erts_instr_get_type_info(Process *proc)
Uint
erts_instr_init(int stat, int map_stat)
{
+ Uint extra_sz;
int i;
am_tot = NULL;
@@ -1186,8 +1236,6 @@ erts_instr_init(int stat, int map_stat)
sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
- continue;
if (erts_allctrs_info[i].enabled)
stats->ap[i] = &stats->a[i];
else
@@ -1201,27 +1249,28 @@ erts_instr_init(int stat, int map_stat)
erts_instr_memory_map = 1;
erts_instr_stat = 1;
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
- continue;
erts_allctrs[i].alloc = map_stat_alloc;
erts_allctrs[i].realloc = map_stat_realloc;
erts_allctrs[i].free = map_stat_free;
erts_allctrs[i].extra = (void *) &real_allctrs[i];
}
- return MAP_STAT_BLOCK_HEADER_SIZE;
+ instr_wrapper.lock = map_stat_pre_lock;
+ instr_wrapper.unlock = map_stat_pre_unlock;
+ extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
}
else {
erts_instr_stat = 1;
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
- continue;
erts_allctrs[i].alloc = stat_alloc;
erts_allctrs[i].realloc = stat_realloc;
erts_allctrs[i].free = stat_free;
erts_allctrs[i].extra = (void *) &real_allctrs[i];
}
- return STAT_BLOCK_HEADER_SIZE;
+ instr_wrapper.lock = stat_pre_lock;
+ instr_wrapper.unlock = stat_pre_unlock;
+ extra_sz = STAT_BLOCK_HEADER_SIZE;
}
-
+ erts_allctr_wrapper_prelock_init(&instr_wrapper);
+ return extra_sz;
}
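[Note: the wrapper registration ties the pieces above together: erts_allctr_wrapper_prelock_init() publishes the pre-lock/pre-unlock callbacks, so the allocator framework can take the instrumentation mutexes once up front, and the erts_is_allctr_wrapper_prelocked() tests added to each callback then skip the per-call locking to avoid self-deadlock on an already-held mutex.]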
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 69bb4be717..b105ece6f1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -123,6 +123,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
+#ifdef ERTS_DIRTY_SCHEDULERS
+ { "dirty_run_queue_sleep_list", "address" },
+#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
@@ -132,10 +135,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#endif /* __WIN32__ */
{ "alcu_init_atoms", NULL },
{ "mseg_init_atoms", NULL },
+ { "mmap_init_atoms", NULL },
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
{ "atom_tab", NULL },
{ "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
@@ -143,21 +146,21 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "ptimer_pre_alloc_lock", "address", },
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
+ { "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
+ { "port_table", NULL },
#endif
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
{ "alcu_allocator", "index" },
- { "sbmbc_alloc", "index" },
{ "mseg", NULL },
#if HALFWORD_HEAP
{ "pmmap", NULL },
#endif
#ifdef ERTS_SMP
- { "port_sched_lock", "port_id" },
{ "port_task_pre_alloc_lock", "address" },
{ "proclist_pre_alloc_lock", "address" },
- { "port_table", NULL },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
{ "gc_info", NULL },
@@ -181,7 +184,14 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "efile_drv dtrace mutex", NULL },
#endif
{ "mtrace_buf", NULL },
- { "erts_alloc_hard_debug", NULL }
+#ifdef __WIN32__
+#ifdef ERTS_SMP
+ { "sys_gethrtime", NULL },
+#endif
+#endif
+ { "erts_alloc_hard_debug", NULL },
+ { "hard_dbg_mseg", NULL },
+ { "erts_mmap", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -217,8 +227,7 @@ rw_op_str(Uint16 flags)
case ERTS_LC_FLG_LO_READ:
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Only write flag present");
default:
break;
}
@@ -231,6 +240,8 @@ struct erts_lc_locked_lock_t_ {
erts_lc_locked_lock_t *prev;
UWord extra;
Sint16 id;
+ char *file;
+ unsigned int line;
Uint16 flags;
};
@@ -258,9 +269,9 @@ union erts_lc_free_block_t_ {
static ethr_tsd_key locks_key;
-static erts_lc_locked_locks_t *erts_locked_locks;
+static erts_lc_locked_locks_t *erts_locked_locks = NULL;
-static erts_lc_free_block_t *free_blocks;
+static erts_lc_free_block_t *free_blocks = NULL;
#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
@@ -299,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -313,8 +323,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -354,11 +363,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -420,47 +429,51 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
+new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
l_lck->next = NULL;
l_lck->prev = NULL;
l_lck->id = lck->id;
l_lck->extra = lck->extra;
+ l_lck->file = file;
+ l_lck->line = line;
l_lck->flags = lck->flags | op_flags;
return l_lck;
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+ char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
? erts_lock_order[id].name
: "unknown");
+ erts_fprintf(stderr,"%s'%s:",prefix,lname);
+
if (is_not_immed(extra))
- erts_fprintf(stderr,
- "%s'%s:%p%s'%s%s",
- prefix,
- lname,
- _unchecked_boxed_val(extra),
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
- erts_fprintf(stderr,
- "%s'%s:%T%s'%s%s",
- prefix,
- lname,
- extra,
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%T",extra);
+ erts_fprintf(stderr,"%s",lock_type(flags));
+
+ if (file)
+ erts_fprintf(stderr,"(%s:%d)",file,line);
+
+ erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+}
+
+static void
+print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+{
+ raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
static void
print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
{
- print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
+ raw_print_lock(prefix, lck->id, lck->extra, lck->flags, NULL, 0, suffix);
}
static void
@@ -476,7 +489,8 @@ print_curr_locks(erts_lc_locked_locks_t *l_lcks)
"Currently these locks are locked by the %s thread:\n",
l_lcks->thread_name);
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
- print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
+ raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags,
+ l_lck->file, l_lck->line, "\n");
}
}
@@ -674,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -992,7 +1006,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1004,7 +1019,7 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;
+ l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1045,13 +1060,14 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1119,7 +1135,8 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1131,7 +1148,7 @@ void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1222,15 +1239,15 @@ erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
}
void
-erts_lc_trylock(int locked, erts_lc_lock_t *lck)
+erts_lc_trylock_x(int locked, erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_trylock_flg(locked, lck, 0);
+ erts_lc_trylock_flg_x(locked, lck, 0, file, line);
}
void
-erts_lc_lock(erts_lc_lock_t *lck)
+erts_lc_lock_x(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_lock_flg(lck, 0);
+ erts_lc_lock_flg_x(lck, 0, file, line);
}
void
@@ -1244,9 +1261,9 @@ void erts_lc_might_unlock(erts_lc_lock_t *lck)
erts_lc_might_unlock_flg(lck, 0);
}
-void erts_lc_require_lock(erts_lc_lock_t *lck)
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_require_lock_flg(lck, 0);
+ erts_lc_require_lock_flg(lck, 0, file, line);
}
void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
@@ -1310,9 +1327,9 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
- erts_tsd_key_create(&locks_key);
+ erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 068340abe7..3f7f417e61 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -35,6 +35,11 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
typedef struct {
int inited;
Sint16 id;
@@ -79,13 +84,16 @@ void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
-void erts_lc_trylock(int locked, erts_lc_lock_t *lck);
-void erts_lc_lock(erts_lc_lock_t *lck);
+void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
+ char* file, unsigned int line);
+void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
@@ -96,10 +104,11 @@ int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_require_lock(erts_lc_lock_t *lck);
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
@@ -116,4 +125,9 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+#define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__)
+#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__)
+
#endif /* #ifndef ERTS_LOCK_CHECK_H__ */
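[Note: the net effect of these header changes: with lock checking enabled, every erts_lc_lock()/erts_lc_trylock() call site expands through the macros to the _x variants, capturing __FILE__ and __LINE__; new_locked_lock() stores them and raw_print_lock() prints them, so a lock-order violation report shows where each currently held lock was taken.]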
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 5f75b0ac0b..cf6996ea06 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) {
ethr_mutex_unlock(&lcnt_data_lock);
}
+const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+static ERTS_INLINE int lcnt_log2(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
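[Note: lcnt_log2() is the classic branch-free floor(log2) for 64-bit words: the OR-shift cascade turns v into 2^(k+1) - 1 where k = floor(log2 v), v - (v >> 1) then isolates the single bit 2^k, and multiplying by the de Bruijn-style constant maps each power of two to a distinct 6-bit index into log2_tab64. A minimal standalone check against a naive loop; a sketch, assuming lcnt_log2() and log2_tab64[] from the hunk above are in scope:

    #include <assert.h>
    #include <stdint.h>

    static int naive_log2(uint64_t v)   /* floor(log2(v)) for v > 0 */
    {
        int k = 0;
        while (v >>= 1)
            k++;
        return k;
    }

    int main(void)
    {
        uint64_t v;
        for (v = 1; v < ((uint64_t)1 << 20); v++)
            assert(lcnt_log2(v) == naive_log2(v));
        return 0;
    }
]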
static char* lcnt_lock_type(Uint16 flag) {
switch(flag & ERTS_LCNT_LT_ALL) {
@@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
stats->timer_n = 0;
stats->file = (char *)str_undefined;
stats->line = 0;
+ sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
}
static void lcnt_time(erts_lcnt_time_t *time) {
-#ifdef HAVE_GETHRTIME
+#if 0 || defined(HAVE_GETHRTIME)
SysHrTime hr_time;
hr_time = sys_gethrtime();
time->s = (unsigned long)(hr_time / 1000000000LL);
time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
-#else
- SysTimeval tv;
- sys_gettimeofday(&tv);
- time->s = tv.tv_sec;
- time->ns = tv.tv_usec*1000LL;
+#else
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ time->s = tv.tv_sec;
+ time->ns = tv.tv_usec*1000LL;
#endif
}
@@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_
dns += 1000000000LL;
}
+ ASSERT(ds >= 0);
+
d->s = ds;
d->ns = dns;
}
-/* difference d must be positive */
+/* difference d must be non-negative */
static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- unsigned long ngns = 0;
-
t->s += d->s;
t->ns += d->ns;
- ngns = t->ns / 1000000000LL;
+ t->s += t->ns / 1000000000LL;
t->ns = t->ns % 1000000000LL;
-
- t->s += ngns;
}
static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
erts_lcnt_thread_data_t *eltd;
eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ if (!eltd) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
@@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) {
return "--";
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- erts_aint_t colls, tries, w_state, r_state;
- erts_lcnt_lock_stats_t *stats = NULL;
-
+static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
+ erts_aint_t w_state, r_state;
char *type;
- int i;
-
+
+ if (strcmp(lock->name, "run_queue") != 0) return;
type = lcnt_lock_type(lock->flag);
r_state = ethr_atomic_read(&lock->r_state);
w_state = ethr_atomic_read(&lock->w_state);
-
if (lock->flag & flag) {
- erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- lock->id,
- extra);
+ erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
+ action,
+ lock->name,
+ r_state,
+ w_state,
+ type,
+ lock->id);
}
}
-
-static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- if (strcmp(lock->name, "proc_main") == 0) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
- }
-}
-
#endif
static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
unsigned int i;
erts_lcnt_lock_stats_t *stats = NULL;
-
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
+ for (i = 0; i < lock->n_stats; i++) {
+ if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
+ return &(lock->stats[i]);
+ }
+ }
+ if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ stats = &lock->stats[lock->n_stats];
+ lock->n_stats++;
+ stats->file = file;
+ stats->line = line;
+ return stats;
+ }
}
return &lock->stats[0];
+}
+static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
+ int idx;
+ unsigned long r;
+
+ if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+ if (r) idx = lcnt_log2(r);
+ else idx = 0;
+ }
+ hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
+static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
+ erts_lcnt_time_t *time_wait) {
ethr_atomic_inc(&stats->tries);
@@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic
if (time_wait) {
lcnt_time_add(&(stats->timer), time_wait);
stats->timer_n++;
+ lcnt_update_stats_hist(&stats->hist,time_wait);
}
}
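[Note: bucketing example with the gettimeofday fallback constants (ERTS_LCNT_HISTOGRAM_RSHIFT = 10, 20 slots, defined in erl_lock_count.h below): a 5 microsecond wait gives r = 5000 >> 10 = 4 and lands in slot lcnt_log2(4) = 2, while any wait of a second or more, or above ERTS_LCNT_HISTOGRAM_MAX_NS (2^28 - 1 ns, roughly 268 ms), is counted in the last slot.]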
@@ -236,7 +263,7 @@ void erts_lcnt_init() {
/* init tsd */
lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key);
+ ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
lcnt_lock();
@@ -248,6 +275,9 @@ void erts_lcnt_init() {
/* init lcnt structure */
erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ if (!erts_lcnt_data) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
erts_lcnt_data->current_locks = erts_lcnt_list_init();
erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
@@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
erts_lcnt_lock_list_t *list;
list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ if (!list) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
list->head = NULL;
list->tail = NULL;
list->n = 0;
@@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
/* interface to erl_threads.h */
/* only lock on init and destroy, all others should use atomics */
void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+ erts_lcnt_init_lock_x(lock, name, flag, NIL);
}
+
void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
if (!name) {
@@ -360,7 +394,6 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
}
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
-
lcnt_unlock();
}
@@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* copy structure and insert the copy */
deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ if (!deleted_lock) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
deleted_lock->next = NULL;
@@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
} else {
eltd->lock_in_conflict = 0;
@@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc( &lock->w_state);
+ ethr_atomic_inc(&lock->w_state);
eltd = lcnt_get_thread_data();
@@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
* 'atomically'. All other locks will block the thread if w_state > 0
* i.e. locked.
*/
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
-
} else {
eltd->lock_in_conflict = 0;
}
@@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
/* should check if this thread was "waiting" */
-
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- ethr_atomic_dec( &lock->w_state);
+ ethr_atomic_dec(&lock->w_state);
}
/* erts_lcnt_lock_post
@@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
+ ethr_atomic_inc(&lock->flowstate);
}
#endif
@@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
ASSERT(eltd);
/* if lock was in conflict, time it */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- stats = lcnt_get_lock_stats(lock, file, line);
- } else {
- stats = &lock->stats[0];
- }
-
+ stats = lcnt_get_lock_stats(lock, file, line);
if (eltd->timer_set) {
lcnt_time(&timer);
lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
-
eltd->timer_set--;
ASSERT(eltd->timer_set >= 0);
} else {
@@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
/* flowstate */
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
- ethr_atomic_dec( &lock->flowstate);
+ ethr_atomic_dec(&lock->flowstate);
/* write state */
w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0)
+ ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
}
@@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
ethr_atomic_inc( &lock->flowstate);
#endif
ethr_atomic_inc(&lock->w_state);
-
lcnt_update_stats(&(lock->stats[0]), 0, NULL);
-
} else {
ethr_atomic_inc(&lock->stats[0].tries);
ethr_atomic_inc(&lock->stats[0].colls);
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index a4fc91b510..ffbb93da1b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -35,6 +35,10 @@
* | | | - collisions (including trylock busy)
* | | | - timer (time spent in waiting for lock)
* | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
*
* Each instance of a lock is the unique lock, i.e. set and id in that set.
* For each lock there is a set of statistics with where and what impact
@@ -61,9 +65,24 @@
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
#include "ethread.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+
+/* histogram */
+#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
+#if 0 || defined(HAVE_GETHRTIME)
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
+#else
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
+#endif
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
@@ -98,6 +117,10 @@ typedef struct {
extern erts_lcnt_time_t timer_start;
+typedef struct {
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 histogram of nanosecond occurrences */
+} erts_lcnt_hist_t;
+
typedef struct erts_lcnt_lock_stats_s {
/* "tries" and "colls" needs to be atomic since
* trylock busy does not aquire a lock and there
@@ -112,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s {
unsigned long timer_n; /* #times waited for lock */
erts_lcnt_time_t timer; /* total wait time for lock */
+ erts_lcnt_hist_t hist;
} erts_lcnt_lock_stats_t;
/* rw locks uses both states, other locks only uses w_state */
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
new file mode 100644
index 0000000000..5e740aacdd
--- /dev/null
+++ b/erts/emulator/beam/erl_map.c
@@ -0,0 +1,837 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ *
+ * Author: Björn-Egil Dahlberg
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+
+#include "erl_map.h"
+
+/* BIFs
+ *
+ * DONE:
+ * - erlang:is_map/1
+ * - erlang:map_size/1
+ *
+ * - maps:find/2
+ * - maps:from_list/1
+ * - maps:get/2
+ * - maps:is_key/2
+ * - maps:keys/1
+ * - maps:merge/2
+ * - maps:new/0
+ * - maps:put/3
+ * - maps:remove/2
+ * - maps:to_list/1
+ * - maps:update/3
+ * - maps:values/1
+ *
+ * TODO:
+ * - maps:foldl/3
+ * - maps:foldr/3
+ * - maps:map/3
+ * - maps:size/1
+ * - maps:without/2
+ *
+ * DEBUG: for sharing calculation
+ * - erts_internal:map_to_tuple_keys/1
+ */
+
+/* erlang:map_size/1
+ * the corresponding instruction is implemented in:
+ * beam/erl_bif_guard.c
+ */
+
+BIF_RETTYPE map_size_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ Eterm *hp;
+ Uint hsz = 0;
+ map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ Uint n = map_get_size(mp);
+
+ erts_bld_uint(NULL, &hsz, n);
+ hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_bld_uint(&hp, NULL, n));
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
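[Note: the erts_bld_uint(NULL, &hsz, n) followed by erts_bld_uint(&hp, NULL, n) is the usual ERTS two-pass build idiom: with a size pointer the call only accumulates the words needed, with a heap pointer it actually writes the term into the block HAlloc returned.]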
+
+/* maps:to_list/1
+ */
+
+BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ Uint n;
+ Eterm* hp;
+ Eterm *ks,*vs, res, tup;
+ map_t *mp = (map_t*)map_val(BIF_ARG_1);
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+ n = map_get_size(mp);
+ hp = HAlloc(BIF_P, (2 + 3) * n);
+ res = NIL;
+
+ while(n--) {
+ tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+
+ BIF_RET(res);
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
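[Note: the HAlloc of (2 + 3) * n words is exact: each {K,V} pair costs a 3-word tuple (arity header plus two elements) and a 2-word cons cell. Because the loop runs from n-1 down to 0 and conses onto res, the pair at index 0 ends up first, so the list comes out in the sorted key order of the map.]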
+
+/* maps:find/2
+ * return value if key *matches* a key in the map
+ */
+
+int erts_maps_find(Eterm key, Eterm map, Eterm *value) {
+
+ Eterm *ks,*vs;
+ map_t *mp;
+ Uint n,i;
+
+ mp = (map_t*)map_val(map);
+ n = map_get_size(mp);
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ for( i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ *value = vs[i];
+ return 1;
+ }
+ }
+ return 0;
+}
+
+BIF_RETTYPE maps_find_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ Eterm *hp, value,res;
+
+ if (erts_maps_find(BIF_ARG_1, BIF_ARG_2, &value)) {
+ hp = HAlloc(BIF_P, 3);
+ res = make_tuple(hp);
+ *hp++ = make_arityval(2);
+ *hp++ = am_ok;
+ *hp++ = value;
+ BIF_RET(res);
+ }
+
+ BIF_RET(am_error);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+/* maps:get/2
+ * return value if key *matches* a key in the map
+ * exception bad_key if none matches
+ */
+
+
+int erts_maps_get(Eterm key, Eterm map, Eterm *value) {
+ Eterm *ks,*vs;
+ map_t *mp;
+ Uint n,i;
+
+ mp = (map_t*)map_val(map);
+ n = map_get_size(mp);
+
+ if (n == 0)
+ return 0;
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ *value = vs[i];
+ return 1;
+ }
+ }
+ }
+
+ for( i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ *value = vs[i];
+ return 1;
+ }
+ }
+ return 0;
+}
+
+BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ Eterm *hp;
+ Eterm value, error;
+ char *s_error;
+
+ if (erts_maps_get(BIF_ARG_1, BIF_ARG_2, &value)) {
+ BIF_RET(value);
+ }
+
+ s_error = "bad_key";
+ error = am_atom_put(s_error, sys_strlen(s_error));
+
+ hp = HAlloc(BIF_P, 3);
+ BIF_P->fvalue = TUPLE2(hp, error, BIF_ARG_1);
+ BIF_ERROR(BIF_P, EXC_ERROR_2);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/* maps:from_list/1
+ * List may be unsorted [{K,V}]
+ */
+
+BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) {
+ Eterm *kv, item = BIF_ARG_1;
+ Eterm *hp, *thp,*vs, *ks, keys, res;
+ map_t *mp;
+ Uint size = 0, unused_size = 0;
+ Sint c = 0;
+ Sint idx = 0;
+
+ if (is_list(item) || is_nil(item)) {
+
+ /* Calculate size and check validity */
+
+ while(is_list(item)) {
+ res = CAR(list_val(item));
+ if (is_not_tuple(res))
+ goto error;
+
+ kv = tuple_val(res);
+ if (*kv != make_arityval(2))
+ goto error;
+
+ size++;
+ item = CDR(list_val(item));
+ }
+
+ if (is_not_nil(item))
+ goto error;
+
+ hp = HAlloc(BIF_P, 3 + 1 + (2 * size));
+ thp = hp;
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ ks = hp;
+ hp += size;
+ mp = (map_t*)hp;
+ res = make_map(mp);
+ hp += MAP_HEADER_SIZE;
+ vs = hp;
+
+ mp->thing_word = MAP_HEADER;
+ mp->size = size; /* set later, might shrink */
+ mp->keys = keys;
+
+ if (size == 0)
+ BIF_RET(res);
+
+ item = BIF_ARG_1;
+
+ /* first entry */
+ kv = tuple_val(CAR(list_val(item)));
+ ks[0] = kv[1];
+ vs[0] = kv[2];
+ size = 1;
+ item = CDR(list_val(item));
+
+ /* insert sort key/value pairs */
+ while(is_list(item)) {
+
+ kv = tuple_val(CAR(list_val(item)));
+
+ /* compare ks backwards
+ * idx represents the word index to be written (the hole position).
+ * We cannot copy the elements while searching since we might
+ * encounter an equal key, so we first search for just the index.
+ *
+ * It might be faster to move the values in the first pass,
+ * check for uniqueness during the insert phase, and then run a
+ * second phase compacting the map if duplicates were found
+ * during insert ... or use some other sort, shell-sort perhaps.
+ */
+
+ idx = size;
+
+ while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; }
+
+ if (c == 0) {
+ /* last compare was equal,
+ * i.e. we have to release memory
+ * and overwrite that key/value
+ */
+ ks[idx-1] = kv[1];
+ vs[idx-1] = kv[2];
+ unused_size++;
+ } else {
+ Uint i = size;
+ while(i > idx) {
+ ks[i] = ks[i-1];
+ vs[i] = vs[i-1];
+ i--;
+ }
+ ks[idx] = kv[1];
+ vs[idx] = kv[2];
+ size++;
+ }
+ item = CDR(list_val(item));
+ }
+
+ if (unused_size) {
+ /* the key tuple is embedded in the heap;
+ * write a bignum header to clear the unused slots.
+ */
+ /* release the unused values as normal since they are on the top of the heap */
+
+ ks[size] = make_pos_bignum_header(unused_size - 1);
+ HRelease(BIF_P, vs + size + unused_size, vs + size);
+ }
+
+ *thp = make_arityval(size);
+ mp->size = size;
+ BIF_RET(res);
+ }
+
+error:
+
+ BIF_ERROR(BIF_P, BADARG);
+}
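The loop above is an insertion sort with overwrite-on-equal: a duplicate
key keeps the latest value and grows unused_size instead of the map. The
same idea as a standalone sketch over plain integer keys, a hypothetical
helper rather than VM code:

    /* Insert (k, v) pairs into sorted arrays ks/vs, keeping the last
     * value for duplicate keys; returns the number of unique entries. */
    static unsigned sorted_insert_all(long *ks, long *vs,
                                      const long *k_in, const long *v_in,
                                      unsigned n_in)
    {
        unsigned size = 0, i, j, idx;
        for (j = 0; j < n_in; j++) {
            idx = size;
            while (idx > 0 && k_in[j] < ks[idx-1])
                idx--;
            if (idx > 0 && ks[idx-1] == k_in[j]) {   /* duplicate: overwrite */
                vs[idx-1] = v_in[j];
            } else {                                 /* shift up and insert */
                for (i = size; i > idx; i--) {
                    ks[i] = ks[i-1];
                    vs[i] = vs[i-1];
                }
                ks[idx] = k_in[j];
                vs[idx] = v_in[j];
                size++;
            }
        }
        return size;
    }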
+
+/* maps:is_key/2
+ */
+
+BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ Eterm *ks, key;
+ map_t *mp;
+ Uint n,i;
+
+ mp = (map_t*)map_val(BIF_ARG_2);
+ key = BIF_ARG_1;
+ n = map_get_size(mp);
+ ks = map_get_keys(mp);
+
+ if (n == 0)
+ BIF_RET(am_false);
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i++) {
+ if (ks[i] == key) {
+ BIF_RET(am_true);
+ }
+ }
+ }
+
+ for( i = 0; i < n; i++) {
+ if (EQ(ks[i], key)) {
+ BIF_RET(am_true);
+ }
+ }
+ BIF_RET(am_false);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/* maps:keys/1
+ */
+
+BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ Eterm *hp, *ks, res = NIL;
+ map_t *mp;
+ Uint n;
+
+ mp = (map_t*)map_val(BIF_ARG_1);
+ n = map_get_size(mp);
+
+ if (n == 0)
+ BIF_RET(res);
+
+ hp = HAlloc(BIF_P, (2 * n));
+ ks = map_get_keys(mp);
+
+ while(n--) {
+ res = CONS(hp, ks[n], res); hp += 2;
+ }
+
+ BIF_RET(res);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+/* maps:merge/2
+ */
+
+BIF_RETTYPE maps_merge_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_1) && is_map(BIF_ARG_2)) {
+ Eterm *hp,*thp;
+ Eterm tup;
+ Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2;
+ map_t *mp1,*mp2,*mp_new;
+ Uint n1,n2,i1,i2,need,unused_size=0;
+ int c = 0;
+
+ mp1 = (map_t*)map_val(BIF_ARG_1);
+ mp2 = (map_t*)map_val(BIF_ARG_2);
+ n1 = map_get_size(mp1);
+ n2 = map_get_size(mp2);
+
+ need = MAP_HEADER_SIZE + 1 + 2*(n1 + n2);
+
+ hp = HAlloc(BIF_P, need);
+ thp = hp;
+ tup = make_tuple(thp);
+ ks = hp + 1; hp += 1 + n1 + n2;
+ mp_new = (map_t*)hp; hp += MAP_HEADER_SIZE;
+ vs = hp; hp += n1 + n2;
+
+ mp_new->thing_word = MAP_HEADER;
+ mp_new->size = 0;
+ mp_new->keys = tup;
+
+ i1 = 0; i2 = 0;
+ ks1 = map_get_keys(mp1);
+ vs1 = map_get_values(mp1);
+ ks2 = map_get_keys(mp2);
+ vs2 = map_get_values(mp2);
+
+ while(i1 < n1 && i2 < n2) {
+ c = CMP_TERM(ks1[i1],ks2[i2]);
+ if ( c == 0) {
+ /* use the right-hand side argument's map value,
+ * but advance both maps */
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i1++, i2++, unused_size++;
+ } else if ( c < 0) {
+ *ks++ = ks1[i1];
+ *vs++ = vs1[i1];
+ i1++;
+ } else {
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i2++;
+ }
+ }
+
+ /* copy remaining */
+ while (i1 < n1) {
+ *ks++ = ks1[i1];
+ *vs++ = vs1[i1];
+ i1++;
+ }
+
+ while (i2 < n2) {
+ *ks++ = ks2[i2];
+ *vs++ = vs2[i2];
+ i2++;
+ }
+
+ if (unused_size) {
+ /* the key tuple is embedded in the heap; write a bignum header to clear it.
+ *
+ * release the unused values as normal since they are on the top of the heap
+ * size = n1 + n2 - unused_size
+ */
+
+ *ks = make_pos_bignum_header(unused_size - 1);
+ HRelease(BIF_P, vs + unused_size, vs);
+ }
+
+ mp_new->size = n1 + n2 - unused_size;
+ *thp = make_arityval(n1 + n2 - unused_size);
+
+ BIF_RET(make_map(mp_new));
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
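The merge above is a two-way merge of sorted key arrays in which the right
operand wins on equal keys, with unused_size counting the collisions so
the trailing heap words can be released. The core of it in isolation, as a
hypothetical helper:

    /* Merge two sorted key/value arrays; on equal keys the second
     * (right) operand's value wins. Returns merged length <= n1 + n2. */
    static unsigned merge_sorted(const long *k1, const long *v1, unsigned n1,
                                 const long *k2, const long *v2, unsigned n2,
                                 long *ko, long *vo)
    {
        unsigned i1 = 0, i2 = 0, o = 0;
        while (i1 < n1 && i2 < n2) {
            if (k1[i1] == k2[i2]) {            /* right-hand side wins */
                ko[o] = k2[i2]; vo[o] = v2[i2]; i1++; i2++;
            } else if (k1[i1] < k2[i2]) {
                ko[o] = k1[i1]; vo[o] = v1[i1]; i1++;
            } else {
                ko[o] = k2[i2]; vo[o] = v2[i2]; i2++;
            }
            o++;
        }
        while (i1 < n1) { ko[o] = k1[i1]; vo[o] = v1[i1]; i1++; o++; }
        while (i2 < n2) { ko[o] = k2[i2]; vo[o] = v2[i2]; i2++; o++; }
        return o;
    }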
+/* maps:new/0
+ */
+
+BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
+ Eterm* hp;
+ Eterm tup;
+ map_t *mp;
+
+ hp = HAlloc(BIF_P, (MAP_HEADER_SIZE + 1));
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(0);
+
+ mp = (map_t*)hp;
+ mp->thing_word = MAP_HEADER;
+ mp->size = 0;
+ mp->keys = tup;
+
+ BIF_RET(make_map(mp));
+}
+
+/* maps:put/3
+ */
+
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
+ Sint n,i;
+ Sint c = 0;
+ Eterm* hp, *shp;
+ Eterm *ks,*vs, res, tup;
+ map_t *mp = (map_t*)map_val(map);
+
+ n = map_get_size(mp);
+
+ if (n == 0) {
+ hp = HAlloc(p, MAP_HEADER_SIZE + 1 + 2);
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(1);
+ *hp++ = key;
+ res = make_map(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = 1;
+ *hp++ = tup;
+ *hp++ = value;
+
+ return res;
+ }
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ /* only allocate for values,
+ * assume key-tuple will be intact
+ */
+
+ hp = HAlloc(p, MAP_HEADER_SIZE + n);
+ shp = hp; /* save hp, used if optimistic update fails */
+ res = make_map(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = n;
+ *hp++ = mp->keys;
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i ++) {
+ if (ks[i] == key) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ } else {
+ for( i = 0; i < n; i ++) {
+ if (EQ(ks[i], key)) {
+ *hp++ = value;
+ vs++;
+ c = 1;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ }
+
+ if (c)
+ return res;
+
+ /* we need to make a new key tuple,
+ * reuse the old hp since the map has to be recreated anyway.
+ */
+ tup = make_tuple(shp);
+ *shp++ = make_arityval(n+1);
+
+ hp = HAlloc(p, 3 + n + 1);
+ res = make_map(hp);
+ *hp++ = MAP_HEADER;
+ *hp++ = n + 1;
+ *hp++ = tup;
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ ASSERT(n >= 0);
+
+ /* copy map in order */
+ while (n && ((c = CMP_TERM(*ks, key)) < 0)) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ n--;
+ }
+
+ *shp++ = key;
+ *hp++ = value;
+
+ ASSERT(n >= 0);
+
+ while(n--) {
+ *shp++ = *ks++;
+ *hp++ = *vs++;
+ }
+ /* we have one word remaining;
+ * this will work out fine once we get the size word
+ * into the header.
+ */
+ *shp = make_pos_bignum_header(0);
+ return res;
+}
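The optimistic path above is what makes put cheap: when the key already
exists, only a fresh value array plus map header is allocated and the old
key tuple is shared by both map instances; only a genuinely new key forces
a new key tuple and a sorted re-insert. A conceptual sketch of that
sharing, with hypothetical types rather than VM terms:

    /* Conceptual model: map instances sharing one immutable key tuple. */
    struct sketch_map {
        unsigned size;
        const long *keys;   /* shared, sorted, never mutated */
        long *values;       /* one array per map instance */
    };

    /* Updating an existing key copies values only and reuses m->keys. */
    static struct sketch_map update_existing(const struct sketch_map *m,
                                             unsigned idx, long value,
                                             long *new_values)
    {
        struct sketch_map out;
        unsigned i;
        out.size = m->size;
        out.keys = m->keys;          /* key tuple stays shared */
        out.values = new_values;
        for (i = 0; i < m->size; i++)
            new_values[i] = (i == idx) ? value : m->values[i];
        return out;
    }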
+
+BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
+ if (is_map(BIF_ARG_3)) {
+ BIF_RET(erts_maps_put(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/* maps:remove/2
+ */
+
+int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) {
+ Sint n;
+ Uint need;
+ Eterm *hp_start;
+ Eterm *thp, *mhp;
+ Eterm *ks, *vs, tup;
+ map_t *mp = (map_t*)map_val(map);
+
+ n = map_get_size(mp);
+
+ if (n == 0) {
+ *res = map;
+ return 1;
+ }
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ /* Assume the key exists.
+ * Release the allocation if it does not.
+ * Allocate the key tuple first.
+ */
+
+ need = n + 1 - 1 + 3 + n - 1; /* (n-1)-key tuple + map header + (n-1) values */
+ hp_start = HAlloc(p, need);
+ thp = hp_start;
+ mhp = thp + n; /* offset with tuple heap size */
+
+ tup = make_tuple(thp);
+ *thp++ = make_arityval(n - 1);
+
+ *res = make_map(mhp);
+ *mhp++ = MAP_HEADER;
+ *mhp++ = n - 1;
+ *mhp++ = tup;
+
+ if (is_immed(key)) {
+ while (1) {
+ if (*ks == key) {
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
+ } else {
+ while(1) {
+ if (EQ(*ks, key)) {
+ goto found_key;
+ } else if (--n) {
+ *mhp++ = *vs++;
+ *thp++ = *ks++;
+ } else
+ break;
+ }
+ }
+
+ /* Not found, remove allocated memory
+ * and return previous map.
+ */
+ HRelease(p, hp_start + need, hp_start);
+
+ *res = map;
+ return 1;
+
+found_key:
+ /* Copy rest of keys and values */
+ if (--n) {
+ sys_memcpy(mhp, vs+1, n*sizeof(Eterm));
+ sys_memcpy(thp, ks+1, n*sizeof(Eterm));
+ }
+ return 1;
+}
+
+BIF_RETTYPE maps_remove_2(BIF_ALIST_2) {
+ if (is_map(BIF_ARG_2)) {
+ Eterm res;
+ if (erts_maps_remove(BIF_P, BIF_ARG_1, BIF_ARG_2, &res)) {
+ BIF_RET(res);
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/* maps:update/3
+ */
+
+int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res) {
+ Sint n,i;
+ Eterm* hp,*shp;
+ Eterm *ks,*vs;
+ map_t *mp = (map_t*)map_val(map);
+
+ if ((n = map_get_size(mp)) == 0) {
+ return 0;
+ }
+
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ /* only allocate for values,
+ * assume key-tuple will be intact
+ */
+
+ hp = HAlloc(p, MAP_HEADER_SIZE + n);
+ shp = hp;
+ *hp++ = MAP_HEADER;
+ *hp++ = n;
+ *hp++ = mp->keys;
+
+ if (is_immed(key)) {
+ for( i = 0; i < n; i ++) {
+ if (ks[i] == key) {
+ goto found_key;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ } else {
+ for( i = 0; i < n; i ++) {
+ if (EQ(ks[i], key)) {
+ goto found_key;
+ } else {
+ *hp++ = *vs++;
+ }
+ }
+ }
+
+ HRelease(p, shp + MAP_HEADER_SIZE + n, shp);
+ return 0;
+
+found_key:
+ *hp++ = value;
+ vs++;
+ if (++i < n)
+ sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
+ *res = make_map(shp);
+ return 1;
+}
+
+BIF_RETTYPE maps_update_3(BIF_ALIST_3) {
+ if (is_map(BIF_ARG_3)) {
+ Eterm res;
+ if (erts_maps_update(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &res)) {
+ BIF_RET(res);
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+/* maps:values/1
+ */
+
+BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ Eterm *hp, *vs, res = NIL;
+ map_t *mp;
+ Uint n;
+
+ mp = (map_t*)map_val(BIF_ARG_1);
+ n = map_get_size(mp);
+
+ if (n == 0)
+ BIF_RET(res);
+
+ hp = HAlloc(BIF_P, (2 * n));
+ vs = map_get_values(mp);
+
+ while(n--) {
+ res = CONS(hp, vs[n], res); hp += 2;
+ }
+
+ BIF_RET(res);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+int erts_validate_and_sort_map(map_t* mp)
+{
+ Eterm *ks = map_get_keys(mp);
+ Eterm *vs = map_get_values(mp);
+ Uint sz = map_get_size(mp);
+ Uint ix,jx;
+ Eterm tmp;
+ int c;
+
+ /* sort */
+
+ for (ix = 1; ix < sz; ix++) {
+ jx = ix;
+ while( jx > 0 && (c = CMP_TERM(ks[jx],ks[jx-1])) <= 0 ) {
+ /* identical key -> error */
+ if (c == 0) return 0;
+
+ tmp = ks[jx];
+ ks[jx] = ks[jx - 1];
+ ks[jx - 1] = tmp;
+
+ tmp = vs[jx];
+ vs[jx] = vs[jx - 1];
+ vs[jx - 1] = tmp;
+
+ jx--;
+ }
+ }
+ return 1;
+}
+
+/*
+ * erts_internal:map_to_tuple_keys/1
+ *
+ * Used in erts_debug:size/1
+ */
+
+BIF_RETTYPE erts_internal_map_to_tuple_keys_1(BIF_ALIST_1) {
+ if (is_map(BIF_ARG_1)) {
+ map_t *mp = (map_t*)map_val(BIF_ARG_1);
+ BIF_RET(mp->keys);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
new file mode 100644
index 0000000000..cfacb2ec28
--- /dev/null
+++ b/erts/emulator/beam/erl_map.h
@@ -0,0 +1,72 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef __ERL_MAP_H__
+#define __ERL_MAP_H__
+
+#include "sys.h"
+/* MAP */
+
+typedef struct map_s {
+ Eterm thing_word;
+ Uint size;
+ Eterm keys; /* tuple */
+} map_t;
+/* map node
+ *
+ * -----------
+ * Eterm THING
+ * Uint size
+ * Eterm Keys -> {K1, K2, K3, ..., Kn} where n = size
+ * ----
+ * Eterm V1
+ * ...
+ * Eterm Vn, where n = size
+ * -----------
+ */
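Given that layout, the accessor macros below resolve as in this sketch,
assuming m is a term already known to be a map:

    Eterm *box = map_val(m);              /* -> thing_word of the map_t      */
    map_t *mp  = (map_t*) box;
    Eterm *vs  = box + 3;                 /* values follow the 3-word header */
    Eterm *ks  = tuple_val(mp->keys) + 1; /* keys live in a separate tuple   */
    /* vs == map_get_values(mp), ks == map_get_keys(mp) */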
+
+
+
+/* erl_term.h stuff */
+#define make_map(x) make_boxed((Eterm*)(x))
+#define make_map_rel(x, BASE) make_boxed_rel((Eterm*)(x),(BASE))
+#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val((x))))
+#define is_map_rel(RTERM,BASE) is_map(rterm2wterm(RTERM,BASE))
+#define is_not_map(x) (!is_map((x)))
+#define is_map_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP)
+#define header_is_map(x) ((((x) & (_HEADER_SUBTAG_MASK)) == MAP_SUBTAG))
+#define map_val(x) (_unchecked_boxed_val((x)))
+#define map_val_rel(RTERM, BASE) map_val(rterm2wterm(RTERM, BASE))
+
+#define map_get_values(x) (((Eterm *)(x)) + 3)
+#define map_get_keys(x) (((Eterm *)tuple_val(((map_t *)(x))->keys)) + 1)
+#define map_get_size(x) (((map_t*)(x))->size)
+
+#define MAP_HEADER _make_header(1,_TAG_HEADER_MAP)
+#define MAP_HEADER_SIZE (sizeof(map_t) / sizeof(Eterm))
+
+Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map);
+int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res);
+int erts_maps_find(Eterm key, Eterm map, Eterm *value);
+int erts_maps_get(Eterm key, Eterm map, Eterm *value);
+int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res);
+int erts_validate_and_sort_map(map_t* map);
+#endif
+
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 325d77e911..8870fac7d9 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -46,10 +46,12 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
+#ifdef DEBUG
static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp)
{
return ((unsigned)(ptr - bp->mem) < bp->used_size);
}
+#endif
void
@@ -413,7 +415,13 @@ erts_queue_dist_message(Process *rcvr,
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr,
+#ifdef ERTS_SMP
+ *rcvr_locks
+#else
+ 0
+#endif
+ );
}
}
@@ -540,7 +548,13 @@ queue_message(Process *c_p,
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver);
+ erts_proc_notify_new_message(receiver,
+#ifdef ERTS_SMP
+ *receiver_locks
+#else
+ 0
+#endif
+ );
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
@@ -894,8 +908,10 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send)) {
- erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", receiver->common.id);
}
#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -1028,7 +1044,6 @@ erts_send_message(Process* sender,
}
BM_SWAP_TIMER(send,system);
} else {
-#ifdef ERTS_SMP
ErlOffHeap *ohp;
Eterm *hp;
erts_aint32_t state;
@@ -1060,42 +1075,6 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
-#else
- ErlMessage* mp = message_alloc();
- Eterm *hp;
- BM_SWAP_TIMER(send,size);
- msize = size_object(message);
- BM_SWAP_TIMER(size,send);
-
- if (receiver->stop - receiver->htop <= msize) {
- BM_SWAP_TIMER(send,system);
- erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
- BM_SWAP_TIMER(system,send);
- }
- hp = receiver->htop;
- receiver->htop = hp + msize;
- BM_SWAP_TIMER(send,copy);
- message = copy_struct(message, msize, &hp, &receiver->off_heap);
- BM_MESSAGE_COPIED(msize);
- BM_SWAP_TIMER(copy,send);
- DTRACE6(message_send, sender_name, receiver_name,
- (uint32_t)msize, tok_label, tok_lastcnt, tok_serial);
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
-#endif
- mp->next = NULL;
- mp->data.attached = NULL;
- LINK_MESSAGE(receiver, mp);
- res = receiver->msg.len;
- erts_proc_notify_new_message(receiver);
-
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
- BM_SWAP_TIMER(send,system);
-#endif /* #ifndef ERTS_SMP */
}
return res;
}
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 771eba431f..0f3bb8d281 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -46,6 +46,11 @@ typedef struct erl_off_heap {
Uint64 overhead; /* Administrative overhead (used to force GC). */
} ErlOffHeap;
+#define ERTS_INIT_OFF_HEAP(OHP) \
+ do { \
+ (OHP)->first = NULL; \
+ (OHP)->overhead = 0; \
+ } while (0)
#include "external.h"
#include "erl_process.h"
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 70e592cc5f..244a2b26db 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -1024,3 +1024,23 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
}
}
}
+
+void erts_one_link_size(ErtsLink *lnk, void *vpu)
+{
+ Uint *pu = vpu;
+ *pu += ERTS_LINK_SIZE*sizeof(Uint);
+ if(!IS_CONST(lnk->pid))
+ *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
+ if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
+ erts_doforall_links(ERTS_LINK_ROOT(lnk),&erts_one_link_size,vpu);
+ }
+}
+void erts_one_mon_size(ErtsMonitor *mon, void *vpu)
+{
+ Uint *pu = vpu;
+ *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
+ if(!IS_CONST(mon->pid))
+ *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
+ if(!IS_CONST(mon->ref))
+ *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
+}
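Both functions are fold callbacks for the erts_doforall_* sweeps; a caller
accumulates into a Uint, roughly as in this sketch (lnk_root stands for
some link tree root the caller holds):

    Uint bytes = 0;
    erts_doforall_links(lnk_root, &erts_one_link_size, &bytes);
    /* bytes now counts ERTS_LINK_SIZE words per node plus any off-heap
     * pid storage, recursing through ERTS_LINK_ROOT sublists. */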
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index 6a360a2336..fb11dbbd22 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -170,6 +170,8 @@ ErtsSuspendMonitor *erts_lookup_suspend_monitor(ErtsSuspendMonitor *root,
Eterm pid);
void erts_delete_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid);
void erts_init_monitors(void);
+void erts_one_link_size(ErtsLink *lnk, void *vpu);
+void erts_one_mon_size(ErtsMonitor *mon, void *vpu);
#define erts_doforall_monitors erts_sweep_monitors
#define erts_doforall_links erts_sweep_links
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index e538ba30c2..c8bb126687 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -222,6 +222,8 @@ static byte *tracep;
static byte *endp;
static SysTimeval last_tv;
+static ErtsAllocatorWrapper_t mtrace_wrapper;
+
#if ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
#error ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
#endif
@@ -555,6 +557,8 @@ write_trace_header(char *nodename, char *pid, char *hostname)
return 1;
}
+static void mtrace_pre_lock(void);
+static void mtrace_pre_unlock(void);
static void *mtrace_alloc(ErtsAlcType_t, void *, Uint);
static void *mtrace_realloc(ErtsAlcType_t, void *, void *, Uint);
static void mtrace_free(ErtsAlcType_t, void *, void *);
@@ -635,12 +639,16 @@ erts_mtrace_install_wrapper_functions(void)
erts_allctrs[i].free = mtrace_free;
erts_allctrs[i].extra = (void *) &real_allctrs[i];
}
+ mtrace_wrapper.lock = mtrace_pre_lock;
+ mtrace_wrapper.unlock = mtrace_pre_unlock;
+ erts_allctr_wrapper_prelock_init(&mtrace_wrapper);
}
}
void
erts_mtrace_stop(void)
{
+ ASSERT(!erts_is_allctr_wrapper_prelocked());
erts_mtx_lock(&mtrace_op_mutex);
erts_mtx_lock(&mtrace_buf_mutex);
if (erts_mtrace_enabled) {
@@ -677,6 +685,7 @@ erts_mtrace_stop(void)
void
erts_mtrace_exit(Uint32 exit_value)
{
+ ASSERT(!erts_is_allctr_wrapper_prelocked());
erts_mtx_lock(&mtrace_op_mutex);
erts_mtx_lock(&mtrace_buf_mutex);
if (erts_mtrace_enabled) {
@@ -935,18 +944,33 @@ write_free_entry(byte tag,
erts_mtx_unlock(&mtrace_buf_mutex);
}
+static void mtrace_pre_lock(void)
+{
+ erts_mtx_lock(&mtrace_op_mutex);
+}
+
+static void mtrace_pre_unlock(void)
+{
+ erts_mtx_unlock(&mtrace_op_mutex);
+}
+
+
static void *
mtrace_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *res;
- erts_mtx_lock(&mtrace_op_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&mtrace_op_mutex);
+ }
res = (*real_af->alloc)(n, real_af->extra, size);
write_alloc_entry(ERTS_MT_ALLOC_BDY_TAG, res, n, 0, size);
- erts_mtx_unlock(&mtrace_op_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&mtrace_op_mutex);
+ }
return res;
}
@@ -957,12 +981,16 @@ mtrace_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *res;
- erts_mtx_lock(&mtrace_op_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&mtrace_op_mutex);
+ }
res = (*real_af->realloc)(n, real_af->extra, ptr, size);
write_realloc_entry(ERTS_MT_REALLOC_BDY_TAG, res, n, 0, ptr, size);
- erts_mtx_unlock(&mtrace_op_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_unlock(&mtrace_op_mutex);
+ }
return res;
@@ -973,10 +1001,14 @@ mtrace_free(ErtsAlcType_t n, void *extra, void *ptr)
{
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- erts_mtx_lock(&mtrace_op_mutex);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ erts_mtx_lock(&mtrace_op_mutex);
+ }
(*real_af->free)(n, real_af->extra, ptr);
- write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
+ if (!erts_is_allctr_wrapper_prelocked()) {
+ write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
+ }
erts_mtx_unlock(&mtrace_op_mutex);
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index d4c2b5bdcc..adc3520ebb 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,9 +31,11 @@
#include "bif.h"
#include "error.h"
#include "big.h"
+#include "erl_map.h"
#include "beam_bp.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#include "erl_process.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -310,9 +312,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Process* rp;
Process* c_p;
ErlHeapFragment* frags;
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
- ErtsProcLocks rp_had_locks;
-#endif
Eterm receiver = to_pid->pid;
int flush_me = 0;
int scheduler = erts_get_scheduler_id() != 0;
@@ -332,10 +331,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
}
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
- rp_had_locks = rp_locks;
-#endif
-
rp = (scheduler
? erts_proc_lookup(receiver)
: erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
@@ -477,6 +472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
+
+ if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
+ if (sb->is_writable) {
+ ProcBin* pb = (ProcBin*) binary_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ sb->is_writable = 0;
+ }
+ }
+ }
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
@@ -881,7 +888,7 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
{
if (is_not_list(term) && is_not_nil(term)) return 0;
- *len = list_length(term);
+ *len = erts_list_length(term);
return 1;
}
@@ -1220,7 +1227,8 @@ static void close_lib(struct erl_module_nif* lib)
lib->entry->unload(&env, lib->priv_data);
post_nif_noproc(&env);
}
- erts_sys_ddll_close(lib->handle);
+ if (!erts_is_static_nif(lib->handle))
+ erts_sys_ddll_close(lib->handle);
lib->handle = NULL;
}
@@ -1241,6 +1249,19 @@ static void steal_resource_type(ErlNifResourceType* type)
}
}
+/* The opened_rt_list is used by enif_open_resource_type()
+ * in order to rollback "creates" and "take-overs" in case the load fails.
+ */
+struct opened_resource_type
+{
+ struct opened_resource_type* next;
+
+ ErlNifResourceFlags op;
+ ErlNifResourceType* type;
+ ErlNifResourceDtor* new_dtor;
+};
+static struct opened_resource_type* opened_rt_list = NULL;
+
ErlNifResourceType*
enif_open_resource_type(ErlNifEnv* env,
const char* module_str,
@@ -1262,22 +1283,21 @@ enif_open_resource_type(ErlNifEnv* env,
if (type == NULL) {
if (flags & ERL_NIF_RT_CREATE) {
type = erts_alloc(ERTS_ALC_T_NIF,
- sizeof(struct enif_resource_type_t));
- type->dtor = dtor;
+ sizeof(struct enif_resource_type_t));
type->module = module_am;
type->name = name_am;
erts_refc_init(&type->refc, 1);
- type->owner = env->mod_nif;
- type->prev = &resource_type_list;
- type->next = resource_type_list.next;
- type->next->prev = type;
- type->prev->next = type;
op = ERL_NIF_RT_CREATE;
+ #ifdef DEBUG
+ type->dtor = (void*)1;
+ type->owner = (void*)2;
+ type->prev = (void*)3;
+ type->next = (void*)4;
+ #endif
}
}
else {
- if (flags & ERL_NIF_RT_TAKEOVER) {
- steal_resource_type(type);
+ if (flags & ERL_NIF_RT_TAKEOVER) {
op = ERL_NIF_RT_TAKEOVER;
}
else {
@@ -1285,12 +1305,13 @@ enif_open_resource_type(ErlNifEnv* env,
}
}
if (type != NULL) {
- type->owner = env->mod_nif;
- type->dtor = dtor;
- if (type->dtor != NULL) {
- erts_refc_inc(&type->owner->rt_dtor_cnt, 1);
- }
- erts_refc_inc(&type->owner->rt_cnt, 1);
+ struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(struct opened_resource_type));
+ ort->op = op;
+ ort->type = type;
+ ort->new_dtor = dtor;
+ ort->next = opened_rt_list;
+ opened_rt_list = ort;
}
if (tried != NULL) {
*tried = op;
@@ -1298,6 +1319,51 @@ enif_open_resource_type(ErlNifEnv* env,
return type;
}
+static void commit_opened_resource_types(struct erl_module_nif* lib)
+{
+ while (opened_rt_list) {
+ struct opened_resource_type* ort = opened_rt_list;
+
+ ErlNifResourceType* type = ort->type;
+
+ if (ort->op == ERL_NIF_RT_CREATE) {
+ type->prev = &resource_type_list;
+ type->next = resource_type_list.next;
+ type->next->prev = type;
+ type->prev->next = type;
+ }
+ else { /* ERL_NIF_RT_TAKEOVER */
+ steal_resource_type(type);
+ }
+
+ type->owner = lib;
+ type->dtor = ort->new_dtor;
+
+ if (type->dtor != NULL) {
+ erts_refc_inc(&lib->rt_dtor_cnt, 1);
+ }
+ erts_refc_inc(&lib->rt_cnt, 1);
+
+ opened_rt_list = ort->next;
+ erts_free(ERTS_ALC_T_TMP, ort);
+ }
+}
+
+static void rollback_opened_resource_types(void)
+{
+ while (opened_rt_list) {
+ struct opened_resource_type* ort = opened_rt_list;
+
+ if (ort->op == ERL_NIF_RT_CREATE) {
+ erts_free(ERTS_ALC_T_NIF, ort->type);
+ }
+
+ opened_rt_list = ort->next;
+ erts_free(ERTS_ALC_T_TMP, ort);
+ }
+}
+
+
static void nif_resource_dtor(Binary* bin)
{
ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin);
@@ -1323,6 +1389,8 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
{
Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
+
+ ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
erts_refc_inc(&bin->refc, 1);
#ifdef DEBUG
@@ -1413,7 +1481,7 @@ void* enif_dlopen(const char* lib,
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
void* handle;
void* init_func;
- if (erts_sys_ddll_open2(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
+ if (erts_sys_ddll_open(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) {
erts_sys_ddll_call_nif_init(init_func);
}
@@ -1457,6 +1525,576 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(env->proc) == 0;
}
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. The saved_mfa, saved_argc, alloced_argv_sz and argv
+ * members are used to track the MFA and arguments of the top NIF in case a
+ * chain of one or more enif_schedule_nif() calls results in an exception;
+ * in that case the original MFA and registers have to be restored
+ * before returning to Erlang to ensure that the stacktrace information
+ * associated with the exception is correct.
+ */
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m;
+ NativeFunPtr fp;
+ Eterm saved_mfa[3];
+ int saved_argc;
+ int alloced_argv_sz;
+ Eterm argv[1];
+} NifExport;
+
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. This function is declared in erl_process.h.
+ */
+int
+erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ int gc = (ep && ep->saved_argc > 0);
+
+ if (gc) {
+ *objv = ep->argv;
+ *nobj = ep->saved_argc;
+ }
+ return gc;
+}
+
+/*
+ * Allocate a NifExport and set it in proc specific data
+ */
+static NifExport*
+allocate_nif_sched_data(Process* proc, int argc)
+{
+ NifExport* ep;
+ size_t argv_extra, total;
+ int i;
+
+ argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0;
+ total = sizeof(NifExport) + argv_extra;
+ ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
+ sys_memset((void*) ep, 0, total);
+ ep->alloced_argv_sz = argc;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->exp.addressv[i] = &ep->exp.code[3];
+ }
+ ep->exp.code[3] = (BeamInstr) em_call_nif;
+ (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep);
+ return ep;
+}
+
+static ERTS_INLINE void
+destroy_nif_export(NifExport *nif_export)
+{
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
+}
+
+void
+erts_destroy_nif_export(void *nif_export)
+{
+ destroy_nif_export((NifExport *) nif_export);
+}
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save MFA and registers only if the need_save
+ * parameter is true.
+ */
+static ERL_NIF_TERM
+init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ int need_save, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ NifExport* ep;
+ int i;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ if (!ep)
+ ep = allocate_nif_sched_data(proc, argc);
+ else if (need_save && ep->alloced_argv_sz < argc) {
+ NifExport* new_ep = allocate_nif_sched_data(proc, argc);
+ destroy_nif_export(ep);
+ ep = new_ep;
+ }
+ ERTS_VBUMP_ALL_REDS(proc);
+ for (i = 0; i < argc; i++) {
+ if (need_save)
+ ep->argv[i] = reg[i];
+ reg[i] = (Eterm) argv[i];
+ }
+ if (need_save) {
+ ep->saved_mfa[0] = proc->current[0];
+ ep->saved_mfa[1] = proc->current[1];
+ ep->saved_mfa[2] = proc->current[2];
+ ep->saved_argc = argc;
+ }
+ proc->i = (BeamInstr*) ep->exp.addressv[0];
+ ep->exp.code[0] = (BeamInstr) proc->current[0];
+ ep->exp.code[1] = (BeamInstr) proc->current[1];
+ ep->exp.code[2] = argc;
+ ep->exp.code[4] = (BeamInstr) direct_fp;
+ ep->m = env->mod_nif;
+ ep->fp = indirect_fp;
+ proc->freason = TRAP;
+ proc->arity = argc;
+ return THE_NON_VALUE;
+}
+
+/*
+ * Restore saved MFA and registers. Registers are restored only when the
+ * exception flag is true.
+ */
+static void
+restore_nif_mfa(Process* proc, NifExport* ep, int exception)
+{
+ int i;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+
+ proc->current[0] = ep->saved_mfa[0];
+ proc->current[1] = ep->saved_mfa[1];
+ proc->current[2] = ep->saved_mfa[2];
+ if (exception)
+ for (i = 0; i < ep->saved_argc; i++)
+ reg[i] = ep->argv[i];
+ ep->saved_argc = 0;
+ ep->saved_mfa[0] = THE_NON_VALUE;
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
+static ERL_NIF_TERM
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 0);
+ return argv[0];
+}
+
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 1);
+ return enif_make_badarg(env);
+}
+
+/*
+ * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
+ * then check the result and schedule the appropriate finalizer function
+ * where needed. Also restore the original NIF MFA when appropriate.
+ */
+static ERL_NIF_TERM
+execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+
+ /*
+ * Set ep->fp to NULL before the native call so we know later
+ * whether it scheduled another NIF for execution.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
+ close_lib(env->mod_nif);
+ /*
+ * If no more NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF calling
+ * context. Reuse fp essentially as a boolean for this, passing it to
+ * init_nif_sched_data below. Both dirty_nif_exception and
+ * dirty_nif_finalizer then check ep->fp to decide whether or not to
+ * restore the original calling context.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ fp = NULL;
+ if (is_non_value(result)) {
+ if (proc->freason != TRAP) {
+ ASSERT(proc->freason == BADARG);
+ return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
+ } else {
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, 1);
+ return result;
+ }
+ }
+ else
+ return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+}
+
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
+ * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
+ * type (CPU or I/O) is indicated in flags parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
+{
+ erts_aint32_t state, n, a;
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ int need_save;
+
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
+
+ a = erts_smp_atomic32_read_acqb(&proc->state);
+ while (1) {
+ n = state = a;
+ /*
+ * clear any current dirty flags and dirty queue indicators,
+ * in case the application is shifting a job from one type
+ * of dirty scheduler to the other
+ */
+ n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ n |= ERTS_PSFLG_DIRTY_CPU_PROC;
+ else
+ n |= ERTS_PSFLG_DIRTY_IO_PROC;
+ a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
+ if (a == state)
+ break;
+ }
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+ return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ /*
+ * If no NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF MFA.
+ */
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP);
+ return result;
+}
+
+ERL_NIF_TERM
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+ ERL_NIF_TERM fun_name_atom, result;
+ int need_save;
+
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+
+ if (flags) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ NativeFunPtr sched_fun;
+ int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
+ if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
+ sched_fun = schedule_dirty_io_nif;
+ else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ sched_fun = schedule_dirty_cpu_nif;
+ else
+ return enif_make_badarg(env);
+ result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
+#else
+ return enif_make_badarg(env);
+#endif
+ }
+ else
+ result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->exp.code[1] = (BeamInstr) fun_name_atom;
+ return result;
+}
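From the NIF author's side, the typical use is a NIF that performs one
bounded slice of work and reschedules itself until finished; a minimal
sketch where do_one_slice() and done() are hypothetical:

    static ERL_NIF_TERM chunked_nif(ErlNifEnv* env, int argc,
                                    const ERL_NIF_TERM argv[])
    {
        do_one_slice();                 /* hypothetical bounded work unit */
        if (!done())                    /* hypothetical completion check  */
            return enif_schedule_nif(env, "chunked_nif", 0 /* regular */,
                                     chunked_nif, argc, argv);
        return enif_make_atom(env, "ok");
    }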
+
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
+int
+enif_is_on_dirty_scheduler(ErlNifEnv* env)
+{
+ return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
+}
+
+#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
+
+/* Maps */
+
+int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_map(term);
+}
+
+int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)
+{
+ if (is_map(term)) {
+ map_t *mp;
+ mp = (map_t*)map_val(term);
+ *size = map_get_size(mp);
+ return 1;
+ }
+ return 0;
+}
+
+ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
+{
+ Eterm* hp = alloc_heap(env,MAP_HEADER_SIZE+1);
+ Eterm tup;
+ map_t *mp;
+
+ tup = make_tuple(hp);
+ *hp++ = make_arityval(0);
+ mp = (map_t*)hp;
+ mp->thing_word = MAP_HEADER;
+ mp->size = 0;
+ mp->keys = tup;
+
+ return make_map(mp);
+}
+
+int enif_make_map_put(ErlNifEnv* env,
+ Eterm map_in,
+ Eterm key,
+ Eterm value,
+ Eterm *map_out)
+{
+ if (is_not_map(map_in)) {
+ return 0;
+ }
+ flush_env(env);
+ *map_out = erts_maps_put(env->proc, key, value, map_in);
+ cache_env(env);
+ return 1;
+}
+
+int enif_get_map_value(ErlNifEnv* env,
+ Eterm map,
+ Eterm key,
+ Eterm *value)
+{
+ if (is_not_map(map)) {
+ return 0;
+ }
+ return erts_maps_get(key, map, value);
+}
+
+int enif_make_map_update(ErlNifEnv* env,
+ Eterm map_in,
+ Eterm key,
+ Eterm value,
+ Eterm *map_out)
+{
+ int res;
+ if (is_not_map(map_in)) {
+ return 0;
+ }
+
+ flush_env(env);
+ res = erts_maps_update(env->proc, key, value, map_in, map_out);
+ cache_env(env);
+ return res;
+}
+
+int enif_make_map_remove(ErlNifEnv* env,
+ Eterm map_in,
+ Eterm key,
+ Eterm *map_out)
+{
+ int res;
+ if (is_not_map(map_in)) {
+ return 0;
+ }
+ flush_env(env);
+ res = erts_maps_remove(env->proc, key, map_in, map_out);
+ cache_env(env);
+ return res;
+}
+
+int enif_map_iterator_create(ErlNifEnv *env,
+ Eterm map,
+ ErlNifMapIterator *iter,
+ ErlNifMapIteratorEntry entry)
+{
+ if (is_map(map)) {
+ map_t *mp = (map_t*)map_val(map);
+ size_t offset;
+
+ switch (entry) {
+ case ERL_NIF_MAP_ITERATOR_HEAD: offset = 0; break;
+ case ERL_NIF_MAP_ITERATOR_TAIL: offset = map_get_size(mp) - 1; break;
+ default: goto error;
+ }
+
+ /* empty maps are ok, but they leave the iterator
+ * in a degenerate state (t_limit == 1).
+ */
+
+ iter->map = map;
+ iter->ks = ((Eterm *)map_get_keys(mp)) + offset;
+ iter->vs = ((Eterm *)map_get_values(mp)) + offset;
+ iter->t_limit = map_get_size(mp) + 1;
+ iter->idx = offset + 1;
+
+ return 1;
+ }
+
+error:
+#ifdef DEBUG
+ iter->map = THE_NON_VALUE;
+#endif
+ return 0;
+}
+
+void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter)
+{
+ /* not used */
+#ifdef DEBUG
+ iter->map = THE_NON_VALUE;
+#endif
+
+}
+
+int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter)
+{
+ ASSERT(iter && is_map(iter->map));
+ ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
+ return (iter->t_limit == 1 || iter->idx == iter->t_limit);
+}
+
+int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter)
+{
+ ASSERT(iter && is_map(iter->map));
+ ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1));
+ return (iter->t_limit == 1 || iter->idx == 0);
+}
+
+
+int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter)
+{
+ ASSERT(iter && is_map(iter->map));
+ if (iter->idx < iter->t_limit) {
+ iter->idx++;
+ iter->ks++;
+ iter->vs++;
+ }
+ return (iter->idx != iter->t_limit);
+}
+
+int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter)
+{
+ ASSERT(iter && is_map(iter->map));
+ if (iter->idx > 0) {
+ iter->idx--;
+ iter->ks--;
+ iter->vs--;
+ }
+ return (iter->idx > 0);
+}
+
+int enif_map_iterator_get_pair(ErlNifEnv *env,
+ ErlNifMapIterator *iter,
+ Eterm *key,
+ Eterm *value)
+{
+ ASSERT(iter && is_map(iter->map));
+ if (iter->idx > 0 && iter->idx < iter->t_limit) {
+ ASSERT(iter->ks >= map_get_keys(map_val(iter->map)) &&
+ iter->ks < (map_get_keys(map_val(iter->map)) + map_get_size(map_val(iter->map))));
+ ASSERT(iter->vs >= map_get_values(map_val(iter->map)) &&
+ iter->vs < (map_get_values(map_val(iter->map)) + map_get_size(map_val(iter->map))));
+ *key = *(iter->ks);
+ *value = *(iter->vs);
+ return 1;
+ }
+ return 0;
+}
+
/***************************************************************************
** load_nif/2 **
***************************************************************************/
@@ -1564,6 +2202,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+/*
+ * The function below is for looping through ErlNifFunc arrays, helping
+ * provide backwards compatibility across the version 2.7 change that added
+ * the "flags" field to ErlNifFunc.
+ */
+static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+{
+ ASSERT(incrp);
+ if (!*incrp) {
+ if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ *incrp = sizeof(ErlNifFunc);
+ else {
+ /*
+ * ErlNifFuncV1 below is what ErlNifFunc was before the
+ * addition of the flags field for 2.7, and is needed to handle
+ * backward compatibility.
+ */
+ typedef struct {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }ErlNifFuncV1;
+ *incrp = sizeof(ErlNifFuncV1);
+ }
+ }
+ return (ErlNifFunc*) ((char*)func + *incrp);
+}
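The pattern is stride-based iteration: the element size is decided once
from the entry's declared version, and the cursor then advances byte-wise.
It works because 2.7 appended the flags field, so an older array stays
aligned under the smaller stride. The trick in isolation, with
hypothetical structs:

    typedef struct { const char *name; unsigned arity; void *fp; } FuncV1;
    typedef struct { const char *name; unsigned arity; void *fp;
                     unsigned flags; } FuncV2;   /* V1 plus a trailing field */

    static const void *next_elem(const void *f, int is_v2)
    {
        size_t incr = is_v2 ? sizeof(FuncV2) : sizeof(FuncV1);
        return (const char *)f + incr;
    }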
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
@@ -1571,12 +2238,13 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
static const char upgrade[] = "upgrade";
char* lib_name = NULL;
void* handle = NULL;
- void* init_func;
+ void* init_func = NULL;
ErlNifEntry* entry = NULL;
ErlNifEnv env;
- int len, i, err;
+ int i, err, encoding;
Module* mod;
Eterm mod_atom;
+ const Atom* mod_atomp;
Eterm f_atom;
BeamInstr* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
@@ -1585,18 +2253,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
struct erl_module_nif* lib = NULL;
int reload_warning = 0;
- len = list_length(BIF_ARG_1);
- if (len < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ encoding = erts_get_native_filename_encoding();
+ if (encoding == ERL_FILENAME_WIN_WCHAR) {
+ /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
+ /* since lib_name is used in error messages */
+ encoding = ERL_FILENAME_UTF8;
}
-
- lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1);
-
- if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) {
- erts_free(ERTS_ALC_T_TMP, lib_name);
+ lib_name = erts_convert_filename_to_encoding(BIF_ARG_1, NULL, 0,
+ ERTS_ALC_T_TMP, 1, 0, encoding,
+ NULL, 0);
+ if (!lib_name) {
BIF_ERROR(BIF_P, BADARG);
}
- lib_name[len] = '\0';
if (!erts_try_seize_code_write_permission(BIF_P)) {
erts_free(ERTS_ALC_T_TMP, lib_name);
@@ -1620,13 +2288,19 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
mod=erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(mod != NULL);
+ mod_atomp = atom_tab(atom_val(mod_atom));
+ init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
+ if (init_func != NULL)
+ handle = init_func;
+
if (!in_area(caller, mod->curr.code, mod->curr.code_length)) {
ASSERT(in_area(caller, mod->old.code, mod->old.code_length));
ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
"module '%T' not allowed", mod_atom);
}
- else if ((err=erts_sys_ddll_open2(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ else if (init_func == NULL &&
+ (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
@@ -1635,7 +2309,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str);
}
}
- else if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
+ else if (init_func == NULL &&
+ erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init"
" function: '%s'", errdesc.str);
@@ -1644,8 +2319,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
}
- else if (entry->major != ERL_NIF_MAJOR_VERSION
- || entry->minor > ERL_NIF_MINOR_VERSION) {
+ else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
+ || (ERL_NIF_MAJOR_VERSION < entry->major
+ || (ERL_NIF_MAJOR_VERSION == entry->major
+ && ERL_NIF_MINOR_VERSION < entry->minor))
+ || (entry->major==2 && entry->minor == 5)) { /* experimental maps */
ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
@@ -1662,22 +2340,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
+
+ int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (maybe_dirty_nifs && f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+#else
+ ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
+ mod_atom, f->name, f->arity);
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (code_pp[1] - code_pp[0] < (5+4))
+#else
+ else if (code_pp[1] - code_pp[0] < (5+3))
+#endif
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
+ f = next_func(entry, &incr, f);
}
}
@@ -1694,10 +2398,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
lib->entry = entry;
erts_refc_init(&lib->rt_cnt, 0);
erts_refc_init(&lib->rt_dtor_cnt, 0);
+ ASSERT(opened_rt_list == NULL);
lib->mod = mod;
env.mod_nif = lib;
- if (mod->curr.nif != NULL) { /* Reload */
- int k;
+ if (mod->curr.nif != NULL) { /*************** Reload ******************/
+ /*
+ * Repeated load_nif calls from the same Erlang module instance
+ * ("reload") are deprecated and were only meant as a development
+ * feature, not for use in production systems. (See the warning below.)
+ */
+ int k, old_incr = 0;
+ ErlNifFunc* old_func;
lib->priv_data = mod->curr.nif->priv_data;
ASSERT(mod->curr.nif->entry != NULL);
@@ -1706,13 +2417,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
/* Check that no NIF is removed */
+ old_func = mod->curr.nif->entry->funcs;
for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ if (old_func->arity == f->arity
+ && sys_strcmp(old_func->name, f->name) == 0) {
break;
}
+ f = next_func(entry, &incr, f);
}
if (i == entry->num_of_funcs) {
ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
@@ -1720,7 +2434,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- }
+ old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ }
erts_pre_nif(&env, BIF_P, lib);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
@@ -1728,6 +2443,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
}
else {
+ commit_opened_resource_types(lib);
mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */
erts_unload_nif(mod->curr.nif);
reload_warning = 1;
@@ -1735,7 +2451,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
lib->priv_data = NULL;
- if (mod->old.nif != NULL) { /* Upgrade */
+ if (mod->old.nif != NULL) { /**************** Upgrade ***************/
void* prev_old_data = mod->old.nif->priv_data;
if (entry->upgrade == NULL) {
ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
@@ -1748,30 +2464,35 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
mod->old.nif->priv_data = prev_old_data;
ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
}
- /*else if (mod->old_nif->priv_data != prev_old_data) {
- refresh_cached_nif_data(mod->old_code, mod->old_nif);
- }*/
+ else
+ commit_opened_resource_types(lib);
}
- else if (entry->load != NULL) { /* Initial load */
+ else if (entry->load != NULL) { /********* Initial load ***********/
erts_pre_nif(&env, BIF_P, lib);
veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful.");
}
+ else
+ commit_opened_resource_types(lib);
}
}
if (ret == am_ok) {
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
+
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
@@ -1779,19 +2500,31 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
GenericBp* g = (GenericBp *) code_ptr[1];
ASSERT(code_ptr[5+0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ }
+ if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ code_ptr[5+3] = (BeamInstr) f->fptr;
+ code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) schedule_dirty_io_nif :
+ (BeamInstr) schedule_dirty_cpu_nif;
+#endif
+ }
+ else
+ code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
+ f = next_func(entry, &incr, f);
}
}
else {
error:
+ rollback_opened_resource_types();
ASSERT(ret != am_ok);
if (lib != NULL) {
erts_free(ERTS_ALC_T_NIF, lib);
}
- if (handle != NULL) {
+ if (handle != NULL && !erts_is_static_nif(handle)) {
erts_sys_ddll_close(handle);
}
erts_sys_ddll_free_error(&errdesc);
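
The reload/upgrade loop above walks both function tables with next_func() rather than plain array indexing: ErlNifFunc gained a flags field in API 2.7, so tables coming from libraries built against older headers have a smaller per-entry stride. A minimal sketch of that stride-based iteration, assuming the erl_nif.h types and a hypothetical pre-2.7 layout named ErlNifFuncV1:

/* Sketch of stride-based iteration over a funcs array whose entry size
 * depends on the API version the library was built against. ErlNifFuncV1
 * is a hypothetical stand-in for the pre-2.7 struct without 'flags'. */
#include "erl_nif.h"

typedef struct {
    const char* name;
    unsigned arity;
    ERL_NIF_TERM (*fptr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
} ErlNifFuncV1;

static ErlNifFunc* next_func_sketch(ErlNifEntry* entry, int* incr, ErlNifFunc* f)
{
    if (*incr == 0) {
        /* Decide the stride once, from the library's declared version. */
        if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
            *incr = (int) sizeof(ErlNifFunc);
        else
            *incr = (int) sizeof(ErlNifFuncV1);
    }
    return (ErlNifFunc*) ((char*) f + *incr);
}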
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 8006741a63..849024453c 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -23,7 +23,11 @@
#ifndef __ERL_NIF_H__
#define __ERL_NIF_H__
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "erl_native_features_config.h"
#include "erl_drv_nif.h"
/* Version history:
@@ -34,9 +38,29 @@
** 2.2: R14B03 enif_is_exception
** 2.3: R15 enif_make_reverse_list, enif_is_number
** 2.4: R16 enif_consume_timeslice
+** 2.5: First experimental maps API additions (libraries of this version are not compatible with any other VM)
+** 2.5: R17 Maps API additions
+** 2.6: R17 with maps
+** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 4
+#define ERL_NIF_MINOR_VERSION 7
+
+/*
+ * The emulator will refuse to load a NIF library with a major version
+ * lower than ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD. The load
+ * may however fail if the user has not removed use of deprecated
+ * symbols.
+ *
+ * ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD has to allow
+ * loading of NIF libraries built at least two major OTP releases
+ * ago.
+ */
+#define ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD 2
#include <stdlib.h>
@@ -95,6 +119,8 @@ typedef unsigned long long ERL_NIF_TERM;
# endif
#endif
+typedef ERL_NIF_TERM ERL_NIF_UINT;
+
struct enif_environment_t;
typedef struct enif_environment_t ErlNifEnv;
@@ -103,8 +129,10 @@ typedef struct
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
+
typedef struct enif_entry_t
{
int major;
@@ -117,8 +145,11 @@ typedef struct enif_entry_t
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
const char* vm_variant;
+ unsigned options;
}ErlNifEntry;
+/* Field bits for ErlNifEntry options */
+#define ERL_NIF_DIRTY_NIF_OPTION 1
typedef struct
@@ -159,6 +190,29 @@ typedef int ErlNifTSDKey;
typedef ErlDrvThreadOpts ErlNifThreadOpts;
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+typedef enum
+{
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
+}ErlNifDirtyTaskFlags;
+#endif
+
+typedef struct /* All fields are internal and may change */
+{
+ ERL_NIF_TERM map;
+ ERL_NIF_UINT t_limit;
+ ERL_NIF_UINT idx;
+ ERL_NIF_TERM *ks;
+ ERL_NIF_TERM *vs;
+ void* __spare__[2]; /* reserved for future additions to stay ABI compatible (same struct size) */
+} ErlNifMapIterator;
+
+typedef enum {
+ ERL_NIF_MAP_ITERATOR_HEAD = 1,
+ ERL_NIF_MAP_ITERATOR_TAIL = 2
+} ErlNifMapIteratorEntry;
+
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
typedef struct {
@@ -168,7 +222,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# undef ERL_NIF_API_FUNC_DECL
#endif
-#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER)
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER) && !defined(STATIC_ERLANG_NIF)
# define ERL_NIF_API_FUNC_MACRO(NAME) (WinDynNifCallbacks.NAME)
# include "erl_nif_api_funcs.h"
/* note that we have to keep ERL_NIF_API_FUNC_MACRO defined */
@@ -180,17 +234,29 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# undef ERL_NIF_API_FUNC_DECL
#endif
-
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
-# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
+# ifdef STATIC_ERLANG_NIF
+# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* MODNAME ## _nif_init(TWinDynNifCallbacks* callbacks)
+# else
+# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
+# endif
# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
#else
# define ERL_NIF_INIT_GLOB
# define ERL_NIF_INIT_BODY
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+# ifdef STATIC_ERLANG_NIF
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void)
+# else
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+# endif
#endif
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
+#else
+# define ERL_NIF_ENTRY_OPTIONS 0
+#endif
#ifdef __cplusplus
}
@@ -216,7 +282,8 @@ ERL_NIF_INIT_DECL(NAME) \
sizeof(FUNCS) / sizeof(*FUNCS), \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
- ERL_NIF_VM_VARIANT \
+ ERL_NIF_VM_VARIANT, \
+ ERL_NIF_ENTRY_OPTIONS \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
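
Taken together, the new ErlNifFunc flags field, the ErlNifEntry options field, and ERL_NIF_ENTRY_OPTIONS let a 2.7 library mark individual functions as dirty jobs. A hedged sketch of what such a function table might look like; the module name "example" and function "crunch" are made-up illustrations, not OTP code:

/* Sketch of a 2.7-style NIF function table with the new flags field. */
#include "erl_nif.h"

static ERL_NIF_TERM crunch(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_int(env, 42); /* placeholder for CPU-heavy work */
}

static ErlNifFunc funcs[] = {
    /* {name, arity, fptr, flags} -- flags is the new fourth field */
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
    {"crunch", 0, crunch, ERL_NIF_DIRTY_JOB_CPU_BOUND}
#else
    {"crunch", 0, crunch, 0}
#endif
};

/* ERL_NIF_INIT also emits ERL_NIF_ENTRY_OPTIONS into the entry, which
 * carries ERL_NIF_DIRTY_NIF_OPTION when dirty schedulers are supported. */
ERL_NIF_INIT(example, funcs, NULL, NULL, NULL, NULL)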
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 2f841645e1..630cefae93 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -22,7 +22,7 @@
#endif
/*
-** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
** to keep compatibility on Windows!!!
**
** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
@@ -141,11 +141,36 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
+ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(int, enif_make_map_put, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM value, ERL_NIF_TERM* map_out));
+ERL_NIF_API_FUNC_DECL(int, enif_get_map_value, (ErlNifEnv* env, ERL_NIF_TERM map, ERL_NIF_TERM key, ERL_NIF_TERM* value));
+ERL_NIF_API_FUNC_DECL(int, enif_make_map_update, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM value, ERL_NIF_TERM* map_out));
+ERL_NIF_API_FUNC_DECL(int, enif_make_map_remove, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM* map_out));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_create, (ErlNifEnv *env, ERL_NIF_TERM map, ErlNifMapIterator *iter, ErlNifMapIteratorEntry entry));
+ERL_NIF_API_FUNC_DECL(void, enif_map_iterator_destroy, (ErlNifEnv *env, ErlNifMapIterator *iter));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_head, (ErlNifEnv *env, ErlNifMapIterator *iter));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMapIterator *iter));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter));
+ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
/*
-** Add new entries here to keep compatibility on Windows!!!
+** ADD NEW ENTRIES HERE (before this comment) !!!
*/
+
+
+/*
+ * Conditional EXPERIMENTAL stuff always last.
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
#endif
+#endif /* ERL_NIF_API_FUNC_DECL */
/*
** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
@@ -259,18 +284,41 @@ ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
#endif
-
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
+# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map)
+# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size)
+# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map)
+# define enif_make_map_put ERL_NIF_API_FUNC_MACRO(enif_make_map_put)
+# define enif_get_map_value ERL_NIF_API_FUNC_MACRO(enif_get_map_value)
+# define enif_make_map_update ERL_NIF_API_FUNC_MACRO(enif_make_map_update)
+# define enif_make_map_remove ERL_NIF_API_FUNC_MACRO(enif_make_map_remove)
+# define enif_map_iterator_create ERL_NIF_API_FUNC_MACRO(enif_map_iterator_create)
+# define enif_map_iterator_destroy ERL_NIF_API_FUNC_MACRO(enif_map_iterator_destroy)
+# define enif_map_iterator_is_head ERL_NIF_API_FUNC_MACRO(enif_map_iterator_is_head)
+# define enif_map_iterator_is_tail ERL_NIF_API_FUNC_MACRO(enif_map_iterator_is_tail)
+# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next)
+# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev)
+# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
/*
-** Add new entries here
+** ADD NEW ENTRIES HERE (before this comment)
*/
+
+/*
+ * Conditional EXPERIMENTAL stuff always last
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
#endif
+#endif /* ERL_NIF_API_FUNC_MACRO */
#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
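
The map additions above follow a create/get_pair/next/destroy iteration pattern. A minimal usage sketch (error handling elided; the function name count_pairs is a made-up example):

/* Sketch: count the pairs of a map using the new iterator API. */
#include "erl_nif.h"

static ERL_NIF_TERM count_pairs(ErlNifEnv* env, ERL_NIF_TERM map)
{
    ErlNifMapIterator iter;
    ERL_NIF_TERM key, value;
    unsigned long n = 0;

    if (!enif_is_map(env, map))
        return enif_make_badarg(env);

    enif_map_iterator_create(env, map, &iter, ERL_NIF_MAP_ITERATOR_HEAD);
    while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
        n++;                              /* key/value are valid here */
        enif_map_iterator_next(env, &iter);
    }
    enif_map_iterator_destroy(env, &iter);
    return enif_make_ulong(env, n);
}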
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 0f93a3a9f0..17f6b32bb1 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -106,7 +106,7 @@
#define dist_entry_channel_no(x) \
((x) == erts_this_dist_entry \
? ((Uint) 0) \
- : (ASSERT_EXPR(is_atom((x)->sysname)), \
+ : (ASSERT(is_atom((x)->sysname)), \
(Uint) atom_val((x)->sysname)))
#define internal_channel_no(x) ((Uint) ERST_INTERNAL_CHANNEL_NO)
#define external_channel_no(x) \
@@ -122,10 +122,10 @@ extern ErtsPTab erts_proc;
(D), \
_TAG_IMMED1_PID)
-#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+#define internal_pid_index(PID) (ASSERT(is_internal_pid((PID))), \
erts_ptab_id2pix(&erts_proc, (PID)))
-#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+#define internal_pid_data(PID) (ASSERT(is_internal_pid((PID))), \
erts_ptab_id2data(&erts_proc, (PID)))
#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
@@ -193,10 +193,10 @@ extern ErtsPTab erts_port;
(D), \
_TAG_IMMED1_PORT)
-#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+#define internal_port_index(PRT) (ASSERT(is_internal_port((PRT))), \
erts_ptab_id2pix(&erts_port, (PRT)))
-#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+#define internal_port_data(PRT) (ASSERT(is_internal_port((PRT))), \
erts_ptab_id2data(&erts_port, (PRT)))
#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
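
These macros rely on an assertion being usable inside a comma expression, which is why the expression-safe ASSERT now replaces the separate ASSERT_EXPR. A small standalone sketch of that pattern, with my_assert() standing in for the ERTS macro:

/* Sketch of the comma-expression assertion pattern used by these macros. */
#include <assert.h>

#define my_assert(e) assert(e)
#define checked_index(arr, i, n) \
    (my_assert((i) >= 0 && (i) < (n)), (arr)[(i)])

/* usage: int x = checked_index(v, 3, len); evaluates the assertion,
 * then yields the element as the value of the whole expression. */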
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index ebfba065d1..c6d136f951 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1371,6 +1371,7 @@ setup_reference_table(void)
/* Insert all ports */
max = erts_ptab_max(&erts_port);
for (i = 0; i < max; i++) {
+ ErlOffHeap *ohp;
erts_aint32_t state;
Port *prt;
@@ -1389,8 +1390,9 @@ setup_reference_table(void)
if (ERTS_P_MONITORS(prt))
insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
/* Insert port data */
- for(hfp = prt->bp; hfp; hfp = hfp->next)
- insert_offheap(&(hfp->off_heap), HEAP_REF, prt->common.id);
+ ohp = erts_port_data_offheap(prt);
+ if (ohp)
+ insert_offheap(ohp, HEAP_REF, prt->common.id);
/* Insert controller */
if (prt->dist_entry)
insert_dist_entry(prt->dist_entry,
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 377aa72ed5..ad3f104a68 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -167,8 +167,7 @@ struct _erl_drv_port {
#endif
erts_atomic_t connected; /* A connected process */
Eterm caller; /* Current caller. */
- Eterm data; /* Data associated with port. */
- ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
+ erts_smp_atomic_t data; /* Data associated with port. */
Uint bytes_in; /* Number of bytes read */
Uint bytes_out; /* Number of bytes written */
@@ -189,6 +188,12 @@ struct _erl_drv_port {
int reds; /* Only used while executing driver callbacks */
};
+
+void erts_init_port_data(Port *);
+void erts_cleanup_port_data(Port *);
+Uint erts_port_data_size(Port *);
+ErlOffHeap *erts_port_data_offheap(Port *);
+
#define ERTS_PORT_GET_CONNECTED(PRT) \
((Eterm) erts_atomic_read_nob(&(PRT)->connected))
#define ERTS_PORT_SET_CONNECTED(PRT, PID) \
@@ -326,8 +331,6 @@ extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
#define ERTS_PORT_REDS_CONTROL (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_CALL (CONTEXT_REDS/50)
#define ERTS_PORT_REDS_INFO (CONTEXT_REDS/100)
-#define ERTS_PORT_REDS_SET_DATA (CONTEXT_REDS/100)
-#define ERTS_PORT_REDS_GET_DATA (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_TERMINATE (CONTEXT_REDS/50)
void print_port_info(Port *, int, void *);
@@ -794,8 +797,6 @@ struct binary;
#define ERTS_P2P_SIG_TYPE_INFO 7
#define ERTS_P2P_SIG_TYPE_LINK 8
#define ERTS_P2P_SIG_TYPE_UNLINK 9
-#define ERTS_P2P_SIG_TYPE_SET_DATA 10
-#define ERTS_P2P_SIG_TYPE_GET_DATA 11
#define ERTS_P2P_SIG_TYPE_BITS 4
#define ERTS_P2P_SIG_TYPE_MASK \
@@ -810,6 +811,7 @@ struct binary;
#define ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG(4)
#define ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG(5)
#define ERTS_P2P_SIG_DATA_FLG_SCHED ERTS_P2P_SIG_DATA_FLG(6)
+#define ERTS_P2P_SIG_DATA_FLG_ASYNC ERTS_P2P_SIG_DATA_FLG(7)
struct ErtsProc2PortSigData_ {
int flags;
@@ -856,10 +858,6 @@ struct ErtsProc2PortSigData_ {
struct {
Eterm from;
} unlink;
- struct {
- ErlHeapFragment *bp;
- Eterm data;
- } set_data;
} u;
} ;
@@ -919,6 +917,7 @@ erts_schedule_proc2port_signal(Process *,
Eterm *,
ErtsProc2PortSigData *,
int,
+ ErtsPortTaskHandle *,
ErtsProc2PortSigCallback);
int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
@@ -932,6 +931,7 @@ int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
#define ERTS_PORT_SIG_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK
#define ERTS_PORT_SIG_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
#define ERTS_PORT_SIG_FLG_FORCE_SCHED ERTS_P2P_SIG_DATA_FLG_SCHED
+#define ERTS_PORT_SIG_FLG_ASYNC ERTS_P2P_SIG_DATA_FLG_ASYNC
/* ERTS_PORT_SIG_FLG_FORCE_IMM_CALL only when crash dumping... */
#define ERTS_PORT_SIG_FLG_FORCE_IMM_CALL ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
@@ -953,7 +953,5 @@ ErtsPortOpResult erts_port_unlink(Process *, Port *, Eterm, Eterm *);
ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
-ErtsPortOpResult erts_port_set_data(Process *, Port *, Eterm, Eterm *);
-ErtsPortOpResult erts_port_get_data(Process *, Port *, Eterm *);
#endif
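
The port data field collapses into a single atomic word managed by the new erts_init_port_data()/erts_cleanup_port_data() helpers, replacing the Eterm plus heap-fragment pointer pair (and the signal-based set/get path removed above). An illustrative sketch of the single-word scheme, assuming immediate values are tagged into the word and other data sits behind a pointer; this is not the ERTS implementation:

/* Illustrative sketch only: one atomic word that stores either a tagged
 * immediate value or a pointer to out-of-line data. */
#include <stdatomic.h>
#include <stdint.h>

#define IMMED_BIT 0x1u                    /* hypothetical tag bit */

static void publish(atomic_uintptr_t *slot, uintptr_t immed_or_ptr)
{
    atomic_store_explicit(slot, immed_or_ptr, memory_order_release);
}

static uintptr_t read_data(atomic_uintptr_t *slot, int *is_immed)
{
    uintptr_t w = atomic_load_explicit(slot, memory_order_acquire);
    *is_immed = (w & IMMED_BIT) != 0;     /* tagged word vs. heap pointer */
    return w;
}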
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index ce045ec94e..2aa0a27197 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -32,6 +32,7 @@
#include "global.h"
#include "erl_port_task.h"
#include "dist.h"
+#include "erl_check_io.h"
#include "dtrace-wrapper.h"
#include <stdarg.h>
@@ -68,6 +69,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
+ ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PT_STATE_SCHEDULED 0
@@ -151,9 +159,10 @@ static ERTS_INLINE void
schedule_port_task_free(ErtsPortTask *ptp)
{
#ifdef ERTS_SMP
- erts_schedule_thr_prgr_later_op(call_port_task_free,
- (void *) ptp,
- &ptp->u.release);
+ erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
+ (void *) ptp,
+ &ptp->u.release,
+ sizeof(ErtsPortTask));
#else
port_task_free(ptp);
#endif
@@ -542,6 +551,16 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
+reset_executed_io_task_handle(ErtsPortTask *ptp)
+{
+ if (ptp->u.alive.handle) {
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ erts_io_notify_port_task_executed(ptp->u.alive.handle);
+ reset_port_task_handle(ptp->u.alive.handle);
+ }
+}
+
+static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->u.alive.handle = pthp;
@@ -551,6 +570,19 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
}
}
+static ERTS_INLINE void
+set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
+{
+ ptp->u.alive.handle = NULL;
+ if (pthp) {
+ /*
+ * IMPORTANT! The task either needs to be aborted, or the task
+ * handle needs to be detached, before thread progress has been made.
+ */
+ erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ }
+}
+
/*
* Busy port queue management
@@ -772,9 +804,10 @@ static void
schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
{
#ifdef ERTS_SMP
- erts_schedule_thr_prgr_later_op(free_port_task_handle_list,
- (void *) pthlp,
- &pthlp->u.release);
+ erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list,
+ (void *) pthlp,
+ &pthlp->u.release,
+ sizeof(ErtsPortTaskHandleList));
#else
erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
#endif
@@ -783,12 +816,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
static ERTS_INLINE void
abort_nosuspend_task(Port *pp,
ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
@@ -862,6 +896,11 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
ASSERT(runq->ports.start && runq->ports.end);
erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+
+#ifdef ERTS_SMP
+ if (runq->halt_in_progress)
+ erts_non_empty_runq(runq);
+#endif
}
static ERTS_INLINE Port *
@@ -971,6 +1010,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -987,6 +1027,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -998,12 +1042,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
@@ -1180,6 +1236,13 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent)
return 1;
}
+void
+erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp)
+{
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ reset_port_task_handle(pthp);
+}
+
/*
* Abort a scheduled task.
*/
@@ -1204,7 +1267,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
ERTS_SMP_READ_MEMORY_BARRIER;
old_state = erts_smp_atomic32_read_nob(&ptp->state);
if (old_state == ERTS_PT_STATE_SCHEDULED) {
- ASSERT(saved_pthp == pthp);
+ ASSERT(!saved_pthp || saved_pthp == pthp);
}
#endif
@@ -1225,9 +1288,6 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
&erts_port_task_outstanding_io_tasks) > 0);
erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
break;
- case ERTS_PORT_TASK_PROC_SIG:
- ERTS_INTERNAL_ERROR("Aborted process to port signal");
- break;
default:
break;
}
@@ -1321,7 +1381,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1345,11 +1405,9 @@ erts_port_task_schedule(Eterm id,
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
- if (pthp && erts_port_task_is_scheduled(pthp)) {
- ASSERT(0);
- erts_port_task_abort(pthp);
- }
+ ERTS_LC_ASSERT(!pthp || !erts_port_task_is_scheduled(pthp));
ASSERT(is_internal_port(id));
@@ -1402,7 +1460,6 @@ erts_port_task_schedule(Eterm id,
}
case ERTS_PORT_TASK_PROC_SIG: {
va_list argp;
- ASSERT(!pthp);
va_start(argp, type);
sigdp = va_arg(argp, ErtsProc2PortSigData *);
ptp = p2p_sig_data_to_task(sigdp);
@@ -1410,7 +1467,7 @@ erts_port_task_schedule(Eterm id,
ptp->u.alive.flags |= va_arg(argp, int);
va_end(argp);
if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND))
- set_handle(ptp, pthp);
+ set_tmp_handle(ptp, pthp);
else {
ns_pthlp = erts_alloc(ERTS_ALC_T_PT_HNDL_LIST,
sizeof(ErtsPortTaskHandleList));
@@ -1434,6 +1491,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1458,6 +1519,13 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
@@ -1466,8 +1534,10 @@ erts_port_task_schedule(Eterm id,
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
+ /* Emigrate port ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = erts_port_runq(pp);
@@ -1477,10 +1547,6 @@ erts_port_task_schedule(Eterm id,
#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
erts_smp_runq_unlock(runq);
@@ -1488,6 +1554,9 @@ erts_port_task_schedule(Eterm id,
done:
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
@@ -1502,7 +1571,7 @@ abort_nosuspend:
erts_port_dec_refc(pp);
#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
@@ -1576,6 +1645,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
int fpe_was_unmasked;
erts_aint32_t state;
int active;
+ Uint64 start_time = 0;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
@@ -1585,6 +1655,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto done;
}
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+
erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1635,7 +1707,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto aborted_port_task;
}
- reset_handle(ptp);
+ if (erts_system_monitor_long_schedule != 0) {
+ start_time = erts_timestamp_millis();
+ }
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -1643,6 +1717,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
switch (ptp->type) {
case ERTS_PORT_TASK_TIMEOUT:
+ reset_handle(ptp);
reds = ERTS_PORT_REDS_TIMEOUT;
if (!(state & ERTS_PORT_SFLGS_DEAD)) {
DTRACE_DRIVER(driver_timeout, pp);
@@ -1653,9 +1728,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
reds = ERTS_PORT_REDS_INPUT;
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
- /* NOTE some windows drivers use ->ready_input for input and output */
+ /* NOTE: some Windows/OSE drivers use ->ready_input
+ for both input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_OUTPUT:
@@ -1664,6 +1741,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
DTRACE_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_EVENT:
@@ -1673,10 +1751,12 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
(*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event,
ptp->u.alive.td.io.event_data);
+ reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ reset_handle(ptp);
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
if (!pp->sched.taskq.bpq)
reds = ptp->u.alive.td.psig.callback(pp,
@@ -1694,6 +1774,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
break;
}
case ERTS_PORT_TASK_DIST_CMD:
+ reset_handle(ptp);
reds = erts_dist_command(pp, CONTEXT_REDS - pp->reds);
break;
default:
@@ -1705,6 +1786,14 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
reds += erts_port_driver_callback_epilogue(pp, &state);
+ if (start_time != 0) {
+ Sint64 diff = erts_timestamp_millis() - start_time;
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
+ monitor_long_schedule_port(pp,ptp->type,(Uint) diff);
+ }
+ }
+ start_time = 0;
+
aborted_port_task:
schedule_port_task_free(ptp);
@@ -1728,10 +1817,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
@@ -1754,11 +1839,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
+ if (active) {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
@@ -1767,6 +1848,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
@@ -1774,7 +1857,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
+ /* Emigrate port... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
@@ -1807,6 +1890,16 @@ release_port(void *vport)
{
erts_port_dec_refc((Port *) vport);
}
+
+static void
+schedule_release_port(void *vport) {
+ Port *pp = (Port*)vport;
+ /* This is only used when a port release was ordered from a non-scheduler */
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+}
+
#endif
static void
@@ -1910,18 +2003,21 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
break;
case ERTS_PORT_TASK_INPUT:
erts_stale_drv_select(pp->common.id,
+ ERTS_Port2ErlDrvPort(pp),
ptp->u.alive.td.io.event,
DO_READ,
1);
break;
case ERTS_PORT_TASK_OUTPUT:
erts_stale_drv_select(pp->common.id,
+ ERTS_Port2ErlDrvPort(pp),
ptp->u.alive.td.io.event,
DO_WRITE,
1);
break;
case ERTS_PORT_TASK_EVENT:
erts_stale_drv_select(pp->common.id,
+ ERTS_Port2ErlDrvPort(pp),
ptp->u.alive.td.io.event,
0,
1);
@@ -1985,9 +2081,9 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
DTRACE_CHARBUF(pid_str, 16);
ErtsProcList* plp2 = plp;
- erts_snprintf(port_str, sizeof(port_str), "%T", pp->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", pp->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
@@ -1999,9 +2095,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
* Schedule cleanup of port structure...
*/
#ifdef ERTS_SMP
- erts_schedule_thr_prgr_later_op(release_port,
- (void *) pp,
- &pp->common.u.release);
+ /* We might not be a scheduler, e.g. when the sys_msg_dispatcher is tracing to a port */
+ if (!erts_get_scheduler_data()) {
+ erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp);
+ } else {
+ /* Has to be more or less immediate to release any driver */
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+ }
#else
pp->cleanup = 1;
#endif
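
Port tasks now participate in the long-schedule system monitor: when erts_system_monitor_long_schedule is set, each callback is timestamped and overruns are reported via monitor_long_schedule_port(). A condensed sketch of the pattern, with timestamp_millis() and report_long_schedule() as self-contained stand-ins for the ERTS helpers:

/* Condensed sketch of the long-schedule measurement pattern used above. */
#include <stdio.h>
#include <time.h>

static unsigned long long limit_ms = 100;          /* 0 disables monitoring */

static unsigned long long timestamp_millis(void)
{
    return (unsigned long long) clock() * 1000 / CLOCKS_PER_SEC;
}

static void report_long_schedule(int type, unsigned long long ms)
{
    printf("long_schedule: task %d took %llu ms\n", type, ms);
}

static void run_task(int task_type, void (*callback)(void))
{
    unsigned long long start = (limit_ms != 0) ? timestamp_millis() : 0;
    callback();                                    /* driver callback runs here */
    if (start != 0) {
        long long diff = (long long)(timestamp_millis() - start);
        if (diff > 0 && (unsigned long long)diff > limit_ms)
            report_long_schedule(task_type, (unsigned long long)diff);
    }
}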
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index ae6cd69ae2..406cd3c492 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -77,6 +77,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_HAVE_NS_TASKS (((erts_aint32_t) 1) << 8)
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
+#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -86,7 +88,9 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
- | ERTS_PTS_FLG_FORCE_SCHED)
+ | ERTS_PTS_FLG_EXEC_IMM \
+ | ERTS_PTS_FLG_FORCE_SCHED \
+ | ERTS_PTS_FLG_EXITING)
#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH 8192
#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW 4096
@@ -135,6 +139,7 @@ ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
@@ -151,7 +156,7 @@ erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
ERTS_GLB_INLINE int
erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
{
- return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
+ return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL;
}
ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
@@ -182,11 +187,13 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 1
#endif
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
+ );
#endif
}
@@ -225,6 +232,12 @@ erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
#endif
}
+ERTS_GLB_INLINE void
+erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
+{
+ erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
+}
+
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
ERTS_GLB_INLINE int
@@ -243,7 +256,9 @@ int erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
+void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *);
int erts_port_task_abort(ErtsPortTaskHandle *);
+
void erts_port_task_abort_nosuspend_tasks(Port *);
int erts_port_task_schedule(Eterm,
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 436147749e..d18760dc43 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -24,6 +24,7 @@
#include "erl_printf_term.h"
#include "sys.h"
#include "big.h"
+#include "erl_map.h"
#define PRINT_CHAR(CNT, FN, ARG, C) \
do { \
@@ -216,14 +217,15 @@ static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
}
-#define PRT_BAR ((Eterm) 0)
-#define PRT_COMMA ((Eterm) 1)
-#define PRT_CLOSE_LIST ((Eterm) 2)
-#define PRT_CLOSE_TUPLE ((Eterm) 3)
-#define PRT_TERM ((Eterm) 4)
-#define PRT_ONE_CONS ((Eterm) 5)
-#define PRT_PATCH_FUN_SIZE ((Eterm) 6)
-#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 7) /* Note! Must be last... */
+#define PRT_BAR ((Eterm) 0)
+#define PRT_COMMA ((Eterm) 1)
+#define PRT_CLOSE_LIST ((Eterm) 2)
+#define PRT_CLOSE_TUPLE ((Eterm) 3)
+#define PRT_ASSOC ((Eterm) 4)
+#define PRT_TERM ((Eterm) 5)
+#define PRT_ONE_CONS ((Eterm) 6)
+#define PRT_PATCH_FUN_SIZE ((Eterm) 7)
+#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 8) /* Note! Must be last... */
static int
print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
@@ -260,6 +262,9 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
case PRT_CLOSE_TUPLE:
PRINT_CHAR(res, fn, arg, '}');
goto L_outer_loop;
+ case PRT_ASSOC:
+ PRINT_STRING(res, fn, arg, "=>");
+ goto L_outer_loop;
default:
popped.word = WSTACK_POP(s);
@@ -483,6 +488,37 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
PRINT_CHAR(res, fn, arg, '>');
}
break;
+ case MAP_DEF:
+ {
+ Uint n;
+ Eterm *ks, *vs;
+ map_t *mp = (map_t *)map_val(wobj);
+ n = map_get_size(mp);
+ ks = map_get_keys(mp);
+ vs = map_get_values(mp);
+
+ PRINT_CHAR(res, fn, arg, '#');
+ PRINT_CHAR(res, fn, arg, '{');
+ WSTACK_PUSH(s, PRT_CLOSE_TUPLE);
+ if (n > 0) {
+ n--;
+ WSTACK_PUSH(s, vs[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ WSTACK_PUSH(s, PRT_ASSOC);
+ WSTACK_PUSH(s, ks[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+
+ while (n--) {
+ WSTACK_PUSH(s, PRT_COMMA);
+ WSTACK_PUSH(s, vs[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ WSTACK_PUSH(s, PRT_ASSOC);
+ WSTACK_PUSH(s, ks[n]);
+ WSTACK_PUSH(s, PRT_TERM);
+ }
+ }
+ }
+ break;
default:
PRINT_STRING(res, fn, arg, "<unknown:");
PRINT_POINTER(res, fn, arg, wobj);
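
The MAP_DEF case prints maps with the same work-stack machinery as tuples and lists: markers and terms are pushed in reverse so that popping emits "#{k0 => v0, k1 => v1}". A tiny standalone demo of that push order, using plain ints in place of Eterms:

/* Minimal demo of the work-stack printing idea used above. */
#include <stdio.h>

enum { CLOSE = -1, COMMA = -2, ASSOC = -3 };

int main(void)
{
    int ks[] = {10, 20}, vs[] = {1, 2};
    int s[32], sp = 0, n = 2;

    printf("#{");
    s[sp++] = CLOSE;                       /* printed last */
    n--;
    s[sp++] = vs[n]; s[sp++] = ASSOC; s[sp++] = ks[n];
    while (n--) {
        s[sp++] = COMMA;
        s[sp++] = vs[n]; s[sp++] = ASSOC; s[sp++] = ks[n];
    }
    while (sp > 0) {                       /* LIFO pop reverses the pushes */
        int w = s[--sp];
        if (w == CLOSE)      printf("}\n");
        else if (w == COMMA) printf(", ");
        else if (w == ASSOC) printf(" => ");
        else                 printf("%d", w);
    }
    return 0;                              /* prints "#{10 => 1, 20 => 2}" */
}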
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 00247b387a..e5bb1203c8 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -44,6 +44,7 @@
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
+
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -53,7 +54,11 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
+#ifndef ERTS_SCHED_MIN_SPIN
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+#else
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
+#endif
#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -143,8 +148,27 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
+#ifdef __OSE__
+/* Eager check I/O not supported on OSE yet. */
+int erts_eager_check_io = 0;
+#else
+int erts_eager_check_io = 0;
+#endif
int erts_sched_compact_load;
+int erts_sched_balance_util = 0;
Uint erts_no_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+Uint erts_no_dirty_cpu_schedulers;
+Uint erts_no_dirty_io_schedulers;
+#endif
+
+#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024)
+#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024)
+#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM (64*1024)
+#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_EAGER (16*1024)
+#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_EAGER (1024)
+
+static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM;
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
@@ -174,6 +198,13 @@ static ErtsAuxWorkData *aux_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL))
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL))
+#endif
+
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
@@ -184,6 +215,23 @@ do { \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#endif
+
#endif
@@ -193,11 +241,29 @@ static struct {
int online;
int curr_online;
int wait_curr_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_cpu_online;
+ int dirty_cpu_curr_online;
+ int dirty_cpu_wait_curr_online;
+ int dirty_io_online;
+ int dirty_io_curr_online;
+ int dirty_io_wait_curr_online;
+#endif
erts_smp_atomic32_t changing;
erts_smp_atomic32_t active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_t dirty_cpu_changing;
+ erts_smp_atomic32_t dirty_cpu_active;
+ erts_smp_atomic32_t dirty_io_changing;
+ erts_smp_atomic32_t dirty_io_active;
+#endif
struct {
int ongoing;
long wait_active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long dirty_cpu_wait_active;
+ long dirty_io_wait_active;
+#endif
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
} schdlr_sspnd;
@@ -238,6 +304,10 @@ static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
static erts_smp_atomic32_t doing_sys_schedule;
static erts_smp_atomic32_t no_empty_run_queues;
+long erts_runq_supervision_interval = 0;
+static ethr_event runq_supervision_event;
+static erts_tid_t runq_supervisor_tid;
+static erts_atomic_t runq_supervisor_sleeping;
#else /* !ERTS_SMP */
ErtsSchedulerData *erts_scheduler_data;
#endif
@@ -246,6 +316,10 @@ ErtsAlignedRunQueue *erts_aligned_run_queues;
Uint erts_no_run_queues;
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
+ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+#endif
typedef union {
ErtsSchedulerSleepInfo ssi;
@@ -253,12 +327,19 @@ typedef union {
} ErtsAlignedSchedulerSleepInfo;
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
+static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
+#endif
+#endif
static Uint last_reductions;
static Uint last_exact_reductions;
Uint erts_default_process_flags;
Eterm erts_system_monitor;
Eterm erts_system_monitor_long_gc;
+Uint erts_system_monitor_long_schedule;
Eterm erts_system_monitor_large_heap;
struct erts_system_monitor_flags_t erts_system_monitor_flags;
@@ -270,6 +351,40 @@ struct erts_system_profile_flags_t erts_system_profile_flags;
#error "Need to store process_count in another type"
#endif
+typedef enum {
+ ERTS_PSTT_GC, /* Garbage Collect */
+ ERTS_PSTT_CPC /* Check Process Code */
+} ErtsProcSysTaskType;
+
+#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
+
+struct ErtsProcSysTask_ {
+ ErtsProcSysTask *next;
+ ErtsProcSysTask *prev;
+ ErtsProcSysTaskType type;
+ Eterm requester;
+ Eterm reply_tag;
+ Eterm req_id;
+ Uint req_id_sz;
+ Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ ErlOffHeap off_heap;
+ Eterm heap[1];
+};
+
+#define ERTS_PROC_SYS_TASK_SIZE(HSz) \
+ (sizeof(ErtsProcSysTask) - sizeof(Eterm) + sizeof(Eterm)*(HSz))
+
+struct ErtsProcSysTaskQs_ {
+ int qmask;
+ int ncount;
+ ErtsProcSysTask *q[ERTS_NO_PROC_PRIO_LEVELS];
+};
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proc_sys_task_queues,
+ ErtsProcSysTaskQs,
+ 50,
+ ERTS_ALC_T_PROC_SYS_TSK_QS)
+
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
ErtsMiscOpList,
10,
@@ -281,9 +396,19 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
ERTS_ALC_T_PROC_LIST)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT_EXPR(-1 <= ((int) (IX)) \
+ (ASSERT(-1 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT(0 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \
+ &aligned_dirty_cpu_sched_sleep_info[(IX)].ssi)
+#define ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT(0 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \
+ &aligned_dirty_io_sched_sleep_info[(IX)].ssi)
+#endif
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -340,6 +465,14 @@ static void aux_work_timeout_early_init(int no_schedulers);
static void aux_work_timeout_late_init(void);
static void setup_aux_work_timer(void);
+static int execute_sys_tasks(Process *c_p,
+ erts_aint32_t *statep,
+ int in_reds);
+static int cleanup_sys_tasks(Process *c_p,
+ erts_aint32_t in_state,
+ int in_reds);
+
+
#if defined(DEBUG) || 0
#define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V))
static void
@@ -386,7 +519,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#ifdef ERTS_SMP
static void handle_pending_exiters(ErtsProcList *);
-
+static void wake_scheduler(ErtsRunQueue *rq);
#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
@@ -426,7 +559,7 @@ void
erts_pre_init_process(void)
{
#ifdef USE_THREADS
- erts_tsd_key_create(&sched_data_key);
+ erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -458,6 +591,16 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
= ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].get_locks
+ = ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
+ = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks);
@@ -477,7 +620,7 @@ release_process(void *vproc)
/* initialize the scheduler */
void
-erts_init_process(int ncpu, int proc_tab_size)
+erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
#ifdef ERTS_SMP
@@ -496,7 +639,9 @@ erts_init_process(int ncpu, int proc_tab_size)
#endif
(ErtsPTabElementCommon *) &erts_invalid_process.common,
proc_tab_size,
- "process_table");
+ sizeof(Process),
+ "process_table",
+ legacy_proc_tab);
last_reductions = 0;
last_exact_reductions = 0;
@@ -546,6 +691,7 @@ erts_late_init_process(void)
static void
init_sched_wall_time(ErtsSchedWallTime *swtp)
{
+ swtp->need = erts_sched_balance_util;
swtp->enabled = 0;
swtp->start = 0;
swtp->working.total = 0;
@@ -568,27 +714,253 @@ sched_wall_time_ts(void)
#endif
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+
+#ifdef ARCH_64
+
+static ERTS_INLINE Uint64
+aschedtime_read(ErtsAtomicSchedTime *var)
+{
+ return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
+}
+
+static ERTS_INLINE void
+aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
+{
+ erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
+}
+
+static ERTS_INLINE void
+aschedtime_init(ErtsAtomicSchedTime *var)
+{
+ erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
+}
+
+#elif defined(ARCH_32)
+
+static ERTS_INLINE Uint64
+aschedtime_read(ErtsAtomicSchedTime *var)
+{
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw.dw_sint;
+#else
+ {
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+ }
+#endif
+}
+
+static ERTS_INLINE void
+aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
+{
+ erts_dw_aint_t dw;
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
+ dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+ erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
+}
+
+static ERTS_INLINE void
+aschedtime_init(ErtsAtomicSchedTime *var)
+{
+ erts_dw_aint_t dw;
+ dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
+ dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
+ erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
+}
+
+#else
+# error :-/
+#endif
+
+#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
+#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
+
+/* Intervals in nanoseconds */
+#define ERTS_SCHED_UTIL_SHORT_INTERVAL ((Uint64) 1*1000*1000*1000)
+#define ERTS_SCHED_UTIL_LONG_INTERVAL ((Uint64) 10*1000*1000*1000)
+
+
+#define ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF 5000 /* ppm */
+
+static ERTS_INLINE Uint64
+calc_sched_worktime(int is_working, Uint64 now, Uint64 last,
+ Uint64 interval, Uint64 old_worktime)
+{
+ Uint64 worktime;
+ Uint64 new;
+
+ if (now <= last)
+ return old_worktime;
+
+ new = now - last;
+
+ if (new >= interval)
+ return is_working ? interval : (Uint64) 0;
+
+
+ /*
+ * Division by 1000 in order to avoid
+ * overflow. If this is changed, update the
+ * assertions in init_runq_sched_util().
+ */
+ worktime = old_worktime;
+ worktime *= (interval - new)/1000;
+ worktime /= (interval/1000);
+ if (is_working)
+ worktime += new;
+
+ ASSERT(0 <= worktime && worktime <= interval);
+
+ return worktime;
+}
+
+static ERTS_INLINE void
+update_avg_sched_util(ErtsSchedulerData *esdp, Uint64 now, int is_working)
+{
+ ErtsRunQueue *rq;
+ int worked;
+ Uint64 swt, lwt, last;
+
+ rq = esdp->run_queue;
+ last = aschedtime_read(&rq->sched_util.last);
+
+ if (now <= last) {
+ ASSERT(last == ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ return;
+ }
+
+ ASSERT(now >= last);
+
+ worked = rq->sched_util.is_working;
+
+ swt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_SHORT_INTERVAL,
+ rq->sched_util.worktime.short_interval);
+ lwt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_LONG_INTERVAL,
+ rq->sched_util.worktime.long_interval);
+
+ aschedtime_set(&rq->sched_util.last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ rq->sched_util.is_working = is_working;
+ rq->sched_util.worktime.short_interval = swt;
+ rq->sched_util.worktime.long_interval = lwt;
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ aschedtime_set(&rq->sched_util.last, now);
+}
+
+int
+erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval)
+{
+ /* Average scheduler utilization in ppm */
+ int util, is_working, try = 0, locked = initially_locked;
+ Uint64 worktime, old_worktime, now, last, interval, *old_worktimep;
+
+ if (short_interval) {
+ old_worktimep = &rq->sched_util.worktime.short_interval;
+ interval = ERTS_SCHED_UTIL_SHORT_INTERVAL;
+ }
+ else {
+ old_worktimep = &rq->sched_util.worktime.long_interval;
+ interval = ERTS_SCHED_UTIL_LONG_INTERVAL;
+ }
+
+ while (1) {
+ Uint64 chk_last;
+ last = aschedtime_read(&rq->sched_util.last);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ is_working = rq->sched_util.is_working;
+ old_worktime = *old_worktimep;
+ ERTS_THR_READ_MEMORY_BARRIER;
+ chk_last = aschedtime_read(&rq->sched_util.last);
+ if (chk_last == last)
+ break;
+ if (!locked) {
+ if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) {
+ /* Writer will eventually block on runq-lock */
+ erts_smp_runq_lock(rq);
+ locked = 1;
+ }
+ }
+ }
+
+ if (!initially_locked && locked)
+ erts_smp_runq_unlock(rq);
+
+ now = sched_wall_time_ts();
+ worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime);
+
+ util = (int) ((worktime * 1000000)/interval);
+
+ ASSERT(0 <= util && util <= 1000000);
+
+ return util;
+}
+
+static void
+init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
+{
+ aschedtime_init(&rqsu->last);
+ if (!enabled)
+ aschedtime_set(&rqsu->last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ rqsu->is_working = 0;
+ rqsu->worktime.short_interval = (Uint64) 0;
+ rqsu->worktime.long_interval = (Uint64) 0;
+
+#ifdef DEBUG
+ {
+ Uint64 intrvl;
+ /*
+ * If one of these asserts fails, we may have
+ * overflow in calc_sched_worktime(). This has
+ * to be fixed either by shrinking the interval
+ * size, or by fixing the worktime calculation
+ * in calc_sched_worktime().
+ */
+ intrvl = ERTS_SCHED_UTIL_SHORT_INTERVAL;
+ ASSERT(intrvl*(intrvl/1000) > intrvl);
+ intrvl = ERTS_SCHED_UTIL_LONG_INTERVAL;
+ ASSERT(intrvl*(intrvl/1000) > intrvl);
+ }
+#endif
+}
+
+#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
+
static ERTS_INLINE void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.enabled) {
+ if (esdp->sched_wall_time.need) {
Uint64 ts = sched_wall_time_ts();
- if (working) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ update_avg_sched_util(esdp, ts, working);
+#endif
+ if (esdp->sched_wall_time.enabled) {
+ if (working) {
#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
+ ASSERT(!esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 1;
#endif
- ts -= esdp->sched_wall_time.start;
- esdp->sched_wall_time.working.start = ts;
- }
- else {
+ ts -= esdp->sched_wall_time.start;
+ esdp->sched_wall_time.working.start = ts;
+ }
+ else {
#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
+ ASSERT(esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 0;
#endif
- ts -= esdp->sched_wall_time.start;
- ts -= esdp->sched_wall_time.working.start;
- esdp->sched_wall_time.working.total += ts;
+ ts -= esdp->sched_wall_time.start;
+ ts -= esdp->sched_wall_time.working.start;
+ esdp->sched_wall_time.working.total += ts;
+ }
}
}
}
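
calc_sched_worktime() maintains a decaying window: the accumulated worktime is scaled by (interval - dt)/interval and dt is added back when the scheduler was working; both operands are pre-divided by 1000 so the 64-bit multiply cannot overflow. A worked example over the 1-second short interval from above (all values in nanoseconds):

/* Worked example of the decaying-window update in calc_sched_worktime(). */
#include <stdio.h>
#include <stdint.h>

#define INTERVAL ((uint64_t)1*1000*1000*1000)      /* 1 s in ns */

static uint64_t worktime(int working, uint64_t dt, uint64_t old)
{
    if (dt >= INTERVAL)
        return working ? INTERVAL : 0;
    /* scale old by (INTERVAL - dt)/INTERVAL; /1000 avoids overflow */
    uint64_t w = old * ((INTERVAL - dt) / 1000) / (INTERVAL / 1000);
    return working ? w + dt : w;
}

int main(void)
{
    /* 600 ms of accumulated work, then 100 ms pass while working: */
    uint64_t w = worktime(1, 100*1000*1000, 600*1000*1000);
    printf("util ~ %llu ppm\n",
           (unsigned long long)(w * 1000000 / INTERVAL)); /* ~640000 */
    return 0;
}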
@@ -641,12 +1013,17 @@ reply_sched_wall_time(void *vswtrp)
ErlHeapFragment *bp = NULL;
ASSERT(esdp);
-
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
if (swtrp->set) {
- if (!swtrp->enable && esdp->sched_wall_time.enabled)
+ if (!swtrp->enable && esdp->sched_wall_time.enabled) {
+ esdp->sched_wall_time.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
+ }
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
+ esdp->sched_wall_time.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
@@ -722,6 +1099,9 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
if (!set && !esdp->sched_wall_time.enabled)
return THE_NON_VALUE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
swtrp = swtreq_alloc();
ref = erts_make_ref(c_p);
@@ -909,6 +1289,53 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
#ifdef ERTS_SMP
static ERTS_INLINE void
+haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
+{
+ if (awdp->later_op.first
+ && erts_thr_progress_cmp(val, awdp->later_op.thr_prgr) >= 0) {
+ awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
+ }
+}
+
+static ERTS_INLINE void
+haw_thr_prgr_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
+{
+ int cmp = erts_thr_progress_cmp(val, awdp->latest_wakeup);
+ if (cmp != 0) {
+ if (cmp > 0) {
+ awdp->latest_wakeup = val;
+ haw_chk_later_cleanup_op_wakeup(awdp, val);
+ }
+ erts_thr_progress_wakeup(awdp->esdp, val);
+ }
+}
+
+static ERTS_INLINE void
+haw_thr_prgr_soft_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
+{
+ if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
+ awdp->latest_wakeup = val;
+ haw_chk_later_cleanup_op_wakeup(awdp, val);
+ erts_thr_progress_wakeup(awdp->esdp, val);
+ }
+}
+
+static ERTS_INLINE void
+haw_thr_prgr_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val, UWord size)
+{
+ if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
+ awdp->later_op.thr_prgr = val;
+ if (awdp->later_op.size > size)
+ awdp->later_op.size -= size;
+ else {
+ awdp->latest_wakeup = val;
+ awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
+ erts_thr_progress_wakeup(awdp->esdp, val);
+ }
+ }
+}
+
+static ERTS_INLINE void
haw_thr_prgr_current_reset(ErtsAuxWorkData *awdp)
{
awdp->current_thr_prgr = ERTS_THR_PRGR_INVALID;
@@ -929,6 +1356,9 @@ static ERTS_INLINE void
haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (current != ERTS_THR_PRGR_INVALID
&& !erts_thr_progress_equal(current, erts_thr_progress_current())) {
/*
@@ -945,6 +1375,10 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in
{
int jix, max_jix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+
ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions)
@@ -1066,8 +1500,7 @@ misc_aux_work_clean(ErtsThrQ_t *q,
case ERTS_THR_Q_NEED_THR_PRGR:
#ifdef ERTS_SMP
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
- erts_thr_progress_wakeup(awdp->esdp,
- erts_thr_q_need_thr_progress(q));
+ haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q));
#endif
case ERTS_THR_Q_CLEAN:
break;
@@ -1101,6 +1534,9 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work,
int waiting)
{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1153,6 +1589,9 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
if (ignore_self) {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
if (esdp)
self = (int) esdp->no;
}
@@ -1182,6 +1621,9 @@ handle_async_ready(ErtsAuxWorkData *awdp,
int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
if (erts_check_async_ready(awdp->async_ready.queue)) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
@@ -1206,6 +1648,9 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
{
void *thr_prgr_p;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
#ifdef ERTS_SMP
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
@@ -1225,8 +1670,7 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#ifdef ERTS_SMP
case ERTS_ASYNC_READY_NEED_THR_PRGR:
- erts_thr_progress_wakeup(awdp->esdp,
- awdp->async_ready.thr_prgr);
+ haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr);
awdp->async_ready.need_thr_prgr = 1;
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#endif
@@ -1244,6 +1688,9 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
@@ -1263,7 +1710,7 @@ void
erts_alloc_notify_delayed_dealloc(int ix)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
schedule_aux_work_wakeup(&esdp->aux_work_data,
ix,
ERTS_SSI_AUX_WORK_DD);
@@ -1272,6 +1719,17 @@ erts_alloc_notify_delayed_dealloc(int ix)
ERTS_SSI_AUX_WORK_DD);
}
+void
+erts_alloc_ensure_handle_delayed_dealloc_call(int ix)
+{
+#ifdef DEBUG
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(!esdp || (ERTS_SCHEDULER_IS_DIRTY(esdp) || ix == (int) esdp->no));
+#endif
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
+ ERTS_SSI_AUX_WORK_DD);
+}
+
static ERTS_INLINE erts_aint32_t
handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
@@ -1280,6 +1738,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
@@ -1300,7 +1761,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
awdp->dd.thr_prgr = wakeup;
set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
awdp->dd.thr_prgr = wakeup;
- erts_thr_progress_wakeup(awdp->esdp, wakeup);
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
}
else if (awdp->dd.completed_callback) {
awdp->dd.completed_callback(awdp->dd.completed_arg);
@@ -1319,6 +1780,9 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1341,7 +1805,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
if (wakeup == ERTS_THR_PRGR_INVALID)
wakeup = erts_thr_progress_later(awdp->esdp);
awdp->dd.thr_prgr = wakeup;
- erts_thr_progress_wakeup(awdp->esdp, wakeup);
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
}
else {
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
@@ -1366,6 +1830,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
if (!erts_thr_progress_has_reached_this(current, lop->later))
@@ -1376,6 +1843,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
}
lop->func(lop->data);
if (!awdp->later_op.first) {
+ awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
awdp->later_op.last = NULL;
unset_aux_work_flags(awdp->ssi,
ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
@@ -1386,6 +1854,29 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
return aux_work;
}
+static ERTS_INLINE ErtsThrPrgrVal
+enqueue_later_op(ErtsSchedulerData *esdp,
+ void (*later_func)(void *),
+ void *later_data,
+ ErtsThrPrgrLaterOp *lop)
+{
+ ErtsThrPrgrVal later = erts_thr_progress_later(esdp);
+ ASSERT(esdp);
+
+ lop->func = later_func;
+ lop->data = later_data;
+ lop->later = later;
+ lop->next = NULL;
+ if (!esdp->aux_work_data.later_op.last)
+ esdp->aux_work_data.later_op.first = lop;
+ else
+ esdp->aux_work_data.later_op.last->next = lop;
+ esdp->aux_work_data.later_op.last = lop;
+ set_aux_work_flags_wakeup_nob(esdp->ssi,
+ ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
+ return later;
+}
+
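enqueue_later_op() is a plain intrusive FIFO append: the first/last pointers give O(1) enqueue, and the scheduler later drains the list in handle_thr_prgr_later_op(). A self-contained sketch of that queue shape in plain C (names are illustrative; nodes are caller-owned, as ErtsThrPrgrLaterOp is in the patch):

    #include <stdio.h>
    #include <stddef.h>

    struct later_op {
        void (*func)(void *);
        void *data;
        struct later_op *next;
    };

    struct later_op_queue {
        struct later_op *first, *last;
    };

    static void later_op_enqueue(struct later_op_queue *q, struct later_op *lop)
    {
        lop->next = NULL;
        if (!q->last)               /* queue was empty */
            q->first = lop;
        else
            q->last->next = lop;
        q->last = lop;
    }

    static void later_op_drain(struct later_op_queue *q)
    {
        while (q->first) {
            struct later_op *lop = q->first;
            q->first = lop->next;
            if (!q->first)
                q->last = NULL;     /* queue is empty again */
            lop->func(lop->data);
        }
    }

    static void say(void *msg) { puts(msg); }

    int main(void)
    {
        struct later_op_queue q = { NULL, NULL };
        struct later_op a = { say, "first op" }, b = { say, "second op" };
        later_op_enqueue(&q, &a);
        later_op_enqueue(&q, &b);
        later_op_drain(&q);
        return 0;
    }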
#endif /* ERTS_SMP */
void
@@ -1396,31 +1887,24 @@ erts_schedule_thr_prgr_later_op(void (*later_func)(void *),
#ifndef ERTS_SMP
later_func(later_data);
#else
- ErtsSchedulerData *esdp;
- ErtsAuxWorkData *awdp;
- int request_wakeup = 1;
-
- esdp = erts_get_scheduler_data();
- ASSERT(esdp);
- awdp = &esdp->aux_work_data;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
+ haw_thr_prgr_wakeup(&esdp->aux_work_data, later);
+#endif
+}
- lop->func = later_func;
- lop->data = later_data;
- lop->later = erts_thr_progress_later(esdp);
- lop->next = NULL;
- if (!awdp->later_op.last)
- awdp->later_op.first = lop;
- else {
- ErtsThrPrgrLaterOp *last = awdp->later_op.last;
- last->next = lop;
- if (erts_thr_progress_equal(last->later, lop->later))
- request_wakeup = 0;
- }
- awdp->later_op.last = lop;
- set_aux_work_flags_wakeup_nob(awdp->ssi,
- ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
- if (request_wakeup)
- erts_thr_progress_wakeup(esdp, lop->later);
+void
+erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *),
+ void *later_data,
+ ErtsThrPrgrLaterOp *lop,
+ UWord size)
+{
+#ifndef ERTS_SMP
+ later_func(later_data);
+#else
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
+ haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size);
#endif
}
@@ -1507,6 +1991,14 @@ erts_smp_notify_check_children_needed(void)
for (i = 0; i < erts_no_schedulers; i++)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ for (i = 0; i < erts_no_dirty_cpu_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ for (i = 0; i < erts_no_dirty_io_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#endif
}
static ERTS_INLINE erts_aint32_t
@@ -1723,6 +2215,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
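The pointer arithmetic above is the usual manual cache-line alignment: malloc() makes no cache-line promise, so the block is over-allocated by one line minus a byte and the pointer rounded up to the next boundary. The same trick in isolation, assuming a 64-byte line (ERTS derives the real value at build time):

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHE_LINE_SIZE 64            /* assumed line size */
    #define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

    static void *alloc_cache_aligned(size_t sz, void **free_me)
    {
        uintptr_t p = (uintptr_t) malloc(sz + CACHE_LINE_SIZE - 1);
        if (!p)
            return NULL;
        *free_me = (void *) p;            /* keep the raw pointer for free() */
        if (p & CACHE_LINE_MASK)          /* round up to the next line */
            p = (p & ~(uintptr_t) CACHE_LINE_MASK) + CACHE_LINE_SIZE;
        return (void *) p;
    }

    int main(void)
    {
        void *raw;
        void *p = alloc_cache_aligned(1000, &raw);
        if (!p)
            return 1;
        free(raw);
        return 0;
    }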
@@ -1811,7 +2306,7 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
ERTS_DBG_CHK_AUX_WORK_VAL(type);
ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
-// erts_fprintf(stderr, "t(%d, 0x%x, %d)\n", ix, type, enable);
+ /* erts_fprintf(stderr, "t(%d, 0x%x, %d)\n", ix, type, enable); */
if (!enable) {
old = erts_atomic32_read_band_mb(&aux_work_tmo->type[ix], ~type);
@@ -1854,6 +2349,9 @@ static ERTS_INLINE void
sched_active_sys(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
ASSERT(rq->waiting < 0);
rq->waiting *= -1;
rq->waiting--;
@@ -1889,19 +2387,47 @@ try_set_sys_scheduling(void)
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(void)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp, int non_blocking)
{
+ if (non_blocking && erts_eager_check_io) {
#ifdef ERTS_SMP
- while (!erts_port_task_have_outstanding_io_tasks()
- && try_set_sys_scheduling()) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- clear_sys_scheduling();
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+ wake_scheduler(rq);
+ return 0;
+ }
+#endif
+ return try_set_sys_scheduling();
+#else
+ return 1;
+#endif
}
- return 0;
+ else {
+#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+ clear_sys_scheduling();
+ wake_scheduler(rq);
+ return 0;
+ }
+#endif
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
#else
- return !erts_port_task_have_outstanding_io_tasks();
+ return !erts_port_task_have_outstanding_io_tasks();
#endif
+ }
}
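The reworked prepare_for_sys_schedule() keeps the invariant that only one scheduler at a time sits in erl_sys_schedule(): the flag is claimed with an atomic operation, and the "no outstanding I/O tasks" condition is re-checked after claiming so a task that races in cannot be missed. A rough model of that claim/re-check pattern using C11 atomics instead of the ERTS wrappers (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag doing_sys_schedule = ATOMIC_FLAG_INIT;
    static atomic_int outstanding_io_tasks;

    static bool try_become_sys_scheduler(void)
    {
        /* claim the flag only while no I/O tasks are outstanding */
        while (atomic_load(&outstanding_io_tasks) == 0
               && !atomic_flag_test_and_set(&doing_sys_schedule)) {
            if (atomic_load(&outstanding_io_tasks) == 0)
                return true;              /* we are the poller */
            /* a task raced in between check and claim: back off, retry */
            atomic_flag_clear(&doing_sys_schedule);
        }
        return false;
    }

    int main(void) { return try_become_sys_scheduler() ? 0 : 1; }

The non_blocking branch added above skips the outstanding-task check entirely when eager check I/O is enabled, since in that mode polling with tasks outstanding is harmless.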
#ifdef ERTS_SMP
@@ -1910,6 +2436,9 @@ static ERTS_INLINE void
sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
ASSERT(rq->waiting < 0);
rq->waiting *= -1;
}
@@ -1925,7 +2454,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq)
else
rq->waiting++;
rq->woken = 0;
- if (erts_system_profile_flags.scheduler)
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && erts_system_profile_flags.scheduler)
profile_scheduler(make_small(no), am_inactive);
}
@@ -1937,7 +2466,7 @@ sched_active(Uint no, ErtsRunQueue *rq)
rq->waiting++;
else
rq->waiting--;
- if (erts_system_profile_flags.scheduler)
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && erts_system_profile_flags.scheduler)
profile_scheduler(make_small(no), am_active);
}
@@ -1949,10 +2478,9 @@ ongoing_multi_scheduling_block(void)
}
static ERTS_INLINE void
-empty_runq(ErtsRunQueue *rq)
+empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
{
- Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED);
- if (old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
#ifdef DEBUG
erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
@@ -1961,15 +2489,38 @@ empty_runq(ErtsRunQueue *rq)
*/
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_inc_relb(&no_empty_run_queues);
+ if (!erts_runq_supervision_interval)
+ erts_smp_atomic32_inc_relb(&no_empty_run_queues);
+ else {
+ erts_smp_atomic32_inc_mb(&no_empty_run_queues);
+ if (erts_atomic_read_nob(&runq_supervisor_sleeping))
+ ethr_event_set(&runq_supervision_event);
+ }
}
}
static ERTS_INLINE void
+empty_runq(ErtsRunQueue *rq)
+{
+ Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq_aux(rq, old_flags);
+}
+
+static ERTS_INLINE Uint32
+empty_protected_runq(ErtsRunQueue *rq)
+{
+ Uint32 old_flags = ERTS_RUNQ_FLGS_BSET(rq,
+ ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED,
+ ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq_aux(rq, old_flags);
+ return old_flags;
+}
+
+static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY);
- if (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY)) {
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) {
#ifdef DEBUG
erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
@@ -1978,10 +2529,29 @@ non_empty_runq(ErtsRunQueue *rq)
*/
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_dec_relb(&no_empty_run_queues);
+ if (!erts_runq_supervision_interval)
+ erts_smp_atomic32_dec_relb(&no_empty_run_queues);
+ else {
+ erts_aint32_t no;
+ no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues);
+ if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping))
+ ethr_event_set(&runq_supervision_event);
+ }
}
}
+void
+erts_empty_runq(ErtsRunQueue *rq)
+{
+ empty_runq(rq);
+}
+
+void
+erts_non_empty_runq(ErtsRunQueue *rq)
+{
+ non_empty_runq(rq);
+}
+
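empty_runq_aux() and non_empty_runq() maintain a global count of empty run queues; when run-queue supervision is enabled, the patch switches to full-barrier atomics and signals the supervisor's event so it cannot miss a transition. A toy rendition of that bookkeeping, with the ethr event stubbed out:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int no_empty_run_queues;
    static atomic_int supervisor_sleeping;

    static void event_set(void) { puts("supervisor woken"); }  /* stub */

    static void mark_runq_empty(void)
    {
        atomic_fetch_add(&no_empty_run_queues, 1);
        if (atomic_load(&supervisor_sleeping))
            event_set();
    }

    static void mark_runq_nonempty(void)
    {
        /* fetch_sub returns the old value; compute the new count */
        int remaining = atomic_fetch_sub(&no_empty_run_queues, 1) - 1;
        /* other queues are still empty, so the supervisor may need to
           push work toward them: wake it */
        if (remaining > 0 && atomic_load(&supervisor_sleeping))
            event_set();
    }

    int main(void)
    {
        atomic_store(&supervisor_sleeping, 1);
        mark_runq_empty();
        mark_runq_empty();
        mark_runq_nonempty();   /* one queue still empty -> wake */
        return 0;
    }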
static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
@@ -2127,6 +2697,13 @@ aux_thread(void *unused)
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "aux_thread";
+ erts_lc_set_thread_name(buf);
+ }
+#endif
+
ssi->event = erts_tse_fetch();
callbacks.arg = (void *) ssi;
@@ -2158,6 +2735,8 @@ aux_thread(void *unused)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
erts_thr_progress_prepare_wait(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2195,18 +2774,37 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ erts_smp_spin_lock(&rq->sleepers.lock);
+#endif
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+#endif
return;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ }
+#endif
+
/*
* If all schedulers are waiting, one of them *should*
* be waiting in erl_sys_schedule()
*/
- if (!prepare_for_sys_schedule()) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp, 0)) {
sched_waiting(esdp->no, rq);
@@ -2216,30 +2814,35 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
- if (thr_prgr_active != working)
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working)
sched_wall_time_change(esdp, thr_prgr_active);
while (1) {
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
- if (!thr_prgr_active) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && erts_thr_progress_update(esdp))
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
if (aux_work)
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
else {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
}
- erts_thr_progress_prepare_wait(esdp);
+
+ ERTS_SCHED_FAIR_YIELD();
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2254,7 +2857,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
} while (res == EINTR);
}
}
- erts_thr_progress_finalize_wait(esdp);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ erts_thr_progress_finalize_wait(esdp);
}
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -2275,7 +2879,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
- if (!thr_prgr_active) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
@@ -2292,6 +2896,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_atomic32_set_relb(&function_calls, 0);
*fcalls = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
+
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ ASSERT(esdp->no == 1);
+#endif
sched_waiting_sys(esdp->no, rq);
@@ -2312,7 +2923,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, working = 0);
ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
erl_sys_schedule(1); /* Might give us something to do */
dt = erts_do_time_read_and_reset();
@@ -2358,7 +2968,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp, 0)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -2380,7 +2990,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp, 0)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -2484,7 +3094,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq)
{
ErtsSchedulerSleepInfo *ssi;
erts_aint32_t flgs;
@@ -2503,10 +3113,53 @@ wake_scheduler(ErtsRunQueue *rq, int incq)
flgs = ssi_flags_set_wake(ssi);
erts_sched_finish_poke(ssi, flgs);
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+wake_dirty_schedulers(ErtsRunQueue *rq, int one)
+{
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ sl = &rq->sleepers;
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi) {
+ erts_smp_spin_unlock(&sl->lock);
+ if (one)
+ wake_scheduler(rq);
+ } else if (one) {
+ erts_aint32_t flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
+ }
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
- if (incq && (flgs & ERTS_SSI_FLG_WAITING))
- non_empty_runq(rq);
+ erts_smp_spin_unlock(&sl->lock);
+
+ ERTS_THR_MEMORY_BARRIER;
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+ } else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ ERTS_THR_MEMORY_BARRIER;
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+ erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
+#endif
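wake_dirty_schedulers() works on a doubly linked sleeper list guarded by a spinlock: wake-one unlinks a single node, while wake-all detaches the entire list inside the lock and pokes the nodes only after releasing it, keeping the critical section short. The list surgery in isolation:

    #include <stddef.h>

    struct sleeper {
        struct sleeper *prev, *next;
    };

    struct sleeper_list {
        struct sleeper *list;
    };

    static void unlink_sleeper(struct sleeper_list *sl, struct sleeper *s)
    {
        if (s->prev)
            s->prev->next = s->next;
        else
            sl->list = s->next;   /* s was the head */
        if (s->next)
            s->next->prev = s->prev;
    }

    static struct sleeper *detach_all(struct sleeper_list *sl)
    {
        struct sleeper *all = sl->list;
        sl->list = NULL;          /* wake every sleeper at once */
        return all;
    }

    int main(void)
    {
        struct sleeper a = {0}, b = {0};
        struct sleeper_list sl = { &a };
        a.next = &b; b.prev = &a;
        unlink_sleeper(&sl, &a);  /* wake-one: b becomes the head */
        return detach_all(&sl) == &b ? 0 : 1;
    }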
#define ERTS_NO_USED_RUNQS_SHIFT 16
#define ERTS_NO_RUNQS_MASK 0xffff
@@ -2554,6 +3207,8 @@ set_no_active_runqs(int active)
erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
+ if ((exp & ERTS_NO_RUNQS_MASK) == active)
+ break;
new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
new |= active & ERTS_NO_RUNQS_MASK;
act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
@@ -2580,7 +3235,6 @@ try_inc_no_active_runqs(int active)
return 0;
}
-
static ERTS_INLINE int
chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
{
@@ -2595,7 +3249,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
if (try_inc_no_active_runqs(ix+1))
(void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
}
- wake_scheduler(wrq, 0);
+ wake_scheduler(wrq);
return 1;
}
return 0;
@@ -2642,8 +3296,14 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (runq)
- wake_scheduler(runq, 1);
+ if (runq) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ wake_dirty_schedulers(runq, 1);
+ else
+#endif
+ wake_scheduler(runq);
+ }
#endif
}
@@ -2661,7 +3321,7 @@ erts_sched_notify_check_cpu_bind(void)
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
#else
erts_sched_check_cpu_bind(erts_get_scheduler_data());
@@ -2746,7 +3406,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
if (statep)
*statep = state;
- prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
rqi = &runq->procs.prio_info[prio];
@@ -2789,6 +3449,11 @@ check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
if (!f_rq)
return NULL;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (mp->sched_util)
+ return NULL;
+#endif
+
f_rq_flags = ERTS_RUNQ_FLGS_GET(f_rq);
if (f_rq_flags & ERTS_RUNQ_FLG_PROTECTED)
return NULL;
@@ -2895,7 +3560,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
erts_aint32_t state;
state = erts_smp_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)
- && (prio == (int) (ERTS_PSFLG_PRIO_MASK & state))) {
+ && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) {
ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
erts_smp_runq_unlock(rq);
@@ -2928,10 +3593,11 @@ suspend_run_queue(ErtsRunQueue *rq)
ERTS_SSI_FLG_SUSPENDED);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
static void scheduler_ix_resume_wake(Uint ix);
+static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -2958,7 +3624,10 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
- scheduler_ix_resume_wake(rq->ix);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ scheduler_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -2977,7 +3646,7 @@ schedule_bound_processes(ErtsRunQueue *rq,
while (proc) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
next = proc->next;
- enqueue_process(rq, (int) (ERTS_PSFLG_PRIO_MASK & state), proc);
+ enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc);
proc = next;
}
}
@@ -2989,20 +3658,28 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp;
+ ErtsMigrationPath *mp = NULL;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
+ }
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -3020,6 +3697,9 @@ evacuate_run_queue(ErtsRunQueue *rq,
to_rq->misc.start = start;
to_rq->misc.end = end;
+
+ non_empty_runq(to_rq);
+
erts_smp_runq_unlock(to_rq);
smp_notify_inc_runq(to_rq);
erts_smp_runq_lock(to_rq);
@@ -3028,6 +3708,9 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (rq->ports.start) {
Port *prt;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -3063,15 +3746,26 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_aint32_t state;
Process *proc;
int notify = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int requeue;
+#endif
to_rq = NULL;
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
+ }
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 1;
+#endif
if (ERTS_PSFLG_BOUND & state) {
/* Bound processes get stuck here... */
proc->next = NULL;
@@ -3080,12 +3774,51 @@ evacuate_run_queue(ErtsRunQueue *rq,
else
sbpp->first = proc;
sbpp->last = proc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 0;
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
+ } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
+ if (requeue) {
+#else
else {
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+#endif
+ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
- to_rq = mp->prio[prio].runq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ /*
+ * dirty run queues evacuate only to run
+ * queue 0 during multi-scheduling blocking
+ */
+ to_rq = ERTS_RUNQ_IX(0);
+ else
+#endif
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_smp_runq_lock(to_rq);
@@ -3100,9 +3833,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (notify)
smp_notify_inc_runq(to_rq);
}
-
- if (ERTS_EMPTY_RUNQ(rq))
- empty_runq(rq);
}
static int
@@ -3158,7 +3888,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)) {
/* Steal process */
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
erts_smp_runq_unlock(vrq);
@@ -3235,7 +3965,7 @@ try_steal_task(ErtsRunQueue *rq)
Uint32 flags;
/* Protect jobs we steal from getting stolen from us... */
- flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ flags = empty_protected_runq(rq);
if (flags & ERTS_RUNQ_FLG_SUSPENDED)
return 0; /* go suspend instead... */
@@ -3314,6 +4044,9 @@ typedef struct {
int full_reds_history_change;
int oowc;
int max_len;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util;
+#endif
} ErtsRunQueueBalance;
static ErtsRunQueueBalance *run_queue_info;
@@ -3477,6 +4210,9 @@ check_balance(ErtsRunQueue *c_rq)
Sint64 scheds_reds, full_scheds_reds;
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util_balancing;
+#endif
if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
@@ -3532,6 +4268,10 @@ check_balance(ErtsRunQueue *c_rq)
return;
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ sched_util_balancing = 0;
+#endif
+
freds_hist_ix = balance_info.full_reds_history_index;
balance_info.full_reds_history_index++;
if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
@@ -3562,7 +4302,12 @@ check_balance(ErtsRunQueue *c_rq)
run_queue_info[qix].oowc = rq->out_of_work_count;
run_queue_info[qix].max_len = rq->max_len;
rq->check_balance_reds = INT_MAX;
-
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util)
+ run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0);
+#endif
+
erts_smp_runq_unlock(rq);
}
@@ -3632,8 +4377,38 @@ check_balance(ErtsRunQueue *c_rq)
mmax_len = run_queue_info[qix].max_len;
}
- if (!erts_sched_compact_load)
+ if (!erts_sched_compact_load) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util && full_scheds < blnc_no_rqs) {
+ int avg_util = 0;
+
+ for (qix = 0; qix < blnc_no_rqs; qix++)
+ avg_util += run_queue_info[qix].sched_util;
+
+ avg_util /= blnc_no_rqs; /* in ppm */
+
+ sched_util_balancing = 1;
+ /*
+	     * In order to avoid renaming a large number of fields
+	     * we write utilization values instead of length values
+	     * in the 'max_len' and 'migration_limit' fields...
+ */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ run_queue_info[qix].flags = 0; /* Reset for later use... */
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].emigrate_to = -1;
+ run_queue_info[qix].prio[pix].immigrate_from = -1;
+ run_queue_info[qix].prio[pix].avail = 100;
+ run_queue_info[qix].prio[pix].max_len = run_queue_info[qix].sched_util;
+ run_queue_info[qix].prio[pix].migration_limit = avg_util;
+ }
+ }
+ active = blnc_no_rqs;
+ goto setup_migration_paths;
+ }
+#endif
goto all_active;
+ }
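Some arithmetic behind the utilization-balancing branch above: per-queue utilization is sampled in parts per million, averaged over all balanced queues, and smuggled through the existing max_len/migration_limit fields so that queues above the average emigrate work toward queues below it. A worked numeric example (the ignore band constant is assumed here; the real one is ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF):

    #include <stdio.h>

    #define IGNORE_IMBALANCE_DIFF 20000   /* assumed: 2% in ppm */

    int main(void)
    {
        int util[4] = { 990000, 700000, 150000, 160000 };  /* sample ppm */
        int avg = 0, qix;

        for (qix = 0; qix < 4; qix++)
            avg += util[qix];
        avg /= 4;                          /* 500000 ppm for this sample */

        for (qix = 0; qix < 4; qix++) {
            int len_diff = util[qix] - avg;  /* max_len - migration_limit */
            if (-IGNORE_IMBALANCE_DIFF <= len_diff
                && len_diff <= IGNORE_IMBALANCE_DIFF)
                len_diff = 0;                /* ignore minor imbalance */
            printf("runq %d: %+d ppm vs average\n", qix, len_diff);
        }
        return 0;
    }

The later "Setup migration paths" loop then sorts queues on exactly this difference, which is why reusing the length fields works unchanged.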
if (!forced && half_full_scheds != blnc_no_rqs) {
int min = 1;
@@ -3750,15 +4525,30 @@ check_balance(ErtsRunQueue *c_rq)
}
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ setup_migration_paths:
+#endif
+
/* Setup migration paths for all priorities */
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
int low = 0, high = 0;
for (qix = 0; qix < blnc_no_rqs; qix++) {
int len_diff = run_queue_info[qix].prio[pix].max_len;
len_diff -= run_queue_info[qix].prio[pix].migration_limit;
+
#ifdef DBG_PRINT
if (pix == 2) erts_fprintf(stderr, "%d ", len_diff);
#endif
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (sched_util_balancing
+ && -ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF <= len_diff
+ && len_diff <= ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF) {
+ /* ignore minor imbalance */
+ len_diff = 0;
+ }
+#endif
+
run_queue_compare[qix].qix = qix;
run_queue_compare[qix].len = len_diff;
if (len_diff != 0) {
@@ -3885,6 +4675,9 @@ erts_fprintf(stderr, "--------------------------------\n");
Uint32 flags = run_queue_info[qix].flags;
ErtsMigrationPath *mp = &new_mpaths->mpath[qix];
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ mp->sched_util = sched_util_balancing;
+#endif
mp->flags = flags;
mp->misc_evac_runq = NULL;
@@ -3996,9 +4789,8 @@ static void
change_no_used_runqs(int used)
{
ErtsMigrationPaths *new_mpaths, *old_mpaths;
- int active, qix;
+ int qix;
erts_smp_mtx_lock(&balance_info.update_mtx);
- get_no_runqs(&active, NULL);
set_no_used_runqs(used);
old_mpaths = erts_get_migration_paths_managed();
@@ -4140,13 +4932,20 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
if (rq->wakeup_other > wakeup_other.limit) {
- int empty_rqs =
- erts_smp_atomic32_read_acqb(&no_empty_run_queues);
- if (flags & ERTS_RUNQ_FLG_PROTECTED)
- (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (empty_rqs != 0)
- wake_scheduler_on_empty_runq(rq);
- rq->wakeup_other = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting)
+ wake_dirty_schedulers(rq, 1);
+ else
+#endif
+ {
+ int empty_rqs =
+ erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (empty_rqs != 0)
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
}
}
rq->wakeup_other_reds = 0;
@@ -4250,6 +5049,53 @@ set_wakeup_other_data(void)
}
}
+static int
+no_runqs_to_supervise(void)
+{
+ int used;
+ erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (nerq <= 0)
+ return 0;
+ get_no_runqs(NULL, &used);
+ if (nerq >= used)
+ return 0;
+ return used;
+}
+
+static void *
+runq_supervisor(void *unused)
+{
+ while (1) {
+ int ix, no_rqs;
+
+ erts_milli_sleep(erts_runq_supervision_interval);
+ no_rqs = no_runqs_to_supervise();
+ if (!no_rqs) {
+ erts_atomic_set_nob(&runq_supervisor_sleeping, 1);
+ while (1) {
+ ethr_event_reset(&runq_supervision_event);
+ no_rqs = no_runqs_to_supervise();
+ if (no_rqs) {
+ erts_atomic_set_nob(&runq_supervisor_sleeping, 0);
+ break;
+ }
+ ethr_event_wait(&runq_supervision_event);
+ }
+ }
+
+ for (ix = 0; ix < no_rqs; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) {
+ erts_smp_runq_lock(rq);
+ if (rq->len != 0)
+ wake_scheduler_on_empty_runq(rq); /* forced wakeup... */
+ erts_smp_runq_unlock(rq);
+ }
+ }
+ }
+ return NULL;
+}
+
#endif
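The supervisor's inner loop uses the classic reset/re-check/wait sequence on the event to avoid lost wakeups: reset first, re-check the predicate, and only then block. The same pattern rendered with a pthreads condition variable in place of the ethr event (all names illustrative, predicate stubbed):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cnd = PTHREAD_COND_INITIALIZER;
    static bool signaled;

    static int work_available(void) { return 1; }  /* stub predicate */

    static void wait_for_work(void)
    {
        pthread_mutex_lock(&mtx);
        for (;;) {
            signaled = false;             /* ethr_event_reset() */
            if (work_available())         /* re-check after reset */
                break;
            while (!signaled)             /* ethr_event_wait() */
                pthread_cond_wait(&cnd, &mtx);
        }
        pthread_mutex_unlock(&mtx);
    }

    static void signal_work(void)         /* ethr_event_set() */
    {
        pthread_mutex_lock(&mtx);
        signaled = true;
        pthread_cond_signal(&cnd);
        pthread_mutex_unlock(&mtx);
    }

    int main(void)
    {
        signal_work();
        wait_for_work();   /* returns at once: the stub predicate is true */
        return 0;
    }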
void
@@ -4260,11 +5106,17 @@ erts_early_init_scheduling(int no_schedulers)
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
#endif
+#ifndef ERTS_SCHED_MIN_SPIN
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
+#else
+ sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+#endif
}
int
@@ -4358,17 +5210,46 @@ erts_sched_set_busy_wait_threshold(char *str)
return 0;
}
+
+int
+erts_sched_set_wake_cleanup_threshold(char *str)
+{
+ if (sys_strcmp(str, "very_lazy") == 0)
+ thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY;
+ else if (sys_strcmp(str, "lazy") == 0)
+ thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY;
+ else if (sys_strcmp(str, "medium") == 0)
+ thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM;
+ else if (sys_strcmp(str, "eager") == 0)
+ thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_EAGER;
+ else if (sys_strcmp(str, "very_eager") == 0)
+ thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_EAGER;
+ else
+ return EINVAL;
+ return 0;
+}
+
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- awdp->sched_id = esdp ? (int) esdp->no : 0;
+ if (!esdp)
+ awdp->sched_id = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
+#endif
+ else
+ awdp->sched_id = (int) esdp->no;
awdp->esdp = esdp;
awdp->ssi = esdp ? esdp->ssi : NULL;
#ifdef ERTS_SMP
+ awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST;
awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.completed_callback = NULL;
awdp->dd.completed_arg = NULL;
+ awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST;
+ awdp->later_op.size = 0;
awdp->later_op.first = NULL;
awdp->later_op.last = NULL;
#endif
@@ -4398,41 +5279,122 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
#endif
}
+static void
+init_scheduler_data(ErtsSchedulerData* esdp, int num,
+ ErtsSchedulerSleepInfo* ssi,
+ ErtsRunQueue* runq,
+ char** daww_ptr, size_t daww_sz)
+{
+#ifdef ERTS_SMP
+ erts_bits_init_state(&esdp->erl_bits_state);
+ esdp->match_pseudo_process = NULL;
+ esdp->free_process = NULL;
+#endif
+ esdp->x_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ ERTS_X_REGS_ALLOCATED *
+ sizeof(Eterm));
+ esdp->f_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ MAX_REG * sizeof(FloatDef));
+#if !HEAP_ON_C_STACK
+ esdp->num_tmp_heap_used = 0;
+#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
+ esdp->no = 0;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
+ }
+ else {
+ esdp->no = (Uint) num;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
+ }
+#else
+ esdp->no = (Uint) num;
+#endif
+ esdp->ssi = ssi;
+ esdp->current_process = NULL;
+ esdp->current_port = NULL;
+
+ esdp->virtual_reds = 0;
+ esdp->cpu_id = -1;
+
+ erts_init_atom_cache_map(&esdp->atom_cache_map);
+
+ esdp->run_queue = runq;
+ esdp->run_queue->scheduler = esdp;
+
+ if (daww_ptr) {
+ init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
+#ifdef ERTS_SMP
+ *daww_ptr += daww_sz;
+#endif
+ }
+
+ esdp->reductions = 0;
+
+ init_sched_wall_time(&esdp->sched_wall_time);
+ erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
+}
+
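init_scheduler_data() encodes the normal/dirty distinction in the numbering itself: a normal scheduler gets no = 1..N and a dirty number of 0, while a dirty scheduler gets no = 0 and a dirty number of 1..M. Reduced to a struct (the accessor macro shown is a guess at the spirit of the test, not the exact ERTS definition):

    struct sched_id {
        unsigned no;        /* 1..N for normal schedulers, else 0 */
        unsigned dirty_no;  /* 1..M for dirty schedulers, else 0 */
    };

    /* assumed shape; ERTS_SCHEDULER_IS_DIRTY may be defined differently */
    #define SCHEDULER_IS_DIRTY(s) ((s)->no == 0)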
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online)
+erts_init_scheduling(int no_schedulers, int no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+ int no_dirty_io_schedulers
+#endif
+ )
{
int ix, n, no_ssi;
char *daww_ptr;
-#ifdef ERTS_SMP
size_t daww_sz;
-#endif
+ size_t size_runqs;
init_misc_op_list_alloc();
+ init_proc_sys_task_queues_alloc();
#ifdef ERTS_SMP
set_wakeup_other_data();
#endif
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util)
+ erts_sched_compact_load = 0;
+#endif
+
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
+ ASSERT(no_dirty_cpu_schedulers >= 1);
+ ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
+ ASSERT(no_dirty_cpu_schedulers_online >= 1);
+#endif
/* Create and initialize run queues */
n = no_schedulers;
-
- erts_aligned_run_queues =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
- sizeof(ErtsAlignedRunQueue) * n);
+ size_runqs = sizeof(ErtsAlignedRunQueue) * (n + ERTS_NUM_DIRTY_RUNQS);
+ erts_aligned_run_queues =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
#ifdef ERTS_SMP
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_aligned_run_queues += ERTS_NUM_DIRTY_RUNQS;
+#endif
erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
- for (ix = 0; ix < n; ix++) {
+ for (ix = -(ERTS_NUM_DIRTY_RUNQS); ix < n; ix++) {
int pix, rix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsRunQueue *rq = ERTS_RUNQ_IX_IS_DIRTY(ix) ?
+ ERTS_DIRTY_RUNQ_IX(ix) : ERTS_RUNQ_IX(ix);
+#else
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+#endif
rq->ix = ix;
@@ -4443,6 +5405,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix))
+ erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
+#endif
+
rq->waiting = 0;
rq->woken = 0;
ERTS_RUNQ_FLGS_INIT(rq, ERTS_RUNQ_FLG_NONEMPTY);
@@ -4481,6 +5451,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->ports.info.reds = 0;
rq->ports.start = NULL;
rq->ports.end = NULL;
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ init_runq_sched_util(&rq->sched_util, erts_sched_balance_util);
+#endif
+
}
#ifdef ERTS_SMP
@@ -4498,6 +5473,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
n = (int) no_schedulers;
erts_no_schedulers = n;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
+#endif
/* Create and initialize scheduler sleep info */
#ifdef ERTS_SMP
@@ -4524,6 +5503,29 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
aligned_sched_sleep_info++;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ aligned_dirty_cpu_sched_sleep_info =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
+ erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
+ }
+ aligned_dirty_io_sched_sleep_info =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
+ erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
+ }
+#endif
#endif
/* Create and initialize scheduler specific data */
@@ -4534,6 +5536,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
daww_sz*n);
#else
+ daww_sz = 0;
daww_ptr = NULL;
#endif
@@ -4543,43 +5546,32 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
-#ifdef ERTS_SMP
- erts_bits_init_state(&esdp->erl_bits_state);
- esdp->match_pseudo_process = NULL;
- esdp->free_process = NULL;
-#endif
- esdp->x_reg_array =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
- ERTS_X_REGS_ALLOCATED *
- sizeof(Eterm));
- esdp->f_reg_array =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
- MAX_REG * sizeof(FloatDef));
-#if !HEAP_ON_C_STACK
- esdp->num_tmp_heap_used = 0;
-#endif
- esdp->no = (Uint) ix+1;
- esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- esdp->current_process = NULL;
- esdp->current_port = NULL;
-
- esdp->virtual_reds = 0;
- esdp->cpu_id = -1;
-
- erts_init_atom_cache_map(&esdp->atom_cache_map);
-
- esdp->run_queue = ERTS_RUNQ_IX(ix);
- esdp->run_queue->scheduler = esdp;
+ init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz);
+ }
- init_aux_work_data(&esdp->aux_work_data, esdp, daww_ptr);
+#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- daww_ptr += daww_sz;
-#endif
-
- esdp->reductions = 0;
-
- init_sched_wall_time(&esdp->sched_wall_time);
+ erts_aligned_dirty_cpu_scheduler_data =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_CPU_RUNQ, NULL, 0);
+ }
+ erts_aligned_dirty_io_scheduler_data =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_IO_RUNQ, NULL, 0);
}
+#endif
+#endif
init_misc_aux_work();
#if !HALFWORD_HEAP
@@ -4603,8 +5595,18 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
schdlr_sspnd.curr_online = no_schedulers;
schdlr_sspnd.msb.ongoing = 0;
erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0);
+ schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers);
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0);
+ schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers);
+#endif
schdlr_sspnd.msb.procs = NULL;
- init_no_runqs(no_schedulers, no_schedulers_online);
+ init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
@@ -4628,6 +5630,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ }
+
+ schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#endif
erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
@@ -4643,6 +5660,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#endif
}
erts_no_schedulers = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_dirty_cpu_schedulers = 0;
+ erts_no_dirty_io_schedulers = 0;
+#endif
#endif
erts_smp_atomic32_init_nob(&function_calls, 0);
@@ -4689,76 +5710,296 @@ erts_get_scheduler_data(void)
#endif
+static Process *
+make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
+{
+ erts_aint32_t state;
+ Process *proxy;
+#ifdef ERTS_SMP
+ ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
+#endif
+
+ state = (ERTS_PSFLG_PROXY
+ | ERTS_PSFLG_IN_RUNQ
+ | (((erts_aint32_t) 1) << (prio + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET))
+ | (prio << ERTS_PSFLGS_PRQ_PRIO_OFFSET)
+ | (prio << ERTS_PSFLGS_USR_PRIO_OFFSET)
+ | (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
+
+ if (prev_proxy) {
+ proxy = prev_proxy;
+ ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_smp_atomic32_set_nob(&proxy->state, state);
+#ifdef ERTS_SMP
+ RUNQ_SET_RQ(&proc->run_queue, rq);
+#endif
+ }
+ else {
+ proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
+#ifdef DEBUG
+ {
+ int i;
+ Uint32 *ui32 = (Uint32 *) (char *) proxy;
+ for (i = 0; i < sizeof(Process)/sizeof(Uint32); i++)
+ ui32[i] = (Uint32) 0xdeadbeef;
+ }
+#endif
+ erts_smp_atomic32_init_nob(&proxy->state, state);
+#ifdef ERTS_SMP
+ erts_smp_atomic_init_nob(&proxy->run_queue,
+ erts_smp_atomic_read_nob(&proc->run_queue));
+#endif
+ }
+
+ proxy->common.id = proc->common.id;
+
+ return proxy;
+}
+
+static ERTS_INLINE void
+free_proxy_proc(Process *proxy)
+{
+ ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_free(ERTS_ALC_T_PROC, proxy);
+}
+
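A proxy is a stub Process carrying only the id and a state word with ERTS_PSFLG_PROXY set; it lets the same process be represented in a second run queue while the real struct is still enqueued elsewhere. The shape of the trick, stripped to plain C (purely illustrative):

    #include <stdlib.h>

    #define PSFLG_PROXY (1u << 0)

    struct task {
        unsigned state;
        int id;
    };

    static struct task *make_proxy(struct task *reuse, const struct task *t)
    {
        struct task *proxy = reuse ? reuse : malloc(sizeof *proxy);
        if (!proxy)
            abort();             /* sketch: no error path */
        proxy->state = PSFLG_PROXY;
        proxy->id = t->id;       /* enough to find the real task later */
        return proxy;
    }

    int main(void)
    {
        struct task real = { 0, 42 };
        struct task *px = make_proxy(NULL, &real);
        int ok = (px->state & PSFLG_PROXY) && px->id == real.id;
        free(px);
        return ok ? 0 : 1;
    }

Reusing a previous proxy (the prev_proxy argument above) avoids an allocation on the common retry path.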
+#define ERTS_ENQUEUE_NOT 0
+#define ERTS_ENQUEUE_NORMAL_QUEUE 1
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
+#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
+#endif
+
+static ERTS_INLINE int
+check_enqueue_in_prio_queue(Process *c_p,
+ erts_aint32_t *prq_prio_p,
+ erts_aint32_t *newp,
+ erts_aint32_t actual)
+{
+ erts_aint32_t aprio, qbit, max_qbit;
+
+ aprio = (actual >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+ qbit = 1 << aprio;
+
+ *prq_prio_p = aprio;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+	/*
+	 * If we have system tasks of a priority higher than
+	 * or equal to the user priority, we enqueue on the
+	 * ordinary run queue and take care of those system
+	 * tasks first.
+	 */
+ if (actual & ERTS_PSFLG_ACTIVE_SYS) {
+ erts_aint32_t uprio, stprio, qmask;
+ uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+ if (aprio < uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ qmask = c_p->sys_task_qs->qmask;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ switch (qmask & -qmask) {
+ case MAX_BIT:
+ stprio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ stprio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ stprio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ stprio = PRIORITY_LOW;
+ break;
+ default:
+ stprio = PRIORITY_LOW+1;
+ break;
+ }
+ if (stprio <= uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ }
+
+ /* Enqueue in dirty run queue if not already enqueued */
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+ return ERTS_ENQUEUE_NOT; /* already in queue */
+ if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
+ *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+ }
+ *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ }
+
+ enqueue_normal_runq:
+#endif
+ max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+ /*
+     * max_qbit now contains either the bit set for the highest prio queue
+     * or a bit out of range (with a value larger than any valid queue bit).
+ */
+
+ if (qbit >= max_qbit)
+ return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
+
+ /* Need to enqueue (if already enqueued, it is in lower prio) */
+ *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -ERTS_ENQUEUE_NORMAL_QUEUE;
+ }
+
+ /*
+ * Enqueue using process struct.
+ */
+ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+ *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
+}
+
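The max_qbit manipulation deserves a worked example. Priorities are numbered 0 (max) through 3 (low), so the least significant set bit of the in-queue mask is the best priority the process is already queued at; x & -x isolates that bit, and OR-ing in a sentinel bit just above the valid range bounds the result when the mask is empty. A compilable illustration (mask width assumed to be 4 bits, matching the four priority levels):

    #include <stdio.h>

    #define QMASK_BITS 4

    static int should_enqueue(unsigned in_prq_mask, int prio)
    {
        unsigned qbit = 1u << prio;
        unsigned max_qbit = in_prq_mask | (1u << QMASK_BITS); /* sentinel */
        max_qbit &= -max_qbit;   /* lowest set bit = best queued prio */
        return qbit < max_qbit;  /* enqueue only if strictly better */
    }

    int main(void)
    {
        /* not queued anywhere: any prio enqueues */
        printf("%d\n", should_enqueue(0x0, 2));   /* 1 */
        /* already queued at prio 1 (high): prio 2 (normal) is not better */
        printf("%d\n", should_enqueue(0x2, 2));   /* 0 */
        /* already queued at prio 2: prio 0 (max) is better */
        printf("%d\n", should_enqueue(0x4, 0));   /* 1 */
        return 0;
    }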
/*
- * scheduler_out_process() return with c_rq locked.
+ * schedule_out_process() returns with c_rq locked.
*/
static ERTS_INLINE int
-schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p)
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
{
- erts_aint32_t a, e, n;
- int res = 0;
+ erts_aint32_t a, e, n, enq_prio = -1;
+ int enqueue; /* < 0 -> use proxy */
+ Process* sched_p;
+ ErtsRunQueue* runq;
+#ifdef ERTS_SMP
+ int check_emigration_need;
+#endif
a = state;
while (1) {
n = e = a;
- ASSERT(a & ERTS_PSFLG_RUNNING);
- ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+ enqueue = ERTS_ENQUEUE_NOT;
- n &= ~ERTS_PSFLG_RUNNING;
- if ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE)
- n |= ERTS_PSFLG_IN_RUNQ;
+ n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
+ if (a & ERTS_PSFLG_ACTIVE_SYS
+ || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
+ }
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- if (!(n & ERTS_PSFLG_IN_RUNQ)) {
- if (erts_system_profile_flags.runnable_procs)
- profile_runnable_proc(p, am_inactive);
- }
- else {
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & n);
- ErtsRunQueue *runq = erts_get_runq_proc(p);
+ switch (enqueue) {
+ case ERTS_ENQUEUE_NOT:
+ if (erts_system_profile_flags.runnable_procs) {
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED));
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & n)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
- if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
- runq = new_runq;
+ if (!(a & ERTS_PSFLG_ACTIVE_SYS)
+ && (!(a & ERTS_PSFLG_ACTIVE)
+ || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* Process inactive */
+ profile_runnable_proc(p, am_inactive);
}
}
+
+ if (proxy)
+ free_proxy_proc(proxy);
+
+ erts_smp_runq_lock(c_rq);
+ return 0;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+ case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ runq = ERTS_DIRTY_CPU_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
#endif
- ASSERT(runq);
- res = 1;
+ break;
- erts_smp_runq_lock(runq);
+ case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ runq = ERTS_DIRTY_IO_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
+#endif
+ break;
+#endif
+#endif
- /* Enqueue the process */
- enqueue_process(runq, prio, p);
+ default:
+ ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
- if (runq == c_rq)
- return res;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
+ runq = erts_get_runq_proc(p);
+#ifdef ERTS_SMP
+ check_emigration_need = !(ERTS_PSFLG_BOUND & n);
+#endif
+ break;
+ }
+
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
+
+#ifdef ERTS_SMP
+ if (check_emigration_need) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
+ runq = new_runq;
+ }
}
+#endif
+
+ ASSERT(runq);
+
+ erts_smp_runq_lock(runq);
+
+ /* Enqueue the process */
+ enqueue_process(runq, (int) enq_prio, sched_p);
+
+ if (runq == c_rq)
+ return 1;
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
erts_smp_runq_lock(c_rq);
- return res;
+ return 1;
}
static ERTS_INLINE void
-add2runq(Process *p, erts_aint32_t state)
+add2runq(Process *p, erts_aint32_t state, erts_aint32_t prio)
{
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
ErtsRunQueue *runq = erts_get_runq_proc(p);
#ifdef ERTS_SMP
if (!(ERTS_PSFLG_BOUND & state)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, (int) prio);
if (new_runq) {
RUNQ_SET_RQ(&p->run_queue, new_runq);
runq = new_runq;
@@ -4770,101 +6011,270 @@ add2runq(Process *p, erts_aint32_t state)
erts_smp_runq_lock(runq);
/* Enqueue the process */
- enqueue_process(runq, prio, p);
+ enqueue_process(runq, (int) prio, p);
erts_smp_runq_unlock(runq);
smp_notify_inc_runq(runq);
}
-static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t state, int active_enq)
+static ERTS_INLINE int
+change_proc_schedule_state(Process *p,
+ erts_aint32_t clear_state_flags,
+ erts_aint32_t set_state_flags,
+ erts_aint32_t *statep,
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
- erts_aint32_t a = state, n;
+ /*
+ * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
+ * ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+ * altered by this function!
+ */
+ erts_aint32_t a = *statep, n;
+ int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
+
+ ASSERT(!(a & ERTS_PSFLG_PROXY));
+ ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_ACTIVE_SYS)) == 0);
+
+ if (lock_status)
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
while (1) {
erts_aint32_t e;
n = e = a;
+ enqueue = ERTS_ENQUEUE_NOT;
+
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
- n |= ERTS_PSFLG_ACTIVE;
- if (!(a & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_RUNNING)))
- n |= ERTS_PSFLG_IN_RUNQ;
+ break; /* We don't want to schedule free processes... */
+
+ if (clear_state_flags)
+ n &= ~clear_state_flags;
+
+ if (set_state_flags)
+ n |= set_state_flags;
+
+ if ((n & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+ /*
+ * Active and seemingly need to be enqueued, but
+ * process may be in a run queue via proxy, need
+ * further inspection...
+ */
+ enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
+ }
+
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (!active_enq && (a & ERTS_PSFLG_ACTIVE))
- return; /* Someone else activated process ... */
+ if (enqueue == ERTS_ENQUEUE_NOT && n == a)
+ break;
}
- if (erts_system_profile_flags.runnable_procs
- && !(a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
- profile_runnable_proc(p, am_active);
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+
+ if (((n & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
+ && (!(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS)
+ && (!(a & ERTS_PSFLG_ACTIVE)
+ || (a & ERTS_PSFLG_SUSPENDED))))) {
+ /* We activated a previously inactive process */
+ profile_runnable_proc(p, am_active);
+ }
+
+ if (lock_status)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- if ((n & ERTS_PSFLG_IN_RUNQ) && !(a & ERTS_PSFLG_IN_RUNQ))
- add2runq(p, n);
+
+ *statep = a;
+
+ return enqueue;
+}
+
+static ERTS_INLINE void
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
+{
+ erts_aint32_t enq_prio = -1;
+ erts_aint32_t state = in_state;
+ int enqueue = change_proc_schedule_state(p,
+ 0,
+ ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio,
+ locks);
+ if (enqueue != ERTS_ENQUEUE_NOT)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state, 0);
+ schedule_process(p, state, locks);
+}
+
+static void
+schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
+{
+ /*
+ * Expects status lock to be locked when called, and
+ * returns with status lock unlocked...
+ */
+ erts_aint32_t a = state, n, enq_prio = -1;
+ int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ ASSERT(!(state & ERTS_PSFLG_PROXY));
+
+ while (1) {
+ erts_aint32_t e;
+ n = e = a;
+
+ if (a & ERTS_PSFLG_FREE)
+ goto cleanup; /* We don't want to schedule free processes... */
+
+ enqueue = ERTS_ENQUEUE_NOT;
+ n |= ERTS_PSFLG_ACTIVE_SYS;
+ if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ if (a == n && enqueue == ERTS_ENQUEUE_NOT)
+ goto cleanup;
+ }
+
+ if (prof_runnable_procs) {
+
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* We activated a previously inactive process */
+ profile_runnable_proc(p, am_active);
+ }
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ prof_runnable_procs = 0;
+ }
+
+ if (enqueue != ERTS_ENQUEUE_NOT) {
+ Process *sched_p;
+ if (enqueue > 0)
+ sched_p = p;
+ else {
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ proxy = NULL;
+ }
+ add2runq(sched_p, n, enq_prio);
+ }
+
+cleanup:
+
+ if (prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ if (proxy)
+ free_proxy_proc(proxy);
+
+ ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
}
static ERTS_INLINE int
suspend_process(Process *c_p, Process *p)
{
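+ /* Returns 1 if this call suspended the process, -1 if it was
+ already suspended, and 0 if it could not be suspended. */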
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ erts_aint32_t state;
int suspended = 0;
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ state = erts_smp_atomic32_read_acqb(&p->state);
+
if ((state & ERTS_PSFLG_SUSPENDED))
suspended = -1;
else {
if (c_p == p) {
state = erts_smp_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- state |= ERTS_PSFLG_SUSPENDED;
- ASSERT(state & ERTS_PSFLG_RUNNING);
- suspended = 1;
+ ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
- erts_aint32_t e, n;
+ erts_aint32_t n, e;
+
n = e = state;
n |= ERTS_PSFLG_SUSPENDED;
state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
if (state == e) {
- state = n;
suspended = 1;
break;
}
+ if (state & ERTS_PSFLG_SUSPENDED) {
+ suspended = -1;
+ break;
+ }
}
}
}
- if (state & ERTS_PSFLG_SUSPENDED) {
+ if (suspended) {
ASSERT(!(ERTS_PSFLG_RUNNING & state)
|| p == erts_get_current_process());
- if (erts_system_profile_flags.runnable_procs
- && (p->rcount == 0)
- && (state & ERTS_PSFLG_ACTIVE)) {
- profile_runnable_proc(p, am_inactive);
+ if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
+
+ /* 'state' is before our change... */
+
+ if ((state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ /* We made process inactive */
+ profile_runnable_proc(p, am_inactive);
+ }
+
}
p->rcount++; /* count number of suspends */
}
+
return suspended;
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
- erts_aint32_t state;
+ erts_aint32_t state, enq_prio = -1;
+ int enqueue;
+
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
ASSERT(p->rcount > 0);
@@ -4872,14 +6282,17 @@ resume_process(Process *p)
if (--p->rcount > 0) /* multiple suspend */
return;
- state = erts_smp_atomic32_read_band_mb(&p->state, ~ERTS_PSFLG_SUSPENDED);
- state &= ~ERTS_PSFLG_SUSPENDED;
- if ((state & (ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_RUNNING)) == ERTS_PSFLG_ACTIVE) {
- schedule_process(p, state, 1);
- }
+ state = erts_smp_atomic32_read_nob(&p->state);
+ enqueue = change_proc_schedule_state(p,
+ ERTS_PSFLG_SUSPENDED,
+ 0,
+ &state,
+ &enq_prio,
+ locks);
+ if (enqueue)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
int
@@ -4901,6 +6314,12 @@ static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+}
+
+static void
+scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
@@ -4991,6 +6410,301 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static void
+suspend_scheduler(ErtsSchedulerData *esdp)
+{
+ erts_aint32_t flgs;
+ erts_aint32_t changing;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ ? ERTS_DIRTY_SCHEDULER_NO(esdp)
+ : esdp->no);
+#else
+ long no = (long) esdp->no;
+#endif
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long active_schedulers;
+ int curr_online = 1;
+ int wake = 0;
+ erts_aint32_t aux_work;
+ int thr_prgr_active = 1;
+ ErtsStuckBoundProcesses sbp = {NULL, NULL};
+ int* ss_onlinep;
+ int* ss_curr_onlinep;
+ int* ss_wait_curr_onlinep;
+ long* ss_wait_activep;
+ long ss_wait_active_target;
+ erts_smp_atomic32_t* ss_changingp;
+ erts_smp_atomic32_t* ss_activep;
+
+ /*
+ * Schedulers may be suspended in two different ways:
+ * - A scheduler may be suspended since it is not online.
+ * All schedulers with scheduler ids greater than
+ * schdlr_sspnd.online are suspended; the same holds for dirty
+ * schedulers relative to schdlr_sspnd.dirty_cpu_online and
+ * schdlr_sspnd.dirty_io_online.
+ * - Multi scheduling is blocked. All schedulers except the
+ * scheduler with scheduler id 1 are suspended, and all
+ * dirty CPU and dirty I/O schedulers are suspended.
+ *
+ * Regardless of why a scheduler is suspended, it ends up here.
+ */
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ }
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ } else
+#endif
+ evacuate_run_queue(esdp->run_queue, &sbp);
+
+ erts_smp_runq_unlock(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ erts_sched_check_cpu_bind_prep_suspend(esdp);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_inactive);
+
+ sched_wall_time_change(esdp, 0);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+
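+ /* Select the schdlr_sspnd bookkeeping fields that match this
+ scheduler type (dirty CPU, dirty I/O, or normal). */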
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_cpu_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_cpu_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_cpu_active;
+ } else {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_io_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_io_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_io_active;
+ }
+ ss_wait_active_target = 0;
+ }
+ else
+#endif
+ {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ ss_onlinep = &schdlr_sspnd.online;
+ ss_curr_onlinep = &schdlr_sspnd.curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online;
+ ss_changingp = &schdlr_sspnd.changing;
+ ss_wait_activep = &schdlr_sspnd.msb.wait_active;
+ ss_activep = &schdlr_sspnd.active;
+ ss_wait_active_target = 1;
+ }
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == *ss_wait_activep)
+ wake = 1;
+ if (active_schedulers == ss_wait_active_target) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
+ }
+
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > *ss_onlinep && curr_online) {
+ (*ss_curr_onlinep)--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= *ss_onlinep && !curr_online) {
+ (*ss_curr_onlinep)++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && *ss_curr_onlinep == *ss_wait_curr_onlinep)
+ wake = 1;
+ if (*ss_onlinep == *ss_curr_onlinep) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
+ }
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
+ }
+
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ while (1) {
+ erts_aint32_t qmask;
+ erts_aint32_t flgs;
+
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work|qmask) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
+ (aux_work && erts_thr_progress_update(esdp)))
+ erts_thr_progress_leader_update(esdp);
+ if (qmask) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ } else
+#endif
+ {
+ erts_smp_runq_lock(esdp->run_queue);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ }
+ }
+ }
+
+ if (!aux_work) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ erts_thr_progress_finalize_wait(esdp);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+ }
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ }
+
+ active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && *ss_onlinep == active_schedulers) {
+ erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ ASSERT(no <= *ss_onlinep);
+ ASSERT(!ongoing_multi_scheduling_block());
+
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ ASSERT(curr_online);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
+
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+
+ erts_smp_runq_lock(esdp->run_queue);
+ non_empty_runq(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ schedule_bound_processes(esdp->run_queue, &sbp);
+
+ erts_sched_check_cpu_bind_post_suspend(esdp);
+ }
+}
+
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
@@ -5179,29 +6893,328 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_sched_check_cpu_bind_post_suspend(esdp);
}
+#endif
+
ErtsSchedSuspendResult
erts_schedulers_state(Uint *total,
Uint *online,
Uint *active,
+ Uint *dirty_cpu,
+ Uint *dirty_cpu_online,
+ Uint *dirty_io,
int yield_allowed)
{
- int res;
+ int res = ERTS_SCHDLR_SSPND_EINVAL;
erts_aint32_t changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
- *active = *online = schdlr_sspnd.online;
- if (ongoing_multi_scheduling_block())
+ if (active)
+ *active = schdlr_sspnd.online;
+ if (online)
+ *online = schdlr_sspnd.online;
+ if (ongoing_multi_scheduling_block() && active)
*active = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (dirty_cpu_online)
+ *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online;
+#endif
res = ERTS_SCHDLR_SSPND_DONE;
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- *total = erts_no_schedulers;
+ if (total)
+ *total = erts_no_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (dirty_cpu)
+ *dirty_cpu = erts_no_dirty_cpu_schedulers;
+ if (dirty_io)
+ *dirty_io = erts_no_dirty_io_schedulers;
+#endif
return res;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+ErtsSchedSuspendResult
+erts_set_schedulers_online(Process *p,
+ ErtsProcLocks plocks,
+ Sint new_no,
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ )
+{
+ ErtsSchedulerData *esdp;
+ int ix, res = -1, no, have_unlocked_plocks, end_wait;
+ erts_aint32_t changing = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+ int dirty_no, change_dirty;
+#endif
+
+ if (new_no < 1)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#endif
+ else if (erts_no_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+ end_wait = 0;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ have_unlocked_plocks = 0;
+ no = (int) new_no;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers);
+ if (dirty_only) {
+ if (no > schdlr_sspnd.online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return ERTS_SCHDLR_SSPND_EINVAL;
+ }
+ dirty_no = no;
+ } else {
+ /*
+ * Adjust the number of dirty CPU schedulers online relative to the
+ * adjustment made to the number of normal schedulers online.
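+ * E.g. with 8 normal and 8 dirty CPU schedulers all online,
+ * setting the normal schedulers online to 4 also takes the
+ * dirty CPU schedulers online down to 8*((4*100/8))/100 = 4.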
+ */
+ int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers;
+ int onln_pct = no*total_pct/schdlr_sspnd.online;
+ dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100;
+ if (dirty_no == 0)
+ dirty_no = 1;
+ ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers);
+ }
+#endif
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+#endif
+ if (changing) {
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ }
+ else {
+ int online = *old_no = schdlr_sspnd.online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_online = schdlr_sspnd.dirty_cpu_online;
+
+ if (dirty_only) {
+ *old_no = schdlr_sspnd.dirty_cpu_online;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ change_dirty = 1;
+ } else {
+#endif
+ if (no == schdlr_sspnd.online) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ dirty_only = 1;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online)
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = 1;
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online);
+ }
+#endif
+ if (res == -1)
+ {
+ int increase = (no > online);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.online = no;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ } else
+ increase = (dirty_no > dirty_online);
+ if (change_dirty) {
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.dirty_cpu_online = dirty_no;
+ }
+#endif
+ if (increase) {
+ int ix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ schdlr_sspnd.wait_curr_online = no;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ change_no_used_runqs(no);
+
+ for (ix = online; ix < no; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ }
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else /* if (no < online) */ {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+ if (dirty_only) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else
+#endif
+ {
+ if (p->scheduler_data->no <= no) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ schdlr_sspnd.wait_curr_online = no;
+ }
+ else {
+ /*
+ * Yield! The current process needs to migrate
+ * before the BIF returns.
+ */
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE;
+ schdlr_sspnd.wait_curr_online = no+1;
+ }
+
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = no; ix < online; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+
+ change_no_used_runqs(no);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
+ }
+ }
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ if (!dirty_only)
+#endif
+ {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ end_wait = 1;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ }
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online);
+ if (!dirty_only)
+#endif
+ {
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
+ if (have_unlocked_plocks)
+ erts_smp_proc_lock(p, plocks);
+ }
+
+ return res;
+}
+
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
@@ -5288,7 +7301,7 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
}
}
@@ -5330,15 +7343,24 @@ erts_set_schedulers_online(Process *p,
return res;
}
+#endif
+
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
- int ix, res, have_unlocked_plocks = 0;
+ int ix, res, have_unlocked_plocks = 0, online;
erts_aint32_t changing;
ErtsProcList *plp;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+#endif
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -5348,10 +7370,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
p->flags |= F_HAVE_BLCKD_MSCHED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0);
+#endif
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- }
- else {
+ } else {
int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
@@ -5363,6 +7388,35 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1);
+ ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags)
+ & ERTS_SSI_FLG_SUSPENDED));
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+#endif
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -5381,13 +7435,44 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
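+ /* Also suspend all dirty CPU and dirty I/O schedulers, waiting
+ until each group has gone completely inactive. */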
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online);
+#endif
change_no_used_runqs(1);
for (ix = 1; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
for (ix = 1; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
@@ -5421,6 +7506,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
& erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
@@ -5461,12 +7547,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
p->flags &= ~F_HAVE_BLCKD_MSCHED;
schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
- /* No schedulers to resume */
+ /* No normal schedulers to resume */
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else {
- int online = schdlr_sspnd.online;
+ online = schdlr_sspnd.online;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
@@ -5481,6 +7567,27 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online;
+ for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers;
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+#endif
res = ERTS_SCHDLR_SSPND_DONE;
}
}
@@ -5607,7 +7714,11 @@ sched_thread_func(void *vesdp)
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (no != 1)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+#else
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+#endif
}
if (no == 1) {
@@ -5637,18 +7748,170 @@ sched_thread_func(void *vesdp)
return NULL;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+static void*
+sched_dirty_cpu_thread_func(void *vesdp)
+{
+ ErtsThrPrgrCallbacks callbacks;
+ ErtsSchedulerData *esdp = vesdp;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ ASSERT(no != 0);
+ ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
+ callbacks.arg = (void *) esdp->ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = NULL;
+ callbacks.wait = NULL;
+ callbacks.finalize_wait = NULL;
+
+ erts_thr_progress_register_unmanaged_thread(&callbacks);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[31];
+ erts_snprintf(&buf[0], 31, "dirty cpu scheduler %beu", no);
+ erts_lc_set_thread_name(&buf[0]);
+ }
+#endif
+ erts_tsd_set(sched_data_key, vesdp);
+#if ERTS_USE_ASYNC_READY_Q
+ esdp->aux_work_data.async_ready.queue = NULL;
+#endif
+
+ erts_proc_lock_prepare_proc_lock_waiter();
+
+#ifdef HIPE
+ hipe_thread_signal_init();
+#endif
+ erts_thread_init_float();
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ process_main();
+ /* No schedulers should *ever* terminate */
+ erl_exit(ERTS_ABORT_EXIT,
+ "Dirty CPU scheduler thread number %beu terminated\n",
+ no);
+ return NULL;
+}
+
+static void*
+sched_dirty_io_thread_func(void *vesdp)
+{
+ ErtsThrPrgrCallbacks callbacks;
+ ErtsSchedulerData *esdp = vesdp;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ ASSERT(no != 0);
+ ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
+ callbacks.arg = (void *) esdp->ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = NULL;
+ callbacks.wait = NULL;
+ callbacks.finalize_wait = NULL;
+
+ erts_thr_progress_register_unmanaged_thread(&callbacks);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[31];
+ erts_snprintf(&buf[0], 31, "dirty io scheduler %beu", no);
+ erts_lc_set_thread_name(&buf[0]);
+ }
+#endif
+ erts_tsd_set(sched_data_key, vesdp);
+#if ERTS_USE_ASYNC_READY_Q
+ esdp->aux_work_data.async_ready.queue = NULL;
+#endif
+
+ erts_proc_lock_prepare_proc_lock_waiter();
+
+#ifdef HIPE
+ hipe_thread_signal_init();
+#endif
+ erts_thread_init_float();
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ process_main();
+ /* No schedulers should *ever* terminate */
+ erl_exit(ERTS_ABORT_EXIT,
+ "Dirty I/O scheduler thread number %beu terminated\n",
+ no);
+ return NULL;
+}
+#endif
+#endif
+
static ethr_tid aux_tid;
void
erts_start_schedulers(void)
{
int res = 0;
- Uint actual = 0;
+ Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
opts.detached = 1;
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ opts.name = malloc(80);
+ if (!opts.name) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
+#endif
+
+#ifdef ERTS_SMP
+ if (erts_runq_supervision_interval) {
+ opts.suggested_stack_size = 16;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "runq_supervisor");
+#endif
+ erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
+ if (0 != ethr_event_init(&runq_supervision_event))
+ erl_exit(1, "Failed to create run-queue supervision event\n");
+ if (0 != ethr_thr_create(&runq_supervisor_tid,
+ runq_supervisor,
+ NULL,
+ &opts))
+ erl_exit(1, "Failed to create run-queue supervision thread\n");
+
+ }
+#endif
+
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
if (wanted < 1)
@@ -5658,21 +7921,65 @@ erts_start_schedulers(void)
res = ENOTSUP;
}
- while (actual < wanted) {
+ for (actual = 0; actual < wanted; actual++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
- actual++;
- ASSERT(actual == esdp->no);
- res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+
+ ASSERT(actual == esdp->no - 1);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "scheduler_%d", actual + 1);
+#endif
+
+#ifdef __OSE__
+ /* This should be done in the bind strategy */
+ opts.coreNo = (actual+1) % ose_num_cpus();
+#endif
+
+ res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
+
if (res != 0) {
- actual--;
- break;
+ break;
}
}
-
+
erts_no_schedulers = actual;
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+ {
+ int ix;
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
+#endif
+ res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
+ if (res != 0)
+ erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
+ }
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
+#endif
+ res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
+ if (res != 0)
+ erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
+ }
+ }
+#endif
+#endif
+
ERTS_THR_MEMORY_BARRIER;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "aux");
+#endif
+
+#ifdef __OSE__
+ opts.coreNo = 0;
+#endif /* __OSE__ */
+
res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
if (res != 0)
erl_exit(1, "Failed to create aux thread\n");
@@ -5692,6 +7999,10 @@ erts_start_schedulers(void)
actual, actual == 1 ? " was" : "s were");
erts_send_error_to_logger_nogl(dsbufp);
}
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(opts.name);
+#endif
}
#endif /* ERTS_SMP */
@@ -5793,7 +8104,8 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -5828,7 +8140,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
@@ -5847,7 +8159,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
goto done;
}
else {
- if (!(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&rp->state)))
+ if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
+ & erts_smp_atomic32_read_acqb(&rp->state)))
goto done;
}
@@ -5985,7 +8298,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
@@ -6345,7 +8659,8 @@ resume_process_1(BIF_ALIST_1)
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
@@ -6475,7 +8790,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
@@ -6494,7 +8809,7 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -6510,7 +8825,7 @@ Eterm
erts_get_process_priority(Process *p)
{
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- switch (state & ERTS_PSFLG_PRIO_MASK) {
+ switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
case PRIORITY_MAX: return am_max;
case PRIORITY_HIGH: return am_high;
case PRIORITY_NORMAL: return am_normal;
@@ -6533,18 +8848,68 @@ erts_set_process_priority(Process *p, Eterm value)
}
a = erts_smp_atomic32_read_nob(&p->state);
- if (nprio == (a & ERTS_PSFLG_PRIO_MASK))
+ if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a))
oprio = nprio;
else {
- erts_aint32_t e, n;
+ int slocked = 0;
+ erts_aint32_t e, n, aprio;
+
+ if (a & ERTS_PSFLG_ACTIVE_SYS) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 1;
+ }
+
do {
- oprio = a & ERTS_PSFLG_PRIO_MASK;
+ oprio = ERTS_PSFLGS_GET_USR_PRIO(a);
n = e = a;
- ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DELAYED_SYS)))
+ aprio = nprio;
+ else {
+ int max_qbit;
+
+ if (!slocked) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 1;
+ }
+
+ max_qbit = 0;
+ if (a & ERTS_PSFLG_ACTIVE_SYS)
+ max_qbit |= p->sys_task_qs->qmask;
+ if (a & ERTS_PSFLG_DELAYED_SYS) {
+ ErtsProcSysTaskQs *qs;
+ qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(p);
+ ASSERT(qs);
+ max_qbit |= qs->qmask;
+ }
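+ /* Keep only the lowest set bit: the highest priority
+ among queued system tasks. */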
+ max_qbit &= -max_qbit;
+ switch (max_qbit) {
+ case MAX_BIT:
+ aprio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ aprio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ aprio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ aprio = PRIORITY_LOW;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ aprio = -1;
+ }
+
+ if (aprio > nprio) /* low value -> high prio */
+ aprio = nprio;
+ }
+
+ n &= ~(ERTS_PSFLGS_USR_PRIO_MASK
+ | ERTS_PSFLGS_ACT_PRIO_MASK);
+ n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET)
+ | (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
- n &= ~ERTS_PSFLG_PRIO_MASK;
- n |= nprio;
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
} while (a != e);
}
@@ -6578,6 +8943,7 @@ erts_set_process_priority(Process *p, Eterm value)
Process *schedule(Process *p, int calls)
{
+ Process *proxy_p = NULL;
ErtsRunQueue *rq;
erts_aint_t dt;
ErtsSchedulerData *esdp;
@@ -6607,7 +8973,8 @@ Process *schedule(Process *p, int calls)
input_reductions = INPUT_REDUCTIONS;
}
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
+ || !erts_thr_progress_is_blocking());
/*
* Clean up after the process being scheduled out.
@@ -6620,6 +8987,8 @@ Process *schedule(Process *p, int calls)
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
+ sched_out_proc:
+
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
esdp = p->scheduler_data;
@@ -6673,10 +9042,11 @@ Process *schedule(Process *p, int calls)
esdp->reductions += reds;
- schedule_out_process(rq, state, p); /* Returns with rq locked! */
+ schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
+ proxy_p = NULL;
ERTS_PROC_REDUCTIONS_EXECUTED(rq,
- (int) (state & ERTS_PSFLG_PRIO_MASK),
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
reds,
actual_reds);
@@ -6685,18 +9055,19 @@ Process *schedule(Process *p, int calls)
p->scheduler_data = NULL;
#endif
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
if (state & ERTS_PSFLG_FREE) {
#ifdef ERTS_SMP
ASSERT(esdp->free_process == p);
esdp->free_process = NULL;
-#else
- erts_free_proc(p);
+#else
+ state = erts_smp_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_IN_RUNQ))
+ erts_free_proc(p);
#endif
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
-
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
@@ -6714,45 +9085,42 @@ Process *schedule(Process *p, int calls)
}
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !erts_thr_progress_is_blocking());
check_activities_to_run: {
#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
-
-#ifdef ERTS_SMP
- {
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
-
+ ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
+ if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
+ rq->procs.pending_exiters = NULL;
+ erts_smp_runq_unlock(rq);
+ handle_pending_exiters(pnd_xtrs);
+ erts_smp_runq_lock(rq);
}
-#endif
- if (rq->check_balance_reds <= 0)
- check_balance(rq);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (rq->check_balance_reds <= 0)
+ check_balance(rq);
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
- if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
- immigrate(rq, mp);
+ if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
+ immigrate(rq, mp);
+ }
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
-
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || flags & ERTS_RUNQ_FLG_NONEMPTY);
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
-
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
@@ -6763,22 +9131,32 @@ Process *schedule(Process *p, int calls)
erts_sched_check_cpu_bind(esdp);
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED))
+ suspend_scheduler(esdp);
+#endif
{
erts_aint32_t aux_work;
- int leader_update = erts_thr_progress_update(esdp);
+ int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
+ : erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work | leader_update) {
+ if (aux_work | leader_update | ERTS_SCHED_FAIR) {
erts_smp_runq_unlock(rq);
if (leader_update)
erts_thr_progress_leader_update(esdp);
+ else if (ERTS_SCHED_FAIR)
+ ERTS_SCHED_FAIR_YIELD();
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_smp_runq_lock(rq);
}
- }
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !erts_thr_progress_is_blocking());
+ }
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
#else /* ERTS_SMP */
@@ -6792,6 +9170,17 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
+ /*
+ * TODO: if a halt is in progress, the dirty scheduler needs to
+ * be put to sleep somewhere around here to prevent it from
+ * picking up new work
+ */
+ }
+ else
+#endif
+
if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
|| (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
/* Prepare for scheduler wait */
@@ -6801,20 +9190,16 @@ Process *schedule(Process *p, int calls)
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- empty_runq(rq);
-
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED)
goto continue_check_activities_to_run_known_flags;
- }
- else if (!(flags & ERTS_RUNQ_FLG_INACTIVE)) {
- if (try_steal_task(rq)) {
- non_empty_runq(rq);
+ if (flags & ERTS_RUNQ_FLG_INACTIVE)
+ empty_runq(rq);
+ else {
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq))
goto continue_check_activities_to_run;
- }
- (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq(rq);
/*
* Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
@@ -6823,10 +9208,10 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
goto continue_check_activities_to_run_known_flags;
}
}
-
#endif
scheduler_wait(&fcalls, esdp, rq);
@@ -6837,7 +9222,9 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
- else if (fcalls > input_reductions && prepare_for_sys_schedule()) {
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
+ (fcalls > input_reductions &&
+ prepare_for_sys_schedule(esdp, !0))) {
/*
* Schedule system-level activities.
*/
@@ -6845,8 +9232,6 @@ Process *schedule(Process *p, int calls)
erts_smp_atomic32_set_relb(&function_calls, 0);
fcalls = 0;
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
#if 0 /* Not needed since we won't wait in sys schedule */
erts_sys_schedule_interrupt(0);
#endif
@@ -6878,7 +9263,9 @@ Process *schedule(Process *p, int calls)
if (RUNQ_READ_LEN(&rq->ports.info.len)) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if ((have_outstanding_io && fcalls > 2*input_reductions)
+ if ((!erts_eager_check_io
+ && have_outstanding_io
+ && fcalls > 2*input_reductions)
|| rq->halt_in_progress) {
/*
* If we have performed more than 2*INPUT_REDUCTIONS since
@@ -6903,6 +9290,7 @@ Process *schedule(Process *p, int calls)
* Find a new process to run.
*/
pick_next_process: {
+ erts_aint32_t psflg_band_mask;
int prio_q;
int qmask;
@@ -6936,18 +9324,72 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
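+ /* Compute a mask that clears this process's "in priority queue"
+ bit for the queue it was just taken from. */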
+ psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) !=
+ (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC));
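+ /* A process flagged for dirty execution may only be picked up
+ by the matching dirty scheduler type; a normal scheduler skips
+ it unless it has system work to do. */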
+ if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+ ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) ||
+ (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS))
+ goto pick_next_process;
+ state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
+ }
+#endif
+
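+ /* A proxy element must be resolved to the real process; drop
+ it if the process no longer exists. */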
+ if (!(state & ERTS_PSFLG_PROXY))
+ psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
+ else {
+ proxy_p = p;
+ p = erts_proc_lookup_raw(proxy_p->common.id);
+ if (!p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ goto pick_next_process;
+ }
+ state = erts_smp_atomic32_read_nob(&p->state);
+ }
+
while (1) {
erts_aint32_t exp, new, tmp;
tmp = new = exp = state;
- new &= ~ERTS_PSFLG_IN_RUNQ;
- tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- if (tmp != ERTS_PSFLG_SUSPENDED)
- new |= ERTS_PSFLG_RUNNING;
+ new &= psflg_band_mask;
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))) {
+ tmp = state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS);
+ if (tmp != ERTS_PSFLG_SUSPENDED) {
+ if (state & ERTS_PSFLG_ACTIVE_SYS)
+ new |= ERTS_PSFLG_RUNNING_SYS;
+ else
+ new |= ERTS_PSFLG_RUNNING;
+ }
+ }
state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
- tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- if (tmp == ERTS_PSFLG_SUSPENDED)
+ if ((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_FREE))
+ || ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS))
+ == ERTS_PSFLG_SUSPENDED)) {
+ if (state & ERTS_PSFLG_FREE) {
+#ifdef ERTS_SMP
+ erts_smp_proc_dec_refc(p);
+#else
+ erts_free_proc(p);
+#endif
+ }
+ if (proxy_p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ }
goto pick_next_process;
+ }
state = new;
break;
}
@@ -6977,7 +9419,11 @@ Process *schedule(Process *p, int calls)
(UWord) esdp->no);
int migrated = old && old != esdp->no;
- prio = (int) (state & ERTS_PSFLG_PRIO_MASK);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+#endif
+
+ prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
erts_smp_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[prio].total_executed++;
@@ -6997,9 +9443,6 @@ Process *schedule(Process *p, int calls)
ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
#endif
- /* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
-
reds = context_reds;
if (IS_TRACED(p)) {
@@ -7025,21 +9468,786 @@ Process *schedule(Process *p, int calls)
erts_check_my_tracer_proc(p);
#endif
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+ reds -= execute_sys_tasks(p, &state, reds);
+ if (reds <= 0
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))
+#endif
+ ) {
+ p->fcalls = reds;
+ goto sched_out_proc;
+ }
+
+ ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
+ ASSERT(!(state & ERTS_PSFLG_RUNNING));
+
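+ /* System work is done for now; atomically switch from
+ RUNNING_SYS to RUNNING before executing ordinary code. */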
+ while (1) {
+ erts_aint32_t n, e;
+
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ && !(state & ERTS_PSFLG_EXITING))
+ goto sched_out_proc;
+
+ n = e = state;
+ n &= ~ERTS_PSFLG_RUNNING_SYS;
+ n |= ERTS_PSFLG_RUNNING;
+
+ state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
+
+ ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
+ ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ }
+ }
+
if (!(state & ERTS_PSFLG_EXITING)
&& ((FLAGS(p) & F_FORCE_GC)
|| (MSO(p).overhead > BIN_VHEAP_SZ(p)))) {
reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
- if (reds < 0) {
- reds = 1;
+ if (reds <= 0) {
+ p->fcalls = reds;
+ goto sched_out_proc;
}
}
+
+ if (proxy_p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ }
p->fcalls = reds;
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ /* Never run a suspended process */
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+
return p;
}
}
+static int
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+{
+ Process *rp = erts_proc_lookup(st->requester);
+ if (rp) {
+ ErtsProcLocks rp_locks;
+ ErlOffHeap *ohp;
+ ErlHeapFragment* bp;
+ Eterm *hp, msg, req_id, result;
+ Uint st_result_sz, hsz;
+#ifdef DEBUG
+ Eterm *hp_start;
+#endif
+
+ rp_locks = (c_p == rp) ? ERTS_PROC_LOCK_MAIN : 0;
+
+ st_result_sz = is_immed(st_result) ? 0 : size_object(st_result);
+ hsz = st->req_id_sz + st_result_sz + 4 /* 3-tuple */;
+
+ hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+#ifdef DEBUG
+ hp_start = hp;
+#endif
+
+ req_id = st->req_id_sz == 0 ? st->req_id : copy_struct(st->req_id,
+ st->req_id_sz,
+ &hp,
+ ohp);
+
+ result = st_result_sz == 0 ? st_result : copy_struct(st_result,
+ st_result_sz,
+ &hp,
+ ohp);
+
+ ASSERT(is_immed(st->reply_tag));
+
+ msg = TUPLE3(hp, st->reply_tag, req_id, result);
+
+#ifdef DEBUG
+ hp += 4;
+ ASSERT(hp_start + hsz == hp);
+#endif
+
+ erts_queue_message(rp,
+ &rp_locks,
+ bp,
+ msg,
+ NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+
+ if (c_p == rp)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+
+ erts_cleanup_offheap(&st->off_heap);
+
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+
+ return rp ? 1 : 0;
+}
+
+static ERTS_INLINE ErtsProcSysTask *
+fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
+{
+ ErtsProcSysTaskQs *unused_qs = NULL;
+ int qbit, qmask;
+ ErtsProcSysTask *st, **qp;
+
+ *priop = -1; /* Silence an erroneous compiler warning */
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!c_p->sys_task_qs) {
+ qmask = 0;
+ st = NULL;
+ goto update_state;
+ }
+
+ qmask = c_p->sys_task_qs->qmask;
+
+ if ((state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ /* No sys tasks if we have exclusively higher prio user work to do */
+ st = NULL;
+ switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
+ case PRIORITY_MAX:
+ if (!(qmask & MAX_BIT))
+ goto done;
+ break;
+ case PRIORITY_HIGH:
+ if (!(qmask & (MAX_BIT|HIGH_BIT)))
+ goto done;
+ break;
+ default:
+ break;
+ }
+ }
+
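+ /* qmask & -qmask isolates the lowest set bit, i.e. the highest
+ priority queue with a pending task. */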
+ qbit = qmask & -qmask;
+ switch (qbit) {
+ case MAX_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_MAX];
+ *priop = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_HIGH];
+ *priop = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ if (!(qmask & PRIORITY_LOW)
+ || ++c_p->sys_task_qs->ncount <= RESCHEDULE_LOW) {
+ qp = &c_p->sys_task_qs->q[PRIORITY_NORMAL];
+ *priop = PRIORITY_NORMAL;
+ break;
+ }
+ c_p->sys_task_qs->ncount = 0;
+ /* Fall through */
+ case LOW_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_LOW];
+ *priop = PRIORITY_LOW;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ }
+
+ st = *qp;
+ ASSERT(st);
+ if (st->next != st) {
+ *qp = st->next;
+ st->next->prev = st->prev;
+ st->prev->next = st->next;
+ }
+ else {
+ erts_aint32_t a, e, n, st_prio, qmask2;
+
+ *qp = NULL;
+ qmask &= ~qbit;
+ c_p->sys_task_qs->qmask = qmask;
+
+ update_state:
+
+ qmask2 = qmask;
+
+ if (state & ERTS_PSFLG_DELAYED_SYS) {
+ ErtsProcSysTaskQs *qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p);
+ ASSERT(qs);
+ qmask2 |= qs->qmask;
+ }
+
+ switch (qmask2 & -qmask2) {
+ case MAX_BIT:
+ st_prio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ st_prio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ st_prio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ case 0:
+ st_prio = PRIORITY_LOW;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ }
+
+ if (!qmask) {
+ unused_qs = c_p->sys_task_qs;
+ c_p->sys_task_qs = NULL;
+ }
+
+ a = state;
+ do {
+ erts_aint32_t prio = ERTS_PSFLGS_GET_USR_PRIO(a);
+
+ if (prio > st_prio)
+ prio = st_prio;
+
+ n = e = a;
+
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+
+ if (!qmask)
+ n &= ~ERTS_PSFLG_ACTIVE_SYS;
+
+ if (a == n)
+ break;
+ a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e);
+ } while (a != e);
+ }
+
+done:
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (unused_qs)
+ proc_sys_task_queues_free(unused_qs);
+
+ *qmaskp = qmask;
+
+ return st;
+}
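
fetch_sys_task() selects the queue with the lowest-set-bit trick: assuming the usual encoding MAX_BIT == 1 << PRIORITY_MAX through LOW_BIT == 1 << PRIORITY_LOW, the expression qmask & -qmask isolates the bit of the highest pending priority in constant time. A worked example:

    /* qmask = NORMAL_BIT | LOW_BIT  = 0x4 | 0x8 = 0xc
     * -qmask (two's complement)     = 0xfffffff4
     * qmask & -qmask                = 0x4 == NORMAL_BIT,
     * so the NORMAL queue is served before the LOW queue.  */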
+
+static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
+
+static int
+execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
+{
+ int garbage_collected = 0;
+ erts_aint32_t state = *statep;
+ int max_reds = in_reds;
+ int reds = 0;
+ int qmask = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ do {
+ ErtsProcSysTask *st;
+ int st_prio;
+ Eterm st_res;
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
+#endif
+ ASSERT(ERTS_PROC_IS_EXITING(c_p));
+ break;
+ }
+
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+
+ switch (st->type) {
+ case ERTS_PSTT_GC:
+ if (c_p->flags & F_DISABLE_GC) {
+ save_gc_task(c_p, st, st_prio);
+ st = NULL;
+ reds++;
+ }
+ else {
+ if (!garbage_collected) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ reds += erts_garbage_collect(c_p,
+ 0,
+ c_p->arg_reg,
+ c_p->arity);
+ garbage_collected = 1;
+ }
+ st_res = am_true;
+ }
+ break;
+ case ERTS_PSTT_CPC:
+ st_res = erts_check_process_code(c_p,
+ st->arg[0],
+ st->arg[1] == am_true,
+ &reds);
+ if (is_non_value(st_res)) {
+ /* Needed gc, but gc was disabled */
+ save_gc_task(c_p, st, st_prio);
+ st = NULL;
+ }
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid process sys task type");
+ st_res = am_false;
+ }
+
+ if (st)
+ reds += notify_sys_task_executed(c_p, st, st_res);
+
+ state = erts_smp_atomic32_read_acqb(&c_p->state);
+ } while (qmask && reds < max_reds);
+
+ *statep = state;
+
+ return reds;
+}
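
execute_sys_tasks() reports the reductions it consumed so the scheduler can charge system work against the process's time slice. The call site is not part of this excerpt; a hedged sketch of how schedule() is assumed to consume the return value:

    /* inside schedule(), when a process is picked up in RUNNING_SYS mode */
    if (state & ERTS_PSFLG_ACTIVE_SYS) {
        reds -= execute_sys_tasks(p, &state, reds);
        if (reds <= 0)
            goto sched_out_proc;   /* slice used up by system work */
    }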
+
+static int
+cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
+{
+ erts_aint32_t state = in_state;
+ int max_reds = in_reds;
+ int reds = 0;
+ int qmask = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ do {
+ ErtsProcSysTask *st;
+ Eterm st_res;
+ int st_prio;
+
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+
+ switch (st->type) {
+ case ERTS_PSTT_GC:
+ st_res = am_false;
+ break;
+ case ERTS_PSTT_CPC:
+ st_res = am_false;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid process sys task type");
+ st_res = am_false;
+ break;
+ }
+
+ reds += notify_sys_task_executed(c_p, st, st_res);
+
+ state = erts_smp_atomic32_read_acqb(&c_p->state);
+ } while (qmask && reds < max_reds);
+
+ return reds;
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_3(BIF_ALIST_3)
+{
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ ErtsProcSysTaskQs *stqs, *free_stqs = NULL;
+ ErtsProcSysTask *st = NULL;
+ erts_aint32_t prio, rp_state;
+ int rp_locked;
+ Eterm noproc_res, req_type;
+
+ if (!rp && !is_internal_pid(BIF_ARG_1)) {
+ if (!is_external_pid(BIF_ARG_1))
+ goto badarg;
+ if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry)
+ goto badarg;
+ }
+
+ switch (BIF_ARG_2) {
+ case am_max: prio = PRIORITY_MAX; break;
+ case am_high: prio = PRIORITY_HIGH; break;
+ case am_normal: prio = PRIORITY_NORMAL; break;
+ case am_low: prio = PRIORITY_LOW; break;
+ default: goto badarg;
+ }
+
+ if (is_not_tuple(BIF_ARG_3))
+ goto badarg;
+ else {
+ int i;
+ Eterm *tp = tuple_val(BIF_ARG_3);
+ Uint arity = arityval(*tp);
+ Eterm req_id;
+ Uint req_id_sz;
+ Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ Uint arg_sz[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ Uint tot_sz;
+ Eterm *hp;
+
+ if (arity < 2)
+ goto badarg;
+ if (arity > 2 + ERTS_MAX_PROC_SYS_TASK_ARGS)
+ goto badarg;
+ req_type = tp[1];
+ req_id = tp[2];
+ req_id_sz = is_immed(req_id) ? 0 : size_object(req_id);
+ tot_sz = req_id_sz;
+ for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) {
+ int tix = 3 + i;
+ if (tix > arity) {
+ arg[i] = THE_NON_VALUE;
+ arg_sz[i] = 0;
+ }
+ else {
+ arg[i] = tp[tix];
+ if (is_immed(arg[i]))
+ arg_sz[i] = 0;
+ else {
+ arg_sz[i] = size_object(arg[i]);
+ tot_sz += arg_sz[i];
+ }
+ }
+ }
+ st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
+ ERTS_PROC_SYS_TASK_SIZE(tot_sz));
+ st->next = st->prev = st; /* Prep for empty prio queue */
+ ERTS_INIT_OFF_HEAP(&st->off_heap);
+ hp = &st->heap[0];
+
+ st->requester = BIF_P->common.id;
+ st->reply_tag = req_type;
+ st->req_id_sz = req_id_sz;
+ st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id,
+ req_id_sz,
+ &hp,
+ &st->off_heap);
+
+ for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
+ st->arg[i] = arg_sz[i] == 0 ? arg[i] : copy_struct(arg[i],
+ arg_sz[i],
+ &hp,
+ &st->off_heap);
+ ASSERT(&st->heap[0] + tot_sz == hp);
+ }
+
+ switch (req_type) {
+
+ case am_garbage_collect:
+ st->type = ERTS_PSTT_GC;
+ noproc_res = am_false;
+ if (!rp)
+ goto noproc;
+ break;
+
+ case am_check_process_code:
+ if (is_not_atom(st->arg[0]))
+ goto badarg;
+ if (st->arg[1] != am_true && st->arg[1] != am_false)
+ goto badarg;
+ noproc_res = am_false;
+ st->type = ERTS_PSTT_CPC;
+ if (!rp)
+ goto noproc;
+ break;
+
+ default:
+ goto badarg;
+ }
+
+ rp_state = erts_smp_atomic32_read_nob(&rp->state);
+
+ rp_locked = 0;
+
+ free_stqs = NULL;
+ if (rp_state & ERTS_PSFLG_ACTIVE_SYS)
+ stqs = NULL;
+ else {
+ alloc_qs:
+ stqs = proc_sys_task_queues_alloc();
+ stqs->qmask = 1 << prio;
+ stqs->ncount = 0;
+ stqs->q[PRIORITY_MAX] = NULL;
+ stqs->q[PRIORITY_HIGH] = NULL;
+ stqs->q[PRIORITY_NORMAL] = NULL;
+ stqs->q[PRIORITY_LOW] = NULL;
+ stqs->q[prio] = st;
+ }
+
+ if (!rp_locked) {
+ rp_locked = 1;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
+
+ rp_state = erts_smp_atomic32_read_nob(&rp->state);
+ if (rp_state & ERTS_PSFLG_EXITING) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = NULL;
+ free_stqs = stqs;
+ goto noproc;
+ }
+ }
+
+ if (!rp->sys_task_qs) {
+ if (stqs)
+ rp->sys_task_qs = stqs;
+ else
+ goto alloc_qs;
+ }
+ else {
+ if (stqs)
+ free_stqs = stqs;
+ stqs = rp->sys_task_qs;
+ if (!stqs->q[prio]) {
+ stqs->q[prio] = st;
+ stqs->qmask |= 1 << prio;
+ }
+ else {
+ st->next = stqs->q[prio];
+ st->prev = stqs->q[prio]->prev;
+ st->next->prev = st;
+ st->prev->next = st;
+ ASSERT(stqs->qmask & (1 << prio));
+ }
+ }
+
+ if (ERTS_PSFLGS_GET_ACT_PRIO(rp_state) > prio) {
+ erts_aint32_t n, a, e;
+ /* Need to elevate actual prio */
+
+ a = rp_state;
+ do {
+ if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+ n = a;
+ break;
+ }
+ n = e = a;
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+ a = erts_smp_atomic32_cmpxchg_nob(&rp->state, n, e);
+ } while (a != e);
+ rp_state = n;
+ }
+
+ /*
+ * schedule_process_sys_task() unlocks status
+ * lock on process.
+ */
+ schedule_process_sys_task(rp, rp_state, NULL);
+
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+
+ BIF_RET(am_ok);
+
+noproc:
+
+ notify_sys_task_executed(BIF_P, st, noproc_res);
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+ BIF_RET(am_ok);
+
+badarg:
+
+ if (st) {
+ erts_cleanup_offheap(&st->off_heap);
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+ }
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+ BIF_ERROR(BIF_P, BADARG);
+}
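
The checks above imply the BIF's calling convention: BIF_ARG_1 is a local pid, BIF_ARG_2 is one of the atoms max | high | normal | low, and BIF_ARG_3 is a tuple {Type, ReqId, Arg1, ...} with between 2 and 2 + ERTS_MAX_PROC_SYS_TASK_ARGS elements; the requester later receives {Type, ReqId, Result} from notify_sys_task_executed(). The shape test, distilled into a standalone sketch (hypothetical helper, not part of the patch):

    static int
    valid_sys_task_request(Eterm req)
    {
        Eterm *tp;
        Uint arity;
        if (is_not_tuple(req))
            return 0;
        tp = tuple_val(req);
        arity = arityval(tp[0]);   /* tp[0] is the arity header word */
        return arity >= 2 && arity <= 2 + ERTS_MAX_PROC_SYS_TASK_ARGS;
    }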
+
+static void
+save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
+{
+ erts_aint32_t state;
+ ErtsProcSysTaskQs *qs;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+
+ qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p);
+ if (!qs) {
+ st->next = st->prev = st;
+ qs = proc_sys_task_queues_alloc();
+ qs->qmask = 1 << prio;
+ qs->ncount = 0;
+ qs->q[PRIORITY_MAX] = NULL;
+ qs->q[PRIORITY_HIGH] = NULL;
+ qs->q[PRIORITY_NORMAL] = NULL;
+ qs->q[PRIORITY_LOW] = NULL;
+ qs->q[prio] = st;
+ (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, qs);
+ }
+ else {
+ if (!qs->q[prio]) {
+ st->next = st->prev = st;
+ qs->q[prio] = st;
+ qs->qmask |= 1 << prio;
+ }
+ else {
+ st->next = qs->q[prio];
+ st->prev = qs->q[prio]->prev;
+ st->next->prev = st;
+ st->prev->next = st;
+ ASSERT(qs->qmask & (1 << prio));
+ }
+ }
+
+ state = erts_smp_atomic32_read_nob(&c_p->state);
+ ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state);
+
+ while (!(state & ERTS_PSFLG_DELAYED_SYS)
+ || prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) {
+ erts_aint32_t n, e;
+
+ n = e = state;
+ n |= ERTS_PSFLG_DELAYED_SYS;
+ if (prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) {
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET;
+ }
+ state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e);
+ if (state == e)
+ break;
+ }
+}
+
+int
+erts_set_gc_state(Process *c_p, int enable)
+{
+ ErtsProcSysTaskQs *dgc_tsk_qs;
+ ASSERT(c_p == erts_get_current_process());
+ ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
+ & erts_smp_atomic32_read_nob(&c_p->state));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+
+ if (!enable) {
+ c_p->flags |= F_DISABLE_GC;
+ return 0;
+ }
+
+ c_p->flags &= ~F_DISABLE_GC;
+
+ dgc_tsk_qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p);
+ if (!dgc_tsk_qs)
+ return 0;
+
+ /* Move delayed gc tasks into sys tasks queues. */
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!c_p->sys_task_qs) {
+ c_p->sys_task_qs = dgc_tsk_qs;
+ dgc_tsk_qs = NULL;
+ }
+ else {
+ ErtsProcSysTaskQs *stsk_qs;
+ int prio;
+
+ /*
+ * We push delayed tasks to the front of the queue
+ * since they have already made it to the front
+ * once and then been delayed after that.
+ */
+
+ stsk_qs = c_p->sys_task_qs;
+
+ while (dgc_tsk_qs->qmask) {
+ int qbit = dgc_tsk_qs->qmask & -dgc_tsk_qs->qmask;
+ dgc_tsk_qs->qmask &= ~qbit;
+ switch (qbit) {
+ case MAX_BIT:
+ prio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ prio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ prio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ prio = PRIORITY_LOW;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ prio = -1;
+ break;
+ }
+
+ ASSERT(dgc_tsk_qs->q[prio]);
+
+ if (!stsk_qs->q[prio]) {
+ stsk_qs->q[prio] = dgc_tsk_qs->q[prio];
+ stsk_qs->qmask |= 1 << prio;
+ }
+ else {
+ ErtsProcSysTask *first1, *last1, *first2, *last2;
+
+ ASSERT(stsk_qs->qmask & (1 << prio));
+ first1 = dgc_tsk_qs->q[prio];
+ last1 = first1->prev;
+ first2 = stsk_qs->q[prio];
+ last2 = first2->prev;
+
+ last1->next = first2;
+ first2->prev = last1;
+
+ first1->prev = last2;
+ last2->next = first1;
+
+ stsk_qs->q[prio] = first1;
+ }
+
+ }
+ }
+
+#ifdef DEBUG
+ {
+ int qmask;
+ erts_aint32_t aprio, state =
+#endif
+
+ erts_smp_atomic32_read_bset_nob(&c_p->state,
+ (ERTS_PSFLG_DELAYED_SYS
+ | ERTS_PSFLG_ACTIVE_SYS),
+ ERTS_PSFLG_ACTIVE_SYS);
+
+#ifdef DEBUG
+ ASSERT(state & ERTS_PSFLG_DELAYED_SYS);
+ qmask = c_p->sys_task_qs->qmask;
+ aprio = ERTS_PSFLGS_GET_ACT_PRIO(state);
+ ASSERT(ERTS_PSFLGS_GET_USR_PRIO(state) >= aprio);
+ ASSERT((qmask & -qmask) >= (1 << aprio));
+ }
+#endif
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+
+ if (dgc_tsk_qs)
+ proc_sys_task_queues_free(dgc_tsk_qs);
+
+ return 1;
+}
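
The merge loop above splices two non-empty circular doubly-linked lists so that the delayed tasks end up in front. Extracted as a standalone sketch (hypothetical helper; the patch performs the same four pointer updates inline):

    static ErtsProcSysTask *
    splice_circular(ErtsProcSysTask *front, ErtsProcSysTask *back)
    {
        ErtsProcSysTask *front_last = front->prev;  /* tail of first ring */
        ErtsProcSysTask *back_last  = back->prev;   /* tail of second ring */
        front_last->next = back;        /* first tail -> second head */
        back->prev      = front_last;
        front->prev     = back_last;    /* close the ring */
        back_last->next = front;
        return front;                   /* merged head: delayed tasks first */
    }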
+
void
erts_sched_stat_modify(int what)
{
@@ -7052,7 +10260,7 @@ erts_sched_stat_modify(int what)
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
erts_smp_thr_progress_block();
- erts_sched_stat.enabled = 1;
+ erts_sched_stat.enabled = 0;
erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
@@ -7139,6 +10347,10 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
rq->misc.start = molp;
rq->misc.end = molp;
+#ifdef ERTS_SMP
+ non_empty_runq(rq);
+#endif
+
erts_smp_runq_unlock(rq);
smp_notify_inc_runq(rq);
@@ -7286,6 +10498,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
p->approx_started = erts_get_approx_time();
p->rcount = 0;
+ p->heap = NULL;
ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob(
@@ -7320,7 +10533,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Check for errors.
*/
- if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) {
+ if (is_not_atom(mod) || is_not_atom(func) || ((arity = erts_list_length(args)) < 0)) {
so->error_code = BADARG;
goto error;
}
@@ -7335,7 +10548,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
prio = (erts_aint32_t) so->priority;
}
- state |= (prio & ERTS_PSFLG_PRIO_MASK);
+ state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
+ | ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -7398,7 +10612,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
hipe_init_process_smp(&p->hipe_smp);
#endif
#endif
-
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
p->high_water = p->heap;
@@ -7413,6 +10626,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->bin_old_vheap = 0;
p->bin_vheap_mature = 0;
+ p->sys_task_qs = NULL;
+
/* No need to initialize p->fcalls. */
p->current = p->initial+INITIAL_MOD;
@@ -7629,6 +10844,7 @@ void erts_init_empty_process(Process *p)
p->bin_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
+ p->sys_task_qs = NULL;
p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
p->common.u.alive.ptimer = NULL;
@@ -7884,33 +11100,22 @@ delete_process(Process* p)
p->fvalue = NIL;
}
-static ERTS_INLINE erts_aint32_t
-set_proc_exiting_state(Process *p, erts_aint32_t state)
-{
- erts_aint32_t a, n, e;
- a = state;
- while (1) {
- n = e = a;
- n &= ~(ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- n |= ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE;
- if (!(a & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
- n |= ERTS_PSFLG_IN_RUNQ;
- a = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
- if (a == e)
- break;
- }
- return a;
-}
-
static ERTS_INLINE void
set_proc_exiting(Process *p,
- erts_aint32_t state,
+ erts_aint32_t in_state,
Eterm reason,
ErlHeapFragment *bp)
{
+ erts_aint32_t state = in_state, enq_prio = -1;
+ int enqueue;
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
- state = set_proc_exiting_state(p, state);
+ enqueue = change_proc_schedule_state(p,
+ ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -7925,15 +11130,38 @@ set_proc_exiting(Process *p,
cancel_timer(p);
p->i = (BeamInstr *) beam_exit;
- if (erts_system_profile_flags.runnable_procs
- && !(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
- profile_runnable_proc(p, am_active);
- }
-
- if (!(state & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
- add2runq(p, state);
+ if (enqueue)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
+static ERTS_INLINE erts_aint32_t
+set_proc_self_exiting(Process *c_p)
+{
+#ifdef DEBUG
+ int enqueue;
+#endif
+ erts_aint32_t state, enq_prio = -1;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
+
+ state = erts_smp_atomic32_read_nob(&c_p->state);
+ ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+#ifdef DEBUG
+ enqueue =
+#endif
+ change_proc_schedule_state(c_p,
+ ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
+
+ ASSERT(!enqueue);
+ return state;
+}
#ifdef ERTS_SMP
@@ -7981,7 +11209,7 @@ handle_pending_exiters(ErtsProcList *pnd_xtrs)
if (p) {
if (erts_proclist_same(plp, p)) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_RUNNING)) {
+ if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
}
@@ -8010,8 +11238,15 @@ save_pending_exiter(Process *p)
erts_proclist_store_last(&rq->procs.pending_exiters, plp);
+ non_empty_runq(rq);
+
erts_smp_runq_unlock(rq);
- wake_scheduler(rq, 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ wake_dirty_schedulers(rq, 0);
+ else
+#endif
+ wake_scheduler(rq);
}
#endif
@@ -8163,7 +11398,7 @@ send_exit_signal(Process *c_p, /* current process if and only
dtrace_pid_str(from, sender_str);
dtrace_proc_str(rp, receiver_str);
- erts_snprintf(reason_buf, sizeof(reason_buf) - 1, "%T", reason);
+ erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
}
#endif
@@ -8202,7 +11437,7 @@ send_exit_signal(Process *c_p, /* current process if and only
}
set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(state & ERTS_PSFLG_RUNNING)) {
+ else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
if (need_locks
@@ -8567,8 +11802,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
@@ -8579,9 +11815,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
void
erts_do_exit_process(Process* p, Eterm reason)
{
-#ifdef ERTS_SMP
- erts_aint32_t state;
-#endif
p->arity = 0; /* No live registers */
p->fvalue = reason;
@@ -8606,10 +11839,9 @@ erts_do_exit_process(Process* p, Eterm reason)
#endif
#ifndef ERTS_SMP
- set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
+ set_proc_self_exiting(p);
#else
- state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
- if (state & ERTS_PSFLG_PENDING_EXIT) {
+ if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
/* Process exited before pending exit was received... */
p->pending_exit.reason = THE_NON_VALUE;
if (p->pending_exit.bp) {
@@ -8663,6 +11895,8 @@ erts_continue_exit_process(Process *p)
DistEntry *dep;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
+ erts_aint32_t state;
+ void *nif_export;
#ifdef DEBUG
int yield_allowed = 1;
@@ -8699,6 +11933,13 @@ erts_continue_exit_process(Process *p)
p->flags &= ~F_USING_DB;
}
+ erts_set_gc_state(p, 1);
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_ACTIVE_SYS) {
+ if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
+ goto yield;
+ }
+
if (p->flags & F_USING_DDLL) {
erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
p->flags &= ~F_USING_DDLL;
@@ -8776,22 +12017,37 @@ erts_continue_exit_process(Process *p)
{
/* Inactivate and notify free */
erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
+#ifdef ERTS_SMP
+ int refc_inced = 0;
+#endif
while (1) {
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
n &= ~ERTS_PSFLG_ACTIVE;
+#ifdef ERTS_SMP
+ if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
+ erts_smp_proc_inc_refc(p);
+ refc_inced = 1;
+ }
+#endif
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- }
+#ifdef ERTS_SMP
+ if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
+ erts_smp_proc_dec_refc(p);
+#endif
+ }
+
dep = ((p->flags & F_DISTRIBUTION)
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -8839,6 +12095,9 @@ erts_continue_exit_process(Process *p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
delete_process(p);
#ifdef ERTS_SMP
@@ -8883,7 +12142,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state, 0);
+ schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
@@ -8961,7 +12220,9 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
print_function_from_pc(to, to_arg, p->cp);
erts_print(to, to_arg, ")\n");
state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_GC))) {
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_GC))) {
erts_print(to, to_arg, "arity = %d\n",p->arity);
if (!ERTS_IS_CRASH_DUMPING) {
/*
@@ -9049,6 +12310,10 @@ void erl_halt(int code)
if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
+ ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
+#endif
erts_halt_code = code;
notify_reap_ports_relb();
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 6d1032c292..27a3a3553b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -70,7 +70,14 @@ typedef struct process Process;
struct ErtsNodesMonitor_;
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 0
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT 0
+
#define ERTS_MAX_NO_OF_SCHEDULERS 1024
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS
+#define ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS
+#endif
#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18)
@@ -97,8 +104,14 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
+extern int erts_eager_check_io;
extern int erts_sched_compact_load;
+extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern Uint erts_no_dirty_cpu_schedulers;
+extern Uint erts_no_dirty_io_schedulers;
+#endif
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
@@ -198,6 +211,10 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ (erts_aint32_t) (MSK), \
+ (erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
@@ -267,6 +284,13 @@ typedef enum {
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+#ifdef ERTS_DIRTY_SCHEDULERS
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+#endif
+
struct ErtsSchedulerSleepInfo_ {
#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
@@ -316,9 +340,40 @@ typedef struct {
int reds;
} ErtsRunQueueInfo;
+
+#ifdef HAVE_GETHRTIME
+# undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
+#endif
+
#ifdef ERTS_SMP
+#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+
+#ifdef ARCH_64
+typedef erts_atomic_t ErtsAtomicSchedTime;
+#elif defined(ARCH_32)
+typedef erts_dw_atomic_t ErtsAtomicSchedTime;
+#else
+# error "Unsupported architecture word size"
+#endif
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+typedef struct {
+ ErtsAtomicSchedTime last;
+ struct {
+ Uint64 short_interval;
+ Uint64 long_interval;
+ } worktime;
+ int is_working;
+} ErtsRunQueueSchedUtil;
+#endif
+
typedef struct {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util;
+#endif
Uint32 flags;
ErtsRunQueue *misc_evac_runq;
struct {
@@ -348,6 +403,12 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
+#ifdef ERTS_DIRTY_SCHEDULERS
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
+#endif
+
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
@@ -385,8 +446,15 @@ struct ErtsRunQueue_ {
Port *start;
Port *end;
} ports;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ ErtsRunQueueSchedUtil sched_util;
+#endif
};
+#ifdef ERTS_SMP
+extern long erts_runq_supervision_interval;
+#endif
+
typedef union {
ErtsRunQueue runq;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunQueue))];
@@ -410,6 +478,7 @@ do { \
} while (0)
typedef struct {
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
int enabled;
Uint64 start;
struct {
@@ -420,6 +489,11 @@ typedef struct {
} ErtsSchedWallTime;
typedef struct {
+ Uint64 reclaimed;
+ Uint64 garbage_cols;
+} ErtsGCInfo;
+
+typedef struct {
int sched;
erts_aint32_t aux_work;
} ErtsDelayedAuxWorkWakeupJob;
@@ -430,6 +504,7 @@ typedef struct {
ErtsSchedulerSleepInfo *ssi;
#ifdef ERTS_SMP
ErtsThrPrgrVal current_thr_prgr;
+ ErtsThrPrgrVal latest_wakeup;
#endif
struct {
int ix;
@@ -444,6 +519,8 @@ typedef struct {
void (*completed_arg)(void *);
} dd;
struct {
+ ErtsThrPrgrVal thr_prgr;
+ UWord size;
ErtsThrPrgrLaterOp *first;
ErtsThrPrgrLaterOp *last;
} later_op;
@@ -467,6 +544,21 @@ typedef struct {
#endif
} ErtsAuxWorkData;
+#ifdef ERTS_DIRTY_SCHEDULERS
+typedef enum {
+ ERTS_DIRTY_CPU_SCHEDULER,
+ ERTS_DIRTY_IO_SCHEDULER
+} ErtsDirtySchedulerType;
+
+typedef union {
+ struct {
+ ErtsDirtySchedulerType type: 1;
+ unsigned num: 31;
+ } s;
+ Uint no;
+} ErtsDirtySchedId;
+#endif
+
struct ErtsSchedulerData_ {
/*
* Keep X registers first (so we get as many low
@@ -487,12 +579,14 @@ struct ErtsSchedulerData_ {
Eterm tmp_heap[TMP_HEAP_SIZE];
int num_tmp_heap_used;
Eterm beam_emu_tmp_heap[BEAM_EMU_TMP_HEAP_SIZE];
- Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE];
Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
- Uint no; /* Scheduler number */
+ Uint no; /* Scheduler number for normal schedulers */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
+#endif
Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
@@ -504,6 +598,8 @@ struct ErtsSchedulerData_ {
Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
+ ErtsGCInfo gc_info;
+ ErtsPortTaskHandle nosuspend_port_task_handle;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
@@ -517,17 +613,34 @@ typedef union {
} ErtsAlignedSchedulerData;
extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
+extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+#endif
#ifndef ERTS_SMP
extern ErtsSchedulerData *erts_scheduler_data;
#endif
+#ifdef ERTS_SCHED_FAIR
+#define ERTS_SCHED_FAIR_YIELD() ETHR_YIELD()
+#else
+#define ERTS_SCHED_FAIR 0
+#define ERTS_SCHED_FAIR_YIELD()
+#endif
+
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#ifdef ERTS_SMP
+void erts_empty_runq(ErtsRunQueue *rq);
+void erts_non_empty_runq(ErtsRunQueue *rq);
+#endif
+
+
/*
* Run queue locked during modifications. We use atomic ops since
* other threads peek at values without run queue lock.
@@ -560,6 +673,10 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
erts_smp_atomic32_set_relb(&rqi->len, len);
+#ifdef ERTS_SMP
+ if (rq->len == 0)
+ erts_non_empty_runq(rq);
+#endif
rq->len++;
if (rq->max_len < rq->len)
rq->max_len = len;
@@ -617,8 +734,10 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_SCHED_ID 2
#define ERTS_PSD_DIST_ENTRY 3
#define ERTS_PSD_CALL_TIME_BP 4
+#define ERTS_PSD_DELAYED_GC_TASK_QS 5
+#define ERTS_PSD_NIF_TRAP_EXPORT 6
-#define ERTS_PSD_SIZE 5
+#define ERTS_PSD_SIZE 7
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -642,6 +761,12 @@ typedef struct {
#define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -674,6 +799,9 @@ typedef struct {
ErlHeapFragment *bp;
} ErtsPendExit;
+typedef struct ErtsProcSysTask_ ErtsProcSysTask;
+typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
+
#ifdef ERTS_SMP
typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
@@ -689,6 +817,7 @@ struct ErtsPendingSuspend_ {
#endif
+
/* Defines to ease the change of memory architecture */
# define HEAP_START(p) (p)->heap
# define HEAP_TOP(p) (p)->htop
@@ -831,6 +960,8 @@ struct process {
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
+ ErtsProcSysTaskQs *sys_task_qs;
+
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_SMP
@@ -900,24 +1031,73 @@ void erts_check_for_holes(Process* p);
# error "Need to increase ERTS_PSFLG_PRIO_SHIFT"
#endif
-#define ERTS_PSFLG_PRIO_SHIFT 2
+#define ERTS_PSFLGS_PRIO_BITS 2
+#define ERTS_PSFLGS_PRIO_MASK \
+ ((((erts_aint32_t) 1) << ERTS_PSFLGS_PRIO_BITS) - 1)
-#define ERTS_PSFLG_BIT(N) \
- (((erts_aint32_t) 1) << (ERTS_PSFLG_PRIO_SHIFT + (N)))
+#define ERTS_PSFLGS_ACT_PRIO_OFFSET (0*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_USR_PRIO_OFFSET (1*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_PRQ_PRIO_OFFSET (2*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_ZERO_BIT_OFFSET (3*ERTS_PSFLGS_PRIO_BITS)
-#define ERTS_PSFLG_PRIO_MASK (ERTS_PSFLG_BIT(0) - 1)
+#define ERTS_PSFLGS_QMASK_BITS 4
+#define ERTS_PSFLGS_QMASK \
+ ((((erts_aint32_t) 1) << ERTS_PSFLGS_QMASK_BITS) - 1)
+#define ERTS_PSFLGS_IN_PRQ_MASK_OFFSET \
+ ERTS_PSFLGS_ZERO_BIT_OFFSET
-#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(0)
-#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(1)
-#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(2)
-#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(3)
-#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(4)
-#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(5)
-#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(6)
-#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(7)
-#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(8)
-#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(9)
+#define ERTS_PSFLG_BIT(N) \
+ (((erts_aint32_t) 1) << (ERTS_PSFLGS_ZERO_BIT_OFFSET + (N)))
+/*
+ * ACT_PRIO -> Active prio, i.e., currently active prio. This
+ * prio may be higher than user prio.
+ * USR_PRIO -> User prio. i.e., prio the user has set.
+ * PRQ_PRIO -> Prio queue prio, i.e., prio queue currently
+ * enqueued in.
+ */
+#define ERTS_PSFLGS_ACT_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET)
+#define ERTS_PSFLGS_USR_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_USR_PRIO_OFFSET)
+#define ERTS_PSFLGS_PRQ_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_PRQ_PRIO_OFFSET)
+#define ERTS_PSFLG_IN_PRQ_MAX ERTS_PSFLG_BIT(0)
+#define ERTS_PSFLG_IN_PRQ_HIGH ERTS_PSFLG_BIT(1)
+#define ERTS_PSFLG_IN_PRQ_NORMAL ERTS_PSFLG_BIT(2)
+#define ERTS_PSFLG_IN_PRQ_LOW ERTS_PSFLG_BIT(3)
+#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(4)
+#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(5)
+#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(6)
+#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(7)
+#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(8)
+#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9)
+#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10)
+#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11)
+#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12)
+#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13)
+#define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14)
+#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
+#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
+#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(18)
+#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19)
+#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20)
+#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21)
+#endif
+
+#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
+ | ERTS_PSFLG_IN_PRQ_HIGH \
+ | ERTS_PSFLG_IN_PRQ_NORMAL \
+ | ERTS_PSFLG_IN_PRQ_LOW)
+
+#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
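
Taken together, these macros lay out the 32-bit process state word as follows (derived from the definitions above; ERTS_PSFLGS_ZERO_BIT_OFFSET == 6, so ERTS_PSFLG_BIT(N) == 1 << (6 + N)):

    /* bits  0-1   ACT_PRIO (currently effective priority)
     * bits  2-3   USR_PRIO (priority set by the user)
     * bits  4-5   PRQ_PRIO (priority queue currently enqueued in)
     * bits  6-9   IN_PRQ_MAX .. IN_PRQ_LOW
     * bit  10 FREE           bit 17 GC
     * bit  11 EXITING        bit 18 BOUND
     * bit  12 PENDING_EXIT   bit 19 TRAP_EXIT
     * bit  13 ACTIVE         bit 20 ACTIVE_SYS
     * bit  14 IN_RUNQ        bit 21 RUNNING_SYS
     * bit  15 RUNNING        bit 22 PROXY
     * bit  16 SUSPENDED      bit 23 DELAYED_SYS
     * bits 24-27 dirty scheduler flags (ERTS_DIRTY_SCHEDULERS only)  */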
/* The sequential tracing token is a tuple of size 5:
*
@@ -999,6 +1179,7 @@ extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
*/
extern Eterm erts_system_monitor;
extern Uint erts_system_monitor_long_gc;
+extern Uint erts_system_monitor_long_schedule;
extern Uint erts_system_monitor_large_heap;
struct erts_system_monitor_flags_t {
unsigned int busy_port : 1;
@@ -1031,6 +1212,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
+#define F_DISABLE_GC (1 << 11) /* Disable GC */
/* process trace_flags */
#define F_SENSITIVE (1 << 0)
@@ -1109,23 +1291,80 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
(p)->flags &= ~F_TIMO; \
} while (0)
+#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
+#define ERTS_NUM_DIRTY_RUNQS 2
+#else
+#define ERTS_NUM_DIRTY_RUNQS 0
+#endif
+
#define ERTS_RUNQ_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \
&erts_aligned_run_queues[(IX)].runq)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_RUNQ_IX_IS_DIRTY(IX) \
+ (-(ERTS_NUM_DIRTY_RUNQS) <= (IX) && (IX) < 0)
+#define ERTS_DIRTY_RUNQ_IX(IX) \
+ (ASSERT(ERTS_RUNQ_IX_IS_DIRTY(IX)), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
+#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
+#else
+#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
+#endif
#define ERTS_SCHEDULER_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \
&erts_aligned_scheduler_data[(IX)].esd)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \
+ &erts_aligned_dirty_cpu_scheduler_data[(IX)].esd)
+#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
+ &erts_aligned_dirty_io_scheduler_data[(IX)].esd)
+#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
+ ((ESDP)->dirty_no.s.num)
+#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
+ ((ESDP)->dirty_no.s.type)
+#ifdef ERTS_SMP
+#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
+ ((ESDP)->dirty_no.s.num != 0)
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
+ ((ESDP)->dirty_no.s.type == 0)
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
+ ((ESDP)->dirty_no.s.type == 1)
+#else
+#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
+#endif
+#else
+#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
+#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
+#endif
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int);
+void erts_init_scheduling(int, int
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int, int, int
+#endif
+ );
+int erts_set_gc_state(Process *c_p, int enable);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Eterm erts_gc_info_request(Process *c_p);
Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
+int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
+void erts_destroy_nif_export(void *); /* see erl_nif.c */
+
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1298,10 +1537,15 @@ ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list,
int erts_sched_set_wakeup_other_thresold(char *str);
int erts_sched_set_wakeup_other_type(char *str);
int erts_sched_set_busy_wait_threshold(char *str);
+int erts_sched_set_wake_cleanup_threshold(char *);
void erts_schedule_thr_prgr_later_op(void (*)(void *),
void *,
ErtsThrPrgrLaterOp *);
+void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
+ void *,
+ ErtsThrPrgrLaterOp *,
+ UWord);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
@@ -1310,20 +1554,27 @@ int erts_dbg_check_halloc_lock(Process *p);
void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
#endif
int erts_get_max_no_executing_schedulers(void);
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS)
ErtsSchedSuspendResult
-erts_schedulers_state(Uint *, Uint *, Uint *, int);
+erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, int);
+#endif
+#ifdef ERTS_SMP
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
Sint new_no,
- Sint *old_no);
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ );
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
int erts_is_multi_scheduling_blocked(void);
Eterm erts_multi_scheduling_blockers(Process *);
void erts_start_schedulers(void);
void erts_alloc_notify_delayed_dealloc(int);
+void erts_alloc_ensure_handle_delayed_dealloc_call(int);
void erts_smp_notify_check_children_needed(void);
#endif
#if ERTS_USE_ASYNC_READY_Q
@@ -1343,7 +1594,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
-void erts_init_process(int, int);
+void erts_init_process(int, int, int);
Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
Uint erts_run_queues_len(Uint *);
void erts_add_to_runq(Process *);
@@ -1413,6 +1664,8 @@ Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
Uint erts_debug_nbalance(void);
int erts_debug_wait_deallocations(Process *c_p);
+Uint erts_process_memory(Process *c_p);
+
#ifdef ERTS_SMP
# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data)
@@ -1427,7 +1680,7 @@ do { \
ErtsSchedulerData *esdp__ = ((P) \
? ERTS_PROC_GET_SCHDATA((Process *) (P)) \
: erts_get_scheduler_data()); \
- if (esdp__) \
+ if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \
esdp__->verify_unused_temp_alloc( \
esdp__->verify_unused_temp_alloc_data); \
} while (0)
@@ -1449,17 +1702,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
-void erts_schedule_process(Process *, erts_aint32_t);
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
#endif
@@ -1557,6 +1810,16 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \
((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
+#define ERTS_PROC_GET_DELAYED_GC_TASK_QS(P) \
+ ((ErtsProcSysTaskQs *) erts_psd_get((P), ERTS_PSD_DELAYED_GC_TASK_QS))
+#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
+ ((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
+
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, NTE) \
+ erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
@@ -1602,6 +1865,13 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
extern erts_atomic_t erts_migration_paths;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+int erts_get_sched_util(ErtsRunQueue *rq,
+ int initially_locked,
+ int short_interval);
+#endif
+
+
ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths_managed(void);
ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths(void);
ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq,
@@ -1653,22 +1923,36 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
return mp->prio[prio].runq;
}
-
- if (prio == ERTS_PORT_PRIO_LEVEL)
- len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (mp->sched_util) {
+ ErtsRunQueue *rq = mp->prio[prio].runq;
+ /* No migration if other is non-empty */
+ if (!(ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY)
+ && erts_get_sched_util(rq, 0, 1) < mp->prio[prio].limit.other
+ && erts_get_sched_util(c_rq, 0, 1) > mp->prio[prio].limit.this) {
+ return rq;
+ }
+ }
else
- len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
-
- if (len > mp->prio[prio].limit.this) {
- ErtsRunQueue *n_rq = mp->prio[prio].runq;
- if (n_rq) {
- if (prio == ERTS_PORT_PRIO_LEVEL)
- len = RUNQ_READ_LEN(&n_rq->ports.info.len);
- else
- len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len);
-
- if (len < mp->prio[prio].limit.other)
- return n_rq;
+#endif
+ {
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
+
+ if (len > mp->prio[prio].limit.this) {
+ ErtsRunQueue *n_rq = mp->prio[prio].runq;
+ if (n_rq) {
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&n_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len);
+
+ if (len < mp->prio[prio].limit.other)
+ return n_rq;
+ }
}
}
}
@@ -1729,7 +2013,12 @@ Uint erts_get_scheduler_id(void)
{
#ifdef ERTS_SMP
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- return esdp ? esdp->no : (Uint) 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
+ return 0;
+ else
+#endif
+ return esdp ? esdp->no : (Uint) 0;
#else
return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0;
#endif
@@ -1759,12 +2048,6 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
#endif
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
-
-#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
-
-#else
-
ERTS_GLB_INLINE void
erts_smp_runq_lock(ErtsRunQueue *rq)
{
@@ -1773,6 +2056,10 @@ erts_smp_runq_lock(ErtsRunQueue *rq)
#endif
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
+
#endif
ERTS_GLB_INLINE int
@@ -1956,6 +2243,7 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
}
}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* #ifdef ERTS_SMP */
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index a611b52af2..23e5bf737f 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -659,7 +659,7 @@ static void shrink(Process *p, Eterm* ret)
} else {
int needed = 4;
if (is_list(hi) && is_list(lo)) {
- needed = 2*list_length(hi);
+ needed = 2*erts_list_length(hi);
}
if (HeapWordsLeft(p) < needed) {
BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index a93229c473..2f3cf23b00 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -34,8 +34,8 @@
#define ERTS_WANT_EXTERNAL_TAGS
#include "external.h"
-#define WORD_FMT "%X"
-#define ADDR_FMT "%X"
+#define PTR_FMT "%bpX"
+#define ETERM_FMT "%beX"
#define OUR_NIL _make_header(0,_TAG_HEADER_FLOAT)
@@ -76,6 +76,43 @@ erts_deep_process_dump(int to, void *to_arg)
dump_binaries(to, to_arg, all_binaries);
}
+Uint erts_process_memory(Process *p) {
+ ErlMessage *mp;
+ Uint size = 0;
+ struct saved_calls *scb;
+ size += sizeof(Process);
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+
+ erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
+ erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
+ size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->old_hend && p->old_heap)
+ size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
+ size += p->msg.len * sizeof(ErlMessage);
+
+ for (mp = p->msg.first; mp; mp = mp->next)
+ if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
+
+ if (p->arg_reg != p->def_arg_reg) {
+ size += p->arity * sizeof(p->arg_reg[0]);
+ }
+
+ if (p->psd)
+ size += sizeof(ErtsPSD);
+
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ size += (sizeof(struct saved_calls)
+ + (scb->len-1) * sizeof(scb->ct[0]));
+ }
+
+ size += erts_dicts_mem_size(p);
+ return size;
+}
+
static void
dump_process_info(int to, void *to_arg, Process *p)
{
@@ -173,9 +210,9 @@ static void
dump_element(int to, void *to_arg, Eterm x)
{
if (is_list(x)) {
- erts_print(to, to_arg, "H" WORD_FMT, list_val(x));
+ erts_print(to, to_arg, "H" PTR_FMT, list_val(x));
} else if (is_boxed(x)) {
- erts_print(to, to_arg, "H" WORD_FMT, boxed_val(x));
+ erts_print(to, to_arg, "H" PTR_FMT, boxed_val(x));
} else if (is_immed(x)) {
if (is_atom(x)) {
unsigned char* s = atom_tab(atom_val(x))->name;
@@ -274,7 +311,7 @@ heap_dump(int to, void *to_arg, Eterm x)
} else if (is_list(x)) {
ptr = list_val(x);
if (ptr[0] != OUR_NIL) {
- erts_print(to, to_arg, ADDR_FMT ":l", ptr);
+ erts_print(to, to_arg, PTR_FMT ":l", ptr);
dump_element(to, to_arg, ptr[0]);
erts_putc(to, to_arg, '|');
dump_element(to, to_arg, ptr[1]);
@@ -293,12 +330,12 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr = boxed_val(x);
hdr = *ptr;
if (hdr != OUR_NIL) { /* If not visited */
- erts_print(to, to_arg, ADDR_FMT ":", ptr);
+ erts_print(to, to_arg, PTR_FMT ":", ptr);
if (is_arity_value(hdr)) {
Uint i;
Uint arity = arityval(hdr);
- erts_print(to, to_arg, "t" WORD_FMT ":", arity);
+ erts_print(to, to_arg, "t" ETERM_FMT ":", arity);
for (i = 1; i <= arity; i++) {
dump_element(to, to_arg, ptr[i]);
if (is_immed(ptr[i])) {
@@ -351,21 +388,43 @@ heap_dump(int to, void *to_arg, Eterm x)
val->flags = (UWord) all_binaries;
all_binaries = val;
}
- erts_print(to, to_arg, "Yc%X:%X:%X", val,
+ erts_print(to, to_arg,
+ "Yc" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val,
pb->bytes - (byte *)val->orig_bytes,
size);
} else if (tag == SUB_BINARY_SUBTAG) {
ErlSubBin* Sb = (ErlSubBin *) binary_val(x);
- Eterm* real_bin = binary_val(Sb->orig);
+ Eterm* real_bin;
void* val;
+ /*
+ * Must use boxed_val() here, because the original
+ * binary may have been visited and have had its
+ * header word changed to OUR_NIL (in which case
+ * binary_val() will cause an assertion failure in
+ * the DEBUG emulator).
+ */
+
+ real_bin = boxed_val(Sb->orig);
+
if (thing_subtag(*real_bin) == REFC_BINARY_SUBTAG) {
+ /*
+ * Unvisited REFC_BINARY: Point directly to
+ * the binary.
+ */
ProcBin* pb = (ProcBin *) real_bin;
val = pb->val;
- } else { /* Heap binary */
+ } else {
+ /*
+ * Heap binary or visited REFC binary: Point
+ * to heap binary or ProcBin on the heap.
+ */
val = real_bin;
}
- erts_print(to, to_arg, "Ys%X:%X:%X", val, Sb->offs, size);
+ erts_print(to, to_arg,
+ "Ys" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val, Sb->offs, size);
}
erts_putc(to, to_arg, '\n');
*ptr = OUR_NIL;
@@ -401,7 +460,7 @@ dump_binaries(int to, void *to_arg, Binary* current)
long size = current->orig_size;
byte* bytes = (byte*) current->orig_bytes;
- erts_print(to, to_arg, "=binary:%X\n", current);
+ erts_print(to, to_arg, "=binary:" PTR_FMT "\n", current);
erts_print(to, to_arg, "%X:", size);
for (i = 0; i < size; i++) {
erts_print(to, to_arg, "%02X", bytes[i]);
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 2db5df06b4..82cc68222d 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -117,7 +117,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i));
+ "pix_lock", make_small(i), 1);
#else
erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
@@ -901,7 +901,7 @@ erts_pid2proc_opt(Process *c_p,
busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_proc_lc_trylock(proc, need_locks, !busy);
+ erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
if (!busy)
@@ -1001,8 +1001,8 @@ erts_pid2proc_opt(Process *c_p,
void
erts_proc_lock_init(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
int i;
+#if ERTS_PROC_LOCK_OWN_IMPL
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_smp_atomic32_init_nob(&p->lock.flags,
@@ -1013,25 +1013,33 @@ erts_proc_lock_init(Process *p)
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
p->lock.queue[i] = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
+ erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1,__FILE__,__LINE__);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ int do_lock_count = 1;
+#else
+ int do_lock_count = 0;
+#endif
+
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
+ do_lock_count);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
@@ -1210,50 +1218,51 @@ void erts_lcnt_enable_proc_lock_count(int enable)
#if ERTS_PROC_LOCK_OWN_IMPL
void
-erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
}
void
-erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
+erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char* file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
}
@@ -1319,7 +1328,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
}
void
-erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
+ unsigned int line)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
@@ -1327,29 +1337,29 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
- erts_lc_require_lock(&p->lock.main.lc);
+ erts_lc_require_lock(&p->lock.main.lc, file, line);
if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_require_lock(&p->lock.link.lc);
+ erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
- erts_lc_require_lock(&p->lock.msgq.lc);
+ erts_lc_require_lock(&p->lock.msgq.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
- erts_lc_require_lock(&p->lock.status.lc);
+ erts_lc_require_lock(&p->lock.status.lc, file, line);
#endif
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 9dd503f3cb..052d992d3f 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -215,7 +215,7 @@ typedef struct erts_proc_lock_t_ {
/* Lock counter implementation */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
#endif
@@ -243,8 +243,10 @@ void erts_lcnt_enable_proc_lock_count(int enable);
erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
erts_proc_lc_chk_only_proc_main((P))
-void erts_proc_lc_lock(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked);
+void erts_proc_lc_lock(Process *p, ErtsProcLocks locks,
+ char *file, unsigned int line);
+void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char *file, unsigned int line);
void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
@@ -253,7 +255,8 @@ void erts_proc_lc_chk_only_proc_main(Process *p);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks,
+ char* file, unsigned int line);
void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#else
#define ERTS_SMP_CHK_NO_PROC_LOCKS
@@ -372,7 +375,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
@@ -482,7 +485,7 @@ busy_main:
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks,
@@ -528,7 +531,7 @@ erts_smp_proc_lock__(Process *p,
erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_lock(p, locks);
+ erts_proc_lc_lock(p, locks, file, line);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -695,7 +698,7 @@ erts_smp_proc_trylock__(Process *p,
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, locks, res == 0);
+ erts_proc_lc_trylock(p, locks, res == 0, __FILE__, __LINE__);
#endif
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -741,7 +744,7 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* ERTS_SMP */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
@@ -756,13 +759,13 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
#else
erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_smp_proc_lock_x__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 87beeafa1a..eabf016081 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -34,6 +34,8 @@
typedef struct ErtsPTabListBifData_ ErtsPTabListBifData;
+#define ERTS_PTAB_NEW_MAX_RESERVE_FAIL 1000
+
#define ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED 25
#define ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE 1000
#define ERTS_PTAB_LIST_BIF_MIN_START_REDS \
@@ -415,16 +417,39 @@ last_data_cmp(Uint64 ld1, Uint64 ld2)
#define ERTS_PTAB_LastData2EtermData(LD) \
((Eterm) ((LD) & ~(~((Uint64) 0) << ERTS_PTAB_ID_DATA_SIZE)))
+static ERTS_INLINE Uint32
+ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix)
+{
+ Uint32 dix;
+
+ dix = ((ix & ptab->r.o.dix_cl_mask) << ptab->r.o.dix_cl_shift);
+ dix += ((ix >> ptab->r.o.dix_cli_shift) & ptab->r.o.dix_cli_mask);
+ ASSERT(0 <= dix && dix < ptab->r.o.max);
+ return dix;
+}
+
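
The helper above, together with the init loop further down that stores cli*tab_cache_lines+cl into slot ix, stripes consecutive logical indices across cache lines so the monotonically increasing cursors do not hammer a single line. An illustrative standalone sketch of the mapping, assuming 64 slots and 16 four-byte entries per 64-byte cache line, written with mul/div for clarity where the original uses masks and shifts:

/* Consecutive logical indices land on different cache lines:
 * low bits of ix pick the cache line, high bits the slot within it. */
#include <stdio.h>

#define MAX_SLOTS       64u
#define PER_CACHE_LINE  16u                   /* 64 bytes / 4-byte entry */
#define CACHE_LINES     (MAX_SLOTS / PER_CACHE_LINE)

unsigned ix_to_dix(unsigned ix)
{
    return ((ix % CACHE_LINES) * PER_CACHE_LINE)
         + ((ix / CACHE_LINES) % PER_CACHE_LINE);
}

int main(void)
{
    unsigned ix;
    for (ix = 0; ix < 8; ix++)   /* 0->0, 1->16, 2->32, 3->48, 4->1 ... */
        printf("ix %2u -> dix %2u\n", ix, ix_to_dix(ix));
    return 0;
}
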
+UWord
+erts_ptab_mem_size(ErtsPTab *ptab)
+{
+ UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t);
+ if (ptab->r.o.free_id_data)
+ size += ptab->r.o.max*sizeof(erts_smp_atomic32_t);
+ return size;
+}
+
+
void
erts_ptab_init_table(ErtsPTab *ptab,
ErtsAlcType_t atype,
void (*release_element)(void *),
ErtsPTabElementCommon *invalid_element,
int size,
- char *name)
+ UWord element_size,
+ char *name,
+ int legacy)
{
- size_t tab_sz;
- int bits;
+ size_t tab_sz, alloc_sz;
+ Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines;
char *tab_end;
erts_smp_atomic_t *tab_entry;
erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -443,10 +468,14 @@ erts_ptab_init_table(ErtsPTab *ptab,
bits = erts_fit_in_bits_int32((Sint32) size - 1);
}
+ ptab->r.o.element_size = element_size;
ptab->r.o.max = size;
tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
- ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, tab_sz);
+ alloc_sz = tab_sz;
+ if (!legacy)
+ alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
+ ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz);
tab_end = ((char *) ptab->r.o.tab) + tab_sz;
tab_entry = ptab->r.o.tab;
while (tab_end > ((char *) tab_entry)) {
@@ -454,25 +483,17 @@ erts_ptab_init_table(ErtsPTab *ptab,
tab_entry++;
}
- ptab->r.o.tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
- ptab->r.o.pix_per_cache_line = (ERTS_CACHE_LINE_SIZE
- / sizeof(erts_smp_atomic_t));
+ tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t));
ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */
- ASSERT((ptab->r.o.pix_per_cache_line
- & (ptab->r.o.pix_per_cache_line - 1)) == 0); /* power of 2 */
- ASSERT((ptab->r.o.tab_cache_lines
- & (ptab->r.o.tab_cache_lines - 1)) == 0); /* power of 2 */
-
- ptab->r.o.pix_mask
- = (1 << bits) - 1;
- ptab->r.o.pix_cl_mask
- = ptab->r.o.tab_cache_lines-1;
- ptab->r.o.pix_cl_shift
- = erts_fit_in_bits_int32(ptab->r.o.pix_per_cache_line-1);
- ptab->r.o.pix_cli_shift
- = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask);
- ptab->r.o.pix_cli_mask
- = (1 << (bits - ptab->r.o.pix_cli_shift)) - 1;
+ ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */
+ ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */
+
+ ptab->r.o.pix_mask = (1 << bits) - 1;
+ ptab->r.o.pix_cl_mask = tab_cache_lines-1;
+ ptab->r.o.pix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1);
+ ptab->r.o.pix_cli_shift = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask);
+ ptab->r.o.pix_cli_mask = (1 << (bits - ptab->r.o.pix_cli_shift)) - 1;
ASSERT(ptab->r.o.pix_cl_shift + ptab->r.o.pix_cli_shift == bits);
@@ -480,6 +501,46 @@ erts_ptab_init_table(ErtsPTab *ptab,
ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id);
ptab->r.o.release_element = release_element;
+ if (legacy) {
+ ptab->r.o.free_id_data = NULL;
+ ptab->r.o.dix_cl_mask = 0;
+ ptab->r.o.dix_cl_shift = 0;
+ ptab->r.o.dix_cli_shift = 0;
+ ptab->r.o.dix_cli_mask = 0;
+ }
+ else {
+
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
+ ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end;
+
+ tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t));
+
+ ptab->r.o.dix_cl_mask = tab_cache_lines-1;
+ ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1);
+ ptab->r.o.dix_cli_shift = erts_fit_in_bits_int32(ptab->r.o.dix_cl_mask);
+ ptab->r.o.dix_cli_mask = (1 << (bits - ptab->r.o.dix_cli_shift)) - 1;
+
+ ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */
+ ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */
+
+ ASSERT(ptab->r.o.dix_cl_shift + ptab->r.o.dix_cli_shift == bits);
+
+ ix = 0;
+ for (cl = 0; cl < tab_cache_lines; cl++) {
+ for (cli = 0; cli < ix_per_cache_line; cli++) {
+ erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix],
+ cli*tab_cache_lines+cl);
+ ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data);
+ ix++;
+ }
+ }
+
+ erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1);
+ erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1);
+
+ }
+
erts_smp_interval_init(&ptab->list.data.interval);
ptab->list.data.deleted.start = NULL;
ptab->list.data.deleted.end = NULL;
@@ -520,9 +581,7 @@ erts_ptab_new_element(ErtsPTab *ptab,
void *init_arg,
void (*init_ptab_el)(void *, Eterm))
{
- int pix;
- Uint64 ld, exp_ld;
- Eterm data;
+ Uint32 pix, ix, data;
erts_aint32_t count;
erts_aint_t invalid = (erts_aint_t) ptab->r.o.invalid_element;
@@ -549,62 +608,110 @@ erts_ptab_new_element(ErtsPTab *ptab,
ptab_el->u.alive.started_interval
= erts_smp_current_interval_nob(erts_ptab_interval(ptab));
- ld = last_data_read_acqb(ptab);
+ if (ptab->r.o.free_id_data) {
+ do {
+ ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix);
+ ix = ix_to_free_id_data_ix(ptab, ix);
- /* Reserve slot */
- while (1) {
- ld++;
- pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
- if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) {
- erts_aint_t val;
- val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
- invalid,
- ERTS_AINT_NULL);
+ data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix],
+ (erts_aint32_t)ptab->r.o.invalid_data);
+	} while ((Eterm) data == ptab->r.o.invalid_data);
+
+ init_ptab_el(init_arg, (Eterm) data);
+
+#ifdef ERTS_SMP
+ erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
+#endif
+
+ pix = erts_ptab_data2pix(ptab, (Eterm) data);
+
+#ifdef DEBUG
+ ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab_el));
+#else
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+#endif
+
+ erts_ptab_runlock(ptab);
- if (ERTS_AINT_NULL == val)
- break;
- }
}
+ else {
+ int rlocked = ERTS_PTAB_NEW_MAX_RESERVE_FAIL;
+ Uint64 ld, exp_ld;
+ /* Deprecated legacy algorithm... */
- data = ERTS_PTAB_LastData2EtermData(ld);
+ restart:
+
+ ptab_el->u.alive.started_interval
+ = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ld = last_data_read_acqb(ptab);
+
+ /* Reserve slot */
+ while (1) {
+ ld++;
+ pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
+ if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix])
+ == ERTS_AINT_NULL) {
+ erts_aint_t val;
+ val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ invalid,
+ ERTS_AINT_NULL);
+
+ if (ERTS_AINT_NULL == val)
+ break;
+ }
+ if (rlocked && --rlocked == 0) {
+ erts_ptab_runlock(ptab);
+ erts_ptab_rwlock(ptab);
+ goto restart;
+ }
+ }
- if (data == ptab->r.o.invalid_data) {
- /* Do not use invalid data; fix it... */
- ld += ptab->r.o.max;
- ASSERT(pix == erts_ptab_data2pix(ptab,
- ERTS_PTAB_LastData2EtermData(ld)));
data = ERTS_PTAB_LastData2EtermData(ld);
- ASSERT(data != ptab->r.o.invalid_data);
- }
- exp_ld = last_data_read_nob(ptab);
+ if (data == ptab->r.o.invalid_data) {
+ /* Do not use invalid data; fix it... */
+ ld += ptab->r.o.max;
+ ASSERT(pix == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ ASSERT(data != ptab->r.o.invalid_data);
+ }
+
+ exp_ld = last_data_read_nob(ptab);
- /* Move last data forward */
- while (1) {
- Uint64 act_ld;
- if (last_data_cmp(ld, exp_ld) < 0)
- break;
- act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld);
- if (act_ld == exp_ld)
- break;
- exp_ld = act_ld;
- }
+ /* Move last data forward */
+ while (1) {
+ Uint64 act_ld;
+ if (last_data_cmp(ld, exp_ld) < 0)
+ break;
+ act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld);
+ if (act_ld == exp_ld)
+ break;
+ exp_ld = act_ld;
+ }
- init_ptab_el(init_arg, data);
+ init_ptab_el(init_arg, data);
#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
+ erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
#endif
- /* Move into slot reserved */
+ /* Move into slot reserved */
#ifdef DEBUG
- ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab_el));
#else
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
#endif
- erts_ptab_runlock(ptab);
+ if (rlocked)
+ erts_ptab_runlock(ptab);
+ else
+ erts_ptab_rwunlock(ptab);
+
+ }
return 1;
}
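
Taken together with ix_to_free_id_data_ix, the non-legacy branch above amounts to a lock-free bounded free-list: bump the allocate cursor (aid_ix, initialized to -1 so the first inc-and-read yields 0), exchange the striped slot against the invalid sentinel, and retry on the sentinel, which just means a release has not refilled that slot yet. A C11 sketch under those assumptions, continuing the illustrative names from the striping sketch above:

/* Lock-free id reservation. free_id[] slots hold either a usable id
 * or the INVALID sentinel. */
#include <stdatomic.h>
#include <stdint.h>

#define INVALID 0xFFFFFFFFu

extern _Atomic uint32_t aid_ix;           /* allocate cursor, starts at -1 */
extern _Atomic uint32_t free_id[];        /* MAX_SLOTS striped entries */
unsigned ix_to_dix(unsigned ix);          /* cache-line striping, above */

static uint32_t reserve_id(void)
{
    uint32_t ix, data;
    do {
        ix = atomic_fetch_add(&aid_ix, 1) + 1;          /* inc-and-read */
        data = atomic_exchange(&free_id[ix_to_dix(ix)], INVALID);
    } while (data == INVALID);  /* raced with a slower release; try next */
    return data;                /* caller then publishes tab[pix] */
}
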
@@ -645,9 +752,12 @@ erts_ptab_delete_element(ErtsPTab *ptab,
ErtsPTabElementCommon *ptab_el)
{
int maybe_save;
- int pix = erts_ptab_id2pix(ptab, ptab_el->id);
+ Uint32 pix, ix, data;
+
+ pix = erts_ptab_id2pix(ptab, ptab_el->id);
- ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+    /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_ptab_rlock(ptab);
maybe_save = ptab->list.data.deleted.end != NULL;
@@ -658,6 +768,29 @@ erts_ptab_delete_element(ErtsPTab *ptab,
erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+ if (ptab->r.o.free_id_data) {
+ Uint32 prev_data;
+ /* Next data for this slot... */
+ data = (Uint32) erts_ptab_id2data(ptab, ptab_el->id);
+ data += ptab->r.o.max;
+ data &= ~(~((Uint32) 0) << ERTS_PTAB_ID_DATA_SIZE);
+ if (data == ptab->r.o.invalid_data) { /* make sure not invalid */
+ data += ptab->r.o.max;
+ data &= ~(~((Uint32) 0) << ERTS_PTAB_ID_DATA_SIZE);
+ }
+ ASSERT(data != ptab->r.o.invalid_data);
+ ASSERT(pix == erts_ptab_data2pix(ptab, data));
+
+ do {
+ ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix);
+ ix = ix_to_free_id_data_ix(ptab, ix);
+
+ prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix],
+ data,
+ ptab->r.o.invalid_data);
+	} while ((Eterm) prev_data != ptab->r.o.invalid_data);
+ }
+
ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
@@ -670,9 +803,10 @@ erts_ptab_delete_element(ErtsPTab *ptab,
}
if (ptab->r.o.release_element)
- erts_schedule_thr_prgr_later_op(ptab->r.o.release_element,
- (void *) ptab_el,
- &ptab_el->u.release);
+ erts_schedule_thr_prgr_later_cleanup_op(ptab->r.o.release_element,
+ (void *) ptab_el,
+ &ptab_el->u.release,
+ ptab->r.o.element_size);
}
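
The matching branch in erts_ptab_delete_element above refills the free list: the retired id is advanced by the table size so it cannot be recycled immediately, and a second cursor (fid_ix) probes for a striped slot still holding the sentinel. A continuation of the same sketch (the real code additionally masks the id down to ERTS_PTAB_ID_DATA_SIZE bits on wraparound, omitted here; MAX_SLOTS, INVALID, free_id[] and ix_to_dix() as in the sketches above):

/* Returning an id to the striped free list. */
extern _Atomic uint32_t fid_ix;           /* free cursor, also starts at -1 */

static void release_id(uint32_t old_data)
{
    uint32_t ix, expected;
    uint32_t data = old_data + MAX_SLOTS;   /* next incarnation of this id */
    if (data == INVALID)                    /* never publish the sentinel */
        data += MAX_SLOTS;
    do {
        ix = atomic_fetch_add(&fid_ix, 1) + 1;
        expected = INVALID;
    } while (!atomic_compare_exchange_strong(&free_id[ix_to_dix(ix)],
                                             &expected, data));
}
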
/*
@@ -1267,6 +1401,31 @@ erts_ptab_init(void)
* Debug stuff
*/
+static void assert_ptab_consistency(ErtsPTab *ptab)
+{
+#ifdef DEBUG
+ if (ptab->r.o.free_id_data) {
+ Uint32 ix, pix, data;
+ int free_pids = 0;
+ int null_slots = 0;
+
+ for (ix=0; ix < ptab->r.o.max; ix++) {
+ if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) {
+ ++free_pids;
+ data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]);
+ pix = erts_ptab_data2pix(ptab, (Eterm) data);
+ ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL);
+ }
+ if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) {
+ ++null_slots;
+ }
+ }
+ ASSERT(free_pids == null_slots);
+ ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count));
+ }
+#endif
+}
+
Sint
erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
{
@@ -1277,45 +1436,93 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
erts_ptab_rwlock(ptab);
- if (!set)
- ld = last_data_read_nob(ptab);
- else {
+ assert_ptab_consistency(ptab);
- ld = (Uint64) next;
- data = ERTS_PTAB_LastData2EtermData(ld);
- if (ptab->r.o.invalid_data == data) {
- ld += ptab->r.o.max;
- ASSERT(erts_ptab_data2pix(ptab, data)
- == erts_ptab_data2pix(ptab,
- ERTS_PTAB_LastData2EtermData(ld)));
+ if (ptab->r.o.free_id_data) {
+ Uint32 id_ix, dix;
+
+ if (set) {
+ Uint32 i, max_ix, num, stop_id_ix;
+ max_ix = ptab->r.o.max - 1;
+ num = next;
+ id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix);
+
+ for (i=0; i <= max_ix; ++i) {
+ Uint32 pix;
+ ++num;
+ num &= ~(~((Uint32) 0) << ERTS_PTAB_ID_DATA_SIZE);
+ if (num == ptab->r.o.invalid_data) {
+ num += ptab->r.o.max;
+ num &= ~(~((Uint32) 0) << ERTS_PTAB_ID_DATA_SIZE);
+ }
+ pix = erts_ptab_data2pix(ptab, num);
+ if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
+ ++id_ix;
+ dix = ix_to_free_id_data_ix(ptab, id_ix);
+ erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num);
+ ASSERT(pix == erts_ptab_data2pix(ptab, num));
+ }
+ }
+ erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix);
+
+ /* Write invalid_data in rest of free_id_data[]: */
+ stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix;
+ while (1) {
+ id_ix = (id_ix+1) & max_ix;
+ if (id_ix == stop_id_ix)
+ break;
+ dix = ix_to_free_id_data_ix(ptab, id_ix);
+ erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix],
+ ptab->r.o.invalid_data);
+ }
}
- last_data_set_relb(ptab, ld);
+ id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1;
+ dix = ix_to_free_id_data_ix(ptab, id_ix);
+ res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]);
}
+ else {
+ /* Deprecated legacy algorithm... */
+ if (!set)
+ ld = last_data_read_nob(ptab);
+ else {
- while (1) {
- int pix;
- ld++;
- pix = (int) (ld % ptab->r.o.max);
- if (first_pix < 0)
- first_pix = pix;
- else if (pix == first_pix) {
- res = -1;
- break;
- }
- if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
+ ld = (Uint64) next;
data = ERTS_PTAB_LastData2EtermData(ld);
if (ptab->r.o.invalid_data == data) {
ld += ptab->r.o.max;
ASSERT(erts_ptab_data2pix(ptab, data)
== erts_ptab_data2pix(ptab,
ERTS_PTAB_LastData2EtermData(ld)));
+ }
+ last_data_set_relb(ptab, ld);
+ }
+
+ while (1) {
+ int pix;
+ ld++;
+ pix = (int) (ld % ptab->r.o.max);
+ if (first_pix < 0)
+ first_pix = pix;
+ else if (pix == first_pix) {
+ res = -1;
+ break;
+ }
+ if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2pix(ptab, data)
+ == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ }
+ res = data;
+ break;
}
- res = data;
- break;
}
}
+ assert_ptab_consistency(ptab);
erts_ptab_rwunlock(ptab);
return res;
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index 8a130f42a3..e3e05f14af 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -94,21 +94,27 @@ typedef struct {
erts_smp_atomic_t last_data;
#endif
erts_smp_atomic32_t count;
+ erts_smp_atomic32_t aid_ix;
+ erts_smp_atomic32_t fid_ix;
} ErtsPTabVolatileData;
typedef struct {
erts_smp_atomic_t *tab;
+ erts_smp_atomic32_t *free_id_data;
Uint32 max;
- Uint32 tab_cache_lines;
- Uint32 pix_per_cache_line;
Uint32 pix_mask;
Uint32 pix_cl_mask;
Uint32 pix_cl_shift;
Uint32 pix_cli_mask;
Uint32 pix_cli_shift;
+ Uint32 dix_cl_mask;
+ Uint32 dix_cl_shift;
+ Uint32 dix_cli_mask;
+ Uint32 dix_cli_shift;
ErtsPTabElementCommon *invalid_element;
Eterm invalid_data;
void (*release_element)(void *);
+ UWord element_size;
} ErtsPTabReadOnlyData;
typedef struct {
@@ -177,7 +183,9 @@ void erts_ptab_init_table(ErtsPTab *ptab,
void (*release_element)(void *),
ErtsPTabElementCommon *invalid_element,
int size,
- char *name);
+ UWord element_size,
+ char *name,
+ int legacy);
int erts_ptab_new_element(ErtsPTab *ptab,
ErtsPTabElementCommon *ptab_el,
void *init_arg,
@@ -185,6 +193,7 @@ int erts_ptab_new_element(ErtsPTab *ptab,
void erts_ptab_delete_element(ErtsPTab *ptab,
ErtsPTabElementCommon *ptab_el);
int erts_ptab_initialized(ErtsPTab *ptab);
+UWord erts_ptab_mem_size(ErtsPTab *ptab);
ERTS_GLB_INLINE erts_interval_t *erts_ptab_interval(ErtsPTab *ptab);
ERTS_GLB_INLINE int erts_ptab_max(ErtsPTab *ptab);
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index ecb5525022..c38ef47d87 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -26,10 +26,13 @@
#define ERL_SMP_H
#include "erl_threads.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
@@ -131,10 +134,11 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
@@ -159,16 +163,18 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
@@ -179,7 +185,7 @@ ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
@@ -192,7 +198,7 @@ ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -202,7 +208,8 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp,
+ char *keyname);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
@@ -835,7 +842,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra);
+ erts_mtx_init_x(mtx, name, extra, 1);
#endif
}
@@ -843,7 +850,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra);
+ erts_mtx_init_locked_x(mtx, name, extra, 1);
#endif
}
@@ -872,9 +879,15 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_mtx_trylock_x(mtx,file,line);
+#elif defined(ERTS_SMP)
return erts_mtx_trylock(mtx);
#else
return 0;
@@ -884,13 +897,13 @@ erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line)
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
#else
erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_mtx_lock_x(mtx, file, line);
#elif defined(ERTS_SMP)
erts_mtx_lock(mtx);
@@ -1020,9 +1033,15 @@ erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrlock(rwmtx);
#else
return 0;
@@ -1030,13 +1049,13 @@ erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rlock(rwmtx);
@@ -1053,9 +1072,15 @@ erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrwlock(rwmtx);
#else
return 0;
@@ -1063,13 +1088,13 @@ erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rwlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rwlock(rwmtx);
@@ -1171,13 +1196,13 @@ erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
#else
erts_smp_spin_lock(erts_smp_spinlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_spin_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_spin_lock(lock);
@@ -1237,13 +1262,13 @@ erts_smp_read_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_read_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
erts_read_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_read_lock(lock);
@@ -1263,13 +1288,13 @@ erts_smp_write_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_write_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_write_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_write_lock(lock);
@@ -1299,10 +1324,10 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
+erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname)
{
#ifdef ERTS_SMP
- erts_tsd_key_create(keyp);
+ erts_tsd_key_create(keyp,keyname);
#endif
}
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 2f206ffbec..28cbe7004f 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -23,6 +23,7 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
+#include "erl_map.h"
#include <stdlib.h>
#include <stdio.h>
@@ -85,7 +86,10 @@ unsigned tag_val_def(Wterm x)
case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF;
case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF;
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF;
- default: return BINARY_DEF;
+ case (_TAG_HEADER_REFC_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_HEAP_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_SUB_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF;
}
break;
}
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 953edf79ea..37014ccf94 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -112,11 +112,11 @@ struct erl_node_; /* Declared in erl_node_tables.h */
* 1000 REFC_BINARY | |
* 1001 HEAP_BINARY | BINARIES |
* 1010 SUB_BINARY | |
- * 1011 Not used
+ * 1011 Not used; see comment below
* 1100 EXTERNAL_PID | |
* 1101 EXTERNAL_PORT | EXTERNAL THINGS |
* 1110 EXTERNAL_REF | |
- * 1111 Not used
+ * 1111 MAP
*
* COMMENTS:
*
@@ -135,14 +135,16 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define REF_SUBTAG (0x4 << _TAG_PRIMARY_SIZE) /* REF */
#define FUN_SUBTAG (0x5 << _TAG_PRIMARY_SIZE) /* FUN */
#define FLOAT_SUBTAG (0x6 << _TAG_PRIMARY_SIZE) /* FLOAT */
-#define EXPORT_SUBTAG (0x7 << _TAG_PRIMARY_SIZE) /* FLOAT */
+#define EXPORT_SUBTAG		(0x7 << _TAG_PRIMARY_SIZE) /* EXPORT */
#define _BINARY_XXX_MASK (0x3 << _TAG_PRIMARY_SIZE)
#define REFC_BINARY_SUBTAG (0x8 << _TAG_PRIMARY_SIZE) /* BINARY */
#define HEAP_BINARY_SUBTAG (0x9 << _TAG_PRIMARY_SIZE) /* BINARY */
#define SUB_BINARY_SUBTAG (0xA << _TAG_PRIMARY_SIZE) /* BINARY */
+/* _BINARY_XXX_MASK depends on 0xB being unused */
#define EXTERNAL_PID_SUBTAG (0xC << _TAG_PRIMARY_SIZE) /* EXTERNAL_PID */
#define EXTERNAL_PORT_SUBTAG (0xD << _TAG_PRIMARY_SIZE) /* EXTERNAL_PORT */
#define EXTERNAL_REF_SUBTAG (0xE << _TAG_PRIMARY_SIZE) /* EXTERNAL_REF */
+#define MAP_SUBTAG (0xF << _TAG_PRIMARY_SIZE) /* MAP */
#define _TAG_HEADER_ARITYVAL (TAG_PRIMARY_HEADER|ARITYVAL_SUBTAG)
@@ -159,6 +161,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG)
#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
#define _TAG_HEADER_BIN_MATCHSTATE (TAG_PRIMARY_HEADER|BIN_MATCHSTATE_SUBTAG)
+#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG)
#define _TAG_HEADER_MASK 0x3F
@@ -354,7 +357,10 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
#define is_value(x) ((x) != THE_NON_VALUE)
/* binary object access methods */
-#define is_binary_header(x) (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_REFC_BIN)
+#define is_binary_header(x) \
+ ((((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_REFC_BIN) || \
+ (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_HEAP_BIN) || \
+ (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_SUB_BIN))
#define make_binary(x) make_boxed((Eterm*)(x))
#define is_binary(x) (is_boxed((x)) && is_binary_header(*boxed_val((x))))
#define is_not_binary(x) (!is_binary((x)))
@@ -887,7 +893,8 @@ typedef struct external_thing_ {
(((x) & _TAG_HEADER_MASK) == _TAG_HEADER_EXTERNAL_REF)
#define is_external_header(x) \
- (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_EXTERNAL_PID)
+ (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_EXTERNAL_PID \
+ && ((x) & _TAG_HEADER_MASK) != _TAG_HEADER_MAP)
#define is_external(x) (is_boxed((x)) && is_external_header(*boxed_val((x))))
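
Both predicate changes above follow from MAP taking header subtag 0xF. The old single-compare tests exploited the fact that masking with _TAG_HEADER_MASK - _BINARY_XXX_MASK (0x3F - 0x0C = 0x33) clears the two low subtag bits and therefore lumps four consecutive subtags together. A worked check of the header values (subtag << _TAG_PRIMARY_SIZE, with _TAG_PRIMARY_SIZE = 2):

/* hdr & 0x33 collapses subtags four at a time:
 *   binaries  0x8,0x9,0xA,0xB -> headers 0x20,0x24,0x28,0x2C -> all 0x20
 *   externals 0xC,0xD,0xE,0xF -> headers 0x30,0x34,0x38,0x3C -> all 0x30
 * With 0xF now MAP (header 0x3C), is_external_header() must reject it
 * explicitly, and is_binary_header() enumerates its three subtags so
 * the still-unused 0xB no longer slips through the mask. */
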
@@ -1064,8 +1071,8 @@ _ET_DECLARE_CHECKED(Uint,y_reg_index,Uint)
/*
* Backwards compatibility definitions:
- * - #define virtal *_DEF constants with values that fit term order:
- * number < atom < ref < fun < port < pid < tuple < nil < cons < binary
+ * - #define virtual *_DEF constants with values that fit term order:
+ * number < atom < ref < fun < port < pid < tuple < map < nil < cons < binary
* - tag_val_def() function generates virtual _DEF tag
* - not_eq_tags() and NUMBER_CODE() defined in terms
* of the tag_val_def() function
@@ -1074,19 +1081,20 @@ _ET_DECLARE_CHECKED(Uint,y_reg_index,Uint)
#define BINARY_DEF 0x0
#define LIST_DEF 0x1
#define NIL_DEF 0x2
-#define TUPLE_DEF 0x3
-#define PID_DEF 0x4
-#define EXTERNAL_PID_DEF 0x5
-#define PORT_DEF 0x6
-#define EXTERNAL_PORT_DEF 0x7
-#define EXPORT_DEF 0x8
-#define FUN_DEF 0x9
-#define REF_DEF 0xa
-#define EXTERNAL_REF_DEF 0xb
-#define ATOM_DEF 0xc
-#define FLOAT_DEF 0xd
-#define BIG_DEF 0xe
-#define SMALL_DEF 0xf
+#define MAP_DEF 0x3
+#define TUPLE_DEF 0x4
+#define PID_DEF 0x5
+#define EXTERNAL_PID_DEF 0x6
+#define PORT_DEF 0x7
+#define EXTERNAL_PORT_DEF 0x8
+#define EXPORT_DEF 0x9
+#define FUN_DEF 0xa
+#define REF_DEF 0xb
+#define EXTERNAL_REF_DEF 0xc
+#define ATOM_DEF 0xd
+#define FLOAT_DEF 0xe
+#define BIG_DEF 0xf
+#define SMALL_DEF 0x10
#if ET_DEBUG
extern unsigned tag_val_def_debug(Wterm, const char*, unsigned);
@@ -1096,8 +1104,8 @@ extern unsigned tag_val_def(Wterm);
#endif
#define not_eq_tags(X,Y) (tag_val_def((X)) ^ tag_val_def((Y)))
-#define NUMBER_CODE(x,y) ((tag_val_def(x) << 4) | tag_val_def(y))
-#define _NUMBER_CODE(TX,TY) ((TX << 4) | TY)
+#define NUMBER_CODE(x,y) ((tag_val_def(x) << 5) | tag_val_def(y))
+#define _NUMBER_CODE(TX,TY) ((TX << 5) | TY)
#define SMALL_SMALL _NUMBER_CODE(SMALL_DEF,SMALL_DEF)
#define SMALL_BIG _NUMBER_CODE(SMALL_DEF,BIG_DEF)
#define SMALL_FLOAT _NUMBER_CODE(SMALL_DEF,FLOAT_DEF)
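
The NUMBER_CODE shift grows from 4 to 5 because the virtual *_DEF tags above now span 0x0..0x10 (17 values once MAP_DEF is wedged into the order), so a tag no longer fits in 4 bits. A worked example of the collision the wider shift avoids:

/* Under the old 4-bit shift the low tag would bleed into the high one:
 *   (BIG_DEF << 4) | SMALL_DEF  == (0xf << 4) | 0x10 == 0xf0
 *   (BIG_DEF << 4) | BINARY_DEF == (0xf << 4) | 0x0  == 0xf0   collision!
 * With the new 5-bit shift the codes stay distinct:
 *   (BIG_DEF << 5) | SMALL_DEF  == 0x1f0
 *   (BIG_DEF << 5) | BINARY_DEF == 0x1e0 */
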
@@ -1126,6 +1134,7 @@ extern unsigned tag_val_def(Wterm);
#define make_tuple_rel make_boxed_rel
#define make_external_rel make_boxed_rel
#define make_internal_ref_rel make_boxed_rel
+#define make_big_rel make_boxed_rel
#define binary_val_rel(RTERM, BASE) binary_val(rterm2wterm(RTERM, BASE))
#define list_val_rel(RTERM, BASE) list_val(rterm2wterm(RTERM, BASE))
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 1292cb0678..545a0343d0 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -417,8 +417,9 @@ void
erts_thr_progress_pre_init(void)
{
intrnl = NULL;
- erts_tsd_key_create(&erts_thr_prgr_data_key__);
- init_nob(&erts_thr_prgr__.current, 0);
+ erts_tsd_key_create(&erts_thr_prgr_data_key__,
+ "erts_thr_prgr_data_key");
+ init_nob(&erts_thr_prgr__.current, ERTS_THR_PRGR_VAL_FIRST);
}
void
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 1416aa6166..5f392944c2 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -108,6 +108,8 @@ struct ErtsThrPrgrLaterOp_ {
#ifdef ERTS_SMP
+/* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */
+#define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0)
#define ERTS_THR_PRGR_VAL_WAITING (~((ErtsThrPrgrVal) 0))
#define ERTS_THR_PRGR_INVALID (~((ErtsThrPrgrVal) 0))
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 759c8f4c33..80026104db 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -281,10 +281,13 @@
#define ERTS_THR_READ_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER ETHR_READ_DEPEND_MEMORY_BARRIER
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_mtx_trylock(L) erts_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrlock(L) erts_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rlock(L) erts_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrwlock(L) erts_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rwlock(L) erts_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_read_lock(L) erts_read_lock_x(L, __FILE__, __LINE__)
#define erts_write_lock(L) erts_write_lock_x(L, __FILE__, __LINE__)
@@ -461,18 +464,24 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
+ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
+ int enable_lcnt);
+ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
+ Uint16 opt, int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
char *name,
- Eterm extra);
+ Eterm extra,
+ int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
+ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
#else
+ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_mtx_lock(erts_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx);
@@ -496,16 +505,18 @@ ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
@@ -571,7 +582,7 @@ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
@@ -584,7 +595,7 @@ ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -594,7 +605,7 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
@@ -1549,7 +1560,7 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1559,13 +1570,17 @@ erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
+erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
+ int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1575,14 +1590,17 @@ erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1592,7 +1610,10 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1670,7 +1691,11 @@ erts_mtx_destroy(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_mtx_trylock(erts_mtx_t *mtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1684,8 +1709,12 @@ erts_mtx_trylock(erts_mtx_t *mtx)
res = ethr_mutex_trylock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_x(res == 0, &mtx->lc,file,line);
+#else
erts_lc_trylock(res == 0, &mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
@@ -1697,7 +1726,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
#else
erts_mtx_lock(erts_mtx_t *mtx)
@@ -1705,8 +1734,12 @@ erts_mtx_lock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&mtx->lc, file, line);
+#else
erts_lc_lock(&mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
@@ -1857,7 +1890,10 @@ erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
+ if (name && name[0] == '\0')
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
#endif
#endif
}
@@ -1921,7 +1957,11 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1935,8 +1975,12 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
@@ -1948,7 +1992,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
@@ -1956,8 +2000,12 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -1984,7 +2032,11 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1998,8 +2050,12 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2011,7 +2067,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
@@ -2019,8 +2075,12 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2426,7 +2486,7 @@ erts_spin_unlock(erts_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
#else
erts_spin_lock(erts_spinlock_t *lock)
@@ -2434,8 +2494,12 @@ erts_spin_lock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&lock->lc,file,line);
+#else
erts_lc_lock(&lock->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
@@ -2545,7 +2609,7 @@ erts_read_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_read_lock(erts_rwlock_t *lock)
@@ -2553,8 +2617,12 @@ erts_read_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -2584,7 +2652,7 @@ erts_write_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_write_lock(erts_rwlock_t *lock)
@@ -2592,8 +2660,12 @@ erts_write_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2635,10 +2707,10 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_tsd_key_create(erts_tsd_key_t *keyp)
+erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(keyp);
+ int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
#endif
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 848877d43e..ea5c850a30 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -151,6 +151,11 @@ do { \
message dispatcher thread takes care of that). */
#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
do { (RES) = (TPID); } while(0)
+int
+erts_is_tracer_proc_valid(Process* p)
+{
+ return 1;
+}
#else
#define ERTS_NULL_TRACER_REF NULL
#define ERTS_TRACER_REF_TYPE Process *
@@ -163,6 +168,20 @@ do { \
return; \
} \
} while (0)
+int
+erts_is_tracer_proc_valid(Process* p)
+{
+ Process* tracer;
+
+ tracer = erts_proc_lookup(ERTS_TRACER_PROC(p));
+ if (tracer && ERTS_TRACE_FLAGS(tracer) & F_TRACER) {
+ return 1;
+ } else {
+ ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACE_FLAGS(p) = ~TRACEE_FLAGS;
+ return 0;
+ }
+}
#endif
static Uint active_sched;
@@ -2184,7 +2203,7 @@ trace_gc(Process *p, Eterm what)
AM_bin_old_vheap_block_size
};
- Uint values[] = {
+ UWord values[] = {
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
MBUF_SIZE(p),
@@ -2198,7 +2217,7 @@ trace_gc(Process *p, Eterm what)
BIN_OLD_VHEAP_SZ(p)
};
#define LOCAL_HEAP_SIZE \
- (sizeof(values)/sizeof(Eterm)) * \
+ (sizeof(values)/sizeof(*values)) * \
(2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \
5/*4-tuple */ + TS_HEAP_WORDS
DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
@@ -2206,7 +2225,7 @@ trace_gc(Process *p, Eterm what)
Eterm* limit;
#endif
- ASSERT(sizeof(values)/sizeof(Uint) == sizeof(tags)/sizeof(Eterm));
+ ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
UseTmpHeap(LOCAL_HEAP_SIZE,p);
@@ -2214,9 +2233,9 @@ trace_gc(Process *p, Eterm what)
hp = local_heap;
#ifdef DEBUG
size = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&size,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
size += 5/*4-tuple*/ + TS_SIZE(p);
@@ -2229,9 +2248,9 @@ trace_gc(Process *p, Eterm what)
ERTS_TRACE_FLAGS(p));
size = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&size,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
size += 5/*4-tuple*/ + TS_SIZE(p);
@@ -2244,9 +2263,9 @@ trace_gc(Process *p, Eterm what)
ASSERT(size <= LOCAL_HEAP_SIZE);
#endif
- msg = erts_bld_atom_uint_2tup_list(&hp,
+ msg = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
@@ -2268,7 +2287,134 @@ trace_gc(Process *p, Eterm what)
#undef LOCAL_HEAP_SIZE
}
+void
+monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint time)
+{
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+#ifndef ERTS_SMP
+ Process *monitor_p;
+#endif
+ Uint hsz;
+ Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined;
+ Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg;
+
+
+#ifndef ERTS_SMP
+ ASSERT(is_internal_pid(system_monitor));
+ monitor_p = erts_proc_lookup(system_monitor);
+ if (!monitor_p || p == monitor_p) {
+ return;
+ }
+#endif
+ /*
+ * Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} ->
+ * 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout +
+ * (2 * 3 (in/out tuple of 2)) +
+ * 0 (unknown) or 4 (MFA tuple of 3) + 0 (unknown) or 4 (MFA tuple of 3)
+     *    = 20 + ((in_fp != NULL) ? 4 : 0) + ((out_fp != NULL) ? 4 : 0) + size of Timeout
+ */
+ hsz = 20 + ((in_fp != NULL) ? 4 : 0) + ((out_fp != NULL) ? 4 : 0);
+ (void) erts_bld_uint(NULL, &hsz, time);
+ hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
+ tmo = erts_bld_uint(&hp, NULL, time);
+ if (in_fp != NULL) {
+ in_mfa = TUPLE3(hp,(Eterm) in_fp[0], (Eterm) in_fp[1], make_small(in_fp[2]));
+ hp +=4;
+ }
+ if (out_fp != NULL) {
+ out_mfa = TUPLE3(hp,(Eterm) out_fp[0], (Eterm) out_fp[1], make_small(out_fp[2]));
+ hp +=4;
+ }
+ tmo_tpl = TUPLE2(hp,am_timeout, tmo);
+ hp += 3;
+ in_tpl = TUPLE2(hp,am_in,in_mfa);
+ hp += 3;
+ out_tpl = TUPLE2(hp,am_out,out_mfa);
+ hp += 3;
+ list = CONS(hp,out_tpl,NIL);
+ hp += 2;
+ list = CONS(hp,in_tpl,list);
+ hp += 2;
+ list = CONS(hp,tmo_tpl,list);
+ hp += 2;
+ msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list);
+ hp += 5;
+#ifdef ERTS_SMP
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
+#else
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+#endif
+}
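
A worked cross-check of the 20-word constant in the size comment above, with word counts assumed from the TUPLE/CONS heap-object layout:

/*   TUPLE4 {monitor, Pid, long_schedule, List}           5
 *   3 cons cells for the three-element list              6
 *   TUPLE2 {timeout,T}, {in,..}, {out,..}        3 * 3 = 9
 *                                                      ---
 *   fixed total                                         20
 * plus 4 words per TUPLE3 {M,F,A} actually present and whatever
 * erts_bld_uint() reports for the timeout value. */
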
+void
+monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
+{
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+#ifndef ERTS_SMP
+ Process *monitor_p;
+#endif
+ Uint hsz;
+ Eterm *hp, list, op;
+ Eterm op_tpl, tmo_tpl, tmo, msg;
+
+
+#ifndef ERTS_SMP
+ ASSERT(is_internal_pid(system_monitor));
+ monitor_p = erts_proc_lookup(system_monitor);
+ if (!monitor_p) {
+ return;
+ }
+#endif
+ /*
+ * Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} ->
+ * 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2)
+ * + size of Timeout + 3 (op tuple of 2 atoms)
+ * = 15 + size of Timeout
+ */
+ hsz = 15;
+ (void) erts_bld_uint(NULL, &hsz, time);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
+
+ switch (type) {
+ case ERTS_PORT_TASK_PROC_SIG: op = am_proc_sig; break;
+ case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break;
+ case ERTS_PORT_TASK_INPUT: op = am_input; break;
+ case ERTS_PORT_TASK_OUTPUT: op = am_output; break;
+ case ERTS_PORT_TASK_EVENT: op = am_event; break;
+ case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break;
+ default: op = am_undefined; break;
+ }
+
+ tmo = erts_bld_uint(&hp, NULL, time);
+
+ op_tpl = TUPLE2(hp,am_port_op,op);
+ hp += 3;
+
+ tmo_tpl = TUPLE2(hp,am_timeout, tmo);
+ hp += 3;
+ list = CONS(hp,op_tpl,NIL);
+ hp += 2;
+ list = CONS(hp,tmo_tpl,list);
+ hp += 2;
+ msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list);
+ hp += 5;
+#ifdef ERTS_SMP
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp);
+#else
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+#endif
+}
void
monitor_long_gc(Process *p, Uint time) {
@@ -2288,7 +2434,7 @@ monitor_long_gc(Process *p, Uint time) {
am_old_heap_size,
am_heap_size
};
- Eterm values[] = {
+ UWord values[] = {
time,
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
@@ -2309,9 +2455,9 @@ monitor_long_gc(Process *p, Uint time) {
#endif
hsz = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&hsz,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
hsz += 5 /* 4-tuple */;
@@ -2322,9 +2468,9 @@ monitor_long_gc(Process *p, Uint time) {
hp_end = hp + hsz;
#endif
- list = erts_bld_atom_uint_2tup_list(&hp,
+ list = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list);
@@ -2362,7 +2508,7 @@ monitor_large_heap(Process *p) {
am_old_heap_size,
am_heap_size
};
- Uint values[] = {
+ UWord values[] = {
OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
HEAP_SIZE(p),
MBUF_SIZE(p),
@@ -2378,15 +2524,15 @@ monitor_large_heap(Process *p) {
#ifndef ERTS_SMP
ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
- if (monitor_p || p == monitor_p) {
+ if (!monitor_p || p == monitor_p) {
return;
}
#endif
hsz = 0;
- (void) erts_bld_atom_uint_2tup_list(NULL,
+ (void) erts_bld_atom_uword_2tup_list(NULL,
&hsz,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
hsz += 5 /* 4-tuple */;
@@ -2397,9 +2543,9 @@ monitor_large_heap(Process *p) {
hp_end = hp + hsz;
#endif
- list = erts_bld_atom_uint_2tup_list(&hp,
+ list = erts_bld_atom_uword_2tup_list(&hp,
NULL,
- sizeof(values)/sizeof(Uint),
+ sizeof(values)/sizeof(*values),
tags,
values);
msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list);
@@ -3011,6 +3157,7 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
case SYS_MSG_TYPE_SYSMON:
if (receiver == NIL
&& !erts_system_monitor_long_gc
+ && !erts_system_monitor_long_schedule
&& !erts_system_monitor_large_heap
&& !erts_system_monitor_flags.busy_port
&& !erts_system_monitor_flags.busy_dist_port)
@@ -3181,6 +3328,8 @@ sys_msg_dispatcher_func(void *unused)
if (erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
@@ -3339,12 +3488,20 @@ static void
init_sys_msg_dispatcher(void)
{
erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+#ifdef __OSE__
+ thr_opts.coreNo = 0;
+#endif
thr_opts.detached = 1;
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = "sys_msg_dispatcher";
+#endif
+
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 50fb27aab0..4f2c70d6e7 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -39,6 +39,7 @@ void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
+int erts_is_tracer_proc_valid(Process* p);
#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
@@ -83,6 +84,8 @@ void erts_system_profile_setup_active_schedulers(void);
/* system_monitor */
void monitor_long_gc(Process *p, Uint time);
+void monitor_long_schedule_proc(Process *p, BeamInstr *in_i, BeamInstr *out_i, Uint time);
+void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time);
void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index fa53fd0937..f8e1431a53 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -724,7 +724,7 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
hp = HAlloc(p, 2);
obj = CDR(objp);
ioterm = CONS(hp, rest_term, obj);
- //(*left) = 0;
+ /* (*left) = 0; */
goto done;
}
if (rest_term != NIL) {
@@ -1476,6 +1476,9 @@ static Eterm do_utf8_to_list_normalize(Process *p, Uint num, byte *bytes, Uint s
Uint16 savepoints[4];
int numpoints = 0;
+ if (num == 0)
+ return NIL;
+
ASSERT(num > 0);
hp = HAlloc(p,num * 2); /* May be too much */
@@ -1723,14 +1726,14 @@ static BIF_RETTYPE do_bif_utf8_to_list(Process *p,
if (b_sz) {
ErlSubBin *sb;
Eterm orig;
- ERTS_DECLARE_DUMMY(Uint offset);
+ Uint offset;
ASSERT(state != ERTS_UTF8_OK);
hp = HAlloc(p, ERL_SUB_BIN_SIZE);
sb = (ErlSubBin *) hp;
ERTS_GET_REAL_BIN(orig_bin, orig, offset, bitoffs, bitsize);
sb->thing_word = HEADER_SUB_BIN;
sb->size = b_sz;
- sb->offs = num_bytes_to_process + num_processed_bytes;
+ sb->offs = offset + num_bytes_to_process + num_processed_bytes;
sb->orig = orig;
sb->bitoffs = bitoffs;
sb->bitsize = bitsize;
@@ -1981,9 +1984,21 @@ BIF_RETTYPE binary_to_existing_atom_2(BIF_ALIST_2)
* string routines, that will certainly fail on some OS.
*/
-char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_size, ErtsAlcType_t alloc_type, int allow_empty, int allow_atom, Sint *used)
+char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_size,
+ ErtsAlcType_t alloc_type, int allow_empty,
+ int allow_atom, Sint *used)
{
int encoding = erts_get_native_filename_encoding();
+ return erts_convert_filename_to_encoding(name, statbuf, statbuf_size, alloc_type,
+ allow_empty, allow_atom, encoding,
+ used, 0);
+}
+
+char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbuf_size,
+ ErtsAlcType_t alloc_type, int allow_empty,
+ int allow_atom, int encoding, Sint *used,
+ Uint extra)
+{
char* name_buf = NULL;
if ((allow_atom && is_atom(name)) ||
@@ -1995,13 +2010,14 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_
}
if (encoding == ERL_FILENAME_WIN_WCHAR) {
need += 2;
+ extra *= 2;
} else {
++need;
}
if (used)
*used = (Sint) need;
- if (need > statbuf_size) {
- name_buf = (char *) erts_alloc(alloc_type, need);
+ if (need+extra > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, need+extra);
} else {
name_buf = statbuf;
}
@@ -2013,52 +2029,27 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_
} else if (is_binary(name)) {
byte *temp_alloc = NULL;
byte *bytes;
- byte *err_pos;
- Uint size,num_chars;
+ Uint size;
size = binary_size(name);
bytes = erts_get_aligned_binary_bytes(name, &temp_alloc);
+
if (encoding != ERL_FILENAME_WIN_WCHAR) {
/*Add 0 termination only*/
if (used)
*used = (Sint) size+1;
- if (size+1 > statbuf_size) {
- name_buf = (char *) erts_alloc(alloc_type, size+1);
+ if (size+1+extra > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, size+1+extra);
} else {
name_buf = statbuf;
}
memcpy(name_buf,bytes,size);
name_buf[size]=0;
- } else if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK ||
- erts_get_user_requested_filename_encoding() == ERL_FILENAME_LATIN1) {
- byte *p;
- /* What to do now? Maybe latin1, so just take byte for byte instead */
- if (used)
- *used = (Sint) (size+1)*2;
- if ((size+1)*2 > statbuf_size) {
- name_buf = (char *) erts_alloc(alloc_type, (size+1)*2);
- } else {
- name_buf = statbuf;
- }
- p = (byte *) name_buf;
- while (size--) {
- *p++ = *bytes++;
- *p++ = 0;
- }
- *p++ = 0;
- *p++ = 0;
- } else { /* WIN_WCHAR and valid UTF8 */
- if (used)
- *used = (Sint) (num_chars+1)*2;
- if ((num_chars+1)*2 > statbuf_size) {
- name_buf = (char *) erts_alloc(alloc_type, (num_chars+1)*2);
- } else {
- name_buf = statbuf;
- }
- erts_copy_utf8_to_utf16_little((byte *) name_buf, bytes, num_chars);
- name_buf[num_chars*2] = 0;
- name_buf[num_chars*2+1] = 0;
- }
+ } else {
+ name_buf = erts_convert_filename_to_wchar(bytes, size,
+ statbuf, statbuf_size,
+ alloc_type, used, extra);
+ }
erts_free_aligned_binary_bytes(temp_alloc);
} else {
return NULL;
@@ -2066,6 +2057,50 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_
return name_buf;
}
+char* erts_convert_filename_to_wchar(byte* bytes, Uint size,
+ char *statbuf, size_t statbuf_size,
+ ErtsAlcType_t alloc_type, Sint* used,
+ Uint extra_wchars)
+{
+ byte *err_pos;
+ Uint num_chars;
+ char* name_buf = NULL;
+ Sint need;
+ char *p;
+
+ if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK ||
+ erts_get_user_requested_filename_encoding() == ERL_FILENAME_LATIN1) {
+
+ /* What to do now? Maybe latin1, so just take byte for byte instead */
+ need = (Sint) (size + extra_wchars + 1) * 2;
+ if (need > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, need);
+ } else {
+ name_buf = statbuf;
+ }
+ p = name_buf;
+ while (size--) {
+ *p++ = *bytes++;
+ *p++ = 0;
+ }
+ } else { /* WIN_WCHAR and valid UTF8 */
+ need = (Sint) (num_chars + extra_wchars + 1) * 2;
+ if (need > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, need);
+ } else {
+ name_buf = statbuf;
+ }
+ erts_copy_utf8_to_utf16_little((byte *) name_buf, bytes, num_chars);
+ p = name_buf + num_chars*2;
+ }
+ *p++ = 0;
+ *p++ = 0;
+ if (used)
+ *used = p - name_buf;
+ return name_buf;
+}
+
+
static int filename_len_16bit(byte *str)
{
byte *p = str;
@@ -2091,6 +2126,8 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
mac = 1;
case ERL_FILENAME_UTF8:
size = strlen((char *) bytes);
+ if (size == 0)
+ return NIL;
if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
goto noconvert;
}
@@ -2145,16 +2182,31 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding)
ap = atom_tab(atom_val(ioterm));
switch (encoding) {
case ERL_FILENAME_LATIN1:
- need = ap->len;
+ need = ap->latin1_chars; /* May be -1 */
break;
case ERL_FILENAME_UTF8_MAC:
case ERL_FILENAME_UTF8:
- for (i = 0; i < ap->len; i++) {
- need += (ap->name[i] >= 0x80) ? 2 : 1;
- }
+ need = ap->len;
break;
case ERL_FILENAME_WIN_WCHAR:
- need = 2*(ap->len);
+ if (ap->latin1_chars >= 0) {
+ need = 2* ap->latin1_chars;
+ }
+ else {
+ for (i = 0; i < ap->len; ) {
+ if (ap->name[i] < 0x80) {
+ i++;
+ } else if (ap->name[i] < 0xE0) {
+ i += 2;
+ } else if (ap->name[i] < 0xF0) {
+ i += 3;
+ } else {
+ need = -1;
+ break;
+ }
+ need += 2;
+ }
+ }
break;
default:
need = -1;
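The ERL_FILENAME_WIN_WCHAR branch above sizes the atom text by classifying UTF-8 lead bytes, rejecting 4-byte sequences (need = -1) since they do not fit in a single 16-bit unit. A hypothetical helper expressing the same classification (the name is illustrative, not from the patch):

    /* UTF-8 sequence length from a lead byte, mirroring the thresholds
     * above; 0 for 4-byte sequences, which would need a surrogate pair. */
    static int utf8_seq_len(unsigned char b)
    {
        if (b < 0x80) return 1;  /* ASCII */
        if (b < 0xE0) return 2;  /* lead bytes 0xC0..0xDF */
        if (b < 0xF0) return 3;  /* lead bytes 0xE0..0xEF */
        return 0;                /* 0xF0 and up: rejected here */
    }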
@@ -2284,26 +2336,36 @@ void erts_native_filename_put(Eterm ioterm, int encoding, byte *p)
switch (encoding) {
case ERL_FILENAME_LATIN1:
for (i = 0; i < ap->len; i++) {
- *p++ = ap->name[i];
- }
- break;
- case ERL_FILENAME_UTF8_MAC:
- case ERL_FILENAME_UTF8:
- for (i = 0; i < ap->len; i++) {
- if(ap->name[i] < 0x80) {
+ if (ap->name[i] < 0x80) {
*p++ = ap->name[i];
} else {
- *p++ = (((ap->name[i]) >> 6) | ((byte) 0xC0));
- *p++ = (((ap->name[i]) & 0x3F) | ((byte) 0x80));
+ ASSERT(ap->name[i] < 0xC4);
+ *p++ = ((ap->name[i] & 3) << 6) | (ap->name[i+1] & 0x3F);
+ i++;
}
}
break;
+ case ERL_FILENAME_UTF8_MAC:
+ case ERL_FILENAME_UTF8:
+ sys_memcpy(p, ap->name, ap->len);
+ break;
case ERL_FILENAME_WIN_WCHAR:
for (i = 0; i < ap->len; i++) {
/* Little endian */
- *p++ = ap->name[i];
- *p++ = 0;
- }
+ if (ap->name[i] < 0x80) {
+ *p++ = ap->name[i];
+ *p++ = 0;
+ } else if (ap->name[i] < 0xE0) {
+ *p++ = ((ap->name[i] & 3) << 6) | (ap->name[i+1] & 0x3F);
+ *p++ = ((ap->name[i] & 0x1C) >> 2);
+ i++;
+ } else {
+ ASSERT(ap->name[i] < 0xF0);
+ *p++ = ((ap->name[i+1] & 3) << 6) | (ap->name[i+2] & 0x3C);
+ *p++ = ((ap->name[i] & 0xF) << 4) | ((ap->name[i+1] & 0x3C) >> 2);
+ i += 2;
+ }
+ }
break;
default:
ASSERT(0);
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index a2064bd8a3..0807649ea1 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,8 @@
#include "erl_smp.h"
#include "erl_printf.h"
+struct process;
+
typedef struct {
#ifdef DEBUG
int smp_api;
@@ -148,13 +150,16 @@ void erts_silence_warn_unused_result(long unused);
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
-int list_length(Eterm);
+int erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
+void erts_save_emu_args(int argc, char **argv);
+Eterm erts_get_emu_args(struct process *c_p);
+Eterm erts_get_ethread_info(struct process * c_p);
Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
@@ -163,6 +168,10 @@ Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
+#define erts_bld_tuple3(H,S,E1,E2,E3) erts_bld_tuple(H,S,3,E1,E2,E3)
+#define erts_bld_tuple4(H,S,E1,E2,E3,E4) erts_bld_tuple(H,S,4,E1,E2,E3,E4)
+#define erts_bld_tuple5(H,S,E1,E2,E3,E4,E5) erts_bld_tuple(H,S,5,E1,E2,E3,E4,E5)
Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
@@ -170,8 +179,8 @@ Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
Sint length, Eterm terms1[], Uint terms2[]);
Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[]);
+erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], UWord uints[]);
Eterm
erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
Eterm atoms[], Uint uints1[], Uint uints2[]);
@@ -193,23 +202,38 @@ int eq(Eterm, Eterm);
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
#if HALFWORD_HEAP
-Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
-#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
+Sint erts_cmp_rel_opt(Eterm, Eterm*, Eterm, Eterm*, int);
+#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,0)
+#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,1)
+#define CMP(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,0)
+#define CMP_TERM(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,1)
#else
Sint cmp(Eterm, Eterm);
-#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
-#define CMP(A,B) cmp(A,B)
+Sint erts_cmp(Eterm, Eterm, int);
+#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp(A,B,0)
+#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp(A,B,1)
+#define CMP(A,B) erts_cmp(A,B,0)
+#define CMP_TERM(A,B) erts_cmp(A,B,1)
#endif
-#define cmp_lt(a,b) (CMP((a),(b)) < 0)
-#define cmp_le(a,b) (CMP((a),(b)) <= 0)
-#define cmp_eq(a,b) (CMP((a),(b)) == 0)
-#define cmp_ne(a,b) (CMP((a),(b)) != 0)
-#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
-#define cmp_gt(a,b) (CMP((a),(b)) > 0)
-
-#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
-#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
-#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
-#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+#define cmp_lt(a,b) (CMP((a),(b)) < 0)
+#define cmp_le(a,b) (CMP((a),(b)) <= 0)
+#define cmp_eq(a,b) (CMP((a),(b)) == 0)
+#define cmp_ne(a,b) (CMP((a),(b)) != 0)
+#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
+#define cmp_gt(a,b) (CMP((a),(b)) > 0)
+
+#define cmp_lt_term(a,b) (CMP_TERM((a),(b)) < 0)
+#define cmp_le_term(a,b) (CMP_TERM((a),(b)) <= 0)
+#define cmp_ge_term(a,b) (CMP_TERM((a),(b)) >= 0)
+#define cmp_gt_term(a,b) (CMP_TERM((a),(b)) > 0)
+
+#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
+#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
+#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
+#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+#define CMP_LT_TERM(a,b) ((a) != (b) && cmp_lt_term((a),(b)))
+#define CMP_GE_TERM(a,b) ((a) == (b) || cmp_ge_term((a),(b)))
#endif
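The new int argument to erts_cmp and erts_cmp_rel_opt selects between two orders: the plain CMP macros keep the old arithmetic comparison, while the *_TERM variants appear to request an exact term order in which arithmetically equal numbers of different types remain distinct. A hedged illustration, assuming that reading is correct and that small_one and float_one are terms for 1 and 1.0 built elsewhere:

    ASSERT(CMP(small_one, float_one) == 0);      /* arithmetic: 1 == 1.0 */
    ASSERT(CMP_TERM(small_one, float_one) != 0); /* exact term order     */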
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index c962955de9..b7de8208ad 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2012. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,7 +46,6 @@
heap data on the C stack or if we use the buffers in the scheduler data. */
#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
small heap for transient heap data */
-#define CMP_TMP_HEAP_SIZE 32 /* cmp wants its own tmp-heap... */
#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
@@ -80,7 +79,7 @@
# ifdef CHECK_FOR_HOLES
# define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz))
# else
-# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),DEBUG_BAD_BYTE,(sz)*sizeof(Eterm*))
+# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*))
# endif
#else
# define INIT_HEAP_MEM(p,sz) ((void)0)
@@ -98,7 +97,7 @@
* failing that, in a heap fragment.
*/
#define HAllocX(p, sz, xtra) \
- (ASSERT_EXPR((sz) >= 0), \
+ (ASSERT((sz) >= 0), \
ErtsHAllocLockCheck(p), \
(IS_FORCE_HEAP_FRAGS || (((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
? erts_heap_alloc((p),(sz),(xtra)) \
@@ -135,14 +134,14 @@
*/
#ifdef CHECK_FOR_HOLES
# define HeapOnlyAlloc(p, sz) \
- (ASSERT_EXPR((sz) >= 0), \
- (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (ASSERT((sz) >= 0), \
+ (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
(erts_set_hole_marker(HEAP_TOP(p), (sz)), \
(HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz)))))
#else
# define HeapOnlyAlloc(p, sz) \
- (ASSERT_EXPR((sz) >= 0), \
- (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (ASSERT((sz) >= 0), \
+ (ASSERT(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
(HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
#endif
diff --git a/erts/emulator/beam/erl_zlib.c b/erts/emulator/beam/erl_zlib.c
index f73d48b6c2..8e33144f96 100644
--- a/erts/emulator/beam/erl_zlib.c
+++ b/erts/emulator/beam/erl_zlib.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -44,6 +44,88 @@ void erl_zlib_zfree_callback (voidpf opaque, voidpf ptr)
erts_free(ERTS_ALC_T_ZLIB, ptr);
}
+/*
+ * Initialize a z_stream with a source, to later *chunk* data into a destination.
+ * Returns Z_OK or Error.
+ */
+int ZEXPORT erl_zlib_deflate_start(z_stream *streamp, const Bytef* source,
+ uLong sourceLen, int level)
+{
+ streamp->next_in = (Bytef*)source;
+ streamp->avail_in = (uInt)sourceLen;
+ streamp->total_out = streamp->avail_out = 0;
+ streamp->next_out = NULL;
+ erl_zlib_alloc_init(streamp);
+ return deflateInit(streamp, level);
+}
+/*
+ * Deflate a chunk; the destination length is the limit.
+ * Returns Z_OK if more to process, Z_STREAM_END if we are done.
+ */
+int ZEXPORT erl_zlib_deflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen)
+{
+ int err;
+ uLongf last_tot = streamp->total_out;
+
+ streamp->next_out = dest;
+ streamp->avail_out = (uInt)*destLen;
+
+ if ((uLong)streamp->avail_out != *destLen) return Z_BUF_ERROR;
+
+ err = deflate(streamp, Z_FINISH);
+ *destLen = streamp->total_out - last_tot;
+ return err;
+}
+
+
+/*
+ * When we are done, free up the deflate structure.
+ * Returns Z_OK or Error.
+ */
+int ZEXPORT erl_zlib_deflate_finish(z_stream *streamp)
+{
+ return deflateEnd(streamp);
+}
+
+int ZEXPORT erl_zlib_inflate_start(z_stream *streamp, const Bytef* source,
+ uLong sourceLen)
+{
+ streamp->next_in = (Bytef*)source;
+ streamp->avail_in = (uInt)sourceLen;
+ streamp->total_out = streamp->avail_out = 0;
+ streamp->next_out = NULL;
+ erl_zlib_alloc_init(streamp);
+ return inflateInit(streamp);
+}
+/*
+ * Inflate a chunk; the destination length is the limit.
+ * Returns Z_OK if more to process, Z_STREAM_END if we are done.
+ */
+int ZEXPORT erl_zlib_inflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen)
+{
+ int err;
+ uLongf last_tot = streamp->total_out;
+
+ streamp->next_out = dest;
+ streamp->avail_out = (uInt)*destLen;
+
+ if ((uLong)streamp->avail_out != *destLen) return Z_BUF_ERROR;
+
+ err = inflate(streamp, Z_NO_FLUSH);
+ ASSERT(err != Z_STREAM_ERROR);
+ *destLen = streamp->total_out - last_tot;
+ return err;
+}
+
+/*
+ * When we are done, free up the inflate structure.
+ * Returns Z_OK or Error.
+ */
+int ZEXPORT erl_zlib_inflate_finish(z_stream *streamp)
+{
+ return inflateEnd(streamp);
+}
+
int ZEXPORT erl_zlib_compress2 (Bytef* dest, uLongf* destLen,
const Bytef* source, uLong sourceLen,
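A usage sketch for the chunked deflate interface added above (the buffer size, src/src_len and the error handling are illustrative assumptions, not part of the patch). Per the comments, erl_zlib_deflate_chunk() returns Z_OK while output remains and Z_STREAM_END when the stream is finished:

    z_stream s;
    Bytef out[16384];
    uLongf len;
    int err;

    if (erl_zlib_deflate_start(&s, src, src_len, Z_DEFAULT_COMPRESSION) != Z_OK)
        return -1;
    do {
        len = sizeof(out);
        err = erl_zlib_deflate_chunk(&s, out, &len);
        /* consume len bytes of out here; callers may yield between chunks */
    } while (err == Z_OK);
    erl_zlib_deflate_finish(&s);
    return err == Z_STREAM_END ? 0 : -1;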
diff --git a/erts/emulator/beam/erl_zlib.h b/erts/emulator/beam/erl_zlib.h
index 9054a5e428..160166c66b 100644
--- a/erts/emulator/beam/erl_zlib.h
+++ b/erts/emulator/beam/erl_zlib.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2013. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,6 +31,20 @@
(s)->zfree = erl_zlib_zfree_callback; \
} while (0)
+/*
+ * Chunked interface, used by term_to_binary among others.
+ */
+int ZEXPORT erl_zlib_deflate_start(z_stream *streamp, const Bytef* source,
+ uLong sourceLen, int level);
+int ZEXPORT erl_zlib_deflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen);
+int ZEXPORT erl_zlib_deflate_finish(z_stream *streamp);
+
+int ZEXPORT erl_zlib_inflate_start(z_stream *streamp, const Bytef* source,
+ uLong sourceLen);
+int ZEXPORT erl_zlib_inflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen);
+int ZEXPORT erl_zlib_inflate_finish(z_stream *streamp);
+
+
/* Use instead of compress
*/
#define erl_zlib_compress(dest,destLen,source,sourceLen) \
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 5ce0d97c74..9b9b4b2a62 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,10 +42,8 @@
#include "erl_binary.h"
#include "erl_bits.h"
#include "erl_zlib.h"
+#include "erl_map.h"
-#ifdef HIPE
-#include "hipe_mode_switch.h"
-#endif
#define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define MAX_STRING_LEN 0xffff
@@ -61,6 +59,9 @@
*/
# define ERTS_DEBUG_USE_DIST_SEP
# endif
+# define IF_DEBUG(X) X
+#else
+# define IF_DEBUG(X)
#endif
/* Does Sint fit in Sint32?
@@ -81,17 +82,45 @@
*
*/
+static Export term_to_binary_trap_export;
+
static byte* enc_term(ErtsAtomCacheMap *, Eterm, byte*, Uint32, struct erl_off_heap_header** off_heap);
+struct TTBEncodeContext_;
+static int enc_term_int(struct TTBEncodeContext_*,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
+ struct erl_off_heap_header** off_heap, Sint *reds, byte **res);
static Uint is_external_string(Eterm obj, int* p_is_string);
static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
-static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
+struct B2TContext_t;
+static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*, struct B2TContext_t*);
static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
-static Sint decoded_size(byte *ep, byte* endp, int internal_tags);
+static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*);
+static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1);
+static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
+ Binary *context_b);
static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
+struct TTBSizeContext_;
+static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acmp, Eterm obj,
+ unsigned dflags, Sint *reds, Uint *res);
+
+static Export binary_to_term_trap_export;
+static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif, Eterm arg0, Eterm arg1);
+
+void erts_init_external(void) {
+ erts_init_trap_export(&term_to_binary_trap_export,
+ am_erts_internal, am_term_to_binary_trap, 1,
+ &term_to_binary_trap_1);
+
+ erts_init_trap_export(&binary_to_term_trap_export,
+ am_erts_internal, am_binary_to_term_trap, 1,
+ &binary_to_term_trap_1);
+ return;
+}
#define ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES 255
@@ -488,7 +517,7 @@ Uint erts_encode_ext_size(Eterm term)
Uint erts_encode_ext_size_2(Eterm term, unsigned dflags)
{
- return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|dflags)
+ return encode_size_struct2(NULL, term, dflags)
+ 1 /* VERSION_MAGIC */;
}
@@ -851,7 +880,7 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep)
goto fail;
ep = edep->extp+1;
}
- res = decoded_size(ep, edep->ext_endp, 0);
+ res = decoded_size(ep, edep->ext_endp, 0, NULL);
if (res >= 0)
return res;
fail:
@@ -863,12 +892,12 @@ Sint erts_decode_ext_size(byte *ext, Uint size)
{
if (size == 0 || *ext != VERSION_MAGIC)
return -1;
- return decoded_size(ext+1, ext+size, 0);
+ return decoded_size(ext+1, ext+size, 0, NULL);
}
Sint erts_decode_ext_size_ets(byte *ext, Uint size)
{
- Sint sz = decoded_size(ext, ext+size, 1);
+ Sint sz = decoded_size(ext, ext+size, 1, NULL);
ASSERT(sz >= 0);
return sz;
}
@@ -901,7 +930,7 @@ erts_decode_dist_ext(Eterm** hpp,
goto error;
ep++;
}
- ep = dec_term(edep, hpp, ep, off_heap, &obj);
+ ep = dec_term(edep, hpp, ep, off_heap, &obj, NULL);
if (!ep)
goto error;
@@ -922,7 +951,7 @@ Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext)
byte *ep = *ext;
if (*ep++ != VERSION_MAGIC)
return THE_NON_VALUE;
- ep = dec_term(NULL, hpp, ep, off_heap, &obj);
+ ep = dec_term(NULL, hpp, ep, off_heap, &obj, NULL);
if (!ep) {
#ifdef DEBUG
bin_write(ERTS_PRINT_STDERR,NULL,*ext,500);
@@ -936,7 +965,7 @@ Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext)
Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext)
{
Eterm obj;
- ext = dec_term(NULL, hpp, ext, off_heap, &obj);
+ ext = dec_term(NULL, hpp, ext, off_heap, &obj, NULL);
ASSERT(ext);
return obj;
}
@@ -1009,12 +1038,41 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
+{
+ Eterm *tp = tuple_val(BIF_ARG_1);
+ Eterm Term = tp[1];
+ Eterm bt = tp[2];
+ Binary *bin = ((ProcBin *) binary_val(bt))->val;
+ Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
+ if (is_tuple(res)) {
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
+ } else {
+ if (erts_set_gc_state(BIF_P, 1)
+ || MSO(BIF_P).overhead > BIN_VHEAP_SZ(BIF_P))
+ ERTS_BIF_YIELD_RETURN(BIF_P, res);
+ else
+ BIF_RET(res);
+ }
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1)
BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
- return erts_term_to_binary(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS);
+ Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL);
+ if (is_tuple(res)) {
+ erts_set_gc_state(BIF_P, 0);
+ BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
+ } else {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_RET(res);
+ }
}
+HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 2)
+
BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
Process* p = BIF_P;
@@ -1022,6 +1080,7 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
Eterm Flags = BIF_ARG_2;
int level = 0;
Uint flags = TERM_TO_BINARY_DFLAGS;
+ Eterm res;
while (is_list(Flags)) {
Eterm arg = CAR(list_val(Flags));
@@ -1032,10 +1091,10 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
if (tp[1] == am_minor_version && is_small(tp[2])) {
switch (signed_val(tp[2])) {
case 0:
- flags = TERM_TO_BINARY_DFLAGS;
+ flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS;
break;
case 1:
- flags = TERM_TO_BINARY_DFLAGS|DFLAG_NEW_FLOATS;
+ flags = TERM_TO_BINARY_DFLAGS;
break;
default:
goto error;
@@ -1058,9 +1117,77 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
goto error;
}
- return erts_term_to_binary(p, Term, level, flags);
+ res = erts_term_to_binary_int(p, Term, level, flags, NULL);
+ if (is_tuple(res)) {
+ erts_set_gc_state(p, 0);
+ BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
+ } else {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_RET(res);
+ }
}
+
+enum B2TState { /* order is somewhat significant */
+ B2TPrepare,
+ B2TUncompressChunk,
+ B2TSizeInit,
+ B2TSize,
+ B2TDecodeInit,
+ B2TDecode,
+ B2TDecodeList,
+ B2TDecodeTuple,
+ B2TDecodeString,
+ B2TDecodeBinary,
+
+ B2TDone,
+ B2TDecodeFail,
+ B2TBadArg
+};
+
+typedef struct {
+ int heap_size;
+ int terms;
+ byte* ep;
+ int atom_extra_skip;
+} B2TSizeContext;
+
+typedef struct {
+ byte* ep;
+ Eterm res;
+ Eterm* next;
+ Eterm* hp_start;
+ Eterm* hp;
+ Eterm* hp_end;
+ int remaining_n;
+ char* remaining_bytes;
+ Eterm* maps_head;
+} B2TDecodeContext;
+
+typedef struct {
+ z_stream stream;
+ byte* dbytes;
+ Uint dleft;
+} B2TUncompressContext;
+
+typedef struct B2TContext_t {
+ Sint heap_size;
+ byte* aligned_alloc;
+ ErtsBinary2TermState b2ts;
+ Uint32 flags;
+ SWord reds;
+ Eterm trap_bin;
+ Export *bif;
+ Eterm arg[2];
+ enum B2TState state;
+ union {
+ B2TSizeContext sc;
+ B2TDecodeContext dc;
+ B2TUncompressContext uc;
+ } u;
+} B2TContext;
+
+
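These contexts drive the yielding decoder: each BIF invocation gets a byte budget derived from the reductions left, and when it is spent the live state is copied into a magic binary so the BIF can trap to itself. A condensed schematic of the pattern implemented by binary_to_term_int below (step is a hypothetical stand-in for one state-machine transition):

    ctx->reds = ERTS_BIF_REDS_LEFT(p) * B2T_BYTES_PER_REDUCTION;
    do {
        step(ctx);                        /* one B2TState transition */
    } while (ctx->reds > 0 && ctx->state < B2TDone);
    if (ctx->state < B2TDone) {           /* budget spent: save and trap */
        ctx = b2t_export_context(p, ctx); /* state moves into a magic binary */
        BUMP_ALL_REDS(p);
        /* then: BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin) */
    }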
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
z_stream stream;
@@ -1090,48 +1217,62 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
return err == Z_STREAM_END ? uncomp_size : 0;
}
-static ERTS_INLINE Sint
-binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
+static ERTS_INLINE int
+binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
+ B2TContext* ctx)
{
- Sint res;
byte *bytes = data;
Sint size = data_size;
state->exttmp = 0;
if (size < 1 || *bytes != VERSION_MAGIC) {
- error:
- if (state->exttmp)
- erts_free(ERTS_ALC_T_TMP, state->extp);
- state->extp = NULL;
- state->exttmp = 0;
return -1;
}
bytes++;
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
+ if (ctx)
+ ctx->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
bytes += 5;
size -= 5;
if (dest_len > 32*1024*1024
- || (state->extp = erts_alloc_fnf(ERTS_ALC_T_TMP, dest_len)) == NULL) {
+ || (state->extp = erts_alloc_fnf(ERTS_ALC_T_EXT_TERM_DATA, dest_len)) == NULL) {
+ /*
+ * Try avoid out-of-memory crash due to corrupted 'dest_len'
+ * by checking the actual length of the uncompressed data.
+ * The only way to do that is to uncompress it. Sad but true.
+ */
if (dest_len != binary2term_uncomp_size(bytes, size)) {
- goto error;
+ return -1;
}
- state->extp = erts_alloc(ERTS_ALC_T_TMP, dest_len);
+ state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
+ ctx->reds -= dest_len;
}
state->exttmp = 1;
- if (erl_zlib_uncompress(state->extp, &dest_len, bytes, size) != Z_OK)
- goto error;
+ if (ctx) {
+ if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
+ return -1;
+
+ ctx->u.uc.dbytes = state->extp;
+ ctx->u.uc.dleft = dest_len;
+ ctx->state = B2TUncompressChunk;
+ }
+ else {
+ uLongf dlen = dest_len;
+ if (erl_zlib_uncompress(state->extp, &dlen, bytes, size) != Z_OK
+ || dlen != dest_len) {
+ return -1;
+ }
+ }
size = (Sint) dest_len;
}
- res = decoded_size(state->extp, state->extp + size, 0);
- if (res < 0)
- goto error;
- return res;
+ state->extsize = size;
+ return 0;
}
static ERTS_INLINE void
@@ -1139,7 +1280,7 @@ binary2term_abort(ErtsBinary2TermState *state)
{
if (state->exttmp) {
state->exttmp = 0;
- erts_free(ERTS_ALC_T_TMP, state->extp);
+ erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp);
}
}
@@ -1147,11 +1288,11 @@ static ERTS_INLINE Eterm
binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp)
{
Eterm res;
- if (!dec_term(edep, hpp, state->extp, ohp, &res))
+ if (!dec_term(edep, hpp, state->extp, ohp, &res, NULL))
res = THE_NON_VALUE;
if (state->exttmp) {
state->exttmp = 0;
- erts_free(ERTS_ALC_T_TMP, state->extp);
+ erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp);
}
return res;
}
@@ -1159,7 +1300,18 @@ binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm **
Sint
erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
{
- return binary2term_prepare(state, data, data_size);
+ Sint res;
+
+ if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ (res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
+
+ if (state->exttmp)
+ erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp);
+ state->extp = NULL;
+ state->exttmp = 0;
+ return -1;
+ }
+ return res;
}
void
@@ -1174,68 +1326,262 @@ erts_binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *oh
return binary2term_create(NULL,state, hpp, ohp);
}
-BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
+static void b2t_destroy_context(B2TContext* context)
{
- Sint heap_size;
- Eterm res;
+ erts_free_aligned_binary_bytes_extra(context->aligned_alloc,
+ ERTS_ALC_T_EXT_TERM_DATA);
+ context->aligned_alloc = NULL;
+ binary2term_abort(&context->b2ts);
+ if (context->state == B2TUncompressChunk) {
+ erl_zlib_inflate_finish(&context->u.uc.stream);
+ }
+}
+
+static void b2t_context_destructor(Binary *context_bin)
+{
+ B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
+
+ b2t_destroy_context(ctx);
+}
+
+static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
+{
+ Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
+
+ return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
+ THE_NON_VALUE, THE_NON_VALUE);
+}
+
+
+#define B2T_BYTES_PER_REDUCTION 128
+#define B2T_MEMCPY_FACTOR 8
+
+/* Define for testing */
+/*#define EXTREME_B2T_TRAPPING 1*/
+
+#ifdef EXTREME_B2T_TRAPPING
+static unsigned b2t_rand(void)
+{
+ static unsigned prev = 17;
+ prev = (prev * 214013 + 2531011);
+ return prev;
+}
+#endif
+
+
+static B2TContext* b2t_export_context(Process* p, B2TContext* src)
+{
+ Binary* context_b = erts_create_magic_binary(sizeof(B2TContext),
+ b2t_context_destructor);
+ B2TContext* ctx = ERTS_MAGIC_BIN_DATA(context_b);
Eterm* hp;
- Eterm* endp;
- Sint size;
- byte* bytes;
- byte* temp_alloc = NULL;
- ErtsBinary2TermState b2ts;
+ sys_memcpy(ctx, src, sizeof(B2TContext));
+ if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) {
+ ctx->u.dc.next = &ctx->u.dc.res;
+ }
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b);
+ return ctx;
+}
- if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) {
- error:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(BIF_P, BADARG);
+static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
+ Export *bif_init, Eterm arg0, Eterm arg1)
+{
+ BIF_RETTYPE ret_val;
+#ifdef EXTREME_B2T_TRAPPING
+ SWord initial_reds = 1 + b2t_rand() % 4;
+#else
+ SWord initial_reds = (Uint)(ERTS_BIF_REDS_LEFT(p) * B2T_BYTES_PER_REDUCTION);
+#endif
+ B2TContext c_buff;
+ B2TContext *ctx;
+ int is_first_call;
+
+ if (context_b == NULL) {
+ /* Setup enough to get started */
+ is_first_call = 1;
+ ctx = &c_buff;
+ ctx->state = B2TPrepare;
+ ctx->aligned_alloc = NULL;
+ ctx->flags = flags;
+ ctx->bif = bif_init;
+ ctx->arg[0] = arg0;
+ ctx->arg[1] = arg1;
+ IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;)
+ } else {
+ is_first_call = 0;
+ ctx = ERTS_MAGIC_BIN_DATA(context_b);
+ ASSERT(ctx->state != B2TPrepare);
}
- size = binary_size(BIF_ARG_1);
+ ctx->reds = initial_reds;
+
+ do {
+ switch (ctx->state) {
+ case B2TPrepare: {
+ byte* bytes;
+ Uint bin_size;
+ bytes = erts_get_aligned_binary_bytes_extra(bin,
+ &ctx->aligned_alloc,
+ ERTS_ALC_T_EXT_TERM_DATA,
+ 0);
+ if (bytes == NULL) {
+ ctx->b2ts.exttmp = 0;
+ ctx->state = B2TBadArg;
+ break;
+ }
+ bin_size = binary_size(bin);
+ if (ctx->aligned_alloc) {
+ ctx->reds -= bin_size / 8;
+ }
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ ctx->state = B2TBadArg;
+ }
+ break;
+ }
+ case B2TUncompressChunk: {
+ uLongf chunk = ctx->reds;
+ int zret;
+
+ if (chunk > ctx->u.uc.dleft)
+ chunk = ctx->u.uc.dleft;
+ zret = erl_zlib_inflate_chunk(&ctx->u.uc.stream,
+ ctx->u.uc.dbytes, &chunk);
+ ctx->u.uc.dbytes += chunk;
+ ctx->u.uc.dleft -= chunk;
+ if (zret == Z_OK && ctx->u.uc.dleft > 0) {
+ ctx->reds = 0;
+ }
+ else if (erl_zlib_inflate_finish(&ctx->u.uc.stream) == Z_OK
+ && zret == Z_STREAM_END
+ && ctx->u.uc.dleft == 0) {
+ ctx->reds -= chunk;
+ ctx->state = B2TSizeInit;
+ }
+ else {
+ ctx->state = B2TBadArg;
+ }
+ break;
+ }
+ case B2TSizeInit:
+ ctx->u.sc.ep = NULL;
+ ctx->state = B2TSize;
+ /*fall through*/
+ case B2TSize:
+ ctx->heap_size = decoded_size(ctx->b2ts.extp,
+ ctx->b2ts.extp + ctx->b2ts.extsize,
+ 0, ctx);
+ break;
+
+ case B2TDecodeInit:
+ if (ctx == &c_buff && ctx->b2ts.extsize > ctx->reds) {
+ /* dec_term may trap; allocate space for the magic bin
+ before the result term to make it easy to trim with HRelease.
+ */
+ ctx = b2t_export_context(p, &c_buff);
+ }
+ ctx->u.dc.ep = ctx->b2ts.extp;
+ ctx->u.dc.res = (Eterm) (UWord) NULL;
+ ctx->u.dc.next = &ctx->u.dc.res;
+ ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size);
+ ctx->u.dc.hp = ctx->u.dc.hp_start;
+ ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size;
+ ctx->u.dc.maps_head = NULL;
+ ctx->state = B2TDecode;
+ /*fall through*/
+ case B2TDecode:
+ case B2TDecodeList:
+ case B2TDecodeTuple:
+ case B2TDecodeString:
+ case B2TDecodeBinary: {
+ ErtsDistExternal fakedep;
+ fakedep.flags = ctx->flags;
+ dec_term(&fakedep, NULL, NULL, &MSO(p), NULL, ctx);
+ break;
+ }
+ case B2TDecodeFail:
+ HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start);
+ /*fall through*/
+ case B2TBadArg:
+ BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
- heap_size = binary2term_prepare(&b2ts, bytes, size);
- if (heap_size < 0)
- goto error;
+ ASSERT(ctx->bif == bif_export[BIF_binary_to_term_1]
+ || ctx->bif == bif_export[BIF_binary_to_term_2]);
+
+ if (is_first_call)
+ ERTS_BIF_PREP_ERROR(ret_val, p, BADARG);
+ else {
+ erts_set_gc_state(p, 1);
+ if (is_non_value(ctx->arg[1]))
+ ERTS_BIF_PREP_ERROR_TRAPPED1(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0]);
+ else
+ ERTS_BIF_PREP_ERROR_TRAPPED2(ret_val, p, BADARG, ctx->bif,
+ ctx->arg[0], ctx->arg[1]);
+ }
+ b2t_destroy_context(ctx);
+ return ret_val;
+
+ case B2TDone:
+ b2t_destroy_context(ctx);
- hp = HAlloc(BIF_P, heap_size);
- endp = hp + heap_size;
+ if (ctx->u.dc.hp > ctx->u.dc.hp_end) {
+ erl_exit(1, ":%s, line %d: heap overrun by %d word(s)\n",
+ __FILE__, __LINE__, ctx->u.dc.hp - ctx->u.dc.hp_end);
+ }
+ HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp);
+
+ if (!is_first_call) {
+ erts_set_gc_state(p, 1);
+ }
+ BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION);
+ ERTS_BIF_PREP_RET(ret_val, ctx->u.dc.res);
+ return ret_val;
- res = binary2term_create(NULL, &b2ts, &hp, &MSO(BIF_P));
+ default:
+ ASSERT(!"Unknown state in binary_to_term");
+ }
} while (ctx->reds > 0 || ctx->state >= B2TDone);
- erts_free_aligned_binary_bytes(temp_alloc);
+ if (ctx == &c_buff) {
+ ASSERT(ctx->trap_bin == THE_NON_VALUE);
+ ctx = b2t_export_context(p, &c_buff);
+ }
+ ASSERT(ctx->trap_bin != THE_NON_VALUE);
- if (hp > endp) {
- erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n",
- __FILE__, __LINE__, hp-endp);
+ if (is_first_call) {
+ erts_set_gc_state(p, 0);
}
+ BUMP_ALL_REDS(p);
- HRelease(BIF_P, endp, hp);
+ ERTS_BIF_PREP_TRAP1(ret_val, &binary_to_term_trap_export,
+ p, ctx->trap_bin);
- if (res == THE_NON_VALUE)
- goto error;
+ return ret_val;
+}
- return res;
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1)
+
+BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
+{
+ return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1],
+ BIF_ARG_1, THE_NON_VALUE);
}
+HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2)
+
BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
{
- Sint heap_size;
- Eterm res;
Eterm opts;
Eterm opt;
- Eterm* hp;
- Eterm* endp;
- Sint size;
- byte* bytes;
- byte* temp_alloc = NULL;
- ErtsBinary2TermState b2ts;
- ErtsDistExternal fakedep;
+ Uint32 flags = 0;
- fakedep.flags = 0;
opts = BIF_ARG_2;
while (is_list(opts)) {
opt = CAR(list_val(opts));
if (opt == am_safe) {
- fakedep.flags |= ERTS_DIST_EXT_BTT_SAFE;
+ flags |= ERTS_DIST_EXT_BTT_SAFE;
}
else {
goto error;
@@ -1246,35 +1592,11 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
if (is_not_nil(opts))
goto error;
- if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) {
- error:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(BIF_P, BADARG);
- }
- size = binary_size(BIF_ARG_1);
-
- heap_size = binary2term_prepare(&b2ts, bytes, size);
- if (heap_size < 0)
- goto error;
-
- hp = HAlloc(BIF_P, heap_size);
- endp = hp + heap_size;
-
- res = binary2term_create(&fakedep, &b2ts, &hp, &MSO(BIF_P));
-
- erts_free_aligned_binary_bytes(temp_alloc);
+ return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2],
+ BIF_ARG_1, BIF_ARG_2);
- if (hp > endp) {
- erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n",
- __FILE__, __LINE__, hp-endp);
- }
-
- HRelease(BIF_P, endp, hp);
-
- if (res == THE_NON_VALUE)
- goto error;
-
- return res;
+error:
+ BIF_ERROR(BIF_P, BADARG);
}
Eterm
@@ -1306,9 +1628,9 @@ external_size_2(BIF_ALIST_2)
if (tp[1] == am_minor_version && is_small(tp[2])) {
switch (signed_val(tp[2])) {
case 0:
+ flags &= ~DFLAG_NEW_FLOATS;
break;
case 1:
- flags |= DFLAG_NEW_FLOATS;
break;
default:
goto error;
@@ -1335,16 +1657,13 @@ external_size_2(BIF_ALIST_2)
}
}
-Eterm
-erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags)
+static Eterm
+erts_term_to_binary_simple(Process* p, Eterm Term, Uint size, int level, Uint flags)
{
- Uint size;
Eterm bin;
size_t real_size;
byte* endp;
- size = encode_size_struct2(NULL, Term, flags) + 1 /* VERSION_MAGIC */;
-
if (level != 0) {
byte buf[256];
byte* bytes = buf;
@@ -1414,6 +1733,338 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags)
}
}
+Eterm
+erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
+ Uint size;
+ size = encode_size_struct2(NULL, Term, flags) + 1 /* VERSION_MAGIC */;
+ return erts_term_to_binary_simple(p, Term, size, level, flags);
+}
+
+/* Define for testing */
+/* #define EXTREME_TTB_TRAPPING 1 */
+
+#ifndef EXTREME_TTB_TRAPPING
+#define TERM_TO_BINARY_LOOP_FACTOR 32
+#define TERM_TO_BINARY_COMPRESS_CHUNK (1 << 18)
+#else
+#define TERM_TO_BINARY_LOOP_FACTOR 1
+#define TERM_TO_BINARY_COMPRESS_CHUNK 10
+#endif
+
+
+typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
+typedef struct TTBSizeContext_ {
+ Uint flags;
+ int level;
+ Uint result;
+ Eterm obj;
+ ErtsEStack estack;
+} TTBSizeContext;
+
+typedef struct TTBEncodeContext_ {
+ Uint flags;
+ int level;
+ byte* ep;
+ Eterm obj;
+ ErtsWStack wstack;
+ Binary *result_bin;
+} TTBEncodeContext;
+
+typedef struct {
+ Uint real_size;
+ Uint dest_len;
+ byte *dbytes;
+ Binary *result_bin;
+ Binary *destination_bin;
+ z_stream stream;
+} TTBCompressContext;
+
+typedef struct {
+ int alive;
+ TTBState state;
+ union {
+ TTBSizeContext sc;
+ TTBEncodeContext ec;
+ TTBCompressContext cc;
+ } s;
+} TTBContext;
+
+static void ttb_context_destructor(Binary *context_bin)
+{
+ TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
+ if (context->alive) {
+ context->alive = 0;
+ switch (context->state) {
+ case TTBSize:
+ DESTROY_SAVED_ESTACK(&context->s.sc.estack);
+ break;
+ case TTBEncode:
+ DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
+ if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
+ ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
+ erts_bin_free(context->s.ec.result_bin);
+ context->s.ec.result_bin = NULL;
+ }
+ break;
+ case TTBCompress:
+ erl_zlib_deflate_finish(&(context->s.cc.stream));
+
+ if (context->s.cc.destination_bin != NULL) { /* Set to NULL if ever made alive! */
+ ASSERT(erts_refc_read(&(context->s.cc.destination_bin->refc),0) == 0);
+ erts_bin_free(context->s.cc.destination_bin);
+ context->s.cc.destination_bin = NULL;
+ }
+
+ if (context->s.cc.result_bin != NULL) { /* Set to NULL if ever made alive! */
+ ASSERT(erts_refc_read(&(context->s.cc.result_bin->refc),0) == 0);
+ erts_bin_free(context->s.cc.result_bin);
+ context->s.cc.result_bin = NULL;
+ }
+ break;
+ }
+ }
+}
+
+static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
+ Binary *context_b)
+{
+ Eterm *hp;
+ Eterm res;
+ Eterm c_term;
+#ifndef EXTREME_TTB_TRAPPING
+ Sint reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+#else
+ Sint reds = 20; /* For testing */
+#endif
+ Sint initial_reds = reds;
+ TTBContext c_buff;
+ TTBContext *context = &c_buff;
+
+#define EXPORT_CONTEXT() \
+ do { \
+ if (context_b == NULL) { \
+ context_b = erts_create_magic_binary(sizeof(TTBContext), \
+ ttb_context_destructor); \
+ context = ERTS_MAGIC_BIN_DATA(context_b); \
+ memcpy(context,&c_buff,sizeof(TTBContext)); \
+ } \
+ } while (0)
+
+#define RETURN_STATE() \
+ do { \
+ hp = HAlloc(p, PROC_BIN_SIZE+3); \
+ c_term = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); \
+ res = TUPLE2(hp, Term, c_term); \
+ BUMP_ALL_REDS(p); \
+ return res; \
+ } while (0);
+
+
+ if (context_b == NULL) {
+ /* Setup enough to get started */
+ context->state = TTBSize;
+ context->alive = 1;
+ context->s.sc.estack.start = NULL;
+ context->s.sc.flags = flags;
+ context->s.sc.level = level;
+ } else {
+ context = ERTS_MAGIC_BIN_DATA(context_b);
+ }
+ /* Initialization done, now we will go through the states */
+ for (;;) {
+ switch (context->state) {
+ case TTBSize:
+ {
+ Uint size;
+ Binary *result_bin;
+ int level;
+ Uint flags;
+ /* Try for fast path */
+ if (encode_size_struct_int(&context->s.sc, NULL, Term,
+ context->s.sc.flags, &reds, &size) < 0) {
+ EXPORT_CONTEXT();
+ /* Same state */
+ RETURN_STATE();
+ }
+ ++size; /* VERSION_MAGIC */
+ /* Move these to next state */
+ flags = context->s.sc.flags;
+ level = context->s.sc.level;
+ if (size <= ERL_ONHEAP_BIN_LIMIT) {
+ /* Finish in one go */
+ res = erts_term_to_binary_simple(p, Term, size,
+ level, flags);
+ BUMP_REDS(p, 1);
+ return res;
+ }
+
+ result_bin = erts_bin_nrml_alloc(size);
+ result_bin->flags = 0;
+ result_bin->orig_size = size;
+ erts_refc_init(&result_bin->refc, 0);
+ result_bin->orig_bytes[0] = VERSION_MAGIC;
+ /* Next state immediately, no need to export context */
+ context->state = TTBEncode;
+ context->s.ec.flags = flags;
+ context->s.ec.level = level;
+ context->s.ec.wstack.wstart = NULL;
+ context->s.ec.result_bin = result_bin;
+ break;
+ }
+ case TTBEncode:
+ {
+ byte *endp;
+ byte *bytes = (byte *) context->s.ec.result_bin->orig_bytes;
+ size_t real_size;
+ Binary *result_bin;
+
+ flags = context->s.ec.flags;
+ if (enc_term_int(&context->s.ec, NULL,Term, bytes+1, flags, NULL, &reds, &endp) < 0) {
+ EXPORT_CONTEXT();
+ RETURN_STATE();
+ }
+ real_size = endp - bytes;
+ result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size);
+ result_bin->orig_size = real_size;
+ level = context->s.ec.level;
+ BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
+ if (level == 0 || real_size < 6) { /* We are done */
+ ProcBin* pb;
+ return_normal:
+ context->s.ec.result_bin = NULL;
+ context->alive = 0;
+ pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = real_size;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
+ pb->val = result_bin;
+ pb->bytes = (byte*) result_bin->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
+ erts_refc_inc(&result_bin->refc, 1);
+ if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ erts_bin_free(context_b);
+ }
+ return make_binary(pb);
+ }
+ /* Continue with compression... */
+ /* To make absolutely sure that zlib does not barf on a reallocated context,
+ we make sure it's "exported" before doing anything compression-like */
+ EXPORT_CONTEXT();
+ bytes = (byte *) result_bin->orig_bytes; /* result_bin is reallocated */
+ if (erl_zlib_deflate_start(&(context->s.cc.stream),bytes+1,real_size-1,level)
+ != Z_OK) {
+ goto return_normal;
+ }
+ context->state = TTBCompress;
+ context->s.cc.real_size = real_size;
+ context->s.cc.result_bin = result_bin;
+
+ result_bin = erts_bin_nrml_alloc(real_size);
+ result_bin->flags = 0;
+ result_bin->orig_size = real_size;
+ erts_refc_init(&result_bin->refc, 0);
+ result_bin->orig_bytes[0] = VERSION_MAGIC;
+
+ context->s.cc.destination_bin = result_bin;
+ context->s.cc.dest_len = 0;
+ context->s.cc.dbytes = (byte *) result_bin->orig_bytes+6;
+ break;
+ }
+ case TTBCompress:
+ {
+ uLongf tot_dest_len = context->s.cc.real_size - 6;
+ uLongf left = (tot_dest_len - context->s.cc.dest_len);
+ uLongf this_time = (left > TERM_TO_BINARY_COMPRESS_CHUNK) ?
+ TERM_TO_BINARY_COMPRESS_CHUNK :
+ left;
+ Binary *result_bin;
+ ProcBin *pb;
+ Uint max = (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_COMPRESS_CHUNK) / CONTEXT_REDS;
+
+ if (max < this_time) {
+ this_time = max + 1; /* do not set this_time to 0 */
+ }
+
+ res = erl_zlib_deflate_chunk(&(context->s.cc.stream), context->s.cc.dbytes, &this_time);
+ context->s.cc.dbytes += this_time;
+ context->s.cc.dest_len += this_time;
+ switch (res) {
+ case Z_OK:
+ if (context->s.cc.dest_len >= tot_dest_len) {
+ goto no_use_compressing;
+ }
+ RETURN_STATE();
+ case Z_STREAM_END:
+ {
+ byte *dbytes = (byte *) context->s.cc.destination_bin->orig_bytes + 1;
+
+ dbytes[0] = COMPRESSED;
+ put_int32(context->s.cc.real_size-1,dbytes+1);
+ erl_zlib_deflate_finish(&(context->s.cc.stream));
+ result_bin = erts_bin_realloc(context->s.cc.destination_bin,
+ context->s.cc.dest_len+6);
+ result_bin->orig_size = context->s.cc.dest_len+6;
+ context->s.cc.destination_bin = NULL;
+ pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = context->s.cc.dest_len+6;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
+ pb->val = result_bin;
+ pb->bytes = (byte*) result_bin->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
+ erts_refc_inc(&result_bin->refc, 1);
+ erts_bin_free(context->s.cc.result_bin);
+ context->s.cc.result_bin = NULL;
+ context->alive = 0;
+ BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
+ if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ erts_bin_free(context_b);
+ }
+ return make_binary(pb);
+ }
+ default: /* Compression error, revert to uncompressed binary (still in
+ context) */
+ no_use_compressing:
+ result_bin = context->s.cc.result_bin;
+ context->s.cc.result_bin = NULL;
+ pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = context->s.cc.real_size;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
+ pb->val = result_bin;
+ pb->bytes = (byte*) result_bin->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
+ erts_refc_inc(&result_bin->refc, 1);
+ erl_zlib_deflate_finish(&(context->s.cc.stream));
+ erts_bin_free(context->s.cc.destination_bin);
+ context->s.cc.destination_bin = NULL;
+ context->alive = 0;
+ BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
+ if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ erts_bin_free(context_b);
+ }
+ return make_binary(pb);
+ }
+ }
+ }
+ }
+#undef EXPORT_CONTEXT
+#undef RETURN_STATE
+}
+
+
+
+
+
+
+
+
/*
* This function fills ext with the external format of atom.
* If it's an old atom we just supply an index, otherwise
@@ -1678,10 +2329,20 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
#define ENC_PATCH_FUN_SIZE ((Eterm) 2)
#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3)
+
static byte*
enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
struct erl_off_heap_header** off_heap)
{
+ byte *res;
+ (void) enc_term_int(NULL, acmp, obj, ep, dflags, off_heap, NULL, &res);
+ return res;
+}
+
+static int
+enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
+ struct erl_off_heap_header** off_heap, Sint *reds, byte **res)
+{
DECLARE_WSTACK(s);
Uint n;
Uint i;
@@ -1689,11 +2350,23 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
Uint* ptr;
Eterm val;
FloatDef f;
+ Sint r = 0;
#if HALFWORD_HEAP
UWord wobj;
#endif
+ if (ctx) {
+ WSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ r = *reds;
+
+ if (ctx->wstack.wstart) { /* restore saved stacks and byte pointer */
+ WSTACK_RESTORE(s, &ctx->wstack);
+ ep = ctx->ep;
+ obj = ctx->obj;
+ }
+ }
+
goto L_jump_start;
outer_loop:
@@ -1729,6 +2402,7 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
}
goto outer_loop;
case ENC_LAST_ARRAY_ELEMENT:
+ /* obj is the tuple */
{
#if HALFWORD_HEAP
Eterm* ptr = (Eterm *) wobj;
@@ -1745,14 +2419,22 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
#else
Eterm* ptr = (Eterm *) obj;
#endif
- obj = *ptr++;
WSTACK_PUSH(s, val-1);
- WSTACK_PUSH(s, (UWord) ptr);
+ obj = *ptr++;
+ WSTACK_PUSH(s, (UWord)ptr);
}
break;
}
L_jump_start:
+
+ if (ctx && --r == 0) {
+ *reds = r;
+ ctx->obj = obj;
+ ctx->ep = ep;
+ WSTACK_SAVE(s, &ctx->wstack);
+ return -1;
+ }
switch(tag_val_def(obj)) {
case NIL_DEF:
*ep++ = NIL_EXT;
@@ -1897,7 +2579,35 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
}
if (i > 0) {
WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
- WSTACK_PUSH(s, (UWord) ptr);
+ WSTACK_PUSH(s, (UWord)ptr);
+ }
+ break;
+
+ case MAP_DEF:
+ {
+ map_t *mp = (map_t*)map_val(obj);
+ Uint size = map_get_size(mp);
+
+ *ep++ = MAP_EXT;
+ put_int32(size, ep); ep += 4;
+
+ if (size > 0) {
+ Eterm *kptr = map_get_keys(mp);
+ Eterm *vptr = map_get_values(mp);
+
+ for (i = size-1; i >= 1; i--) {
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) vptr[i]);
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) kptr[i]);
+ }
+
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) vptr[0]);
+
+ obj = kptr[0];
+ goto L_jump_start;
+ }
}
break;
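For reference, the external layout produced by the MAP_EXT case above, where Size is the number of key-value pairs and each key is emitted before its value, keys and values being encoded recursively as ordinary terms:

    /* MAP_EXT | Size:32/big-endian | K1 V1 K2 V2 ... K_Size V_Size */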
@@ -1905,7 +2615,7 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
GET_DOUBLE(obj, f);
if (dflags & DFLAG_NEW_FLOATS) {
*ep++ = NEW_FLOAT_EXT;
-#ifdef WORDS_BIGENDIAN
+#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
put_int32(f.fw[0], ep);
ep += 4;
put_int32(f.fw[1], ep);
@@ -2104,7 +2814,12 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
}
}
DESTROY_WSTACK(s);
- return ep;
+ if (ctx) {
+ ASSERT(ctx->wstack.wstart == NULL);
+ *reds = r;
+ }
+ *res = ep;
+ return 0;
}
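A -1 return means the encoder ran out of reductions and saved its word stack, byte pointer, and current term into the context; 0 means *res now points just past the encoded term. A hypothetical caller-side resume loop (illustrative only; the real driver lives in the term_to_binary trap machinery):

/* reductions_left() and yield_to_scheduler() are hypothetical helpers. */
for (;;) {
    Sint reds = reductions_left();
    if (enc_term_int(ctx, acmp, obj, ep, dflags, off_heap, &reds, &res) == 0)
        break;               /* done: res points past the encoded term */
    yield_to_scheduler();    /* ctx holds obj, ep and the saved wstack */
}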
static
@@ -2174,21 +2889,115 @@ undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
#endif /* DEBUG */
}
+
/* Decode term from external format into *objp.
** On failure return NULL and (R13B04) *hpp will be unchanged.
*/
static byte*
-dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp)
+dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap,
+ Eterm* objp, B2TContext* ctx)
{
- Eterm* hp_saved = *hpp;
+ Eterm* hp_saved;
int n;
ErtsAtomEncoding char_enc;
- register Eterm* hp = *hpp; /* Please don't take the address of hp */
- Eterm* next = objp;
+ register Eterm* hp; /* Please don't take the address of hp */
+ Eterm *maps_head; /* for validation of maps */
+ Eterm* next;
+ SWord reds;
+
+ if (ctx) {
+ hp_saved = ctx->u.dc.hp_start;
+ reds = ctx->reds;
+ next = ctx->u.dc.next;
+ ep = ctx->u.dc.ep;
+ hpp = &ctx->u.dc.hp;
+ maps_head = ctx->u.dc.maps_head;
+
+ if (ctx->state != B2TDecode) {
+ int n_limit = reds;
+
+ n = ctx->u.dc.remaining_n;
+ if (ctx->state == B2TDecodeBinary) {
+ n_limit *= B2T_MEMCPY_FACTOR;
+ ASSERT(n_limit >= reds);
+ reds -= n / B2T_MEMCPY_FACTOR;
+ }
+ else
+ reds -= n;
- *next = (Eterm) (UWord) NULL;
+ if (n > n_limit) {
+ ctx->u.dc.remaining_n -= n_limit;
+ n = n_limit;
+ reds = 0;
+ }
+ else {
+ ctx->u.dc.remaining_n = 0;
+ }
+
+ switch (ctx->state) {
+ case B2TDecodeList:
+ objp = next - 2;
+ while (n > 0) {
+ objp[0] = (Eterm) COMPRESS_POINTER(next);
+ objp[1] = make_list(next);
+ next = objp;
+ objp -= 2;
+ n--;
+ }
+ break;
+
+ case B2TDecodeTuple:
+ objp = next - 1;
+ while (n-- > 0) {
+ objp[0] = (Eterm) COMPRESS_POINTER(next);
+ next = objp;
+ objp--;
+ }
+ break;
+
+ case B2TDecodeString:
+ hp = *hpp;
+ hp[-1] = make_list(hp); /* overwrite the premature NIL */
+ while (n-- > 0) {
+ hp[0] = make_small(*ep++);
+ hp[1] = make_list(hp+2);
+ hp += 2;
+ }
+ hp[-1] = NIL;
+ *hpp = hp;
+ break;
+
+ case B2TDecodeBinary:
+ sys_memcpy(ctx->u.dc.remaining_bytes, ep, n);
+ ctx->u.dc.remaining_bytes += n;
+ ep += n;
+ break;
+
+ default:
+ ASSERT(!"Unknown state");
+ }
+ if (!ctx->u.dc.remaining_n) {
+ ctx->state = B2TDecode;
+ }
+ if (reds <= 0) {
+ ctx->u.dc.next = next;
+ ctx->u.dc.ep = ep;
+ ctx->reds = 0;
+ return NULL;
+ }
+ }
+ }
+ else {
+ hp_saved = *hpp;
+ reds = ERTS_SWORD_MAX;
+ next = objp;
+ *next = (Eterm) (UWord) NULL;
+ maps_head = NULL;
+ }
+ hp = *hpp;
while (next != NULL) {
+
objp = next;
next = (Eterm *) EXPAND_POINTER(*objp);
@@ -2308,7 +3117,16 @@ dec_term_atom_common:
*objp = make_tuple(hp);
*hp++ = make_arityval(n);
hp += n;
- objp = hp - 1;
+ objp = hp - 1;
+ if (ctx) {
+ if (reds < n) {
+ ASSERT(reds > 0);
+ ctx->state = B2TDecodeTuple;
+ ctx->u.dc.remaining_n = n - reds;
+ n = reds;
+ }
+ reds -= n;
+ }
while (n-- > 0) {
objp[0] = (Eterm) COMPRESS_POINTER(next);
next = objp;
@@ -2326,17 +3144,27 @@ dec_term_atom_common:
break;
}
*objp = make_list(hp);
- hp += 2*n;
+ hp += 2 * n;
objp = hp - 2;
objp[0] = (Eterm) COMPRESS_POINTER((objp+1));
objp[1] = (Eterm) COMPRESS_POINTER(next);
next = objp;
objp -= 2;
- while (--n > 0) {
+ n--;
+ if (ctx) {
+ if (reds < n) {
+ ctx->state = B2TDecodeList;
+ ctx->u.dc.remaining_n = n - reds;
+ n = reds;
+ }
+ reds -= n;
+ }
+ while (n > 0) {
objp[0] = (Eterm) COMPRESS_POINTER(next);
- objp[1] = make_list(objp + 2);
+ objp[1] = make_list(next);
next = objp;
objp -= 2;
+ n--;
}
break;
case STRING_EXT:
@@ -2347,6 +3175,14 @@ dec_term_atom_common:
break;
}
*objp = make_list(hp);
+ if (ctx) {
+ if (reds < n) {
+ ctx->state = B2TDecodeString;
+ ctx->u.dc.remaining_n = n - reds;
+ n = reds;
+ }
+ reds -= n;
+ }
while (n-- > 0) {
hp[0] = make_small(*ep++);
hp[1] = make_list(hp+2);
@@ -2374,7 +3210,7 @@ dec_term_atom_common:
volatile unsigned long *fpexnp = erts_get_current_fp_exception();
#endif
-#ifdef WORDS_BIGENDIAN
+#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
ff.fw[0] = get_int32(ep);
ep += 4;
ff.fw[1] = get_int32(ep);
@@ -2540,7 +3376,7 @@ dec_term_atom_common:
n = get_int32(ep);
ep += 4;
- if (n <= ERL_ONHEAP_BIN_LIMIT) {
+ if ((unsigned)n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -2554,7 +3390,6 @@ dec_term_atom_common:
dbin->flags = 0;
dbin->orig_size = n;
erts_refc_init(&dbin->refc, 1);
- sys_memcpy(dbin->orig_bytes, ep, n);
pb = (ProcBin *) hp;
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
@@ -2565,7 +3400,20 @@ dec_term_atom_common:
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
*objp = make_binary(pb);
- }
+ if (ctx) {
+ int n_limit = reds * B2T_MEMCPY_FACTOR;
+ if (n > n_limit) {
+ ctx->state = B2TDecodeBinary;
+ ctx->u.dc.remaining_n = n - n_limit;
+ ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit;
+ n = n_limit;
+ reds = 0;
+ }
+ else
+ reds -= n / B2T_MEMCPY_FACTOR;
+ }
+ sys_memcpy(dbin->orig_bytes, ep, n);
+ }
ep += n;
break;
}
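Copying binary payload is much cheaper per byte than building terms, so the budget is scaled by B2T_MEMCPY_FACTOR: one reduction pays for B2T_MEMCPY_FACTOR copied bytes. A hedged worked example (the factor's real value is defined earlier in this file; 8 is illustrative):

/* Illustration: with B2T_MEMCPY_FACTOR == 8 and reds == 100,
 * n_limit = 800 bytes may be copied in this slice. Decoding a
 * 3000-byte binary therefore copies 800 bytes now, records
 * remaining_n = 2200 (and the write position in remaining_bytes),
 * and resumes in state B2TDecodeBinary on the next slice. */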
@@ -2577,8 +3425,10 @@ dec_term_atom_common:
n = get_int32(ep);
bitsize = ep[4];
- ep += 5;
- if (n <= ERL_ONHEAP_BIN_LIMIT) {
+ if (((bitsize==0) != (n==0)) || bitsize > 8)
+ goto error;
+ ep += 5;
+ if ((unsigned)n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -2586,13 +3436,14 @@ dec_term_atom_common:
sys_memcpy(hb->data, ep, n);
bin = make_binary(hb);
hp += heap_bin_size(n);
+ ep += n;
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
+
dbin->flags = 0;
dbin->orig_size = n;
erts_refc_init(&dbin->refc, 1);
- sys_memcpy(dbin->orig_bytes, ep, n);
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
@@ -2603,12 +3454,27 @@ dec_term_atom_common:
pb->flags = 0;
bin = make_binary(pb);
hp += PROC_BIN_SIZE;
- }
- ep += n;
- if (bitsize == 0) {
+ if (ctx) {
+ int n_limit = reds * B2T_MEMCPY_FACTOR;
+ if (n > n_limit) {
+ ctx->state = B2TDecodeBinary;
+ ctx->u.dc.remaining_n = n - n_limit;
+ ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit;
+ n = n_limit;
+ reds = 0;
+ }
+ else
+ reds -= n / B2T_MEMCPY_FACTOR;
+ }
+ sys_memcpy(dbin->orig_bytes, ep, n);
+ ep += n;
+ n = pb->size;
+ }
+
+ if (bitsize == 8 || n == 0) {
*objp = bin;
} else {
- sb = (ErlSubBin *) hp;
+ sb = (ErlSubBin *)hp;
sb->thing_word = HEADER_SUB_BIN;
sb->orig = bin;
sb->size = n - 1;
@@ -2635,7 +3501,7 @@ dec_term_atom_common:
goto error;
}
*hpp = hp;
- ep = dec_term(edep, hpp, ep, off_heap, &temp);
+ ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL);
hp = *hpp;
if (ep == NULL) {
goto error;
@@ -2662,6 +3528,50 @@ dec_term_atom_common:
break;
}
break;
+ case MAP_EXT:
+ {
+ map_t *mp;
+ Uint32 size, n;
+ Eterm *kptr, *vptr;
+ Eterm keys;
+
+ size = get_int32(ep); ep += 4;
+
+ keys = make_tuple(hp);
+ *hp++ = make_arityval(size);
+ hp += size;
+ kptr = hp - 1;
+
+ mp = (map_t*)hp;
+ hp += MAP_HEADER_SIZE;
+ hp += size;
+ vptr = hp - 1;
+
+ /* kptr, last word for keys
+ * vptr, last word for values
+ */
+
+ /*
+ * Use thing_word to link through decoded maps.
+ * The list of maps is for later validation.
+ */
+
+ mp->thing_word = (Eterm) COMPRESS_POINTER(maps_head);
+ maps_head = (Eterm *) mp;
+
+ mp->size = size;
+ mp->keys = keys;
+ *objp = make_map(mp);
+
+ for (n = size; n; n--) {
+ *vptr = (Eterm) COMPRESS_POINTER(next);
+ *kptr = (Eterm) COMPRESS_POINTER(vptr);
+ next = kptr;
+ vptr--;
+ kptr--;
+ }
+ }
+ break;
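Note the linking trick above: each decoded map's thing_word temporarily stores a (compressed) pointer to the previously decoded map, forming an intrusive list that is unwound once the whole term exists. A generic sketch of the same idea, with hypothetical types:

#include <stdint.h>

/* Hypothetical: thread an intrusive list through a header word that
 * will be rewritten (here: to MAP_HEADER) after decoding completes. */
struct node { uintptr_t hdr; /* temporarily holds the next pointer */ };

static void chain_push(struct node **head, struct node *n)
{
    n->hdr = (uintptr_t) *head;  /* reuse the header word as a link */
    *head = n;
}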
case NEW_FUN_EXT:
{
ErlFunThing* funp = (ErlFunThing *) hp;
@@ -2695,7 +3605,7 @@ dec_term_atom_common:
}
*hpp = hp;
/* Index */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -2704,7 +3614,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -2772,7 +3682,7 @@ dec_term_atom_common:
}
/* Index */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -2781,7 +3691,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -2871,72 +3781,107 @@ dec_term_atom_common:
}
default:
- error:
- /* UNDO:
- * Must unlink all off-heap objects that may have been
- * linked into the process.
- */
- if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */
- hp = *hpp; /* the largest must be the freshest */
- }
- undo_offheap_in_area(off_heap, hp_saved, hp);
- *hpp = hp_saved;
- return NULL;
+ goto error;
}
+
+ if (--reds <= 0) {
+ if (ctx) {
+ if (next || ctx->state != B2TDecode) {
+ ctx->u.dc.ep = ep;
+ ctx->u.dc.next = next;
+ ctx->u.dc.hp = hp;
+ ctx->u.dc.maps_head = maps_head;
+ ctx->reds = 0;
+ return NULL;
+ }
+ }
+ else {
+ reds = ERTS_SWORD_MAX;
+ }
+ }
+ }
+
+ /* Iterate through all the decoded maps: validate each and sort its keys
+ * - done here, once the whole term is known to be complete.
+ */
+
+ while (maps_head) {
+ next = (Eterm *)(EXPAND_POINTER(*maps_head));
+ *maps_head = MAP_HEADER;
+ if (!erts_validate_and_sort_map((map_t*)maps_head))
+ goto error;
+ maps_head = next;
+ }
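A short note on why this validation is deferred:

/* Why a post-pass: while an outer map is being decoded its keys may
 * still be unfilled placeholders, so ordering and duplicate checks
 * (e.g. rejecting two identical keys arriving on the wire) are only
 * meaningful once every sub-term is complete. Each thing_word is
 * restored to a real MAP_HEADER just before validation. */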
+
+ if (ctx) {
+ ctx->state = B2TDone;
+ ctx->reds = reds;
}
+
*hpp = hp;
return ep;
+
+error:
+ /* UNDO:
+ * Must unlink all off-heap objects that may have been
+ * linked into the process.
+ */
+ if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */
+ hp = *hpp; /* the largest must be the freshest */
+ }
+ undo_offheap_in_area(off_heap, hp_saved, hp);
+ *hpp = hp_saved;
+ if (ctx) {
+ ctx->state = B2TDecodeFail;
+ ctx->reds = reds;
+ }
+
+ return NULL;
}
/* returns the number of bytes needed to encode an object
to a sequence of bytes
N.B. That this must agree with to_external2() above!!!
(except for cached atoms) */
+static Uint encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
+{
+ Uint res;
+ (void) encode_size_struct_int(NULL, acmp, obj, dflags, NULL, &res);
+ return res;
+}
-static Uint
-encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
+static int
+encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
+ unsigned dflags, Sint *reds, Uint *res)
{
- DECLARE_WSTACK(s);
+ DECLARE_ESTACK(s);
Uint m, i, arity;
Uint result = 0;
-#if HALFWORD_HEAP
- UWord wobj = 0;
-#endif
+ Sint r = 0;
+
+ if (ctx) {
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ r = *reds;
+
+ if (ctx->estack.start) { /* restore saved stack */
+ ESTACK_RESTORE(s, &ctx->estack);
+ result = ctx->result;
+ obj = ctx->obj;
+ }
+ }
goto L_jump_start;
outer_loop:
- while (!WSTACK_ISEMPTY(s)) {
-#if HALFWORD_HEAP
- obj = (Eterm) (wobj = WSTACK_POP(s));
-#else
- obj = WSTACK_POP(s);
-#endif
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
handle_popped_obj:
- if (is_CP(obj)) { /* Does not look for CP, looks for "no tag" */
-#if HALFWORD_HEAP
- Eterm* ptr = (Eterm *) wobj;
-#else
- Eterm* ptr = (Eterm *) obj;
-#endif
- /*
- * Pointer into a tuple.
- */
- obj = *ptr--;
- if (!is_header(obj)) {
- WSTACK_PUSH(s, (UWord)ptr);
- } else {
- /* Reached tuple header */
- ASSERT(header_is_arityval(obj));
- goto outer_loop;
- }
- } else if (is_list(obj)) {
+ if (is_list(obj)) {
Eterm* cons = list_val(obj);
Eterm tl;
tl = CDR(cons);
obj = CAR(cons);
- WSTACK_PUSH(s, tl);
+ ESTACK_PUSH(s, tl);
} else if (is_nil(obj)) {
result++;
goto outer_loop;
@@ -2948,6 +3893,13 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
L_jump_start:
+ if (ctx && --r == 0) {
+ *reds = r;
+ ctx->obj = obj;
+ ctx->result = result;
+ ESTACK_SAVE(s, &ctx->estack);
+ return -1;
+ }
switch (tag_val_def(obj)) {
case NIL_DEF:
result++;
@@ -3034,20 +3986,64 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
case TUPLE_DEF:
{
Eterm* ptr = tuple_val(obj);
-
+ Uint i;
arity = arityval(*ptr);
if (arity <= 0xff) {
result += 1 + 1;
} else {
result += 1 + 4;
}
- ptr += arity;
-#if HALFWORD_HEAP
- obj = (Eterm) (wobj = (UWord) ptr);
-#else
- obj = (Eterm) ptr;
-#endif
- goto handle_popped_obj;
+ for (i = 1; i <= arity; ++i) {
+ if (is_list(ptr[i])) {
+ if ((m = is_string(ptr[i])) && (m < MAX_STRING_LEN)) {
+ result += m + 2 + 1;
+ } else {
+ result += 5;
+ }
+ }
+ ESTACK_PUSH(s,ptr[i]);
+ }
+ goto outer_loop;
+ }
+ break;
+ case MAP_DEF:
+ {
+ map_t *mp = (map_t*)map_val(obj);
+ Uint size = map_get_size(mp);
+ Uint i;
+ Eterm *ptr;
+
+ result += 1 + 4; /* tag + 4 bytes size */
+
+ /* push values first */
+ ptr = map_get_values(mp);
+ i = size;
+ while(i--) {
+ if (is_list(*ptr)) {
+ if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) {
+ result += m + 2 + 1;
+ } else {
+ result += 5;
+ }
+ }
+ ESTACK_PUSH(s,*ptr);
+ ++ptr;
+ }
+
+ ptr = map_get_keys(mp);
+ i = size;
+ while(i--) {
+ if (is_list(*ptr)) {
+ if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) {
+ result += m + 2 + 1;
+ } else {
+ result += 5;
+ }
+ }
+ ESTACK_PUSH(s,*ptr);
+ ++ptr;
+ }
+ goto outer_loop;
}
break;
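A hedged worked example of the size estimate for a small map (the per-term costs come from the other cases of this switch):

/* Illustration for #{a => 1}:
 *   1 + 4       MAP_EXT tag + 32-bit pair count
 * + cost(a)     the key, sized when popped from the stack
 * + 1 + 1       the value 1 as SMALL_INTEGER_EXT (tag + byte)
 * List-valued keys/values additionally reserve their list framing at
 * push time: m + 2 + 1 for a short string, otherwise 5 bytes. */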
case FLOAT_DEF:
@@ -3105,14 +4101,14 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
if (is_not_list(obj)) {
/* Push any non-list terms on the stack */
- WSTACK_PUSH(s, obj);
+ ESTACK_PUSH(s, obj);
} else {
/* Lists must be handled specially. */
if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
result += m + 2 + 1;
} else {
result += 5;
- WSTACK_PUSH(s, obj);
+ ESTACK_PUSH(s, obj);
}
}
}
@@ -3143,34 +4139,47 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
}
- DESTROY_WSTACK(s);
- return result;
-}
-
-static int is_valid_utf8_atom(byte* bytes, Uint nbytes)
-{
- byte* err_pos;
- Uint num_chars;
-
- /*SVERK Do we really need to validate correct utf8? */
- return nbytes <= MAX_ATOM_SZ_LIMIT
- && erts_analyze_utf8(bytes, nbytes, &err_pos, &num_chars, NULL) == ERTS_UTF8_OK
- && num_chars <= MAX_ATOM_CHARACTERS;
+ DESTROY_ESTACK(s);
+ if (ctx) {
+ ASSERT(ctx->estack.start == NULL);
+ *reds = r;
+ }
+ *res = result;
+ return 0;
}
static Sint
-decoded_size(byte *ep, byte* endp, int internal_tags)
+decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx)
{
- int heap_size = 0;
+ int heap_size;
int terms;
- int atom_extra_skip = 0;
+ int atom_extra_skip;
Uint n;
+ SWord reds;
+
+ if (ctx) {
+ reds = ctx->reds;
+ if (ctx->u.sc.ep) {
+ heap_size = ctx->u.sc.heap_size;
+ terms = ctx->u.sc.terms;
+ ep = ctx->u.sc.ep;
+ atom_extra_skip = ctx->u.sc.atom_extra_skip;
+ goto init_done;
+ }
+ }
+ else
+ reds = 0; /* not used but compiler warns anyway */
+
+ heap_size = 0;
+ terms = 1;
+ atom_extra_skip = 0;
+init_done:
#define SKIP(sz) \
do { \
if ((sz) <= endp-ep) { \
ep += (sz); \
- } else { return -1; }; \
+ } else { goto error; }; \
} while (0)
#define SKIP2(sz1, sz2) \
@@ -3178,31 +4187,32 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
Uint sz = (sz1) + (sz2); \
if (sz1 < sz && (sz) <= endp-ep) { \
ep += (sz); \
- } else { return -1; } \
+ } else { goto error; } \
} while (0)
#define CHKSIZE(sz) \
do { \
- if ((sz) > endp-ep) { return -1; } \
+ if ((sz) > endp-ep) { goto error; } \
} while (0)
#define ADDTERMS(n) \
do { \
int before = terms; \
terms += (n); \
- if (terms < before) return -1; \
+ if (terms < before) goto error; \
} while (0)
-
- for (terms=1; terms > 0; terms--) {
- int tag;
-
+ ASSERT(terms > 0);
+ do {
+ int tag;
CHKSIZE(1);
tag = ep++[0];
switch (tag) {
case INTEGER_EXT:
SKIP(4);
+#if !defined(ARCH_64) || HALFWORD_HEAP
heap_size += BIG_UINT_HEAP_SIZE;
+#endif
break;
case SMALL_INTEGER_EXT:
SKIP(1);
@@ -3217,7 +4227,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
CHKSIZE(4);
n = get_int32(ep);
if (n > BIG_ARITY_MAX*sizeof(ErtsDigit)) {
- return -1;
+ goto error;
}
SKIP2(n,4+1); /* skip, size,sign,digits */
heap_size += 1+1+(n+sizeof(Eterm)-1)/sizeof(Eterm); /* XXX: 1 too much? */
@@ -3226,7 +4236,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
CHKSIZE(2);
n = get_int16(ep);
if (n > MAX_ATOM_CHARACTERS) {
- return -1;
+ goto error;
}
SKIP(n+2+atom_extra_skip);
atom_extra_skip = 0;
@@ -3235,8 +4245,8 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
CHKSIZE(2);
n = get_int16(ep);
ep += 2;
- if (!is_valid_utf8_atom(ep, n)) {
- return -1;
+ if (n > MAX_ATOM_SZ_LIMIT) {
+ goto error;
}
SKIP(n+atom_extra_skip);
atom_extra_skip = 0;
@@ -3245,7 +4255,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
CHKSIZE(1);
n = get_int8(ep);
if (n > MAX_ATOM_CHARACTERS) {
- return -1;
+ goto error;
}
SKIP(n+1+atom_extra_skip);
atom_extra_skip = 0;
@@ -3254,8 +4264,8 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
CHKSIZE(1);
n = get_int8(ep);
ep++;
- if (!is_valid_utf8_atom(ep, n)) {
- return -1;
+ if (n > MAX_ATOM_SZ_LIMIT) {
+ goto error;
}
SKIP(n+atom_extra_skip);
atom_extra_skip = 0;
@@ -3284,7 +4294,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
id_words = get_int16(ep);
if (id_words > ERTS_MAX_REF_NUMBERS)
- return -1;
+ goto error;
ep += 2;
atom_extra_skip = 1 + 4*id_words;
@@ -3326,6 +4336,13 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
ADDTERMS(n);
heap_size += n + 1;
break;
+ case MAP_EXT:
+ CHKSIZE(4);
+ n = get_int32(ep);
+ ep += 4;
+ ADDTERMS(2*n);
+ heap_size += 3 + n + 1 + n;
+ break;
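The heap estimate mirrors the layout built by dec_term's MAP_EXT case:

/* 3      flatmap header words (thing_word, size, keys)
 * + n    value slots
 * + 1    arity word of the keys tuple
 * + n    key slots                      => 3 + n + 1 + n */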
case STRING_EXT:
CHKSIZE(2);
n = get_int16(ep);
@@ -3386,7 +4403,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
num_free = get_int32(ep);
ep += 4;
if (num_free > MAX_ARG) {
- return -1;
+ goto error;
}
terms += 4 + num_free;
heap_size += ERL_FUN_SIZE + num_free;
@@ -3403,24 +4420,47 @@ decoded_size(byte *ep, byte* endp, int internal_tags)
case BINARY_INTERNAL_REF:
if (!internal_tags) {
- return -1;
+ goto error;
}
SKIP(sizeof(ProcBin));
heap_size += PROC_BIN_SIZE;
break;
case BIT_BINARY_INTERNAL_REF:
if (!internal_tags) {
- return -1;
+ goto error;
}
SKIP(2+sizeof(ProcBin));
heap_size += PROC_BIN_SIZE + ERL_SUB_BIN_SIZE;
break;
default:
- return -1;
+ goto error;
}
- }
+ terms--;
+
+ if (ctx && --reds <= 0 && terms > 0) {
+ ctx->u.sc.heap_size = heap_size;
+ ctx->u.sc.terms = terms;
+ ctx->u.sc.ep = ep;
+ ctx->u.sc.atom_extra_skip = atom_extra_skip;
+ ctx->reds = 0;
+ return 0;
+ }
+ } while (terms > 0);
+
/* 'terms' may be non-zero if it has wrapped around */
- return terms==0 ? heap_size : -1;
+ if (terms == 0) {
+ if (ctx) {
+ ctx->state = B2TDecodeInit;
+ ctx->reds = reds;
+ }
+ return heap_size;
+ }
+
+error:
+ if (ctx) {
+ ctx->state = B2TBadArg;
+ }
+ return -1;
#undef SKIP
#undef SKIP2
#undef CHKSIZE
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index e37d47919e..bf00958eb1 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -50,6 +50,7 @@
#define LARGE_BIG_EXT 'o'
#define NEW_FUN_EXT 'p'
#define EXPORT_EXT 'q'
+#define MAP_EXT 't'
#define FUN_EXT 'u'
#define ATOM_UTF8_EXT 'v'
#define SMALL_ATOM_UTF8_EXT 'w'
@@ -138,14 +139,15 @@ typedef struct {
#define ERTS_DIST_EXT_SIZE(EDEP) \
(sizeof(ErtsDistExternal) \
- (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \
- ? (ASSERT_EXPR(0 <= (EDEP)->attab.size \
- && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
+ ? (ASSERT(0 <= (EDEP)->attab.size \
+ && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - (EDEP)->attab.size)) \
: sizeof(ErtsAtomTranslationTable)))
typedef struct {
byte *extp;
int exttmp;
+ Uint extsize;
} ErtsBinary2TermState;
/* -------------------------------------------------------------------------- */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 012c1c7e6a..891046a8b5 100755..100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -186,11 +186,6 @@ extern void erts_ddll_remove_monitor(Process *p,
extern Eterm erts_ddll_monitor_driver(Process *p,
Eterm description,
ErtsProcLocks plocks);
-/*
- * Max no. of drivers (linked in and dynamically loaded). Each table
- * entry uses 4 bytes.
- */
-#define DRIVER_TAB_SIZE 32
/*
** Just like the driver binary but with initial flags
@@ -375,113 +370,237 @@ extern int stackdump_on_exit;
* DESTROY_ESTACK(Stack)
*/
+typedef struct {
+ Eterm* start;
+ Eterm* sp;
+ Eterm* end;
+ ErtsAlcType_t alloc_type;
+} ErtsEStack;
-void erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end);
-#define ESTK_CONCAT(a,b) a##b
-#define ESTK_SUBSCRIPT(s,i) *((Eterm *)((byte *)ESTK_CONCAT(s,_start) + (i)))
#define DEF_ESTACK_SIZE (16)
-#define DECLARE_ESTACK(s) \
- Eterm ESTK_CONCAT(s,_default_stack)[DEF_ESTACK_SIZE]; \
- Eterm* ESTK_CONCAT(s,_start) = ESTK_CONCAT(s,_default_stack); \
- Eterm* ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start); \
- Eterm* ESTK_CONCAT(s,_end) = ESTK_CONCAT(s,_start) + DEF_ESTACK_SIZE
+void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
+#define ESTK_CONCAT(a,b) a##b
+#define ESTK_DEF_STACK(s) ESTK_CONCAT(s,_default_estack)
+
+#define DECLARE_ESTACK(s) \
+ Eterm ESTK_DEF_STACK(s)[DEF_ESTACK_SIZE]; \
+ ErtsEStack s = { \
+ ESTK_DEF_STACK(s), /* start */ \
+ ESTK_DEF_STACK(s), /* sp */ \
+ ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
-#define DESTROY_ESTACK(s) \
+#define ESTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \
- erts_free(ERTS_ALC_T_ESTACK, ESTK_CONCAT(s,_start)); \
+ if (s.start != ESTK_DEF_STACK(s)) { \
+ erl_exit(1, "Internal error - trying to change allocator " \
+ "type of active estack\n"); \
} \
+ s.alloc_type = (t); \
+ } while (0)
+
+#define DESTROY_ESTACK(s) \
+do { \
+ if (s.start != ESTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.start); \
+ } \
} while(0)
-#define ESTACK_PUSH(s, x) \
-do { \
- if (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_end)) { \
- erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
- &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
+
+/*
+ * Do not free the stack after this; it may have pointers into what
+ * was saved in 'dst'.
+ */
+#define ESTACK_SAVE(s,dst)\
+do {\
+ if (s.start == ESTK_DEF_STACK(s)) {\
+ UWord _wsz = ESTACK_COUNT(s);\
+ (dst)->start = erts_alloc(s.alloc_type,\
+ DEF_ESTACK_SIZE * sizeof(Eterm));\
+ memcpy((dst)->start, s.start,_wsz*sizeof(Eterm));\
+ (dst)->sp = (dst)->start + _wsz;\
+ (dst)->end = (dst)->start + DEF_ESTACK_SIZE;\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
+ } while (0)
+
+#define DESTROY_SAVED_ESTACK(estack)\
+do {\
+ if ((estack)->start) {\
+ erts_free((estack)->alloc_type, (estack)->start);\
+ (estack)->start = NULL;\
+ }\
} while(0)
-#define ESTACK_PUSH2(s, x, y) \
-do { \
- if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 2) { \
- erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
- &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
- *ESTK_CONCAT(s,_sp)++ = (y); \
+#define CLEAR_SAVED_ESTACK(estack) ((void) ((estack)->start = NULL))
+
+/*
+ * Use only on an empty stack; only the allocator may be changed before this.
+ * The src stack is reset to NULL.
+ */
+#define ESTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.start == ESTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->start = NULL; \
+ ASSERT(s.sp >= s.start); \
+ ASSERT(s.sp <= s.end); \
+} while (0)
+
+#define ESTACK_IS_STATIC(s) (s.start == ESTK_DEF_STACK(s))
+
+#define ESTACK_PUSH(s, x) \
+do { \
+ if (s.sp == s.end) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
} while(0)
-#define ESTACK_PUSH3(s, x, y, z) \
-do { \
- if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 3) { \
- erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
- &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
- *ESTK_CONCAT(s,_sp)++ = (y); \
- *ESTK_CONCAT(s,_sp)++ = (z); \
+#define ESTACK_PUSH2(s, x, y) \
+do { \
+ if (s.sp > s.end - 2) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
+ *s.sp++ = (y); \
} while(0)
-#define ESTACK_COUNT(s) (ESTK_CONCAT(s,_sp) - ESTK_CONCAT(s,_start))
+#define ESTACK_PUSH3(s, x, y, z) \
+do { \
+ if (s.sp > s.end - 3) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
+ *s.sp++ = (y); \
+ *s.sp++ = (z); \
+} while(0)
-#define ESTACK_ISEMPTY(s) (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_start))
-#define ESTACK_POP(s) (*(--ESTK_CONCAT(s,_sp)))
+#define ESTACK_COUNT(s) (s.sp - s.start)
+#define ESTACK_ISEMPTY(s) (s.sp == s.start)
+#define ESTACK_POP(s) (*(--s.sp))
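Taken together these macros implement the save/restore protocol used by the yielding encode/decode paths above. A minimal usage sketch (hypothetical worker; my_ctx is illustrative, not an ERTS type):

/* Hypothetical yielding tree walk using the ESTACK protocol. */
struct my_ctx { ErtsEStack estack; };

static int walk(struct my_ctx *ctx, Eterm root, Sint *reds)
{
    DECLARE_ESTACK(s);
    ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); /* before any push */
    if (ctx->estack.start)            /* resuming a previous slice? */
        ESTACK_RESTORE(s, &ctx->estack);
    else
        ESTACK_PUSH(s, root);
    while (!ESTACK_ISEMPTY(s)) {
        Eterm obj = ESTACK_POP(s);
        /* ... visit obj, pushing its children ... */
        (void) obj;
        if (--*reds <= 0) {
            ESTACK_SAVE(s, &ctx->estack); /* no DESTROY_ESTACK after this */
            return -1;                    /* yield; resume with same ctx */
        }
    }
    DESTROY_ESTACK(s);
    return 0;
}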
-void erl_grow_wstack(UWord** start, UWord** sp, UWord** end);
-#define WSTK_CONCAT(a,b) a##b
-#define WSTK_SUBSCRIPT(s,i) *((UWord *)((byte *)WSTK_CONCAT(s,_start) + (i)))
+/*
+ * WSTACK: same as ESTACK but with UWord instead of Eterm
+ */
+
+typedef struct {
+ UWord* wstart;
+ UWord* wsp;
+ UWord* wend;
+ ErtsAlcType_t alloc_type;
+} ErtsWStack;
+
#define DEF_WSTACK_SIZE (16)
-#define DECLARE_WSTACK(s) \
- UWord WSTK_CONCAT(s,_default_stack)[DEF_WSTACK_SIZE]; \
- UWord* WSTK_CONCAT(s,_start) = WSTK_CONCAT(s,_default_stack); \
- UWord* WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start); \
- UWord* WSTK_CONCAT(s,_end) = WSTK_CONCAT(s,_start) + DEF_WSTACK_SIZE
+void erl_grow_wstack(ErtsWStack*, UWord* def_stack);
+#define WSTK_CONCAT(a,b) a##b
+#define WSTK_DEF_STACK(s) WSTK_CONCAT(s,_default_wstack)
+
+#define DECLARE_WSTACK(s) \
+ UWord WSTK_DEF_STACK(s)[DEF_WSTACK_SIZE]; \
+ ErtsWStack s = { \
+ WSTK_DEF_STACK(s), /* wstart */ \
+ WSTK_DEF_STACK(s), /* wsp */ \
+ WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
-#define DESTROY_WSTACK(s) \
+#define WSTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \
- erts_free(ERTS_ALC_T_ESTACK, WSTK_CONCAT(s,_start)); \
+ if (s.wstart != WSTK_DEF_STACK(s)) { \
+ erl_exit(1, "Internal error - trying to change allocator " \
+ "type of active wstack\n"); \
} \
+ s.alloc_type = (t); \
+ } while (0)
+
+#define DESTROY_WSTACK(s) \
+do { \
+ if (s.wstart != WSTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.wstart); \
+ } \
} while(0)
-#define WSTACK_PUSH(s, x) \
-do { \
- if (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_end)) { \
- erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
- &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
+
+/*
+ * Do not free the stack after this; it may have pointers into what
+ * was saved in 'dst'.
+ */
+#define WSTACK_SAVE(s,dst)\
+do {\
+ if (s.wstart == WSTK_DEF_STACK(s)) {\
+ UWord _wsz = WSTACK_COUNT(s);\
+ (dst)->wstart = erts_alloc(s.alloc_type,\
+ DEF_WSTACK_SIZE * sizeof(UWord));\
+ memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
+ (dst)->wsp = (dst)->wstart + _wsz;\
+ (dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
+ } while (0)
+
+#define DESTROY_SAVED_WSTACK(wstack)\
+do {\
+ if ((wstack)->wstart) {\
+ erts_free((wstack)->alloc_type, (wstack)->wstart);\
+ (wstack)->wstart = NULL;\
+ }\
} while(0)
-#define WSTACK_PUSH2(s, x, y) \
-do { \
- if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 2) { \
- erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
- &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
- *WSTK_CONCAT(s,_sp)++ = (y); \
+#define CLEAR_SAVED_WSTACK(wstack) ((void) ((wstack)->wstart = NULL))
+
+/*
+ * Use only on an empty stack; only the allocator may be changed before this.
+ * The src stack is reset to NULL.
+ */
+#define WSTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.wstart == WSTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->wstart = NULL; \
+ ASSERT(s.wsp >= s.wstart); \
+ ASSERT(s.wsp <= s.wend); \
+} while (0)
+
+#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s))
+
+#define WSTACK_PUSH(s, x) \
+do { \
+ if (s.wsp == s.wend) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
} while(0)
-#define WSTACK_PUSH3(s, x, y, z) \
-do { \
- if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 3) { \
- erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
- &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
- *WSTK_CONCAT(s,_sp)++ = (y); \
- *WSTK_CONCAT(s,_sp)++ = (z); \
+#define WSTACK_PUSH2(s, x, y) \
+do { \
+ if (s.wsp > s.wend - 2) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
+ *s.wsp++ = (y); \
+} while(0)
+
+#define WSTACK_PUSH3(s, x, y, z) \
+do { \
+ if (s.wsp > s.wend - 3) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
+ *s.wsp++ = (y); \
+ *s.wsp++ = (z); \
} while(0)
-#define WSTACK_COUNT(s) (WSTK_CONCAT(s,_sp) - WSTK_CONCAT(s,_start))
+#define WSTACK_COUNT(s) (s.wsp - s.wstart)
+#define WSTACK_ISEMPTY(s) (s.wsp == s.wstart)
+#define WSTACK_POP(s) (*(--s.wsp))
-#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start))
-#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp)))
/* binary.c */
@@ -537,6 +656,10 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
+/* beam_bif_load.c */
+Eterm erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp);
+
+
/* beam_load.c */
typedef struct {
BeamInstr* current; /* Pointer to: Mod, Name, Arity */
@@ -688,12 +811,6 @@ void MD5Final(unsigned char [16], MD5_CTX *);
/* ggc.c */
-
-typedef struct {
- Uint garbage_collections;
- Uint reclaimed;
-} ErtsGCInfo;
-
void erts_gc_info(ErtsGCInfo *gcip);
void erts_init_gc(void);
int erts_garbage_collect(Process*, int, Eterm*, int);
@@ -724,15 +841,15 @@ int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_l
void erts_destroy_driver(erts_driver_t *drv);
int erts_save_suspend_process_on_port(Port*, Process*);
Port *erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *, int *);
-void erts_init_io(int, int);
+void erts_init_io(int, int, int);
void erts_raw_port_command(Port*, byte*, Uint);
void driver_report_exit(ErlDrvPort, int);
LineBuf* allocate_linebuf(int);
int async_ready(Port *, void*);
-ErtsPortNames *erts_get_port_names(Eterm);
+ErtsPortNames *erts_get_port_names(Eterm, ErlDrvPort);
void erts_free_port_names(ErtsPortNames *);
Uint erts_port_ioq_size(Port *pp);
-void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int);
+void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
@@ -740,25 +857,33 @@ Port *erts_get_heart_port(void);
void erts_lcnt_enable_io_lock_count(int enable);
#endif
+/* driver_tab.c */
+typedef void *(*ErtsStaticNifInitFPtr)(void);
+ErtsStaticNifInitFPtr erts_static_nif_get_nif_init(const char *name, int len);
+int erts_is_static_nif(void *handle);
+void erts_init_static_drivers(void);
+
/* erl_drv_thread.c */
void erl_drv_thr_init(void);
/* utils.c */
void erts_cleanup_offheap(ErlOffHeap *offheap);
+Uint64 erts_timestamp_millis(void);
+
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
#define NC_HEAP_SIZE(NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? 0 : (thing_arityval(*boxed_val((NC))) + 1))
#define STORE_NC(Hpp, ETpp, NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_((Hpp), (ETpp), (NC)))
#define STORE_NC_IN_PROC(Pp, NC) \
- (ASSERT_EXPR(is_node_container((NC))), \
+ (ASSERT(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
/* duplicates from big.h */
@@ -777,6 +902,9 @@ Sint erts_re_set_loop_limit(Sint limit);
void erts_init_bif_binary(void);
Sint erts_binary_set_loop_limit(Sint limit);
+/* external.c */
+void erts_init_external(void);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
@@ -794,6 +922,17 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf,
ErtsAlcType_t alloc_type,
int allow_empty, int allow_atom,
Sint *used /* out */);
+char *erts_convert_filename_to_encoding(Eterm name, char *statbuf,
+ size_t statbuf_size,
+ ErtsAlcType_t alloc_type,
+ int allow_empty, int allow_atom,
+ int encoding,
+ Sint *used /* out */,
+ Uint extra);
+char* erts_convert_filename_to_wchar(byte* bytes, Uint size,
+ char *statbuf, size_t statbuf_size,
+ ErtsAlcType_t alloc_type, Sint* used,
+ Uint extra_wchars);
Eterm erts_convert_native_to_filename(Process *p, byte *bytes);
Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left,
Uint *num_built, Uint *num_eaten, Eterm tail);
@@ -816,20 +955,67 @@ struct Sint_buf {
};
char* Sint_to_buf(Sint, struct Sint_buf*);
+#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \
+ {(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0}
+
+#define ERTS_IOLIST_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOListState))
+
+#define ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED 8
+
+typedef struct {
+ Process *c_p;
+ ErlDrvSizeT size;
+ Uint offs;
+ Eterm obj;
+ ErtsEStack estack;
+ int reds_left;
+ int have_size;
+} ErtsIOListState;
+
+#define ERTS_IOLIST2BUF_STATE_INITER(C_P, OBJ) \
+ {ERTS_IOLIST_STATE_INITER((C_P), (OBJ)), {NULL, 0, 0, 0}, NULL, 0, NULL, 0}
+
+#define ERTS_IOLIST2BUF_STATE_MOVE(TO, FROM) \
+ sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOList2BufState))
+
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT 32
+#define ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED 8
+#define ERTS_IOLIST_TO_BUF_BYTES_PER_RED \
+ (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED*ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT)
+
+typedef struct {
+ ErtsIOListState iolist;
+ struct {
+ byte *bptr;
+ size_t size;
+ Uint bitoffs;
+ Uint bitsize;
+ } bcopy;
+ char *buf;
+ ErlDrvSizeT len;
+ Eterm *objp;
+ int offset;
+} ErtsIOList2BufState;
+
#define ERTS_IOLIST_OK 0
#define ERTS_IOLIST_OVERFLOW 1
#define ERTS_IOLIST_TYPE 2
+#define ERTS_IOLIST_YIELD 3
-Eterm buf_to_intlist(Eterm**, char*, size_t, Eterm); /* most callers pass plain char*'s */
+Eterm buf_to_intlist(Eterm**, const char*, size_t, Eterm); /* most callers pass plain char*'s */
#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_YIELD (~((ErlDrvSizeT) 2))
#define ERTS_IOLIST_TO_BUF_FAILED(R) \
- (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+ (((R) & (~((ErlDrvSizeT) 3))) == (~((ErlDrvSizeT) 3)))
#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
(!ERTS_IOLIST_TO_BUF_FAILED((R)))
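Why masking with ~3 classifies all three failure codes at once: they sit at the very top of the ErlDrvSizeT range and differ only in their two low bits:

/* OVERFLOW   = ~0  ...11111111
 * TYPE_ERROR = ~1  ...11111110
 * YIELD      = ~2  ...11111101
 * (R) & ~3 maps each of them to ~3 (...11111100); a genuine byte
 * count that large is unreachable in practice. */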
ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *);
+int erts_iolist_size_yielding(ErtsIOListState *state);
int erts_iolist_size(Eterm, ErlDrvSizeT *);
int is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
@@ -863,6 +1049,7 @@ Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
@@ -901,9 +1088,10 @@ Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
extern void erts_match_set_release_result(Process* p);
enum erts_pam_run_flags {
- ERTS_PAM_TMP_RESULT=0,
- ERTS_PAM_COPY_RESULT=1,
- ERTS_PAM_CONTIGUOUS_TUPLE=2
+ ERTS_PAM_TMP_RESULT=1,
+ ERTS_PAM_COPY_RESULT=2,
+ ERTS_PAM_CONTIGUOUS_TUPLE=4,
+ ERTS_PAM_IGNORE_TRACE_SILENT=8
};
extern Eterm erts_match_set_run(Process *p, Binary *mpsp,
Eterm *args, int num_args,
@@ -925,6 +1113,8 @@ extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
extern erts_driver_t fd_driver;
+int erts_beam_jump_table(void);
+
/* Should maybe be placed in erl_message.h, but then we get an include mess. */
ERTS_GLB_INLINE Eterm *
erts_alloc_message_heap_state(Uint size,
@@ -990,7 +1180,12 @@ erts_alloc_message_heap_state(Uint size,
if (statep)
*statep = state;
if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ || (receiver->flags & F_DISABLE_GC)
|| HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
+ /*
+ * The heap is either potentially in an inconsistent
+ * state, or not large enough.
+ */
#ifdef ERTS_SMP
if (locked_main) {
*receiver_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1100,6 +1295,13 @@ erts_alloc_message_heap(Uint size,
# define UnUseTmpHeapNoproc(Size) /* Nothing */
#endif /* HEAP_ON_C_STACK */
+ERTS_GLB_INLINE void dtrace_pid_str(Eterm pid, char *process_buf);
+ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf);
+ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf);
+ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
+ Eterm module, Eterm function, int arity,
+ char *process_buf, char *mfa_buf);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
#include "dtrace-wrapper.h"
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 8c67f731f4..9ae973e108 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,9 +46,12 @@
#define ERTS_WANT_EXTERNAL_TAGS
#include "external.h"
#include "dtrace-wrapper.h"
+#include "erl_map.h"
extern ErlDrvEntry fd_driver_entry;
+#ifndef __OSE__
extern ErlDrvEntry vanilla_driver_entry;
+#endif
extern ErlDrvEntry spawn_driver_entry;
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
@@ -244,11 +247,13 @@ static ERTS_INLINE void port_init_instr(Port *prt
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
char *lock_str = "port_lock";
+ erts_mtx_init_locked_x(prt->lock, lock_str, id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 0
#endif
- erts_mtx_init_locked_x(prt->lock, lock_str, id);
+ );
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -364,9 +369,8 @@ static Port *create_port(char *name,
ERTS_P_LINKS(prt) = NULL;
ERTS_P_MONITORS(prt) = NULL;
prt->linebuf = NULL;
- prt->bp = NULL;
prt->suspended = NULL;
- prt->data = am_undefined;
+ erts_init_port_data(prt);
prt->port_data_lock = NULL;
prt->control_flags = 0;
prt->bytes_in = 0;
@@ -910,8 +914,8 @@ int erts_port_handle_xports(Port *prt)
(iov)->iov_base = (ptr); \
(iov)->iov_len = (len); \
if (sizeof((iov)->iov_len) < sizeof(len) \
- /* Check if (len) overflowed (iov)->iov_len */ \
- && ((len) >> (sizeof((iov)->iov_len)*CHAR_BIT)) != 0) { \
+ /* Check if (len) overflowed (iov)->iov_len */ \
+ && (iov)->iov_len != (len)) { \
goto L_overflow; \
} \
*(bv)++ = (bin); \
@@ -1214,9 +1218,10 @@ typedef struct {
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1243,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+ act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_out);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
@@ -1269,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
@@ -1290,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
@@ -1315,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_in);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -1331,7 +1381,7 @@ force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_aint32_t invalid_state;
Port *prt = sp->port;
- ASSERT(ERTS_IS_CRASH_DUMPING)
+ ASSERT(ERTS_IS_CRASH_DUMPING);
ASSERT(is_atom(sp->port_op));
invalid_state = sp->state;
@@ -1442,6 +1492,7 @@ erts_schedule_proc2port_signal(Process *c_p,
Eterm *refp,
ErtsProc2PortSigData *sigdp,
int task_flags,
+ ErtsPortTaskHandle *pthp,
ErtsProc2PortSigCallback callback)
{
int sched_res;
@@ -1491,7 +1542,7 @@ erts_schedule_proc2port_signal(Process *c_p,
/* Schedule port close call for later execution... */
sched_res = erts_port_task_schedule(prt->common.id,
- NULL,
+ pthp,
ERTS_PORT_TASK_PROC_SIG,
sigdp,
callback,
@@ -1629,6 +1680,7 @@ bad_port_signal(Process *c_p,
refp,
sigdp,
0,
+ NULL,
port_badsig);
}
@@ -1838,8 +1890,11 @@ erts_port_output(Process *c_p,
ErlIOVec *evp = NULL;
char *buf = NULL;
int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
+ int async_nosuspend;
+ ErtsPortTaskHandle *ns_pthp;
ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_ASYNC
| ERTS_PORT_SIG_FLG_NOSUSPEND
| ERTS_PORT_SIG_FLG_FORCE
| ERTS_PORT_SIG_FLG_FORCE_IMM_CALL)) == 0);
@@ -1861,6 +1916,12 @@ erts_port_output(Process *c_p,
? ERTS_PORT_OP_DROPPED
: ERTS_PORT_OP_BUSY);
+ async_nosuspend = ((flags & (ERTS_PORT_SIG_FLG_ASYNC
+ | ERTS_PORT_SIG_FLG_NOSUSPEND
+ | ERTS_PORT_SIG_FLG_FORCE))
+ == (ERTS_PORT_SIG_FLG_ASYNC
+ | ERTS_PORT_SIG_FLG_NOSUSPEND));
+
try_call = (force_immediate_call /* crash dumping */
|| !(sched_flags & (invalid_flags
| ERTS_PTS_FLGS_FORCE_SCHEDULE_OP)));
@@ -1995,6 +2056,15 @@ erts_port_output(Process *c_p,
return ERTS_PORT_OP_DONE;
case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
sched_flags = try_call_state.sched_flags;
+ if (async_nosuspend
+ && (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))) {
+ driver_free_binary(cbin);
+ if (evp != &ev)
+ erts_free(ERTS_ALC_T_TMP, evp);
+ return ((sched_flags & ERTS_PTS_FLG_EXIT)
+ ? ERTS_PORT_OP_DROPPED
+ : ERTS_PORT_OP_BUSY);
+ }
case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
/* Schedule outputv() call instead... */
break;
@@ -2142,6 +2212,13 @@ erts_port_output(Process *c_p,
return ERTS_PORT_OP_DONE;
case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
sched_flags = try_call_state.sched_flags;
+ if (async_nosuspend
+ && (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))) {
+ erts_free(ERTS_ALC_T_TMP, buf);
+ return ((sched_flags & ERTS_PTS_FLG_EXIT)
+ ? ERTS_PORT_OP_DROPPED
+ : ERTS_PORT_OP_BUSY);
+ }
case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
/* Schedule outputv() call instead... */
break;
@@ -2163,20 +2240,33 @@ erts_port_output(Process *c_p,
task_flags = ERTS_PT_FLG_WAIT_BUSY;
sigdp->flags |= flags;
+ ns_pthp = NULL;
if (flags & (ERTS_P2P_SIG_DATA_FLG_FORCE|ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)) {
task_flags = 0;
if (flags & ERTS_P2P_SIG_DATA_FLG_FORCE)
sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
+ else if (async_nosuspend) {
+ ErtsSchedulerData *esdp = (c_p
+ ? ERTS_PROC_GET_SCHDATA(c_p)
+ : erts_get_scheduler_data());
+ ASSERT(esdp);
+ ns_pthp = &esdp->nosuspend_port_task_handle;
+ sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
+ }
else if (flags & ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)
task_flags = ERTS_PT_FLG_NOSUSPEND;
}
+ ASSERT(ns_pthp || !async_nosuspend);
+ ASSERT(async_nosuspend || !ns_pthp);
+
res = erts_schedule_proc2port_signal(c_p,
prt,
c_p ? c_p->common.id : ERTS_INVALID_PID,
refp,
sigdp,
task_flags,
+ ns_pthp,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
@@ -2187,9 +2277,23 @@ erts_port_output(Process *c_p,
return res;
}
- if (!(sched_flags & ERTS_PTS_FLG_EXIT) && (sched_flags & busy_flgs))
- return ERTS_PORT_OP_BUSY_SCHEDULED;
-
+ if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) {
+ sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags);
+ if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) {
+ if (async_nosuspend)
+ erts_port_task_tmp_handle_detach(ns_pthp);
+ }
+ else {
+ if (!async_nosuspend)
+ return ERTS_PORT_OP_BUSY_SCHEDULED;
+ else {
+ if (erts_port_task_abort(ns_pthp) == 0)
+ return ERTS_PORT_OP_BUSY;
+ else
+ erts_port_task_tmp_handle_detach(ns_pthp);
+ }
+ }
+ }
return res;
bad_value:
@@ -2285,6 +2389,7 @@ erts_port_exit(Process *c_p,
ErlHeapFragment *bp = NULL;
ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_ASYNC
| ERTS_PORT_SIG_FLG_BROKEN_LINK
| ERTS_PORT_SIG_FLG_FORCE_SCHED)) == 0);
@@ -2345,6 +2450,7 @@ erts_port_exit(Process *c_p,
refp,
sigdp,
0,
+ NULL,
port_sig_exit);
if (res == ERTS_PORT_OP_DROPPED) {
@@ -2413,7 +2519,7 @@ set_port_connected(int bang_op,
DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
dtrace_pid_str(connect, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
dtrace_proc_str(rp, newprocess_str);
DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
}
@@ -2460,7 +2566,8 @@ erts_port_connect(Process *c_p,
!refp,
am_connect);
- ASSERT((flags & ~ERTS_PORT_SIG_FLG_BANG_OP) == 0);
+ ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_ASYNC)) == 0);
if (is_not_internal_pid(connect))
connect_id = NIL; /* Fail in op (for signal order) */
@@ -2499,6 +2606,7 @@ erts_port_connect(Process *c_p,
refp,
sigdp,
0,
+ NULL,
port_sig_connect);
}
@@ -2555,6 +2663,7 @@ erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
refp,
sigdp,
0,
+ NULL,
port_sig_unlink);
}
@@ -2644,18 +2753,26 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
refp,
sigdp,
0,
+ NULL,
port_sig_link);
}
void erts_init_io(int port_tab_size,
- int port_tab_size_ignore_files)
+ int port_tab_size_ignore_files,
+ int legacy_port_tab)
{
ErlDrvEntry** dp;
+ UWord common_element_size;
erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
+ common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ));
+ common_element_size += 10; /* name */
#ifdef ERTS_SMP
+ common_element_size += sizeof(erts_mtx_t);
+
init_xports_list_alloc();
#endif
@@ -2676,15 +2793,19 @@ void erts_init_io(int port_tab_size,
&drv_list_rwmtx_opts,
"driver_list");
driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key);
- erts_smp_tsd_key_create(&driver_list_last_error_key);
+ erts_smp_tsd_key_create(&driver_list_lock_status_key,
+ "erts_driver_list_lock_status_key");
+ erts_smp_tsd_key_create(&driver_list_last_error_key,
+ "erts_driver_list_last_error_key");
erts_ptab_init_table(&erts_port,
ERTS_ALC_T_PORT_TABLE,
NULL,
(ErtsPTabElementCommon *) &erts_invalid_port.common,
port_tab_size,
- "port_table");
+ common_element_size, /* Doesn't need to be exact */
+ "port_table",
+ legacy_port_tab);
erts_smp_atomic_init_nob(&erts_bytes_out, 0);
erts_smp_atomic_init_nob(&erts_bytes_in, 0);
@@ -2695,8 +2816,11 @@ void erts_init_io(int port_tab_size,
erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
+#ifndef __OSE__
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
+#endif
init_driver(&spawn_driver, &spawn_driver_entry, NULL);
+ erts_init_static_drivers();
for (dp = driver_tab; *dp != NULL; dp++)
erts_add_driver_entry(*dp, NULL, 1);
@@ -3364,11 +3488,8 @@ terminate_port(Port *prt)
erts_free(ERTS_ALC_T_LINEBUF, (void *) prt->linebuf);
prt->linebuf = NULL;
}
- if (prt->bp != NULL) {
- free_message_buffer(prt->bp);
- prt->bp = NULL;
- prt->data = am_undefined;
- }
+
+ erts_cleanup_port_data(prt);
if (prt->psd)
erts_free(ERTS_ALC_T_PRTSD, prt->psd);
@@ -3516,9 +3637,9 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(rreason_str, 64);
- erts_snprintf(from_str, sizeof(from_str), "%T", from);
+ erts_snprintf(from_str, sizeof(DTRACE_CHARBUF_NAME(from_str)), "%T", from);
dtrace_port_str(p, port_str);
- erts_snprintf(rreason_str, sizeof(rreason_str), "%T", rreason);
+ erts_snprintf(rreason_str, sizeof(DTRACE_CHARBUF_NAME(rreason_str)), "%T", rreason);
DTRACE4(port_exit, from_str, port_str, p->name, rreason_str);
}
#endif
@@ -3536,6 +3657,8 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
if (send_closed)
set_state_flags |= ERTS_PORT_SFLG_SEND_CLOSED;
+ erts_port_task_sched_enter_exiting_state(&p->sched);
+
state = erts_atomic32_read_bor_mb(&p->state, set_state_flags);
state |= set_state_flags;
@@ -3618,6 +3741,10 @@ erts_port_command(Process *c_p,
ASSERT(port);
flags |= ERTS_PORT_SIG_FLG_BANG_OP;
+ if (!erts_port_synchronous_ops) {
+ flags |= ERTS_PORT_SIG_FLG_ASYNC;
+ refp = NULL;
+ }
if (is_tuple_arity(command, 2)) {
Eterm cntd;
@@ -3625,21 +3752,14 @@ erts_port_command(Process *c_p,
cntd = tp[1];
if (is_internal_pid(cntd)) {
if (tp[2] == am_close) {
- if (!erts_port_synchronous_ops)
- refp = NULL;
flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
return erts_port_exit(c_p, flags, port, cntd, am_normal, refp);
} else if (is_tuple_arity(tp[2], 2)) {
tp = tuple_val(tp[2]);
if (tp[1] == am_command) {
- if (!(flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
- && !erts_port_synchronous_ops)
- refp = NULL;
return erts_port_output(c_p, flags, port, cntd, tp[2], refp);
}
else if (tp[1] == am_connect) {
- if (!erts_port_synchronous_ops)
- refp = NULL;
flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
return erts_port_connect(c_p, flags, port, cntd, tp[2], refp);
}
@@ -3648,8 +3768,6 @@ erts_port_command(Process *c_p,
}
/* badsig */
- if (!erts_port_synchronous_ops)
- refp = NULL;
flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
return bad_port_signal(c_p, flags, port, c_p->common.id, refp, am_command);
}
@@ -4018,7 +4136,7 @@ erts_port_control(Process* c_p,
copy = 1;
else {
binp = ((ProcBin *) ebinp)->val;
- ASSERT(bufp < bufp + size);
+ ASSERT(bufp <= bufp + size);
ASSERT(binp->orig_bytes <= bufp
&& bufp + size <= binp->orig_bytes + binp->orig_size);
erts_refc_inc(&binp->refc, 1);
@@ -4046,6 +4164,7 @@ erts_port_control(Process* c_p,
retvalp,
sigdp,
0,
+ NULL,
port_sig_control);
if (res != ERTS_PORT_OP_SCHEDULED) {
cleanup_scheduled_control(binp, bufp);
@@ -4140,7 +4259,7 @@ port_sig_call(Port *prt,
ErlOffHeap *ohp;
Process *rp;
ErtsProcLocks rp_locks = 0;
- Uint hsz;
+ Sint hsz;
rp = erts_proc_lookup_raw(sigdp->caller);
if (!rp)
@@ -4257,7 +4376,7 @@ erts_port_call(Process* c_p,
switch (try_call_res) {
case ERTS_TRY_IMM_DRV_CALL_OK: {
Eterm *hp, *hp_end;
- Uint hsz;
+ Sint hsz;
unsigned ret_flags = 0U;
Eterm term;
@@ -4326,6 +4445,7 @@ erts_port_call(Process* c_p,
retvalp,
sigdp,
0,
+ NULL,
port_sig_call);
if (res != ERTS_PORT_OP_SCHEDULED) {
cleanup_scheduled_call(bufp);
@@ -4492,228 +4612,10 @@ erts_port_info(Process* c_p,
retvalp,
sigdp,
0,
+ NULL,
port_sig_info);
}
-static int
-port_sig_set_data(Port *prt,
- erts_aint32_t state,
- int op,
- ErtsProc2PortSigData *sigdp)
-{
- ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
-
- if (op == ERTS_PROC2PORT_SIG_EXEC) {
- if (prt->bp)
- free_message_buffer(prt->bp);
- prt->bp = sigdp->u.set_data.bp;
- prt->data = sigdp->u.set_data.data;
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
- }
- else {
- if (sigdp->u.set_data.bp)
- free_message_buffer(sigdp->u.set_data.bp);
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
- }
- return ERTS_PORT_REDS_SET_DATA;
-}
-
-ErtsPortOpResult
-erts_port_set_data(Process* c_p,
- Port *prt,
- Eterm data,
- Eterm *refp)
-{
- ErtsPortOpResult res;
- Eterm set_data;
- ErlHeapFragment *bp;
- ErtsProc2PortSigData *sigdp;
- ErtsTryImmDrvCallResult try_call_res;
- ErtsTryImmDrvCallState try_call_state
- = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
- c_p,
- prt,
- ERTS_PORT_SFLGS_INVALID_LOOKUP,
- 0,
- !refp,
- am_set_data);
-
- if (is_immed(data)) {
- set_data = data;
- bp = NULL;
- }
- else {
- Eterm *hp;
- Uint sz = size_object(data);
- bp = new_message_buffer(sz);
- hp = bp->mem;
- set_data = copy_struct(data, sz, &hp, &bp->off_heap);
- }
-
- try_call_res = try_imm_drv_call(&try_call_state);
- switch (try_call_res) {
- case ERTS_TRY_IMM_DRV_CALL_OK:
- if (prt->bp)
- free_message_buffer(prt->bp);
- prt->bp = bp;
- prt->data = set_data;
- finalize_imm_drv_call(&try_call_state);
- BUMP_REDS(c_p, ERTS_PORT_REDS_SET_DATA);
- return ERTS_PORT_OP_DONE;
- case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- return ERTS_PORT_OP_DROPPED;
- case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
- case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
- /* Schedule call instead... */
- break;
- }
-
- sigdp = erts_port_task_alloc_p2p_sig_data();
- sigdp->flags = ERTS_P2P_SIG_TYPE_SET_DATA;
- sigdp->u.set_data.data = set_data;
- sigdp->u.set_data.bp = bp;
-
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p->common.id,
- refp,
- sigdp,
- 0,
- port_sig_set_data);
- if (res != ERTS_PORT_OP_SCHEDULED && bp)
- free_message_buffer(bp);
- return res;
-}
-
-static int
-port_sig_get_data(Port *prt,
- erts_aint32_t state,
- int op,
- ErtsProc2PortSigData *sigdp)
-{
- ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
- if (op != ERTS_PROC2PORT_SIG_EXEC)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
- else {
- Process *rp;
- ErtsProcLocks rp_locks = 0;
-
- rp = erts_proc_lookup_raw(sigdp->caller);
- if (rp) {
- Uint hsz;
- Eterm *hp, *hp_start;
- Eterm data, msg;
- ErlHeapFragment *bp;
- ErlOffHeap *ohp;
-
- hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
- hsz += 3;
- if (prt->bp)
- hsz += prt->bp->used_size;
-
- hp_start = hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
-
- if (is_immed(prt->data))
- data = prt->data;
- else
- data = copy_struct(prt->data,
- prt->bp->used_size,
- &hp,
- &bp->off_heap);
-
-
-
- msg = TUPLE2(hp, am_ok, data);
- hp += 3;
-
- queue_port_sched_op_reply(rp,
- &rp_locks,
- hp_start,
- hp,
- hsz,
- bp,
- sigdp->ref,
- msg);
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
- return ERTS_PORT_REDS_GET_DATA;
-}
-
-ErtsPortOpResult
-erts_port_get_data(Process* c_p,
- Port *prt,
- Eterm *retvalp)
-{
- ErtsProc2PortSigData *sigdp;
- ErtsTryImmDrvCallResult try_call_res;
- ErtsTryImmDrvCallState try_call_state
- = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
- c_p,
- prt,
- ERTS_PORT_SFLGS_INVALID_LOOKUP,
- 0,
- 0,
- am_get_data);
-
- try_call_res = try_imm_drv_call(&try_call_state);
- switch (try_call_res) {
- case ERTS_TRY_IMM_DRV_CALL_OK: {
- Eterm *hp;
- Eterm data;
- ErlHeapFragment *bp;
- Uint sz;
- if (is_immed(prt->data)) {
- bp = NULL;
- data = prt->data;
- }
- else {
- bp = new_message_buffer(prt->bp->used_size);
- data = copy_struct(prt->data,
- prt->bp->used_size,
- &hp,
- &bp->off_heap);
- }
- finalize_imm_drv_call(&try_call_state);
- if (is_immed(data))
- sz = 0;
- else
- sz = bp->used_size;
-
- hp = HAlloc(c_p, sz + 3);
- if (is_not_immed(data)) {
- data = copy_struct(data, bp->used_size, &hp, &MSO(c_p));
- free_message_buffer(bp);
- }
- *retvalp = TUPLE2(hp, am_ok, data);
- BUMP_REDS(c_p, ERTS_PORT_REDS_GET_DATA);
- return ERTS_PORT_OP_DONE;
- }
- case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- return ERTS_PORT_OP_DROPPED;
- case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
- case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
- /* Schedule call instead... */
- break;
- }
-
- sigdp = erts_port_task_alloc_p2p_sig_data();
- sigdp->flags = ERTS_P2P_SIG_TYPE_GET_DATA;
-
- return erts_schedule_proc2port_signal(c_p,
- prt,
- c_p->common.id,
- retvalp,
- sigdp,
- 0,
- port_sig_get_data);
-}
-
typedef struct {
int to;
void *arg;
@@ -4804,7 +4706,7 @@ set_busy_port(ErlDrvPort dprt, int on)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_busy)) {
- erts_snprintf(port_str, sizeof(port_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
DTRACE1(port_busy, port_str);
}
@@ -4817,7 +4719,7 @@ set_busy_port(ErlDrvPort dprt, int on)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_not_busy)) {
- erts_snprintf(port_str, sizeof(port_str),
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
DTRACE1(port_not_busy, port_str);
}
@@ -4869,9 +4771,9 @@ erts_port_resume_procs(Port *prt)
DTRACE_CHARBUF(pid_str, 16);
ErtsProcList* plp2 = plp;
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
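The sizeof() changes above only make sense given how DTRACE_CHARBUF is defined. A minimal sketch, assuming a definition that pairs a backing array with a pointer alias (the exact macro lives in the dtrace wrapper headers):

    /* Sketch; DTRACE_CHARBUF is assumed to be defined along these lines. */
    #include <stdio.h>

    #define DTRACE_CHARBUF(name, size) \
        char name##_BUFFER[size], *name = name##_BUFFER
    #define DTRACE_CHARBUF_NAME(name) name##_BUFFER

    int main(void)
    {
        DTRACE_CHARBUF(port_str, 64);
        /* sizeof(port_str) measures the pointer (4 or 8 bytes);
         * sizeof(DTRACE_CHARBUF_NAME(port_str)) measures the array (64). */
        printf("%zu vs %zu\n",
               sizeof(port_str),
               sizeof(DTRACE_CHARBUF_NAME(port_str)));
        return 0;
    }

With such a definition, passing sizeof(port_str) to erts_snprintf could truncate the %T output to pointer size, which is what the fix addresses.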
@@ -4970,7 +4872,8 @@ int async_ready(Port *p, void* data)
static void
report_missing_drv_callback(Port *p, char *drv_type, char *callback)
{
- ErtsPortNames *pnp = erts_get_port_names(p->common.id);
+ ErtsPortNames *pnp = erts_get_port_names(p->common.id,
+ ERTS_Port2ErlDrvPort(p));
char *unknown = "<unknown>";
char *drv_name = pnp->driver_name ? pnp->driver_name : unknown;
char *prt_name = pnp->name ? pnp->name : unknown;
@@ -4985,15 +4888,25 @@ report_missing_drv_callback(Port *p, char *drv_type, char *callback)
void
erts_stale_drv_select(Eterm port,
+ ErlDrvPort drv_port,
ErlDrvEvent hndl,
int mode,
int deselect)
{
char *type;
- ErlDrvPort drv_port = ERTS_Port2ErlDrvPort(erts_port_lookup_raw(port));
- ErtsPortNames *pnp = erts_get_port_names(port);
+ ErtsPortNames *pnp;
erts_dsprintf_buf_t *dsbufp;
+ if (drv_port == ERTS_INVALID_ERL_DRV_PORT) {
+ Port *prt = erts_port_lookup_raw(port);
+ if (prt)
+ drv_port = ERTS_Port2ErlDrvPort(prt);
+ else
+ drv_port = ERTS_INVALID_ERL_DRV_PORT;
+ }
+
+ pnp = erts_get_port_names(port, drv_port);
+
switch (mode) {
case ERL_DRV_READ | ERL_DRV_WRITE:
type = "Input/Output";
@@ -5028,12 +4941,16 @@ erts_stale_drv_select(Eterm port,
}
ErtsPortNames *
-erts_get_port_names(Eterm id)
+erts_get_port_names(Eterm id, ErlDrvPort drv_port)
{
- Port *prt = erts_port_lookup_raw(id);
+ Port *prt;
ErtsPortNames *pnp;
ASSERT(is_nil(id) || is_internal_port(id));
+ prt = ERTS_ErlDrvPort2Port(drv_port);
+ if (prt == ERTS_INVALID_ERL_DRV_PORT)
+ prt = erts_port_lookup_raw(id);
+
if (!prt) {
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames));
pnp->name = NULL;
@@ -5045,6 +4962,7 @@ erts_get_port_names(Eterm id)
size_t pnp_len = sizeof(ErtsPortNames);
#ifndef DEBUG
pnp_len += 100; /* In most cases 100 characters will be enough... */
#endif
+ ASSERT(prt->common.id == id);
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
do {
@@ -5430,6 +5348,17 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
depth++;
break;
}
+ case ERL_DRV_MAP: { /* int */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if ((int) ptr[0] < 0) ERTS_DDT_FAIL;
+ need += MAP_HEADER_SIZE + 1 + 2*ptr[0];
+ depth -= 2*ptr[0];
+ if (depth < 0) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ }
+
default:
ERTS_DDT_FAIL;
}
@@ -5666,6 +5595,36 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
ptr += 2;
break;
+ case ERL_DRV_MAP: { /* int */
+ int size = (int)ptr[0];
+ Eterm* tp = hp;
+ Eterm* vp;
+ map_t *mp;
+
+ *tp = make_arityval(size);
+
+ hp += 1 + size;
+ mp = (map_t*)hp;
+ mp->thing_word = MAP_HEADER;
+ mp->size = size;
+ mp->keys = make_tuple(tp);
+ mess = make_map(mp);
+
+ hp += MAP_HEADER_SIZE + size; /* advance "heap" pointer */
+
+ tp += size; /* point at last key */
+ vp = hp - 1; /* point at last value */
+
+ while(size--) {
+ *vp-- = ESTACK_POP(stack);
+ *tp-- = ESTACK_POP(stack);
+ }
+ if (!erts_validate_and_sort_map(mp))
+ ERTS_DDT_FAIL;
+ ptr++;
+ break;
+ }
+
}
ESTACK_PUSH(stack, mess);
}
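For driver authors, the new ERL_DRV_MAP term type is used like the other compound types: list the key/value pairs key-first, then ERL_DRV_MAP followed by the number of pairs. A hedged sketch (send_count and the atom name are illustrative only):

    #include "erl_driver.h"

    /* Sends #{count => 42} to the port's connected process. */
    static void send_count(ErlDrvPort port)
    {
        ErlDrvTermData spec[] = {
            ERL_DRV_ATOM, driver_mk_atom("count"), /* key   */
            ERL_DRV_INT,  (ErlDrvTermData) 42,     /* value */
            ERL_DRV_MAP,  1                        /* one key/value pair */
        };
        driver_output_term(port, spec, sizeof(spec) / sizeof(spec[0]));
    }

Note that the builder runs erts_validate_and_sort_map on the result, so a spec with duplicate keys fails term delivery.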
@@ -5934,10 +5893,6 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
return driver_output2(ix, hbuf, hlen, NULL, 0);
size = vec->size - skip; /* Size of remaining bytes in vector */
- ASSERT(hlen >= 0); /* debug only */
- if (hlen < 0)
- hlen = 0;
-
prt = erts_drvport2port_state(ix, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
@@ -5960,7 +5915,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
binv++;
n--;
} else {
- iov->iov_base += skip;
+ iov->iov_base = ((char *)(iov->iov_base)) + skip;
iov->iov_len -= skip;
skip = 0;
}
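The iov_base casts here, and at the driver_enqv/driver_pushqv/driver_deq sites below, replace arithmetic on a void pointer, which is a GNU C extension, with portable char pointer arithmetic. A one-function sketch of the portable form:

    #include <stddef.h>

    /* Portable equivalent of "p += skip" for a void pointer; arithmetic
     * on void* requires a complete object type in ISO C. */
    static void *advance(void *p, size_t skip)
    {
        return (char *) p + skip;
    }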
@@ -6220,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init(&pdl->mtx, "port_data_lock");
+ erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -6405,7 +6360,7 @@ int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
n--;
}
else {
- iov->iov_base += skip;
+ iov->iov_base = ((char *)(iov->iov_base)) + skip;
iov->iov_len -= skip;
skip = 0;
}
@@ -6470,7 +6425,7 @@ int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
n--;
}
else {
- iov->iov_base += skip;
+ iov->iov_base = ((char *)(iov->iov_base)) + skip;
iov->iov_len -= skip;
skip = 0;
}
@@ -6529,7 +6484,7 @@ ErlDrvSizeT driver_deq(ErlDrvPort ix, ErlDrvSizeT size)
q->v_head++;
}
else {
- q->v_head->iov_base += size;
+ q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
q->v_head->iov_len -= size;
size = 0;
}
@@ -7202,7 +7157,7 @@ void *driver_dl_open(char * path)
int res;
int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
int locked = maybe_lock_driver_list();
- if ((res = erts_sys_ddll_open(path, &ptr)) == 0) {
+ if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) {
maybe_unlock_driver_list(locked);
return ptr;
} else {
@@ -7257,7 +7212,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
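ERL_DRV_SYS_INFO_SIZE computes the size of ErlDrvSysInfo up to and including a given field, so callers compiled against older, shorter versions of the struct stay compatible; offsetof replaces the hand-rolled null-pointer cast, which is undefined behavior under a strict reading of ISO C. A self-contained sketch of the idiom (the struct names are made up for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct info_v2 { int major; int minor; int dirty_scheduler_support; };

    /* Size of struct info_v2 up to and including FIELD. */
    #define INFO_SIZE_THROUGH(FIELD) \
        (offsetof(struct info_v2, FIELD) + \
         sizeof(((struct info_v2 *) 0)->FIELD))

    int main(void)
    {
        /* A caller built against an older, two-field struct passes the
         * smaller size; the callee fills only the fields that fit. */
        printf("%zu %zu\n",
               INFO_SIZE_THROUGH(minor),
               INFO_SIZE_THROUGH(dirty_scheduler_support));
        return 0;
    }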
void
@@ -7319,6 +7274,18 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->nif_major_version = ERL_NIF_MAJOR_VERSION;
sip->nif_minor_version = ERL_NIF_MINOR_VERSION;
}
+ /*
+ * 'dirty_scheduler_support' is the last field in the 4th version
+ * (driver version 3.1, NIF version 2.7)
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
+#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
+ sip->dirty_scheduler_support = 1;
+#else
+ sip->dirty_scheduler_support = 0;
+#endif
+ }
+
}
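From the driver side, the new field can only be trusted if the running emulator is new enough to have filled it in. A hedged sketch of the probing pattern, assuming the driver was compiled against the newer erl_driver.h (the 3.1 version check mirrors the comment above):

    #include "erl_driver.h"

    /* Returns nonzero if the runtime reports dirty scheduler support. */
    static int runtime_has_dirty_schedulers(void)
    {
        ErlDrvSysInfo si;
        driver_system_info(&si, sizeof(si));
        if (si.driver_major_version < 3
            || (si.driver_major_version == 3 && si.driver_minor_version < 1))
            return 0;   /* emulator too old to have filled in the field */
        return si.dirty_scheduler_support;
    }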
@@ -7405,10 +7372,11 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
erts_atom_put((byte *) drv->name,
sys_strlen(drv->name),
ERTS_ATOM_ENC_LATIN1,
- 1)
+ 1),
#else
- NIL
+ NIL,
#endif
+ 1
);
}
#endif
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 1e5ae46bfa..68fcc177ae 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -763,17 +763,17 @@ allocate_init t I y
#################################################################
#
-# The BIFs erlang:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/2 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erlang:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext_only Bif
+call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
+call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0,1 must be called like functions,
+# The BIF erlang:garbage_collect/0 must be called like a function,
# to allow it to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
@@ -782,10 +782,6 @@ call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
-call_ext u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erlang:garbage_collect/1 D => i_call_ext_last Bif D
-call_ext_only u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext_only Bif
-
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
# them like functions.
@@ -1470,6 +1466,93 @@ apply I
apply_last I P
#
+# Map instructions in R17.
+#
+
+put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
+put_map_assoc F Src=s Dst Live Size Rest=* => \
+ update_map_assoc F Src Dst Live Size Rest
+put_map_assoc F Src Dst Live Size Rest=* => \
+ move Src x | update_map_assoc F x Dst Live Size Rest
+put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
+put_map_exact F Src=s Dst Live Size Rest=* => \
+ update_map_exact F Src Dst Live Size Rest
+put_map_exact F Src Dst Live Size Rest=* => \
+ move Src x | update_map_exact F x Dst Live Size Rest
+
+new_map j d I I
+update_map_assoc j s d I I
+update_map_exact j s d I I
+
+is_map Fail Literal=q => move Literal x | is_map Fail x
+is_map Fail c => jump Fail
+
+%macro: is_map IsMap -fail_action
+is_map f r
+is_map f x
+is_map f y
+
+## Transform has_map_field(s) #{ K1 := _, K2 := _ }
+
+has_map_field/3
+
+has_map_fields Fail Src Size=u==1 Rest=* => gen_has_map_field(Fail,Src,Size,Rest)
+has_map_fields Fail Src Size Rest=* => i_has_map_fields Fail Src Size Rest
+
+i_has_map_fields f s I
+
+has_map_field Fail Src=rxy Key=arxy => i_has_map_field Fail Src Key
+has_map_field Fail Src Key => move Key x | i_has_map_field Fail Src x
+
+%macro: i_has_map_field HasMapField -fail_action
+i_has_map_field f r a
+i_has_map_field f x a
+i_has_map_field f y a
+i_has_map_field f r r
+i_has_map_field f x r
+i_has_map_field f y r
+i_has_map_field f r x
+i_has_map_field f x x
+i_has_map_field f y x
+i_has_map_field f r y
+i_has_map_field f x y
+i_has_map_field f y y
+
+## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 }
+
+get_map_element/4
+
+get_map_elements Fail Src=rxy Size=u==2 Rest=* => gen_get_map_element(Fail,Src,Size,Rest)
+get_map_elements Fail Src Size Rest=* => i_get_map_elements Fail Src Size Rest
+
+i_get_map_elements f s I
+
+get_map_element Fail Src=rxy Key=ax Dst => i_get_map_element Fail Src Key Dst
+get_map_element Fail Src=rxy Key=rycq Dst => \
+ move Key x | i_get_map_element Fail Src x Dst
+get_map_element Fail Src Key Dst => jump Fail
+
+%macro: i_get_map_element GetMapElement -fail_action
+i_get_map_element f r a r
+i_get_map_element f x a r
+i_get_map_element f y a r
+i_get_map_element f r a x
+i_get_map_element f x a x
+i_get_map_element f y a x
+i_get_map_element f r a y
+i_get_map_element f x a y
+i_get_map_element f y a y
+i_get_map_element f r x r
+i_get_map_element f x x r
+i_get_map_element f y x r
+i_get_map_element f r x x
+i_get_map_element f x x x
+i_get_map_element f y x x
+i_get_map_element f r x y
+i_get_map_element f x x y
+i_get_map_element f y x y
+
+#
# Optimize addition and subtraction of small literals using
# the i_increment/4 instruction (in bodies, not in guards).
#
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 05bff430e3..c29d4b3777 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -38,6 +38,8 @@
#if defined (__WIN32__)
# include "erl_win_sys.h"
+#elif defined (__OSE__)
+# include "erl_ose_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
@@ -64,8 +66,12 @@
*/
#ifndef ERTS_SYS_FD_TYPE
+#define ERTS_SYS_FD_INVALID ((ErtsSysFdType) -1)
typedef int ErtsSysFdType;
#else
+#ifndef ERTS_SYS_FD_INVALID
+# error missing ERTS_SYS_FD_INVALID
+#endif
typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
#endif
@@ -149,19 +155,37 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_EXIT_AFTER_DUMP exit
#endif
+/* In VC++, noreturn is a declspec that has to be before the types,
+ * but in GNUC it is an attribute to be placed between return type
+ * and function name, hence __decl_noreturn <types> __noreturn <function name>
+ *
+ * On some platforms (e.g. Android) __noreturn is already defined in sys/cdefs.h.
+ */
+#if __GNUC__
+# define __decl_noreturn
+# ifndef __noreturn
+# define __noreturn __attribute__((noreturn))
+# endif
+#else
+# if defined(__WIN32__) && defined(_MSC_VER)
+# define __noreturn
+# define __decl_noreturn __declspec(noreturn)
+# else
+# define __noreturn
+# define __decl_noreturn
+# endif
+#endif
+
+#define ERTS_ASSERT(e) \
+ ((void) ((e) ? 1 : (erl_assert_error(#e, __func__, __FILE__, __LINE__), 0)))
+
+__decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *func,
+ const char* file, int line);
+
#ifdef DEBUG
-# define ASSERT(e) \
- if (e) { \
- ; \
- } else { \
- erl_assert_error(#e, __FILE__, __LINE__); \
- }
-# define ASSERT_EXPR(e) \
- ((void) ((e) ? 1 : (erl_assert_error(#e, __FILE__, __LINE__), 0)))
-void erl_assert_error(char* expr, char* file, int line);
+# define ASSERT(e) ERTS_ASSERT(e)
#else
-# define ASSERT(e)
-# define ASSERT_EXPR(e) ((void) 1)
+# define ASSERT(e) ((void) 1)
#endif
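Rewriting ASSERT as the expression-style ERTS_ASSERT also removes a classic macro pitfall: the old statement form (an if/else) could not be placed safely as the body of an if that itself has an else. A self-contained sketch (the fprintf stands in for erl_assert_error):

    #include <stdio.h>

    #define ERTS_ASSERT(e) \
        ((void) ((e) ? 1 : (fprintf(stderr, "assert: %s\n", #e), 0)))
    #define ASSERT(e) ERTS_ASSERT(e)   /* DEBUG flavour; sketch only */

    static void handle_empty(void) { puts("empty"); }

    int main(void)
    {
        int have_data = 0, len = 0;
        /* With the old if/else ASSERT this construct would not even
         * parse; as an expression it reads the way it is written. */
        if (have_data)
            ASSERT(len > 0);
        else
            handle_empty();
        return 0;
    }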
/*
@@ -197,23 +221,6 @@ void erl_assert_error(char* expr, char* file, int line);
# define erts_align_attribute(SZ)
#endif
-/* In VC++, noreturn is a declspec that has to be before the types,
- * but in GNUC it is an att ribute to be placed between return type
- * and function name, hence __decl_noreturn <types> __noreturn <function name>
- */
-#if __GNUC__
-# define __decl_noreturn
-# define __noreturn __attribute__((noreturn))
-#else
-# if defined(__WIN32__) && defined(_MSC_VER)
-# define __noreturn
-# define __decl_noreturn __declspec(noreturn)
-# else
-# define __noreturn
-# define __decl_noreturn
-# endif
-#endif
-
/*
** Data types:
**
@@ -271,6 +278,7 @@ void erl_assert_error(char* expr, char* file, int line);
typedef unsigned int Eterm;
typedef unsigned int Uint;
typedef int Sint;
+#define ERTS_UINT_MAX UINT_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#else
@@ -282,16 +290,22 @@ typedef unsigned long UWord;
typedef long SWord;
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
+#define ERTS_UWORD_MAX ULONG_MAX
+#define ERTS_SWORD_MAX LONG_MAX
#elif SIZEOF_VOID_P == SIZEOF_INT
typedef unsigned int UWord;
typedef int SWord;
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
+#define ERTS_UWORD_MAX UINT_MAX
+#define ERTS_SWORD_MAX INT_MAX
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
typedef unsigned long long UWord;
typedef long long SWord;
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
+#define ERTS_UWORD_MAX ULLONG_MAX
+#define ERTS_SWORD_MAX LLONG_MAX
#else
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
@@ -304,6 +318,8 @@ typedef unsigned long Uint;
typedef long Sint;
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
+#define ERTS_UWORD_MAX ULONG_MAX
+#define ERTS_SWORD_MAX LONG_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
@@ -312,6 +328,8 @@ typedef unsigned int Uint;
typedef int Sint;
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
+#define ERTS_UWORD_MAX UINT_MAX
+#define ERTS_SWORD_MAX INT_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
@@ -320,6 +338,8 @@ typedef unsigned long long Uint;
typedef long long Sint;
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
+#define ERTS_UWORD_MAX ULLONG_MAX
+#define ERTS_SWORD_MAX LLONG_MAX
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
@@ -332,6 +352,7 @@ typedef long long Sint;
typedef Uint UWord;
typedef Sint SWord;
+#define ERTS_UINT_MAX ERTS_UWORD_MAX
#endif /* HALFWORD_HEAP */
@@ -484,7 +505,7 @@ extern volatile int erts_writing_erl_crash_dump;
# define NO_ERF
# define NO_ERFC
/* This definition doesn't take NaN into account, but matherr() gets those */
-# define finite(x) (fabs(x) != HUGE_VAL)
+# define isfinite(x) (fabs(x) != HUGE_VAL)
# define USE_MATHERR
# define HAVE_FINITE
#endif
@@ -658,8 +679,7 @@ typedef struct {
#define ERTS_SYS_DDLL_ERROR_INIT {NULL}
extern void erts_sys_ddll_free_error(ErtsSysDdllError*);
extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */
-extern int erts_sys_ddll_open2(const char *path, void **handle, ErtsSysDdllError*);
-#define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL)
+extern int erts_sys_ddll_open(const char *path, void **handle, ErtsSysDdllError*);
extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*);
extern int erts_sys_ddll_load_driver_init(void *handle, void **function);
extern int erts_sys_ddll_load_nif_init(void *handle, void **function,ErtsSysDdllError*);
@@ -728,6 +748,14 @@ void init_getenv_state(GETENV_STATE *);
char * getenv_string(GETENV_STATE *);
void fini_getenv_state(GETENV_STATE *);
+#define HAVE_ERTS_CHECK_IO_DEBUG
+typedef struct {
+ int no_used_fds;
+ int no_driver_select_structs;
+ int no_driver_event_structs;
+} ErtsCheckIoDebugInfo;
+int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
+
/* xxxP */
#define SYS_DEFAULT_FLOAT_DECIMALS 20
void init_sys_float(void);
@@ -749,6 +777,8 @@ int erts_sys_getenv(char *key, char *value, size_t *size);
int erts_sys_getenv_raw(char *key, char *value, size_t *size);
/* erts_sys_getenv__() is only allowed to be used in early init phase */
int erts_sys_getenv__(char *key, char *value, size_t *size);
+/* erts_sys_unsetenv() returns 0 on success and a value != 0 on failure. */
+int erts_sys_unsetenv(char *key);
/* Easier to use, but not as efficient, environment functions */
char *erts_read_env(char *key);
@@ -1012,6 +1042,9 @@ void erl_bin_write(unsigned char *, int, int);
#define ERTS_SMALL_ABS(Small) labs(Small)
#endif
+#ifndef ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
+# define ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC 0
+#endif
#ifdef __WIN32__
void call_break_handler(void);
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index a8f15fdc38..f20e6e5665 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,6 +31,7 @@
#include "bif.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_map.h"
#include "packet_parser.h"
#include "erl_gc.h"
#define ERTS_WANT_DB_INTERNAL__
@@ -47,6 +48,7 @@
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
#include "erl_ptab.h"
+#include "erl_check_io.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -185,39 +187,41 @@ erts_set_hole_marker(Eterm* ptr, Uint sz)
* Helper function for the ESTACK macros defined in global.h.
*/
void
-erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end)
+erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
{
- Uint old_size = (*end - *start);
+ Uint old_size = (s->end - s->start);
Uint new_size = old_size * 2;
- Uint sp_offs = *sp - *start;
- if (new_size > 2 * DEF_ESTACK_SIZE) {
- *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(Eterm));
+ Uint sp_offs = s->sp - s->start;
+ if (s->start != default_estack) {
+ s->start = erts_realloc(s->alloc_type, s->start,
+ new_size*sizeof(Eterm));
} else {
- Eterm* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(Eterm));
- sys_memcpy(new_ptr, *start, old_size*sizeof(Eterm));
- *start = new_ptr;
+ Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm));
+ sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm));
+ s->start = new_ptr;
}
- *end = *start + new_size;
- *sp = *start + sp_offs;
+ s->end = s->start + new_size;
+ s->sp = s->start + sp_offs;
}
/*
- * Helper function for the ESTACK macros defined in global.h.
+ * Helper function for the WSTACK macros defined in global.h.
*/
void
-erl_grow_wstack(UWord** start, UWord** sp, UWord** end)
+erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
{
- Uint old_size = (*end - *start);
+ Uint old_size = (s->wend - s->wstart);
Uint new_size = old_size * 2;
- Uint sp_offs = *sp - *start;
- if (new_size > 2 * DEF_ESTACK_SIZE) {
- *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(UWord));
+ Uint sp_offs = s->wsp - s->wstart;
+ if (s->wstart != default_wstack) {
+ s->wstart = erts_realloc(s->alloc_type, s->wstart,
+ new_size*sizeof(UWord));
} else {
- UWord* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(UWord));
- sys_memcpy(new_ptr, *start, old_size*sizeof(UWord));
- *start = new_ptr;
+ UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord));
+ sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord));
+ s->wstart = new_ptr;
}
- *end = *start + new_size;
- *sp = *start + sp_offs;
+ s->wend = s->wstart + new_size;
+ s->wsp = s->wstart + sp_offs;
}
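The reworked grow functions decide between realloc and alloc-plus-copy by comparing the current base pointer against the caller's default (on-C-stack) buffer, instead of guessing from the size as the old code did; this is what lets a saved ESTACK carry a non-default allocator and initial size. A minimal sketch of the start-static, grow-to-heap idea (error handling elided):

    #include <stdlib.h>
    #include <string.h>

    typedef struct { long *start, *sp, *end; } Stack;

    static void grow(Stack *s, long *default_buf)
    {
        size_t old = s->end - s->start, off = s->sp - s->start;
        size_t new_size = 2 * old;
        if (s->start != default_buf) {          /* already on the heap */
            s->start = realloc(s->start, new_size * sizeof(long));
        } else {                                /* first growth: copy out */
            long *p = malloc(new_size * sizeof(long));
            memcpy(p, s->start, old * sizeof(long));
            s->start = p;
        }
        s->end = s->start + new_size;
        s->sp = s->start + off;
    }

    int main(void)
    {
        long def[4];
        Stack s = { def, def, def + 4 };
        while (s.sp != s.end) *s.sp++ = 0;  /* fill the default buffer */
        grow(&s, def);                      /* first growth: malloc+copy */
        grow(&s, def);                      /* later growth: realloc */
        free(s.start);
        return 0;
    }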
/* CTYPE macros */
@@ -255,7 +259,7 @@ erl_grow_wstack(UWord** start, UWord** sp, UWord** end)
* Returns -1 if not a proper list (i.e. not terminated with NIL)
*/
int
-list_length(Eterm list)
+erts_list_length(Eterm list)
{
int i = 0;
@@ -576,8 +580,8 @@ erts_bld_2tup_list(Uint **hpp, Uint *szp,
}
Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[])
+erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], UWord uints[])
{
Sint i;
Eterm res = THE_NON_VALUE;
@@ -732,6 +736,8 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
#define FUNNY_NUMBER10 268440479
#define FUNNY_NUMBER11 268440577
#define FUNNY_NUMBER12 268440581
+#define FUNNY_NUMBER13 268440593
+#define FUNNY_NUMBER14 268440611
static Uint32
hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
@@ -783,10 +789,10 @@ Uint32 make_hash(Eterm term_arg)
unsigned op;
/* Must not collide with the real tag_val_def's: */
-#define MAKE_HASH_TUPLE_OP 0x10
-#define MAKE_HASH_FUN_OP 0x11
-#define MAKE_HASH_CDR_PRE_OP 0x12
-#define MAKE_HASH_CDR_POST_OP 0x13
+#define MAKE_HASH_TUPLE_OP 0x11
+#define MAKE_HASH_TERM_ARRAY_OP 0x12
+#define MAKE_HASH_CDR_PRE_OP 0x13
+#define MAKE_HASH_CDR_POST_OP 0x14
/*
** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
@@ -875,7 +881,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
if (num_free > 0) {
if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
}
term = funp->env[0];
goto tail_recur;
@@ -965,6 +971,24 @@ tail_recur:
hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
break;
}
+ case MAP_DEF:
+ {
+ map_t *mp = (map_t *)map_val(term);
+ int size = map_get_size(mp);
+ Eterm *ks = map_get_keys(mp);
+ Eterm *vs = map_get_values(mp);
+
+ /* Use a prime with size to remedy some of
+ * the {} and <<>> hash problems */
+ hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
+ if (size == 0)
+ break;
+
+ /* push values first */
+ WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ break;
+ }
case TUPLE_DEF:
{
Eterm* ptr = tuple_val(term);
@@ -974,7 +998,7 @@ tail_recur:
op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_FUN_OP:
+ case MAKE_HASH_TERM_ARRAY_OP:
{
Uint i = (Uint) WSTACK_POP(stack);
Eterm* ptr = (Eterm*) WSTACK_POP(stack);
@@ -1068,9 +1092,11 @@ Uint32
make_hash2(Eterm term)
{
Uint32 hash;
+ Uint32 hash_xor_keys = 0;
+ Uint32 hash_xor_values = 0;
DeclareTmpHeapNoproc(tmp_big,2);
-/* (HCONST * {2, ..., 14}) mod 2^32 */
+/* (HCONST * {2, ..., 16}) mod 2^32 */
#define HCONST_2 0x3c6ef372UL
#define HCONST_3 0xdaa66d2bUL
#define HCONST_4 0x78dde6e4UL
@@ -1085,6 +1111,11 @@ make_hash2(Eterm term)
#define HCONST_13 0x08d12e65UL
#define HCONST_14 0xa708a81eUL
#define HCONST_15 0x454021d7UL
+#define HCONST_16 0xe3779b90UL
+
+#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
+#define HASH_MAP_KEY (_make_header(2,_TAG_HEADER_REF))
+#define HASH_MAP_VAL (_make_header(3,_TAG_HEADER_REF))
#define UINT32_HASH_2(Expr1, Expr2, AConst) \
do { \
@@ -1180,11 +1211,45 @@ make_hash2(Eterm term)
UINT32_HASH(arity, HCONST_9);
if (arity == 0) /* Empty tuple */
goto hash2_common;
- for (i = arity; i >= 2; i--) {
+ for (i = arity; i >= 1; i--) {
tmp = elem[i];
ESTACK_PUSH(s, tmp);
}
- term = elem[1];
+ goto hash2_common;
+ }
+ break;
+ case MAP_SUBTAG:
+ {
+ map_t *mp = (map_t *)map_val(term);
+ int i;
+ int size = map_get_size(mp);
+ Eterm *ks = map_get_keys(mp);
+ Eterm *vs = map_get_values(mp);
+ UINT32_HASH(size, HCONST_16);
+ if (size == 0) {
+ goto hash2_common;
+ }
+ ESTACK_PUSH(s, hash_xor_values);
+ ESTACK_PUSH(s, hash_xor_keys);
+ ESTACK_PUSH(s, hash);
+ ESTACK_PUSH(s, HASH_MAP_TAIL);
+ hash = 0;
+ hash_xor_keys = 0;
+ hash_xor_values = 0;
+ for (i = size - 1; i >= 0; i--) {
+ tmp = vs[i];
+ ESTACK_PUSH(s, HASH_MAP_VAL);
+ ESTACK_PUSH(s, tmp);
+ }
+ /* We do not want to expose the tuple representation.
+ * Do not push the keys as a tuple.
+ */
+ for (i = size - 1; i >= 0; i--) {
+ tmp = ks[i];
+ ESTACK_PUSH(s, HASH_MAP_KEY);
+ ESTACK_PUSH(s, tmp);
+ }
+ goto hash2_common;
}
break;
case EXPORT_SUBTAG:
@@ -1319,7 +1384,7 @@ make_hash2(Eterm term)
{
FloatDef ff;
GET_DOUBLE(term, ff);
-#if defined(WORDS_BIGENDIAN)
+#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
#else
UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
@@ -1378,15 +1443,47 @@ make_hash2(Eterm term)
default:
erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
hash2_common:
+
+ /* At this point 'hash' holds the hash value of the most recently
+ * processed term, compounded or otherwise.
+ */
+
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
UnUseTmpHeapNoproc(2);
return hash;
}
+
term = ESTACK_POP(s);
+
+ switch (term) {
+ case HASH_MAP_TAIL: {
+ hash = (Uint32) ESTACK_POP(s);
+ UINT32_HASH(hash_xor_keys, HCONST_16);
+ UINT32_HASH(hash_xor_values, HCONST_16);
+ hash_xor_keys = (Uint32) ESTACK_POP(s);
+ hash_xor_values = (Uint32) ESTACK_POP(s);
+ goto hash2_common;
+ }
+ case HASH_MAP_KEY:
+ hash_xor_keys ^= hash;
+ hash = 0;
+ goto hash2_common;
+ case HASH_MAP_VAL:
+ hash_xor_values ^= hash;
+ hash = 0;
+ goto hash2_common;
+ default:
+ break;
+ }
}
}
}
+
+#undef HASH_MAP_TAIL
+#undef HASH_MAP_KEY
+#undef HASH_MAP_VAL
+
#undef UINT32_HASH_2
#undef UINT32_HASH
#undef SINT32_HASH
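The HASH_MAP_KEY/HASH_MAP_VAL machinery hashes every key and value from a fresh seed and folds the results with XOR, so the resulting map hash is independent of the order in which the pairs are visited; HASH_MAP_TAIL then pops the enclosing hash state and mixes in both aggregates. A simplified sketch (hash_one is a stand-in mixer, not the real term hash):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hash_one(uint32_t seed, uint32_t term)
    {
        return (seed ^ term) * 2654435761u;
    }

    static uint32_t map_hash(const uint32_t *ks, const uint32_t *vs, int n)
    {
        uint32_t xk = 0, xv = 0, h;
        int i;
        for (i = 0; i < n; i++) {
            xk ^= hash_one(0, ks[i]);   /* every entry hashed from seed 0 */
            xv ^= hash_one(0, vs[i]);   /* XOR makes the fold commutative */
        }
        h = hash_one(0, (uint32_t) n);  /* size is mixed in up front */
        h = hash_one(h, xk);            /* then both aggregates, as in */
        h = hash_one(h, xv);            /* the HASH_MAP_TAIL case above */
        return h;
    }

    int main(void)
    {
        uint32_t k[]  = {1, 2}, v[]  = {10, 20};
        uint32_t kr[] = {2, 1}, vr[] = {20, 10};
        /* same pairs, different order, same hash */
        printf("%u %u\n", map_hash(k, v, 2), map_hash(kr, vr, 2));
        return 0;
    }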
@@ -1488,7 +1585,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
if (num_free > 0) {
if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
}
term = funp->env[0];
goto tail_recur;
@@ -1601,6 +1698,24 @@ tail_recur:
}
break;
+ case MAP_DEF:
+ {
+ map_t *mp = (map_t *)map_val(term);
+ int size = map_get_size(mp);
+ Eterm *ks = map_get_keys(mp);
+ Eterm *vs = map_get_values(mp);
+
+ /* Use a prime with size to remedy some of
+ * the {} and <<>> hash problems */
+ hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size;
+ if (size == 0)
+ break;
+
+ /* push values first */
+ WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP);
+ break;
+ }
case TUPLE_DEF:
{
Eterm* ptr = tuple_val(term);
@@ -1610,7 +1725,7 @@ tail_recur:
op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_FUN_OP:
+ case MAKE_HASH_TERM_ARRAY_OP:
{
Uint i = (Uint) WSTACK_POP(stack);
Eterm* ptr = (Eterm*) WSTACK_POP(stack);
@@ -1638,7 +1753,7 @@ tail_recur:
return hash;
#undef MAKE_HASH_TUPLE_OP
-#undef MAKE_HASH_FUN_OP
+#undef MAKE_HASH_TERM_ARRAY_OP
#undef MAKE_HASH_CDR_PRE_OP
#undef MAKE_HASH_CDR_POST_OP
}
@@ -1675,7 +1790,7 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
if (p) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_RUNNING)
+ if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
p = NULL;
}
}
@@ -2005,6 +2120,22 @@ tailrecur_ne:
++bb;
goto term_array;
}
+ case MAP_SUBTAG:
+ {
+ aa = map_val_rel(a, a_base);
+ if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa)
+ goto not_equal;
+ bb = map_val_rel(b,b_base);
+ sz = map_get_size((map_t*)aa);
+
+ if (sz != map_get_size((map_t*)bb)) goto not_equal;
+ if (sz == 0) goto pop_next;
+
+ aa += 2;
+ bb += 2;
+ sz += 1; /* increment for tuple-keys */
+ goto term_array;
+ }
case REFC_BINARY_SUBTAG:
case HEAP_BINARY_SUBTAG:
case SUB_BINARY_SUBTAG:
@@ -2279,7 +2410,7 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
*
* According to the Erlang Standard, types are ordered as follows:
* numbers < (characters) < atoms < refs < funs < ports < pids <
- * tuples < [] < conses < binaries.
+ * tuples < maps < [] < conses < binaries.
*
* Note that characters are currently not implemented.
*
@@ -2299,10 +2430,24 @@ static int cmp_atoms(Eterm a, Eterm b)
bb->name+3, bb->len-3);
}
+#if !HALFWORD_HEAP
+/* cmp(Eterm a, Eterm b)
+ * For compatibility with HiPE - arith-based compare.
+ */
+Sint cmp(Eterm a, Eterm b)
+{
+ return erts_cmp(a, b, 0);
+}
+#endif
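The exact flag separates the two Erlang orderings: exact compare (the =:= flavor, also used for map keys) never coerces between integers and floats, so arithmetically equal mixed pairs are routed by exact_fall_through below to type-based ordering, while arith compare (==) converts as before. A self-contained analogue (which type sorts first is an internal detail; the sketch picks one arbitrarily):

    #include <stdio.h>

    /* Compare an integer against a float either arithmetically
     * (exact == 0, the == ordering) or with type precedence for
     * mixed pairs (exact == 1, the =:= ordering). */
    static int cmp_num(long i, double f, int exact)
    {
        if (exact)
            return -1;   /* types differ: order by type, never convert */
        if ((double) i < f) return -1;
        if ((double) i > f) return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", cmp_num(1, 1.0, 0));  /* 0: 1 == 1.0 */
        printf("%d\n", cmp_num(1, 1.0, 1));  /* nonzero: 1 =/= 1.0 */
        return 0;
    }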
+
+/* erts_cmp(Eterm a, Eterm b, int exact)
+ * exact = 1 -> term-based compare
+ * exact = 0 -> arith-based compare
+ */
#if HALFWORD_HEAP
-Sint cmp_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base)
+Sint erts_cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact)
#else
-Sint cmp(Eterm a, Eterm b)
+Sint erts_cmp(Eterm a, Eterm b, int exact)
#endif
{
DECLARE_WSTACK(stack);
@@ -2462,7 +2607,25 @@ tailrecur_ne:
++aa;
++bb;
goto term_array;
+ case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
+ if (!is_map_rel(b,b_base)) {
+ a_tag = MAP_DEF;
+ goto mixed_types;
+ }
+ aa = (Eterm *)map_val_rel(a,a_base);
+ bb = (Eterm *)map_val_rel(b,b_base);
+ i = map_get_size((map_t*)aa);
+ if (i != map_get_size((map_t*)bb)) {
+ RETURN_NEQ((int)(i - map_get_size((map_t*)bb)));
+ }
+ if (i == 0) {
+ goto pop_next;
+ }
+ aa += 2;
+ bb += 2;
+ i += 1; /* increment for tuple-keys */
+ goto term_array;
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
if (!is_float_rel(b,b_base)) {
a_tag = FLOAT_DEF;
@@ -2686,11 +2849,6 @@ tailrecur_ne:
{
FloatDef f1, f2;
Eterm big;
-#if HEAP_ON_C_STACK
- Eterm big_buf[CMP_TMP_HEAP_SIZE]; /* If HEAP_ON_C_STACK */
-#else
- Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
-#endif
#if HALFWORD_HEAP
Wterm aw = is_immed(a) ? a : rterm2wterm(a,a_base);
Wterm bw = is_immed(b) ? b : rterm2wterm(b,b_base);
@@ -2701,6 +2859,8 @@ tailrecur_ne:
#define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
#define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
#define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
+ Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
+
b_tag = tag_val_def(bw);
switch(_NUMBER_CODE(a_tag, b_tag)) {
@@ -2711,86 +2871,95 @@ tailrecur_ne:
j = big_sign(aw) ? -1 : 1;
break;
case SMALL_FLOAT:
+ if (exact) goto exact_fall_through;
GET_DOUBLE(bw, f2);
if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
- // Float is within the no loss limit
+ /* Float is within the no loss limit */
f1.fd = signed_val(aw);
j = float_comp(f1.fd, f2.fd);
+ }
#if ERTS_SIZEOF_ETERM == 8
- } else if (f2.fd > (double) (MAX_SMALL + 1)) {
- // Float is a positive bignum, i.e. bigger
+ else if (f2.fd > (double) (MAX_SMALL + 1)) {
+ /* Float is a positive bignum, i.e. bigger */
j = -1;
} else if (f2.fd < (double) (MIN_SMALL - 1)) {
- // Float is a negative bignum, i.e. smaller
+ /* Float is a negative bignum, i.e. smaller */
j = 1;
- } else { // Float is a Sint but less precise
+ } else {
+ /* Float is a Sint but less precise */
j = signed_val(aw) - (Sint) f2.fd;
}
#else
- } else {
- // If float is positive it is bigger than small
+ else {
+ /* If float is positive it is bigger than small */
j = (f2.fd > 0.0) ? -1 : 1;
}
-#endif // ERTS_SIZEOF_ETERM == 8
+#endif /* ERTS_SIZEOF_ETERM == 8 */
break;
case FLOAT_BIG:
+ if (exact) goto exact_fall_through;
{
Wterm tmp = aw;
aw = bw;
bw = tmp;
}/* fall through */
case BIG_FLOAT:
+ if (exact) goto exact_fall_through;
GET_DOUBLE(bw, f2);
if ((f2.fd < (double) (MAX_SMALL + 1))
&& (f2.fd > (double) (MIN_SMALL - 1))) {
- // Float is a Sint
+ /* Float is a Sint */
j = big_sign(aw) ? -1 : 1;
} else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX
|| pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) {
- // If bignum size shows that it is bigger than the abs float
+ /* If bignum size shows that it is bigger than the abs float */
j = big_sign(aw) ? -1 : 1;
} else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX
&& (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
- // If bignum size shows that it is smaller than the abs float
+ /* If bignum size shows that it is smaller than the abs float */
j = f2.fd < 0 ? 1 : -1;
} else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
- // Float is within the no loss limit
+ /* Float is within the no loss limit */
if (big_to_double(aw, &f1.fd) < 0) {
j = big_sign(aw) ? -1 : 1;
} else {
j = float_comp(f1.fd, f2.fd);
}
} else {
- big = double_to_big(f2.fd, big_buf);
- j = big_comp(aw, big);
+ big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
+ j = big_comp(aw, rterm2wterm(big,big_buf));
}
if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
j = -j;
}
break;
case FLOAT_SMALL:
+ if (exact) goto exact_fall_through;
GET_DOUBLE(aw, f1);
if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
- // Float is within the no loss limit
+ /* Float is within the no loss limit */
f2.fd = signed_val(bw);
j = float_comp(f1.fd, f2.fd);
+ }
#if ERTS_SIZEOF_ETERM == 8
- } else if (f1.fd > (double) (MAX_SMALL + 1)) {
- // Float is a positive bignum, i.e. bigger
+ else if (f1.fd > (double) (MAX_SMALL + 1)) {
+ /* Float is a positive bignum, i.e. bigger */
j = 1;
} else if (f1.fd < (double) (MIN_SMALL - 1)) {
- // Float is a negative bignum, i.e. smaller
+ /* Float is a negative bignum, i.e. smaller */
j = -1;
- } else { // Float is a Sint but less precise it
+ } else {
+ /* Float is a Sint, but less precise */
j = (Sint) f1.fd - signed_val(bw);
}
#else
- } else {
- // If float is positive it is bigger than small
+ else {
+ /* If float is positive it is bigger than small */
j = (f1.fd > 0.0) ? 1 : -1;
}
-#endif // ERTS_SIZEOF_ETERM == 8
+#endif /* ERTS_SIZEOF_ETERM == 8 */
break;
+exact_fall_through:
default:
j = b_tag - a_tag;
}
@@ -2844,7 +3013,7 @@ pop_next:
return 0;
not_equal:
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return j;
#undef CMP_NODES
@@ -2981,7 +3150,7 @@ char* Sint_to_buf(Sint n, struct Sint_buf *buf)
*/
Eterm
-buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail)
+buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
{
Eterm* hp = *hpp;
size_t i = len;
@@ -3019,118 +3188,349 @@ buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail)
** Return remaining bytes in buffer on success
** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow
** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes)
+**
+** Note!
+** Do not detect input-data errors in this function that are not detected by erts_iolist_size!
+**
+** A caller should be able to rely on a successful return from erts_iolist_to_buf
+** if erts_iolist_size has previously been called successfully and erts_iolist_to_buf
+** is called with a buffer at least as large as the size reported by erts_iolist_size.
+**
*/
-ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+typedef enum {
+ ERTS_IL2B_BCOPY_OK,
+ ERTS_IL2B_BCOPY_YIELD,
+ ERTS_IL2B_BCOPY_OVERFLOW,
+ ERTS_IL2B_BCOPY_TYPE_ERROR
+} ErtsIL2BBCopyRes;
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
+
+static ERTS_INLINE ErlDrvSizeT
+iolist_to_buf(const int yield_support,
+ ErtsIOList2BufState *state,
+ Eterm obj,
+ char* buf,
+ ErlDrvSizeT alloced_len)
{
- ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
- Eterm* objp;
+#undef IOLIST_TO_BUF_BCOPY
+#define IOLIST_TO_BUF_BCOPY(CONSP) \
+do { \
+ size_t size = binary_size(obj); \
+ if (size > 0) { \
+ Uint bitsize; \
+ byte* bptr; \
+ Uint bitoffs; \
+ Uint num_bits; \
+ if (yield_support) { \
+ size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ if (yield_count > 0) \
+ max_size *= yield_count+1; \
+ if (size > max_size) { \
+ state->objp = CONSP; \
+ goto L_bcopy_yield; \
+ } \
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \
+ int cost = (int) size; \
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
+ yield_count -= cost; \
+ } \
+ } \
+ if (len < size) \
+ goto L_overflow; \
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \
+ if (bitsize != 0) \
+ goto L_type_error; \
+ num_bits = 8*size; \
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \
+ buf += size; \
+ len -= size; \
+ } \
+} while (0)
+
+ ErlDrvSizeT res, len;
+ Eterm* objp = NULL;
+ int init_yield_count;
+ int yield_count;
DECLARE_ESTACK(s);
- goto L_again;
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_again:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0) {
- goto L_overflow;
- }
- *buf++ = unsigned_val(obj);
- len--;
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
-
- if (len < size) {
+
+ len = (ErlDrvSizeT) alloced_len;
+
+ if (!yield_support) {
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ goto L_again;
+ }
+ else {
+
+ if (state->iolist.reds_left <= 0)
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
+ * state->iolist.reds_left);
+ yield_count = init_yield_count;
+
+ if (!state->iolist.estack.start)
+ goto L_again;
+ else {
+ int chk_stack;
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->iolist.estack);
+
+ if (!state->bcopy.bptr)
+ chk_stack = 0;
+ else {
+ chk_stack = 1;
+ switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ break;
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
- obj = CDR(objp);
- if (is_list(obj)) {
- goto L_iter_list; /* on tail */
- } else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
+ obj = state->iolist.obj;
+ buf = state->buf;
+ len = state->len;
+ objp = state->objp;
+ state->objp = NULL;
+ if (objp)
+ goto L_tail;
+ if (!chk_stack)
+ goto L_again;
+ /* check stack */
+ }
+ }
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0) {
+ goto L_overflow;
+ }
+ *buf++ = unsigned_val(obj);
+ len--;
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(objp);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
+
+ L_tail:
+
+ obj = CDR(objp);
+
+ if (is_list(obj)) {
+ continue; /* Tail loop */
+ } else if (is_binary(obj)) {
+ IOLIST_TO_BUF_BCOPY(NULL);
+ } else if (is_not_nil(obj)) {
goto L_type_error;
}
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ break;
}
} else if (is_binary(obj)) {
- byte* bptr;
- size_t size = binary_size(obj);
- Uint bitsize;
- Uint bitoffs;
- Uint num_bits;
- if (len < size) {
- goto L_overflow;
- }
- ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
- if (bitsize != 0) {
- goto L_type_error;
- }
- num_bits = 8*size;
- copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
- buf += size;
- len -= size;
+ IOLIST_TO_BUF_BCOPY(NULL);
} else if (is_not_nil(obj)) {
goto L_type_error;
- }
+ } else if (yield_support && --yield_count <= 0)
+ goto L_yield;
}
+ res = len;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return len;
+
+ if (yield_support) {
+ int reds;
+ CLEAR_SAVED_ESTACK(&state->iolist.estack);
+ reds = ((init_yield_count - yield_count - 1)
+ / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->iolist.c_p, reds);
+ state->iolist.reds_left -= reds;
+ if (state->iolist.reds_left < 0)
+ state->iolist.reds_left = 0;
+ }
+
+
+ return res;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
+ goto L_return;
L_overflow:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TO_BUF_OVERFLOW;
+ res = ERTS_IOLIST_TO_BUF_OVERFLOW;
+ goto L_return;
+
+ L_bcopy_yield:
+
+ state->buf = buf;
+ state->len = len;
+
+ switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
+ case ERTS_IL2B_BCOPY_OK:
+ ERTS_INTERNAL_ERROR("Missing yield");
+ case ERTS_IL2B_BCOPY_YIELD:
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+ case ERTS_IL2B_BCOPY_OVERFLOW:
+ goto L_overflow;
+ case ERTS_IL2B_BCOPY_TYPE_ERROR:
+ goto L_type_error;
+ }
+
+ L_yield:
+
+ BUMP_ALL_REDS(state->iolist.c_p);
+ state->iolist.reds_left = 0;
+ state->iolist.obj = obj;
+ state->buf = buf;
+ state->len = len;
+ ESTACK_SAVE(s, &state->iolist.estack);
+ return ERTS_IOLIST_TO_BUF_YIELD;
+
+#undef IOLIST_TO_BUF_BCOPY
+}
+
+static ErtsIL2BBCopyRes
+iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
+{
+ ErtsIL2BBCopyRes res;
+ char *buf = state->buf;
+ ErlDrvSizeT len = state->len;
+ byte* bptr;
+ size_t size;
+ size_t max_size;
+ Uint bitoffs;
+ Uint num_bits;
+ int yield_count = *yield_countp;
+
+ if (state->bcopy.bptr) {
+ bptr = state->bcopy.bptr;
+ size = state->bcopy.size;
+ bitoffs = state->bcopy.bitoffs;
+ state->bcopy.bptr = NULL;
+ }
+ else {
+ Uint bitsize;
+
+ ASSERT(is_binary(obj));
+
+ size = binary_size(obj);
+ if (size <= 0)
+ return ERTS_IL2B_BCOPY_OK;
+
+ if (len < size)
+ return ERTS_IL2B_BCOPY_OVERFLOW;
+
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0)
+ return ERTS_IL2B_BCOPY_TYPE_ERROR;
+ }
+
+ ASSERT(size > 0);
+ max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ if (yield_count > 0)
+ max_size *= (size_t) (yield_count+1);
+
+ if (size <= max_size) {
+ if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
+ int cost = (int) size;
+ cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
+ yield_count -= cost;
+ }
+ res = ERTS_IL2B_BCOPY_OK;
+ }
+ else {
+ ASSERT(0 < max_size && max_size < size);
+ yield_count = 0;
+ state->bcopy.bptr = bptr + max_size;
+ state->bcopy.bitoffs = bitoffs;
+ state->bcopy.size = size - max_size;
+ size = max_size;
+ res = ERTS_IL2B_BCOPY_YIELD;
+ }
+
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ state->buf += size;
+ state->len -= size;
+ *yield_countp = yield_count;
+
+ return res;
+}
+
+ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
+{
+ return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
+}
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
+{
+ return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
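Both traversals now share the same trampoline shape: a reduction budget is turned into a yield count, and when it runs out the cursor (current term, buffer position, saved ESTACK) is parked in the state record and the caller gets ERTS_IOLIST_TO_BUF_YIELD; large binaries are additionally chunked by iolist_to_buf_bcopy so a single copy cannot blow the whole budget. A minimal sketch of the pattern:

    #include <stdio.h>

    /* Do at most 'budget' units of work, park the cursor in 'st',
     * and report whether we finished. */
    typedef struct { int cursor; } State;

    enum { DONE, YIELD };

    static int work(State *st, int total, int budget)
    {
        while (st->cursor < total) {
            if (budget-- <= 0)
                return YIELD;   /* save position, resume later */
            st->cursor++;       /* one unit of work */
        }
        return DONE;
    }

    int main(void)
    {
        State st = {0};
        while (work(&st, 10000, 256) == YIELD)
            ;                   /* the scheduler would run others here */
        puts("done");
        return 0;
    }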
/*
* Return 0 if successful, and non-zero if unsuccessful.
+ *
+ * It is vital that if erts_iolist_to_buf would return an error for
+ * any type of term data, this function should do so as well.
+ * Any input term error detected in erts_iolist_to_buf should also
+ * be detected in this function!
*/
-int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+
+static ERTS_INLINE int
+iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
+ int res, init_yield_count, yield_count;
Eterm* objp;
- Uint size = 0; /* Intentionally Uint due to halfword heap */
+ Uint size = (Uint) *sizep; /* Intentionally Uint due to halfword heap */
DECLARE_ESTACK(s);
+
+ if (!yield_support)
+ yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
+ else {
+ if (state->reds_left <= 0)
+ return ERTS_IOLIST_YIELD;
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+ init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
+ init_yield_count *= state->reds_left;
+ yield_count = init_yield_count;
+ if (state->estack.start) {
+ /* Restart; restore state... */
+ ESTACK_RESTORE(s, &state->estack);
+ size = (Uint) state->size;
+ obj = state->obj;
+ }
+ }
+
goto L_again;
#define SAFE_ADD(Var, Val) \
@@ -3146,51 +3546,101 @@ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
obj = ESTACK_POP(s);
L_again:
if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- /* Head */
- obj = CAR(objp);
- if (is_byte(obj)) {
- size++;
- if (size == 0) {
- goto L_overflow_error;
+ while (1) { /* Tail loop */
+ while (1) { /* Head loop */
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ size++;
+ if (size == 0) {
+ goto L_overflow_error;
+ }
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ continue; /* Head loop */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
- SAFE_ADD(size, binary_size(obj));
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (is_not_nil(obj)) {
- goto L_type_error;
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ continue; /* Tail loop */
+ else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ SAFE_ADD(size, binary_size(obj));
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ break;
}
- /* Tail */
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ } else {
+ if (yield_support && --yield_count <= 0)
+ goto L_yield;
+ if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
SAFE_ADD(size, binary_size(obj));
} else if (is_not_nil(obj)) {
goto L_type_error;
}
- } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
- SAFE_ADD(size, binary_size(obj));
- } else if (is_not_nil(obj)) {
- goto L_type_error;
}
}
#undef SAFE_ADD
- DESTROY_ESTACK(s);
*sizep = (ErlDrvSizeT) size;
- return ERTS_IOLIST_OK;
- L_overflow_error:
+ res = ERTS_IOLIST_OK;
+
+ L_return:
+
DESTROY_ESTACK(s);
- return ERTS_IOLIST_OVERFLOW;
+
+ if (yield_support) {
+ int yc, reds;
+ CLEAR_SAVED_ESTACK(&state->estack);
+ yc = init_yield_count - yield_count;
+ reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
+ BUMP_REDS(state->c_p, reds);
+ state->reds_left -= reds;
+ state->size = (ErlDrvSizeT) size;
+ state->have_size = 1;
+ }
+
+ return res;
+
+ L_overflow_error:
+ res = ERTS_IOLIST_OVERFLOW;
+ size = 0;
+ goto L_return;
L_type_error:
- DESTROY_ESTACK(s);
- return ERTS_IOLIST_TYPE;
+ res = ERTS_IOLIST_TYPE;
+ size = 0;
+ goto L_return;
+
+ L_yield:
+ BUMP_ALL_REDS(state->c_p);
+ state->reds_left = 0;
+ state->size = size;
+ state->obj = obj;
+ ESTACK_SAVE(s, &state->estack);
+ return ERTS_IOLIST_YIELD;
+}
+
+int erts_iolist_size_yielding(ErtsIOListState *state)
+{
+ ErlDrvSizeT size = state->size;
+ return iolist_size(1, state, state->obj, &size);
+}
+
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
+{
+ *sizep = 0;
+ return iolist_size(0, NULL, obj, sizep);
}
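The contract spelled out above can be leaned on like this; a hedged usage sketch assuming the ERTS-internal headers (with_flattened is illustrative only):

    /* Flatten an iolist term into a temporary buffer. */
    static void with_flattened(Eterm term)
    {
        ErlDrvSizeT size, left;
        char *buf;

        if (erts_iolist_size(term, &size) != ERTS_IOLIST_OK)
            return;                        /* type error or overflow */
        buf = erts_alloc(ERTS_ALC_T_TMP, size ? size : 1);
        left = erts_iolist_to_buf(term, buf, size);
        ASSERT(left == 0);                 /* guaranteed by the contract */
        (void) left;
        /* ... use buf[0..size) ... */
        erts_free(ERTS_ALC_T_TMP, buf);
    }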
/* return 0 if item is not a non-empty flat list of bytes */
@@ -3464,6 +3914,294 @@ erts_free_read_env(void *value)
erts_free(ERTS_ALC_T_TMP, value);
}
+
+typedef struct {
+ size_t sz;
+ char *ptr;
+} ErtsEmuArg;
+
+typedef struct {
+ int argc;
+ ErtsEmuArg *arg;
+ size_t no_bytes;
+} ErtsEmuArgs;
+
+ErtsEmuArgs saved_emu_args = {0};
+
+void
+erts_save_emu_args(int argc, char **argv)
+{
+#ifdef DEBUG
+ char *end_ptr;
+#endif
+ char *ptr;
+ int i;
+ size_t arg_sz[100];
+ size_t size;
+
+ ASSERT(!saved_emu_args.argc);
+
+ size = sizeof(ErtsEmuArg)*argc;
+ for (i = 0; i < argc; i++) {
+ size_t sz = sys_strlen(argv[i]);
+ if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
+ arg_sz[i] = sz;
+ size += sz+1;
+ }
+ ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
+#ifdef DEBUG
+ end_ptr = ptr + size;
+#endif
+ saved_emu_args.arg = (ErtsEmuArg *) ptr;
+ ptr += sizeof(ErtsEmuArg)*argc;
+ saved_emu_args.argc = argc;
+ saved_emu_args.no_bytes = 0;
+ for (i = 0; i < argc; i++) {
+ size_t sz;
+ if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
+ sz = arg_sz[i];
+ else
+ sz = sys_strlen(argv[i]);
+ saved_emu_args.arg[i].ptr = ptr;
+ saved_emu_args.arg[i].sz = sz;
+ saved_emu_args.no_bytes += sz;
+ ptr += sz+1;
+ sys_strcpy(saved_emu_args.arg[i].ptr, argv[i]);
+ }
+ ASSERT(ptr == end_ptr);
+}
+
+Eterm
+erts_get_emu_args(Process *c_p)
+{
+#ifdef DEBUG
+ Eterm *end_hp;
+#endif
+ int i;
+ Uint hsz;
+ Eterm *hp, res;
+
+ hsz = saved_emu_args.no_bytes*2;
+ hsz += saved_emu_args.argc*2;
+
+ hp = HAlloc(c_p, hsz);
+#ifdef DEBUG
+ end_hp = hp + hsz;
+#endif
+ res = NIL;
+
+ for (i = saved_emu_args.argc-1; i >= 0; i--) {
+ Eterm arg = buf_to_intlist(&hp,
+ saved_emu_args.arg[i].ptr,
+ saved_emu_args.arg[i].sz,
+ NIL);
+ res = CONS(hp, arg, res);
+ hp += 2;
+ }
+
+ ASSERT(hp == end_hp);
+
+ return res;
+}
+
+
+Eterm
+erts_get_ethread_info(Process *c_p)
+{
+ Uint sz, *szp;
+ Eterm res, *hp, **hpp, *end_hp = NULL;
+
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+
+ while (1) {
+ Eterm tup, list, name;
+#if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
+ || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
+ || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
+ char buf[1024];
+ int i;
+ char **str;
+#endif
+
+ res = NIL;
+
+#ifdef ETHR_X86_MEMBAR_H__
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp, "sse2"),
+#ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
+ erts_bld_string(hpp, szp,
+ (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
+ ? "yes" : "no"))
+#else
+ erts_bld_string(hpp, szp, "yes")
+#endif
+ );
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp,
+ "x86"
+#ifdef ARCH_64
+ "_64"
+#endif
+ " OOO"),
+ erts_bld_string(hpp, szp,
+#ifdef ETHR_X86_OUT_OF_ORDER
+ "yes"
+#else
+ "no"
+#endif
+ ));
+
+ res = erts_bld_cons(hpp, szp, tup, res);
+#endif
+
+#ifdef ETHR_SPARC_V9_MEMBAR_H__
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp, "Sparc V9"),
+ erts_bld_string(hpp, szp,
+#if defined(ETHR_SPARC_TSO)
+ "TSO"
+#elif defined(ETHR_SPARC_PSO)
+ "PSO"
+#elif defined(ETHR_SPARC_RMO)
+ "RMO"
+#else
+ "undefined"
+#endif
+ ));
+
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+#endif
+
+#ifdef ETHR_PPC_MEMBAR_H__
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp, "lwsync"),
+ erts_bld_string(hpp, szp,
+#if defined(ETHR_PPC_HAVE_LWSYNC)
+ "yes"
+#elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
+ "no"
+#elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
+ ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no"
+#else
+ "undefined"
+#endif
+ ));
+
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+#endif
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp, "Native rw-spinlocks"),
+#ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
+ erts_bld_string(hpp, szp, ETHR_NATIVE_RWSPINLOCK_IMPL)
+#else
+ erts_bld_string(hpp, szp, "no")
+#endif
+ );
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+ tup = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_string(hpp, szp, "Native spinlocks"),
+#ifdef ETHR_NATIVE_SPINLOCK_IMPL
+ erts_bld_string(hpp, szp, ETHR_NATIVE_SPINLOCK_IMPL)
+#else
+ erts_bld_string(hpp, szp, "no")
+#endif
+ );
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+
+ list = NIL;
+#ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
+ if (ethr_have_native_dw_atomic()) {
+ name = erts_bld_string(hpp, szp, ETHR_NATIVE_DW_ATOMIC_IMPL);
+ str = ethr_native_dw_atomic_ops();
+ for (i = 0; str[i]; i++) {
+ erts_snprintf(buf, sizeof(buf), "ethr_native_dw_atomic_%s()", str[i]);
+ list = erts_bld_cons(hpp, szp,
+ erts_bld_string(hpp, szp, buf),
+ list);
+ }
+ str = ethr_native_su_dw_atomic_ops();
+ for (i = 0; str[i]; i++) {
+ erts_snprintf(buf, sizeof(buf), "ethr_native_su_dw_atomic_%s()", str[i]);
+ list = erts_bld_cons(hpp, szp,
+ erts_bld_string(hpp, szp, buf),
+ list);
+ }
+ }
+ else
+#endif
+ name = erts_bld_string(hpp, szp, "no");
+
+ tup = erts_bld_tuple(hpp, szp, 3,
+ erts_bld_string(hpp, szp, "Double word native atomics"),
+ name,
+ list);
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+ list = NIL;
+#ifdef ETHR_NATIVE_ATOMIC64_IMPL
+ name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC64_IMPL);
+ str = ethr_native_atomic64_ops();
+ for (i = 0; str[i]; i++) {
+ erts_snprintf(buf, sizeof(buf), "ethr_native_atomic64_%s()", str[i]);
+ list = erts_bld_cons(hpp, szp,
+ erts_bld_string(hpp, szp, buf),
+ list);
+ }
+#else
+ name = erts_bld_string(hpp, szp, "no");
+#endif
+ tup = erts_bld_tuple(hpp, szp, 3,
+ erts_bld_string(hpp, szp, "64-bit native atomics"),
+ name,
+ list);
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+ list = NIL;
+#ifdef ETHR_NATIVE_ATOMIC32_IMPL
+ name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC32_IMPL);
+ str = ethr_native_atomic32_ops();
+ for (i = 0; str[i]; i++) {
+ erts_snprintf(buf, sizeof(buf), "ethr_native_atomic32_%s()", str[i]);
+ list = erts_bld_cons(hpp, szp,
+ erts_bld_string(hpp, szp, buf),
+ list);
+ }
+#else
+ name = erts_bld_string(hpp, szp, "no");
+#endif
+ tup = erts_bld_tuple(hpp, szp, 3,
+ erts_bld_string(hpp, szp, "32-bit native atomics"),
+ name,
+ list);
+ res = erts_bld_cons(hpp, szp, tup, res);
+
+ if (hpp) {
+ HRelease(c_p, end_hp, *hpp);
+ return res;
+ }
+
+ hp = HAlloc(c_p, sz);
+ end_hp = hp + sz;
+ hpp = &hp;
+ szp = NULL;
+ }
+}
+
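erts_get_ethread_info uses the usual two-pass erts_bld_* protocol: the first iteration runs with a NULL heap pointer and only accumulates the required word count in *szp, then the heap is allocated once and the loop re-runs building for real, with HRelease returning any unused headroom. A simplified sketch of the protocol (bld_pair stands in for the erts_bld_* helpers; the tagging is faked):

    #include <stddef.h>

    typedef unsigned long Term;

    /* With szp != NULL: only count words.  With hpp != NULL: build. */
    static Term bld_pair(Term **hpp, size_t *szp, Term a, Term b)
    {
        if (szp) {
            *szp += 3;              /* header + two elements */
            return 0;               /* value is unused in pass 1 */
        }
        (*hpp)[0] = 2;              /* arity header, simplified */
        (*hpp)[1] = a;
        (*hpp)[2] = b;
        *hpp += 3;
        return (Term) (*hpp - 3);   /* "tagged" pointer, simplified */
    }

    /* pass 1: bld_pair(NULL, &sz, ...); allocate sz words;
     * pass 2: bld_pair(&hp, NULL, ...) into that allocation. */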
/*
* To be used to silence unused result warnings, but do not abuse it.
*/
@@ -3719,6 +4457,23 @@ erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
#endif
}
+/*
+ * A millisecond timestamp without time correction where there's no hrtime
+ * - for tracing on "long" things...
+ */
+Uint64 erts_timestamp_millis(void)
+{
+#ifdef HAVE_GETHRTIME
+ return (Uint64) (sys_gethrtime() / 1000000);
+#else
+ Uint64 res;
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ res = (Uint64) tv.tv_sec*1000000;
+ res += (Uint64) tv.tv_usec;
+ return (res / 1000);
+#endif
+}
#ifdef DEBUG
/*