Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.c | 49
-rw-r--r--  erts/emulator/beam/atom.h | 6
-rw-r--r--  erts/emulator/beam/atom.names | 42
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 562
-rw-r--r--  erts/emulator/beam/beam_bp.c | 361
-rw-r--r--  erts/emulator/beam/beam_bp.h | 46
-rw-r--r--  erts/emulator/beam/beam_debug.c | 548
-rw-r--r--  erts/emulator/beam/beam_emu.c | 780
-rw-r--r--  erts/emulator/beam/beam_load.c | 597
-rw-r--r--  erts/emulator/beam/beam_load.h | 51
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 18
-rw-r--r--  erts/emulator/beam/bif.c | 725
-rw-r--r--  erts/emulator/beam/bif.h | 97
-rw-r--r--  erts/emulator/beam/bif.tab | 72
-rw-r--r--  erts/emulator/beam/big.c | 38
-rw-r--r--  erts/emulator/beam/big.h | 4
-rw-r--r--  erts/emulator/beam/binary.c | 38
-rw-r--r--  erts/emulator/beam/break.c | 47
-rw-r--r--  erts/emulator/beam/code_ix.c | 3
-rw-r--r--  erts/emulator/beam/code_ix.h | 84
-rw-r--r--  erts/emulator/beam/copy.c | 161
-rw-r--r--  erts/emulator/beam/dist.c | 45
-rw-r--r--  erts/emulator/beam/dist.h | 8
-rw-r--r--  erts/emulator/beam/dtrace-wrapper.h | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 87
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 20
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 27
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 135
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 20
-rw-r--r--  erts/emulator/beam/erl_async.c | 6
-rw-r--r--  erts/emulator/beam/erl_async.h | 10
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 56
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 65
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 813
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 63
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 228
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 314
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 346
-rw-r--r--  erts/emulator/beam/erl_binary.h | 266
-rw-r--r--  erts/emulator/beam/erl_bits.c | 45
-rw-r--r--  erts/emulator/beam/erl_bits.h | 20
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 73
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.h | 10
-rw-r--r--  erts/emulator/beam/erl_db.c | 2089
-rw-r--r--  erts/emulator/beam/erl_db.h | 50
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 2585
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 31
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 563
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 259
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 150
-rw-r--r--  erts/emulator/beam/erl_debug.c | 4
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 87
-rw-r--r--  erts/emulator/beam/erl_driver.h | 18
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 21
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 103
-rw-r--r--  erts/emulator/beam/erl_fun.c | 30
-rw-r--r--  erts/emulator/beam/erl_fun.h | 6
-rw-r--r--  erts/emulator/beam/erl_gc.c | 436
-rw-r--r--  erts/emulator/beam/erl_gc.h | 107
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 1860
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h | 6
-rw-r--r--  erts/emulator/beam/erl_init.c | 181
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 297
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 50
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 965
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 1013
-rw-r--r--  erts/emulator/beam/erl_lock_flags.c | 59
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 78
-rw-r--r--  erts/emulator/beam/erl_map.c | 77
-rw-r--r--  erts/emulator/beam/erl_map.h | 4
-rw-r--r--  erts/emulator/beam/erl_math.c | 13
-rw-r--r--  erts/emulator/beam/erl_message.c | 35
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 76
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 23
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 22
-rw-r--r--  erts/emulator/beam/erl_msacc.h | 10
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 15
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.c | 180
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h | 332
-rw-r--r--  erts/emulator/beam/erl_nif.c | 1826
-rw-r--r--  erts/emulator/beam/erl_nif.h | 55
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 18
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 39
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 102
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 4
-rw-r--r--  erts/emulator/beam/erl_port.h | 14
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 48
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 11
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 13
-rw-r--r--  erts/emulator/beam/erl_process.c | 2385
-rw-r--r--  erts/emulator/beam/erl_process.h | 218
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 13
-rw-r--r--  erts/emulator/beam/erl_process_dict.h | 1
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 13
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 253
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 188
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 27
-rw-r--r--  erts/emulator/beam/erl_rbtree.h | 68
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_smp.h | 135
-rw-r--r--  erts/emulator/beam/erl_term.c | 42
-rw-r--r--  erts/emulator/beam/erl_term.h | 261
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 33
-rw-r--r--  erts/emulator/beam/erl_threads.h | 423
-rw-r--r--  erts/emulator/beam/erl_time.h | 82
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 85
-rw-r--r--  erts/emulator/beam/erl_trace.c | 81
-rw-r--r--  erts/emulator/beam/erl_trace.h | 16
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 100
-rw-r--r--  erts/emulator/beam/erl_utils.h | 6
-rw-r--r--  erts/emulator/beam/erl_vm.h | 16
-rw-r--r--  erts/emulator/beam/error.h | 91
-rw-r--r--  erts/emulator/beam/export.c | 47
-rw-r--r--  erts/emulator/beam/export.h | 18
-rw-r--r--  erts/emulator/beam/external.c | 139
-rw-r--r--  erts/emulator/beam/global.h | 408
-rw-r--r--  erts/emulator/beam/io.c | 347
-rw-r--r--  erts/emulator/beam/module.c | 64
-rw-r--r--  erts/emulator/beam/module.h | 8
-rw-r--r--  erts/emulator/beam/ops.tab | 163
-rw-r--r--  erts/emulator/beam/register.c | 15
-rw-r--r--  erts/emulator/beam/safe_hash.c | 23
-rw-r--r--  erts/emulator/beam/safe_hash.h | 7
-rw-r--r--  erts/emulator/beam/sys.h | 87
-rw-r--r--  erts/emulator/beam/time.c | 1527
-rw-r--r--  erts/emulator/beam/utils.c | 607
131 files changed, 18650 insertions, 10360 deletions
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 2052afe52b..c2d78aaccb 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -199,7 +199,7 @@ atom_alloc(Atom* tmpl)
static void
atom_free(Atom* obj)
{
- erts_free(ERTS_ALC_T_ATOM, (void*) obj);
+ ASSERT(obj->slot.index == atom_val(am_ErtsSecretAtom));
}
static void latin1_to_utf8(byte* conv_buf, const byte** srcp, int* lenp)
@@ -233,10 +233,10 @@ need_convertion:
}
/*
- * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ * erts_atom_put_index() may fail. Returns negative indexes for errors.
*/
-Eterm
-erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+int
+erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
{
byte utf8_copy[MAX_ATOM_SZ_FROM_LATIN1];
const byte *text = name;
@@ -253,7 +253,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = 0;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
switch (enc) {
@@ -262,7 +262,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
#ifdef DEBUG
for (aix = 0; aix < len; aix++) {
@@ -276,7 +276,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
no_latin1_chars = tlen;
latin1_to_utf8(utf8_copy, &text, &tlen);
@@ -284,7 +284,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_ATOM_ENC_UTF8:
/* First sanity check; need to verify later */
if (tlen > MAX_ATOM_SZ_LIMIT && !trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
break;
}
@@ -295,7 +295,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_read_unlock();
if (aix >= 0) {
/* Already in table no need to verify it */
- return make_atom(aix);
+ return aix;
}
if (enc == ERTS_ATOM_ENC_UTF8) {
@@ -314,13 +314,13 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_UTF8_OK_MAX_CHARS:
/* Truncated... */
if (!trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
ASSERT(no_chars == MAX_ATOM_CHARACTERS);
tlen = err_pos - text;
break;
default:
/* Bad utf8... */
- return THE_NON_VALUE;
+ return ATOM_BAD_ENCODING_ERROR;
}
}
@@ -333,7 +333,20 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_write_lock();
aix = index_put(&erts_atom_table, (void*) &a);
atom_write_unlock();
- return make_atom(aix);
+ return aix;
+}
+
+/*
+ * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ */
+Eterm
+erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+{
+ int aix = erts_atom_put_index(name, len, enc, trunc);
+ if (aix >= 0)
+ return make_atom(aix);
+ else
+ return THE_NON_VALUE;
}
Eterm
@@ -429,7 +442,8 @@ init_atom_table(void)
erts_smp_atomic_init_nob(&atom_put_ops, 0);
#endif
- erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
@@ -467,6 +481,9 @@ init_atom_table(void)
atom_space -= a.len;
atom_tab(ix)->name = (byte*)erl_atom_names[i];
}
+
+ /* Hide am_ErtsSecretAtom */
+ hash_erase(&erts_atom_table.htable, atom_tab(atom_val(am_ErtsSecretAtom)));
}
void
@@ -483,3 +500,9 @@ dump_atoms(fmtfn_t to, void *to_arg)
}
}
}
+
+Uint
+erts_get_atom_limit(void)
+{
+ return erts_atom_table.limit;
+}
\ No newline at end of file
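
The atom.c change above splits atom interning in two: erts_atom_put_index() returns either a non-negative atom-table index or one of the negative error codes added to atom.h below, while erts_atom_put() becomes a thin wrapper around it. A minimal caller sketch, assuming the ERTS-internal headers; intern_name() is a hypothetical helper, not part of this commit:

    #include "sys.h"
    #include "atom.h"

    /* Hypothetical helper showing the new index-based contract:
     * negative results are error codes, anything else is a valid
     * atom-table index that make_atom() may wrap. */
    static Eterm intern_name(const byte *name, int len)
    {
        int aix = erts_atom_put_index(name, len, ERTS_ATOM_ENC_UTF8, 0);
        if (aix >= 0)
            return make_atom(aix);       /* same result as erts_atom_put() */
        if (aix == ATOM_MAX_CHARS_ERROR) /* too long and trunc == 0 */
            return THE_NON_VALUE;
        ASSERT(aix == ATOM_BAD_ENCODING_ERROR); /* invalid UTF-8 */
        return THE_NON_VALUE;
    }
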
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
index a82efabb9f..be998a46bd 100644
--- a/erts/emulator/beam/atom.h
+++ b/erts/emulator/beam/atom.h
@@ -29,6 +29,8 @@
#define MAX_ATOM_SZ_LIMIT (4*MAX_ATOM_CHARACTERS) /* theoretical byte limit */
#define ATOM_LIMIT (1024*1024)
#define MIN_ATOM_TABLE_SIZE 8192
+#define ATOM_BAD_ENCODING_ERROR -1
+#define ATOM_MAX_CHARS_ERROR -2
#ifndef ARCH_32
/* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an
@@ -133,11 +135,11 @@ int atom_table_sz(void); /* table size in bytes, excluding stored objects */
Eterm am_atom_put(const char*, int); /* ONLY 7-bit ascii! */
Eterm erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
-int atom_erase(byte*, int);
-int atom_static_put(byte*, int);
+int erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
void init_atom_table(void);
void atom_info(fmtfn_t, void *);
void dump_atoms(fmtfn_t, void *);
+Uint erts_get_atom_limit(void);
int erts_atom_get(const char* name, int len, Eterm* ap, ErtsAtomEncoding enc);
void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used);
#endif
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 2b66bf6f4e..a44d23b181 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2017. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -59,6 +59,9 @@ atom nocatch
atom undefined_function
atom undefined_lambda
+# Secret internal atom that can never be found by string lookup
+# and should never leak out to be seen by the user.
+atom ErtsSecretAtom='3RT$'
# All other atoms. Try to keep the order alphabetic.
#
@@ -73,6 +76,8 @@ atom ac
atom accessor
atom active
atom active_tasks
+atom active_tasks_all
+atom alive
atom all
atom all_but_first
atom all_names
@@ -191,15 +196,21 @@ atom current_stacktrace
atom data
atom debug_flags
atom decimals
+atom default
atom delay_trap
atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_bif_exception
+atom dirty_bif_result
+atom dirty_bif_trap
atom dirty_cpu
atom dirty_cpu_schedulers_online
atom dirty_execution
atom dirty_io
+atom dirty_nif_exception
+atom dirty_nif_finalizer
atom disable_trace
atom disabled
atom discard
@@ -236,10 +247,12 @@ atom Eq='=:='
atom Eqeq='=='
atom erl_tracer
atom erlang
+atom erl_signal_server
atom ERROR='ERROR'
atom error_handler
atom error_logger
atom erts_code_purger
+atom erts_debug
atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
@@ -302,6 +315,7 @@ atom global
atom Gt='>'
atom grun
atom group_leader
+atom handle
atom have_dt_utag
atom heap_block_size
atom heap_size
@@ -366,6 +380,8 @@ atom long_schedule
atom low
atom Lt='<'
atom machine
+atom magic_ref
+atom major
atom match
atom match_limit
atom match_limit_recursion
@@ -395,6 +411,7 @@ atom milli_seconds
atom millisecond
atom min_heap_size
atom min_bin_vheap_size
+atom minor
atom minor_version
atom Minus='-'
atom module
@@ -551,6 +568,7 @@ atom return_to
atom return_trace
atom run_queue
atom run_queue_lengths
+atom run_queue_lengths_all
atom runnable
atom runnable_ports
atom runnable_procs
@@ -560,14 +578,17 @@ atom running_procs
atom runtime
atom safe
atom save_calls
-atom scheduler
+atom scheduler
atom scheduler_id
+atom scheduler_wall_time
+atom scheduler_wall_time_all
atom schedulers_online
atom scheme
atom scientific
atom scope
atom second
atom seconds
+atom send
atom send_to_non_existing_process
atom sensitive
atom sequential_tracer
@@ -585,6 +606,19 @@ atom set_tcw
atom set_tcw_fake
atom separate
atom shared
+atom sighup
+atom sigterm
+atom sigusr1
+atom sigusr2
+atom sigill
+atom sigchld
+atom sigabrt
+atom sigalrm
+atom sigstop
+atom sigint
+atom sigsegv
+atom sigtstp
+atom sigquit
atom silent
atom size
atom sl_alloc
@@ -624,8 +658,10 @@ atom Times='*'
atom timestamp
atom total
atom total_active_tasks
+atom total_active_tasks_all
atom total_heap_size
atom total_run_queue_lengths
+atom total_run_queue_lengths_all
atom tpkt
atom trace trace_ts traced
atom trace_control_word
@@ -660,11 +696,11 @@ atom value
atom values
atom version
atom visible
+atom wait
atom waiting
atom wall_clock
atom warning
atom warning_msg
-atom scheduler_wall_time
atom wordsize
atom write_concurrency
atom xor
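
Each atom entry above is compiled into a preloaded atom that C code reaches through a generated am_<name> constant; the quoted form ('3RT$') sets the atom's text when it differs from the identifier. The new ErtsSecretAtom entry pairs with the hash_erase() call added to init_atom_table() in atom.c above. A sketch of the resulting invariant, assuming the generated am_ErtsSecretAtom constant and that erts_atom_get() returns non-zero only on a successful lookup:

    /* The secret atom keeps its table slot but is unreachable by name:
     * string lookup fails while the term itself stays valid in C. */
    Eterm found;
    ASSERT(!erts_atom_get("3RT$", 4, &found, ERTS_ATOM_ENC_LATIN1));
    ASSERT(is_atom(am_ErtsSecretAtom));
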
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 27d6823d3c..14ddb74324 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -36,6 +36,13 @@
#include "erl_nif.h"
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
+#ifdef HIPE
+# include "hipe_bif0.h"
+# define IF_HIPE(X) (X)
+#else
+# define IF_HIPE(X) (0)
+#endif
#ifdef HIPE
# include "hipe_stack.h"
@@ -65,7 +72,6 @@ erts_smp_atomic_t erts_copy_literal_area__;
#define ERTS_SET_COPY_LITERAL_AREA(LA) \
erts_smp_atomic_set_nob(&erts_copy_literal_area__, \
(erts_aint_t) (LA))
-#ifdef ERTS_NEW_PURGE_STRATEGY
Process *erts_literal_area_collector = NULL;
typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef;
@@ -79,10 +85,9 @@ struct {
ErtsLiteralAreaRef *first;
ErtsLiteralAreaRef *last;
} release_literal_areas;
-#endif
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
+static Eterm check_process_code(Process* rp, Module* modp, int *redsp, int fcalls);
static void delete_code(Module* modp);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -92,7 +97,8 @@ init_purge_state(void)
{
purge_state.module = THE_NON_VALUE;
- erts_smp_mtx_init(&purge_state.mtx, "purge_state");
+ erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
purge_state.pending_purge_lambda =
erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
@@ -113,11 +119,11 @@ init_purge_state(void)
void
erts_beam_bif_load_init(void)
{
-#ifdef ERTS_NEW_PURGE_STRATEGY
- erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
+ erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
release_literal_areas.first = NULL;
release_literal_areas.last = NULL;
-#endif
erts_smp_atomic_init_nob(&erts_copy_literal_area__,
(erts_aint_t) NULL);
@@ -147,8 +153,19 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
+#if !defined(HIPE)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
Module* modp;
- Eterm res;
+ Eterm res, mod;
+
+ if (!is_internal_magic_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ mod = erts_module_for_prepared_code(erts_magic_ref2bin(BIF_ARG_1));
+
+ if (is_not_atom(mod))
+ BIF_ERROR(BIF_P, BADARG);
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
@@ -158,7 +175,7 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp && modp->curr.num_breakpoints > 0) {
ASSERT(modp->curr.code_hdr != NULL);
@@ -170,9 +187,12 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- if (res == BIF_ARG_1) {
+ if (res == mod) {
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
+ if (!modp)
+ modp = erts_get_module(mod, erts_active_code_ix());
+ hipe_redirect_to_module(modp);
}
else {
erts_abort_staging_code_ix();
@@ -181,6 +201,7 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
+#endif
}
BIF_RETTYPE
@@ -213,9 +234,9 @@ prepare_loading_2(BIF_ALIST_2)
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
- erts_refc_dec(&magic->refc, 1);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->intern.refc, 1);
BIF_RET(res);
}
@@ -223,15 +244,13 @@ BIF_RETTYPE
has_prepared_code_on_load_1(BIF_ALIST_1)
{
Eterm res;
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1)) {
+ if (!is_internal_magic_ref(BIF_ARG_1)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- pb = (ProcBin*) binary_val(BIF_ARG_1);
- res = erts_has_code_on_load(pb->val);
+ res = erts_has_code_on_load(erts_magic_ref2bin(BIF_ARG_1));
if (res == NIL) {
goto error;
}
@@ -245,7 +264,7 @@ struct m {
Uint exception;
};
-static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
#ifdef ERTS_SMP
static void smp_code_ix_commiter(void*);
@@ -317,13 +336,11 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
Eterm* cons = list_val(BIF_ARG_1);
Eterm term = CAR(cons);
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+ if (!is_internal_magic_ref(term)) {
goto badarg;
}
- pb = (ProcBin*) binary_val(term);
- p[i].code = pb->val;
+ p[i].code = erts_magic_ref2bin(term);
p[i].module = erts_module_for_prepared_code(p[i].code);
if (p[i].module == NIL) {
goto badarg;
@@ -381,8 +398,9 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
if (p[i].modp->curr.num_breakpoints > 0 ||
p[i].modp->curr.num_traced_exports > 0 ||
- erts_is_default_trace_enabled()) {
- /* tracing involved, fallback with thread blocking */
+ erts_is_default_trace_enabled() ||
+ IF_HIPE(hipe_need_blocking(p[i].modp))) {
+ /* tracing or hipe need thread blocking */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
@@ -420,7 +438,7 @@ finish_loading_1(BIF_ALIST_1)
Eterm mod;
Eterm retval;
- erts_refc_inc(&p[i].code->refc, 1);
+ erts_refc_inc(&p[i].code->intern.refc, 1);
retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
ASSERT(retval == NIL || retval == am_on_load);
if (retval == am_on_load) {
@@ -440,32 +458,36 @@ finish_loading_1(BIF_ALIST_1)
}
done:
- return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n, 1);
}
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
- struct m* loaded, int nloaded)
+ struct m* mods, int nmods, int free_mods)
{
#ifdef ERTS_SMP
if (is_blocking || !commit)
#endif
{
if (commit) {
+ int i;
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
- if (loaded) {
- int i;
- for (i=0; i < nloaded; i++) {
- set_default_trace_pattern(loaded[i].module);
+
+ for (i=0; i < nmods; i++) {
+ if (mods[i].modp->curr.code_hdr) {
+ set_default_trace_pattern(mods[i].module);
}
+ #ifdef HIPE
+ hipe_redirect_to_module(mods[i].modp);
+ #endif
}
}
else {
erts_abort_staging_code_ix();
}
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
if (is_blocking) {
erts_smp_thr_progress_unblock();
@@ -478,8 +500,8 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
else {
ASSERT(is_value(res));
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
erts_end_staging_code_ix();
/*
@@ -547,7 +569,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls)
+erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -562,31 +584,23 @@ erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int
if (!modp)
return am_false;
erts_rlock_old_code(code_ix);
- res = (!modp->old.code_hdr ? am_false :
- check_process_code(c_p, modp, flags, redsp, fcalls));
+ res = (!modp->old.code_hdr
+ ? am_false
+ : check_process_code(c_p, modp, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
}
-BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_check_process_code_1(BIF_ALIST_1)
{
int reds = 0;
- Uint flags;
Eterm res;
if (is_not_atom(BIF_ARG_1))
goto badarg;
- if (is_not_small(BIF_ARG_2))
- goto badarg;
-
- flags = unsigned_val(BIF_ARG_2);
- if (flags & ~ERTS_CPC_ALL) {
- goto badarg;
- }
-
- res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -622,7 +636,7 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
if (!rp)
BIF_RET(am_false);
- res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls);
+ res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
if (BIF_P != rp)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
@@ -665,8 +679,9 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else {
if (modp->curr.num_breakpoints > 0 ||
- modp->curr.num_traced_exports > 0) {
- /* we have tracing, retry single threaded */
+ modp->curr.num_traced_exports > 0 ||
+ IF_HIPE(hipe_need_blocking(modp))) {
+ /* tracing or hipe need to go single threaded */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
@@ -680,7 +695,14 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
success = 1;
}
}
- return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
+ {
+ struct m mod;
+ Eterm retval;
+ mod.module = BIF_ARG_1;
+ mod.modp = modp;
+ retval = staging_epilogue(BIF_P, success, res, is_blocking, &mod, 1, 0);
+ return retval;
+ }
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
@@ -805,23 +827,26 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[4] != 0) {
- ep->addressv[code_ix] = (void *) ep->code[4];
- ep->code[4] = 0;
+ if (ep->beam[1] != 0) {
+ ep->addressv[code_ix] = (void *) ep->beam[1];
+ ep->beam[1] = 0;
} else {
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = (BeamInstr) em_call_error_handler;
}
}
modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
+ #ifdef HIPE
+ hipe_redirect_to_module(modp);
+ #endif
} else if (BIF_ARG_2 == am_false) {
int i, num_exps;
@@ -833,13 +858,13 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- ep->code[4] = 0;
+ ep->beam[1] = 0;
}
}
erts_smp_thr_progress_unblock();
@@ -863,9 +888,9 @@ set_default_trace_pattern(Eterm module)
&trace_pattern_flags,
&meta_tracer);
if (trace_pattern_is_on) {
- Eterm mfa[1];
- mfa[0] = module;
- (void) erts_set_trace_pattern(0, mfa, 1,
+ ErtsCodeMFA mfa;
+ mfa.module = module;
+ (void) erts_set_trace_pattern(0, &mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
@@ -873,32 +898,12 @@ set_default_trace_pattern(Eterm module)
}
}
-#ifndef ERTS_NEW_PURGE_STRATEGY
-
-static ERTS_INLINE int
-check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
-{
- struct erl_off_heap_header* oh;
- for (oh = off_heap->first; oh; oh = oh->next) {
- if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
- ErlFunThing* funp = (ErlFunThing*) oh;
- if (ErtsInArea(funp->fe->address, area, area_size))
- return !0;
- }
- }
- return 0;
-}
-
-#endif
-
static Uint hfrag_literal_size(Eterm* start, Eterm* end,
char* lit_start, Uint lit_size);
static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
Eterm *start, Eterm *end,
char *lit_start, Uint lit_size);
-#ifdef ERTS_NEW_PURGE_STRATEGY
-
Eterm
erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed)
{
@@ -911,7 +916,7 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
la = ERTS_COPY_LITERAL_AREA();
if (!la)
- return am_ok;
+ goto return_ok;
oh = la->off_heap;
literals = (char *) &la->start[0];
@@ -975,6 +980,11 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
* this is not completely certain). We go for
* the GC directly instead of scanning everything
* one more time...
+ *
+ * Also note that calling functions expect a
+ * major GC to be performed if gc_allowed is set
+ * to true. If you change this, you need to fix
+ * callers...
*/
goto literal_gc;
}
@@ -1049,34 +1059,45 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
}
}
+return_ok:
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)))
+ c_p->flags &= ~F_DIRTY_CLA;
+#endif
+
return am_ok;
literal_gc:
if (!gc_allowed)
- return am_need_gc;
+ return am_need_gc;
if (c_p->flags & F_DISABLE_GC)
- return THE_NON_VALUE;
-
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ return THE_NON_VALUE;
- *redsp += erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
+ *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize,
+ oh, fcalls);
- erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh);
-
- *redsp += lit_bsize / 64; /* Need, better value... */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & F_DIRTY_CLA)
+ return THE_NON_VALUE;
+#endif
return am_ok;
}
static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
+check_process_code(Process* rp, Module* modp, int *redsp, int fcalls)
{
BeamInstr* start;
char* mod_start;
Uint mod_size;
Eterm* sp;
+#ifdef HIPE
+ void *nat_start = NULL;
+ Uint nat_size = 0;
+#endif
*redsp += 1;
@@ -1111,84 +1132,19 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- /*
- * Check all continuation pointers stored in stackdump
- * and clear exception stackdump if there is a pointer
- * to the module.
- */
- if (rp->ftrace != NIL) {
- struct StackTrace *s;
- ASSERT(is_list(rp->ftrace));
- s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
- if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
- (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- } else {
- int i;
- for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- break;
- }
- }
- }
- }
-
- return am_false;
-}
-
-#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... */
-
-static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
-{
- BeamInstr* start;
- char* literals;
- Uint lit_bsize;
- char* mod_start;
- Uint mod_size;
- Eterm* sp;
- int done_gc = 0;
- int need_gc = 0;
- ErtsMessage *msgp;
- ErlHeapFragment *hfrag;
-
-#define ERTS_ORDINARY_GC__ (1 << 0)
-#define ERTS_LITERAL_GC__ (1 << 1)
-
- /*
- * Pick up limits for the module.
- */
- start = (BeamInstr*) modp->old.code_hdr;
- mod_start = (char *) start;
- mod_size = modp->old.code_length;
-
- /*
- * Check if current instruction or continuation pointer points into module.
- */
- if (ErtsInArea(rp->i, mod_start, mod_size)
- || ErtsInArea(rp->cp, mod_start, mod_size)) {
- return am_true;
- }
-
- *redsp += 1;
-
- if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
- return am_true;
-
-
+#ifdef HIPE
/*
- * Check all continuation pointers stored on the stack.
+ * Check all continuation pointers stored on the native stack if the module
+ * has native code.
*/
- for (sp = rp->stop; sp < STACK_START(rp); sp++) {
- if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
+ if (modp->old.hipe_code) {
+ nat_start = modp->old.hipe_code->text_segment;
+ nat_size = modp->old.hipe_code->text_segment_size;
+ if (nat_size && nstack_any_cps_in_segment(rp, nat_start, nat_size)) {
return am_true;
}
}
+#endif
/*
* Check all continuation pointers stored in stackdump
@@ -1206,8 +1162,16 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
rp->ftrace = NIL;
} else {
int i;
+ char *area_start = mod_start;
+ Uint area_size = mod_size;
+#ifdef HIPE
+ if (rp->freason & EXF_NATIVE) {
+ area_start = nat_start;
+ area_size = nat_size;
+ }
+#endif
for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
+ if (ErtsInArea(s->trace[i], area_start, area_size)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
@@ -1217,188 +1181,9 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- if (rp->flags & F_DISABLE_GC) {
- /*
- * Cannot proceed. Process has disabled gc in order to
- * safely leave inconsistent data on the heap and/or
- * off heap lists. Need to wait for gc to be enabled
- * again.
- */
- return THE_NON_VALUE;
- }
-
- /*
- * Message queue can contains funs, and may contain
- * literals. If we got references to this module from the message
- * queue.
- *
- * If a literal is in the message queue we maka an explicit copy of
- * and attach it to the heap fragment. Each message needs to be
- * self contained, we cannot save the literal in the old_heap or
- * any other heap than the message it self.
- */
-
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
-
- if (modp->old.code_hdr->literal_area) {
- literals = (char*) modp->old.code_hdr->literal_area->start;
- lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals;
- }
- else {
- literals = NULL;
- lit_bsize = 0;
- }
-
- for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- hfrag = &msgp->hfrag;
- else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
- hfrag = msgp->data.heap_frag;
- else
- continue;
- {
- ErlHeapFragment *hf;
- Uint lit_sz;
- for (hf=hfrag; hf; hf = hf->next) {
- if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
- return am_true;
- lit_sz = hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- }
- if (lit_sz > 0) {
- ErlHeapFragment *bp = new_message_buffer(lit_sz);
- Eterm *hp = bp->mem;
-
- for (hf=hfrag; hf; hf = hf->next) {
- hfrag_literal_copy(&hp, &bp->off_heap,
- &hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- hfrag=hf;
- }
- /* link new hfrag last */
- ASSERT(hfrag->next == NULL);
- hfrag->next = bp;
- bp->next = NULL;
- }
- }
- }
-
- while (1) {
-
- /* Check heap, stack etc... */
- if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size))
- goto try_gc;
- if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- }
- if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
- goto try_literal_gc;
-#ifdef HIPE
- if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
- goto try_literal_gc;
-#endif
- if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
- goto try_literal_gc;
- if (rp->abandoned_heap) {
- if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
- literals, lit_bsize))
- goto try_literal_gc;
- }
- if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
- goto try_literal_gc;
-
- /* Check dictionary */
- if (rp->dictionary) {
- Eterm* start = ERTS_PD_START(rp->dictionary);
- Eterm* end = start + ERTS_PD_SIZE(rp->dictionary);
-
- if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /* Check heap fragments */
- for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- /* Off heap lists should already have been moved into process */
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /*
- * Message buffer fragments (matched messages)
- * - off heap lists should already have been moved into
- * process off heap structure.
- * - Check for literals
- */
- for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
- hfrag = erts_message_to_heap_frag(msgp);
- for (; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
-
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
- }
-
- return am_false;
-
- try_literal_gc:
- need_gc |= ERTS_LITERAL_GC__;
-
- try_gc:
- need_gc |= ERTS_ORDINARY_GC__;
-
- if ((done_gc & need_gc) == need_gc)
- return am_true;
-
- if (!(flags & ERTS_CPC_ALLOW_GC))
- return am_aborted;
-
- need_gc &= ~done_gc;
-
- /*
- * Try to get rid of literals by by garbage collecting.
- * Clear both fvalue and ftrace.
- */
-
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
-
- if (need_gc & ERTS_ORDINARY_GC__) {
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls);
- done_gc |= ERTS_ORDINARY_GC__;
- }
- if (need_gc & ERTS_LITERAL_GC__) {
- struct erl_off_heap_header* oh;
- oh = modp->old.code_hdr->literal_area->off_heap;
- *redsp += lit_bsize / 64; /* Need, better value... */
- erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh);
- done_gc |= ERTS_LITERAL_GC__;
- }
- need_gc = 0;
- }
-
-#undef ERTS_ORDINARY_GC__
-#undef ERTS_LITERAL_GC__
-
+ return am_false;
}
-#endif /* !ERTS_NEW_PURGE_STRATEGY */
-
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
{
@@ -1527,8 +1312,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
}
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
-
#ifdef ERTS_SMP
ErtsThrPrgrLaterOp later_literal_area_switch;
@@ -1559,13 +1342,8 @@ complete_literal_area_switch(void *literal_area)
}
#endif
-#endif /* ERTS_NEW_PURGE_STRATEGY */
-
BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
{
-#ifndef ERTS_NEW_PURGE_STRATEGY
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#else
ErtsLiteralArea *unused_la;
ErtsLiteralAreaRef *la_ref;
@@ -1624,7 +1402,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
BIF_RET(am_true);
#endif
-#endif /* ERTS_NEW_PURGE_STRATEGY */
}
void
@@ -1818,10 +1595,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
code = (BeamInstr*) modp->old.code_hdr;
end = (BeamInstr *)((char *)code + modp->old.code_length);
erts_fun_purge_prepare(code, end);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(!ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(modp->old.code_hdr->literal_area);
-#endif
}
if (BIF_ARG_2 == am_prepare_on_load) {
@@ -1866,10 +1639,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(NULL);
-#endif
#ifndef ERTS_SMP
erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
finalize_purge_operation(BIF_P, 0);
@@ -1938,15 +1707,18 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
/*
* Unload any NIF library
*/
- if (modp->old.nif != NULL) {
+ if (modp->old.nif != NULL
+ || IF_HIPE(hipe_purge_need_blocking(modp))) {
/* ToDo: Do unload nif without blocking */
erts_rwunlock_old_code(code_ix);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
is_blocking = 1;
erts_rwlock_old_code(code_ix);
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+ if (modp->old.nif) {
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
}
/*
@@ -1965,7 +1737,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
modp->old.code_length = 0;
modp->old.catches = BEAM_CATCHES_NIL;
erts_remove_from_ranges(code);
-
+#ifdef HIPE
+ hipe_purge_module(modp, is_blocking);
+#endif
ERTS_BIF_PREP_RET(ret, am_true);
}
@@ -1984,14 +1758,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
finalize_purge_operation(BIF_P, ret == am_true);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
-
- ASSERT(ERTS_COPY_LITERAL_AREA() == literals);
- ERTS_SET_COPY_LITERAL_AREA(NULL);
- erts_release_literal_area(literals);
-
-#else /* ERTS_NEW_PURGE_STRATEGY */
-
if (literals) {
ErtsLiteralAreaRef *ref;
ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
@@ -2015,8 +1781,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
BIF_P->common.id);
}
-#endif /* ERTS_NEW_PURGE_STRATEGY */
-
return ret;
}
@@ -2039,34 +1803,34 @@ delete_code(Module* modp)
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep != NULL && (ep->code[0] == module)) {
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep != NULL && (ep->info.mfa.module == module)) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
}
- else if (ep->code[3] ==
+ else if (ep->beam[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- erts_clear_export_break(modp, ep->code+3);
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export trace cleared, code_ix=%d", code_ix);
+ erts_clear_export_break(modp, &ep->info);
}
- else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
+ else ASSERT(ep->beam[0] == (BeamInstr) em_call_error_handler
|| !erts_initialized);
}
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
- ep->code[4] = 0;
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = (BeamInstr) em_call_error_handler;
+ ep->beam[1] = 0;
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export invalidation, code_ix=%d", code_ix);
}
}
ASSERT(modp->curr.num_breakpoints == 0);
ASSERT(modp->curr.num_traced_exports == 0);
modp->old = modp->curr;
- modp->curr.code_hdr = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- modp->curr.nif = NULL;
-
+ erts_module_instance_init(&modp->curr);
}
@@ -2080,9 +1844,11 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->curr.code_hdr && modp->old.code_hdr) {
- return am_not_purged;
- } else if (!modp->old.code_hdr) { /* Make the current version old. */
+ if (modp->curr.code_hdr) {
+ if (modp->old.code_hdr) {
+ return am_not_purged;
+ }
+ /* Make the current version old. */
delete_code(modp);
}
return NIL;
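
A pattern that recurs throughout the beam_bif_load.c diff above: prepared-code handles change from magic binaries, unwrapped via a ProcBin, to internal magic references. A condensed sketch; prepared_code_binary() is a hypothetical helper, not part of this commit:

    /* Validate and dereference a prepared-code handle under the new
     * representation. Callers raise BADARG on a NULL result. */
    static Binary *prepared_code_binary(Eterm handle)
    {
        if (!is_internal_magic_ref(handle))
            return NULL;
        return erts_magic_ref2bin(handle); /* Binary* carried by the ref */
    }
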
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 6d723a4b92..950639f7ae 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -32,6 +32,7 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_term.h"
+#include "erl_nfunc_sched.h"
/* *************************************************************************
** Macros
@@ -120,27 +121,27 @@ release_bp_sched_ix(Uint32 ix)
/*
** Helpers
*/
-static ErtsTracer do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer);
-static void set_function_break(BeamInstr *pc,
+static void set_function_break(ErtsCodeInfo *ci,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
-static int clear_function_break(BeamInstr *pc, Uint break_flags);
+static int clear_function_break(ErtsCodeInfo *ci, Uint break_flags);
-static BpDataTime* get_time_break(BeamInstr *pc);
-static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static BpDataTime* get_time_break(ErtsCodeInfo *ci);
+static GenericBpData* check_break(ErtsCodeInfo *ci, Uint break_flags);
-static void bp_meta_unref(BpMetaTracer* bmt);
-static void bp_count_unref(BpCount* bcp);
-static void bp_time_unref(BpDataTime* bdt);
-static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
-static void uninstall_breakpoint(BeamInstr* pc);
+static void bp_meta_unref(BpMetaTracer *bmt);
+static void bp_count_unref(BpCount *bcp);
+static void bp_time_unref(BpDataTime *bdt);
+static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
+static void uninstall_breakpoint(ErtsCodeInfo *ci);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -164,13 +165,14 @@ erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#endif
}
void
-erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
Uint max_funcs = 0;
@@ -195,41 +197,44 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
i = 0;
for (current = 0; current < num_modules; current++) {
BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
- BeamInstr* code;
+ ErtsCodeInfo* ci;
Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
Uint fi;
if (specified > 0) {
- if (mfa[0] != make_atom(module[current]->module)) {
+ if (mfa->module != make_atom(module[current]->module)) {
/* Wrong module name */
continue;
}
}
for (fi = 0; fi < num_functions; fi++) {
- BeamInstr* pc;
- int wi;
- code = code_hdr->functions[fi];
- ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- pc = code+5;
- if (erts_is_native_break(pc)) {
+ ci = code_hdr->functions[fi];
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
continue;
}
- if (is_nil(code[3])) { /* Ignore BIF stub */
+ if (is_nil(ci->mfa.module)) { /* Ignore BIF stub */
continue;
}
- for (wi = 0;
- wi < specified && (Eterm) code[2+wi] == mfa[wi];
- wi++) {
- /* Empty loop body */
- }
- if (wi == specified) {
- /* Store match */
- f->matching[i].pc = pc;
- f->matching[i].mod = module[current];
- i++;
- }
+ switch (specified) {
+ case 3:
+ if (ci->mfa.arity != mfa->arity)
+ continue;
+ case 2:
+ if (ci->mfa.function != mfa->function)
+ continue;
+ case 1:
+ if (ci->mfa.module != mfa->module)
+ continue;
+ case 0:
+ break;
+ }
+ /* Store match */
+ f->matching[i].ci = ci;
+ f->matching[i].mod = module[current];
+ i++;
}
}
f->matched = i;
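
The hunk above replaces the old code[2+wi] comparison loop with a fall-through switch: specified counts how many leading fields of the {M,F,A} trace pattern are significant, and each case falls through to the less specific checks below it. A usage sketch with hypothetical pattern values:

    /* specified == 1 matches every function in a module,
     * == 2 every arity of M:F, == 3 exactly M:F/A. */
    BpFunctions f;
    ErtsCodeMFA pat;
    pat.module   = am_lists;  /* hypothetical pattern: lists:map/2 */
    pat.function = am_map;
    pat.arity    = 2;
    erts_bp_match_functions(&f, &pat, 3);
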
@@ -237,7 +242,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
void
-erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
int i;
@@ -249,27 +254,36 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i, code_ix);
BeamInstr* pc;
- int j;
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j < specified) {
- continue;
- }
- pc = ep->code+3;
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ pc = ep->beam;
if (ep->addressv[code_ix] == pc) {
if ((*pc == (BeamInstr) em_apply_bif ||
*pc == (BeamInstr) em_call_error_handler)) {
continue;
}
ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ } else if (erts_is_function_native(erts_code_to_codeinfo(ep->addressv[code_ix]))) {
continue;
}
- f->matching[ne].pc = pc;
- f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ f->matching[ne].ci = &ep->info;
+ f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);
ne++;
}
@@ -295,7 +309,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < n; i++) {
- consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ consolidate_bp_data(fs[i].mod, fs[i].ci, local);
}
}
@@ -307,14 +321,14 @@ erts_consolidate_bif_bp_data(void)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < BIF_SIZE; i++) {
Export *ep = bif_export[i];
- consolidate_bp_data(0, ep->code+3, 0);
+ consolidate_bp_data(0, &ep->info, 0);
}
}
static void
-consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
GenericBpData* src;
GenericBpData* dst;
Uint flags;
@@ -360,9 +374,10 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
}
ASSERT(modp->curr.num_breakpoints >= 0);
ASSERT(modp->curr.num_traced_exports >= 0);
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(*erts_codeinfo_to_code(ci) !=
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
}
- pc[-4] = 0;
+ ci->u.gen_bp = NULL;
Free(g);
return;
}
@@ -411,8 +426,9 @@ erts_install_breakpoints(BpFunctions* f)
BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- GenericBp* g = (GenericBp *) pc[-4];
+ ErtsCodeInfo* ci = f->matching[i].ci;
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
+ GenericBp* g = ci->u.gen_bp;
if (*pc != br && g) {
Module* modp = f->matching[i].mod;
@@ -444,16 +460,16 @@ erts_uninstall_breakpoints(BpFunctions* f)
Uint n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- uninstall_breakpoint(pc);
+ uninstall_breakpoint(f->matching[i].ci);
}
}
static void
-uninstall_breakpoint(BeamInstr* pc)
+uninstall_breakpoint(ErtsCodeInfo *ci)
{
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
if (g->data[erts_active_bp_ix()].flags == 0) {
/*
* The following write is not protected by any lock. We
@@ -480,30 +496,30 @@ erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
}
void
-erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, erts_tracer_nil);
+ set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, ErtsTracer tracer)
+erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
+ set_function_break(ci, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
-erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op count_op)
{
- set_function_break(pc, NULL,
+ set_function_break(ci, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
count_op, erts_tracer_nil);
}
void
-erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+erts_clear_time_trace_bif(ErtsCodeInfo *ci) {
+ clear_function_break(ci, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}
void
@@ -532,14 +548,14 @@ erts_clear_trace_break(BpFunctions* f)
}
void
-erts_clear_call_trace_bif(BeamInstr *pc, int local)
+erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
if (g) {
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
if (g->data[erts_staging_bp_ix()].flags & flags) {
- clear_function_break(pc, flags);
+ clear_function_break(ci, flags);
}
}
}
@@ -551,9 +567,9 @@ erts_clear_mtrace_break(BpFunctions* f)
}
void
-erts_clear_mtrace_bif(BeamInstr *pc)
+erts_clear_mtrace_bif(ErtsCodeInfo *ci)
{
- clear_function_break(pc, ERTS_BPF_META_TRACE);
+ clear_function_break(ci, ERTS_BPF_META_TRACE);
}
void
@@ -595,52 +611,48 @@ erts_clear_module_break(Module *modp) {
}
n = (Uint)(UWord) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
}
erts_commit_staged_bp();
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- uninstall_breakpoint(pc);
- consolidate_bp_data(modp, pc, 1);
- ASSERT(pc[-4] == 0);
+ uninstall_breakpoint(ci);
+ consolidate_bp_data(modp, ci, 1);
+ ASSERT(ci->u.gen_bp == NULL);
}
return n;
}
void
-erts_clear_export_break(Module* modp, BeamInstr* pc)
+erts_clear_export_break(Module* modp, ErtsCodeInfo *ci)
{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
erts_commit_staged_bp();
- *pc = (BeamInstr) 0;
- consolidate_bp_data(modp, pc, 0);
- ASSERT(pc[-4] == 0);
+ *erts_codeinfo_to_code(ci) = (BeamInstr) 0;
+ consolidate_bp_data(modp, ci, 0);
+ ASSERT(ci->u.gen_bp == NULL);
}
BeamInstr
-erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
GenericBp* g;
GenericBpData* bp;
Uint bp_flags;
ErtsBpIndex ix = erts_active_bp_ix();
- g = (GenericBp *) I[-4];
+ ASSERT(info->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+
+ g = info->u.gen_bp;
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
@@ -659,9 +671,9 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
@@ -669,7 +681,8 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
- new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
+ new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);
+
if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
&bp->meta_tracer->tracer,
@@ -688,7 +701,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
- erts_trace_time_call(c_p, I, bp->time);
+ erts_trace_time_call(c_p, info, bp->time);
w = (BeamInstr) *c_p->cp;
if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
w == (BeamInstr) BeamOp(op_return_trace) ||
@@ -696,7 +709,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
Eterm* E = c_p->stop;
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - 2 < c_p->htop) {
- (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
}
E = c_p->stop;
@@ -704,7 +717,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
ASSERT(c_p->htop <= E && E <= c_p->hend);
E -= 2;
- E[0] = make_cp(I);
+ E[0] = make_cp(erts_codeinfo_to_code(info));
E[1] = make_cp(c_p->cp); /* original return address */
c_p->cp = beam_return_time_trace;
c_p->stop = E;
@@ -732,9 +745,9 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
ErtsTracer meta_tracer = erts_tracer_nil;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
+ int applying = (I == ep->beam); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
BeamInstr *cp = p->cp;
GenericBp* g;
GenericBpData* bp = NULL;
@@ -742,7 +755,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ g = ep->info.u.gen_bp;
if (g) {
bp = &g->data[erts_active_bp_ix()];
bp_flags = bp->flags;
@@ -758,7 +771,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
- flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
@@ -766,7 +779,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
old_tracer = meta_tracer;
- flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
0, &meta_tracer);
if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
@@ -784,8 +797,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
- BeamInstr *pc = (BeamInstr *)ep->code+3;
- erts_trace_time_call(p, pc, bp->time);
+ erts_trace_time_call(p, &ep->info, bp->time);
}
/* Restore original continuation pointer (if changed). */
@@ -795,6 +807,30 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
result = func(p, args, I);
+ if (erts_nif_export_check_save_trace(p, result,
+ applying, ep,
+ cp, flags,
+ flags_meta, I,
+ meta_tracer)) {
+ /*
+ * erts_bif_trace_epilogue() will be called
+ * later when appropriate via the NIF export
+ * scheduling functionality...
+ */
+ return result;
+ }
+
+ return erts_bif_trace_epilogue(p, result, applying, ep, cp,
+ flags, flags_meta, I,
+ meta_tracer);
+}
+
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
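[The split above exists so that a BIF which gets rescheduled through the
NIF-export machinery can have its return/exception tracing performed when
the rescheduled call eventually completes rather than immediately. A
hypothetical replay site, assuming the saved arguments are parked in the
NIF export and handed back later -- SavedBifTrace and its fields are
invented for illustration:

    /* Hypothetical replay site; the real state lives in the NIF export. */
    typedef struct {
        int applying;
        Export *ep;
        BeamInstr *cp, *I;
        Uint32 flags, flags_meta;
        ErtsTracer meta_tracer;
    } SavedBifTrace;

    static Eterm finish_traced_bif(Process *p, SavedBifTrace *st, Eterm result)
    {
        return erts_bif_trace_epilogue(p, result, st->applying, st->ep,
                                       st->cp, st->flags, st->flags_meta,
                                       st->I, st->meta_tracer);
    }
]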
@@ -848,11 +884,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
class = exception_tag[GET_EXC_CLASS(reason)];
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
@@ -883,11 +919,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer);
+ erts_trace_return(p, &ep->info.mfa, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER(p));
+ erts_trace_return(p, &ep->info.mfa, result, &ERTS_TRACER(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE &&
IS_TRACED_FL(p, F_TRACE_RETURN_TO)) {
@@ -906,7 +942,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
static ErtsTracer
-do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer)
{
Eterm* cpp;
@@ -947,7 +983,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
ASSERT(is_CP(*cpp));
}
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer);
+ flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (cpp) {
c_p->cp = cp_save;
@@ -963,7 +999,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - need < c_p->htop) {
- (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
E = c_p->stop;
}
@@ -979,12 +1015,12 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E -= 3;
c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
E[1] = copy_object(tracer, c_p);
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
+ E[0] = make_cp(&info->mfa.module);
+ /* We ARE at the beginning of an instruction,
the funcinfo is above I. */
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
beam_exception_trace : beam_return_trace;
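[For reference, the three words pushed by do_call_trace() above form the
frame that beam_return_trace/beam_exception_trace later pops; as laid out
by the stores in this hunk:

    /* Frame pushed by do_call_trace (E is the stack top):
     *
     *   E[0]  CP into the traced function's ErtsCodeInfo
     *         (make_cp(&info->mfa.module); previously make_cp(I - 3))
     *   E[1]  the tracer, copied onto the process heap
     *   E[2]  the original continuation pointer (c_p->cp)
     *
     * c_p->cp is then redirected to beam_return_trace or
     * beam_exception_trace, which consumes this frame on return. */
]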
@@ -997,7 +1033,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
}
void
-erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
@@ -1027,14 +1063,14 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt = Alloc(sizeof(process_breakpoint_time_t));
(void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
} else {
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
/* add time to previous code */
sitem.time = time - pbt->time;
sitem.pid = c_p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* if null then the breakpoint was removed */
if (pbdt) {
@@ -1071,14 +1107,14 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BP_TIME_ADD(item, &sitem);
}
- pbt->pc = I;
+ pbt->ci = info;
pbt->time = time;
release_bp_sched_ix(six);
}
void
-erts_trace_time_return(Process *p, BeamInstr *pc)
+erts_trace_time_return(Process *p, ErtsCodeInfo *ci)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
@@ -1110,14 +1146,14 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
/* might have been removed due to
* trace_pattern(false)
*/
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
sitem.time = time - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* beware, the trace_pattern might have been removed */
if (pbdt) {
@@ -1136,7 +1172,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
}
- pbt->pc = pc;
+ pbt->ci = ci;
pbt->time = time;
}
@@ -1145,10 +1181,10 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- GenericBpData* bp = check_break(pc, flags);
+ GenericBpData* bp = check_break(ci, flags);
if (bp) {
if (match_spec_ret) {
@@ -1160,10 +1196,10 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);
if (bp) {
if (match_spec_ret) {
@@ -1177,21 +1213,10 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
return 0;
}
-int
-erts_is_native_break(BeamInstr *pc) {
-#ifdef HIPE
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- return pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
- || pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
-#else
- return 0;
-#endif
-}
-
int
-erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);
if (bp) {
if (count_ret) {
@@ -1202,13 +1227,13 @@ erts_is_count_break(BeamInstr *pc, Uint *count_ret)
return 0;
}
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *retval) {
Uint i, ix;
bp_time_hash_t hash;
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = get_time_break(pc);
+ BpDataTime *bdt = get_time_break(ci);
if (bdt) {
if (retval) {
@@ -1262,26 +1287,25 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
-BeamInstr *
-erts_find_local_func(Eterm mfa[3]) {
+ErtsCodeInfo *
+erts_find_local_func(ErtsCodeMFA *mfa) {
Module *modp;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr;
+ ErtsCodeInfo* ci;
Uint i,n;
- if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
+ if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
return NULL;
if ((code_hdr = modp->curr.code_hdr) == NULL)
return NULL;
n = (BeamInstr) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- code_ptr = code_hdr->functions[i];
- ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
- is_nil((Eterm) code_ptr[2]));
- if (mfa[1] == ((Eterm) code_ptr[3]) &&
- ((BeamInstr) mfa[2]) == code_ptr[4]) {
- return code_ptr + 5;
+ ci = code_hdr->functions[i];
+ ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == ci->op);
+ ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
+ if (mfa->function == ci->mfa.function &&
+ mfa->arity == ci->mfa.arity) {
+ return ci;
}
}
return NULL;
@@ -1411,7 +1435,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
if (pbdt) {
sitem.time = get_mtime(p) - pbt->time;
sitem.pid = p->common.id;
@@ -1461,14 +1485,14 @@ set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- set_function_break(pc, match_spec, break_flags,
+ set_function_break(f->matching[i].ci,
+ match_spec, break_flags,
count_op, tracer);
}
}
static void
-set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
@@ -1477,7 +1501,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ErtsBpIndex ix = erts_staging_bp_ix();
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- g = (GenericBp *) pc[-4];
+ g = ci->u.gen_bp;
if (g == 0) {
int i;
if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
@@ -1485,11 +1509,11 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
return;
}
g = Alloc(sizeof(GenericBp));
- g->orig_instr = *pc;
+ g->orig_instr = *erts_codeinfo_to_code(ci);
for (i = 0; i < ERTS_NUM_BP_IX; i++) {
g->data[i].flags = 0;
}
- pc[-4] = (BeamInstr) g;
+ ci->u.gen_bp = g;
}
bp = &g->data[ix];
@@ -1585,13 +1609,12 @@ clear_break(BpFunctions* f, Uint break_flags)
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- clear_function_break(pc, break_flags);
+ clear_function_break(f->matching[i].ci, break_flags);
}
}
static int
-clear_function_break(BeamInstr *pc, Uint break_flags)
+clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
{
GenericBp* g;
GenericBpData* bp;
@@ -1600,7 +1623,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- if ((g = (GenericBp *) pc[-4]) == 0) {
+ if ((g = ci->u.gen_bp) == NULL) {
return 1;
}
@@ -1686,19 +1709,19 @@ bp_time_unref(BpDataTime* bdt)
}
static BpDataTime*
-get_time_break(BeamInstr *pc)
+get_time_break(ErtsCodeInfo *ci)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
return bp ? bp->time : 0;
}
static GenericBpData*
-check_break(BeamInstr *pc, Uint break_flags)
+check_break(ErtsCodeInfo *ci, Uint break_flags)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (erts_is_native_break(pc)) {
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
return 0;
}
if (g) {
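[For readers following this refactor: all of the pc[-4]/pc[-5] peeking in
beam_bp.c is replaced by named fields of a function-header struct. Judging
from the accesses in this file (ci->op, ci->u.gen_bp, ci->mfa.*) and from
erts_codeinfo_to_code(), the layout is roughly the sketch below; the
authoritative definitions live in the ERTS headers:

    /* Sketch inferred from the field accesses in this diff. */
    typedef struct {
        Eterm module;              /* atom */
        Eterm function;            /* atom */
        Uint  arity;
    } ErtsCodeMFA;

    typedef struct {
        BeamInstr op;              /* OpCode(i_func_info_IaaI); was pc[-5] */
        union {
            struct generic_bp *gen_bp;  /* breakpoint data; was pc[-4] */
        } u;
        ErtsCodeMFA mfa;           /* was pc[-3]..pc[-1] */
    } ErtsCodeInfo;

    /* The function's first instruction follows the header: */
    static ERTS_INLINE BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci)
    {
        return (BeamInstr *) (ci + 1);
    }
]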
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 7372b9b258..56fa82b912 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -46,7 +46,7 @@ typedef struct bp_data_time { /* Call time */
typedef struct {
ErtsMonotonicTime time;
- BeamInstr *pc;
+ ErtsCodeInfo *ci;
} process_breakpoint_time_t; /* used within psd */
typedef struct {
@@ -93,7 +93,7 @@ enum erts_break_op{
typedef Uint32 ErtsBpIndex;
typedef struct {
- BeamInstr* pc;
+ ErtsCodeInfo *ci;
Module* mod;
} BpFunction;
@@ -114,8 +114,8 @@ void erts_commit_staged_bp(void);
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
-void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
-void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
+void erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
@@ -126,15 +126,15 @@ void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
-void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
-void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+void erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
-void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
+void erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec,
ErtsTracer tracer);
-void erts_clear_mtrace_bif(BeamInstr *pc);
+void erts_clear_mtrace_bif(ErtsCodeInfo *ci);
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
@@ -144,32 +144,32 @@ void erts_clear_count_break(BpFunctions *f);
void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-void erts_clear_export_break(Module *modp, BeamInstr* pc);
+void erts_clear_export_break(Module *modp, ErtsCodeInfo* ci);
-BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
-BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
+BeamInstr erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *ci, Eterm* reg);
+BeamInstr erts_trace_break(Process *p, ErtsCodeInfo *ci, Eterm *args,
Uint32 *ret_flags, ErtsTracer *tracer);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
-int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local);
+int erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_mtrace_bif(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+int erts_is_native_break(ErtsCodeInfo *ci);
+int erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret);
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
-void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
-void erts_trace_time_return(Process* c_p, BeamInstr* pc);
+void erts_trace_time_call(Process* c_p, ErtsCodeInfo *ci, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, ErtsCodeInfo *ci);
void erts_schedule_time_break(Process *p, Uint out);
void erts_set_time_break(BpFunctions *f, enum erts_break_op);
void erts_clear_time_break(BpFunctions *f);
-int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
-void erts_clear_time_trace_bif(BeamInstr *pc);
+int erts_is_time_trace_bif(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
+void erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op);
+void erts_clear_time_trace_bif(ErtsCodeInfo *ci);
-BeamInstr *erts_find_local_func(Eterm mfa[3]);
+ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
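[A minimal usage sketch of the reworked lookup API, assuming the struct
layout sketched earlier; the caller and the atoms am_foo/am_bar are
hypothetical:

    void example_lookup(void)
    {
        ErtsCodeMFA mfa;
        ErtsCodeInfo *ci;
        BeamInstr *code;

        mfa.module   = am_foo;
        mfa.function = am_bar;
        mfa.arity    = 2;

        ci = erts_find_local_func(&mfa);
        if (ci != NULL) {
            ASSERT(ci->mfa.function == am_bar && ci->mfa.arity == 2);
            code = erts_codeinfo_to_code(ci);  /* first real instruction */
            (void) code;
        }
    }
]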
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 21d336049f..a2060c80de 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,7 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -51,6 +52,7 @@ void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr);
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif);
BIF_RETTYPE
erts_debug_same_2(BIF_ALIST_2)
@@ -115,7 +117,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
Eterm MFA = BIF_ARG_1;
Eterm boolean = BIF_ARG_2;
Eterm* tp;
- Eterm mfa[3];
+ ErtsCodeMFA mfa;
int i;
int specified = 0;
Eterm res;
@@ -131,23 +133,24 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
if (*tp != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+
+ if (is_small(tp[3])) {
+ mfa.arity = signed_val(tp[3]);
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
@@ -157,7 +160,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- erts_bp_match_functions(&f, mfa, specified);
+ erts_bp_match_functions(&f, &mfa, specified);
if (boolean == am_true) {
erts_set_debug_break(&f);
erts_install_breakpoints(&f);
@@ -241,9 +244,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
Eterm* tp;
Eterm bin;
Eterm mfa;
- BeamInstr* funcinfo = NULL; /* Initialized to eliminate warning. */
+ ErtsCodeMFA *cmfa = NULL;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr = NULL; /* Initialized to eliminate warning. */
+ BeamInstr *code_ptr;
BeamInstr instr;
BeamInstr uaddr;
Uint hsz;
@@ -251,7 +254,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
if (term_to_UWord(addr, &uaddr)) {
code_ptr = (BeamInstr *) uaddr;
- if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
+ if ((cmfa = find_function_from_pc(code_ptr)) == NULL) {
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
@@ -282,24 +285,22 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* such as erts_debug:apply/4. Then search for it in the module.
*/
if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
- /* XXX: add "&& ep->address != ep->code+3" condition?
+ /* XXX: add "&& ep->address != ep->code" condition?
* Consider a traced function.
- * Its ep will have ep->address == ep->code+3.
+ * Its ep will have ep->address == ep->code.
* erts_find_function() will return the non-NULL ep.
* Below we'll try to derive a code_ptr from ep->address.
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
- funcinfo = code_ptr+2;
+ cmfa = erts_code_to_codemfa(ep->addressv[code_ix]);
} else if (modp == NULL || (code_hdr = modp->curr.code_hdr) == NULL) {
BIF_RET(am_undef);
} else {
n = code_hdr->num_functions;
for (i = 0; i < n; i++) {
- code_ptr = code_hdr->functions[i];
- if (code_ptr[3] == name && code_ptr[4] == arity) {
- funcinfo = code_ptr+2;
+ cmfa = &code_hdr->functions[i]->mfa;
+ if (cmfa->function == name && cmfa->arity == arity) {
break;
}
}
@@ -307,6 +308,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_undef);
}
}
+ code_ptr = (BeamInstr*)erts_code_to_codeinfo(erts_codemfa_to_code(cmfa));
} else {
goto error;
}
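[The chained call above composes the pointer conversions used throughout
this diff. Their presumed bodies are pure pointer arithmetic around the
ErtsCodeInfo header -- a sketch, consistent with the layout given earlier:

    static ERTS_INLINE ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
    {
        return ((ErtsCodeInfo *) I) - 1;          /* step back over header */
    }

    static ERTS_INLINE ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
    {
        return &erts_code_to_codeinfo(I)->mfa;    /* was I[-3]..I[-1] */
    }

    static ERTS_INLINE BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa)
    {
        return (BeamInstr *) (mfa + 1);  /* mfa is the header's last field */
    }
]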
@@ -332,9 +334,10 @@ erts_debug_disassemble_1(BIF_ALIST_1)
(void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
hp = HAlloc(p, hsz);
addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
- ASSERT(is_atom(funcinfo[0]) || funcinfo[0] == NIL);
- ASSERT(is_atom(funcinfo[1]) || funcinfo[1] == NIL);
- mfa = TUPLE3(hp, (Eterm) funcinfo[0], (Eterm) funcinfo[1], make_small((Eterm) funcinfo[2]));
+ ASSERT(is_atom(cmfa->module) || is_nil(cmfa->module));
+ ASSERT(is_atom(cmfa->function) || is_nil(cmfa->function));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
hp += 4;
return TUPLE3(hp, addr, bin, mfa);
}
@@ -346,11 +349,12 @@ dbg_bt(Process* p, Eterm* sp)
while (sp < stack) {
if (is_CP(*sp)) {
- BeamInstr* addr = find_function_from_pc(cp_val(*sp));
- if (addr)
+ ErtsCodeMFA* cmfa = find_function_from_pc(cp_val(*sp));
+ if (cmfa)
erts_fprintf(stderr,
HEXF ": %T:%T/%bpu\n",
- addr, (Eterm) addr[0], (Eterm) addr[1], addr[2]);
+ &cmfa->module, cmfa->module,
+ cmfa->function, cmfa->arity);
}
sp++;
}
@@ -359,17 +363,17 @@ dbg_bt(Process* p, Eterm* sp)
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
- BeamInstr* f = find_function_from_pc(addr);
+ ErtsCodeMFA* cmfa = find_function_from_pc(addr);
- if (f == NULL) {
+ if (cmfa == NULL) {
erts_fprintf(stderr, "???\n");
} else {
int arity;
int i;
- addr = f;
- arity = addr[2];
- erts_fprintf(stderr, HEXF ": %T:%T(", addr, (Eterm) addr[0], (Eterm) addr[1]);
+ arity = cmfa->arity;
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr,
+ cmfa->module, cmfa->function);
for (i = 0; i < arity; i++)
erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
erts_fprintf(stderr, ")\n");
@@ -520,27 +524,49 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
break;
case 'I': /* Untagged integer. */
case 't':
- erts_print(to, to_arg, "%d", *ap);
+ switch (op) {
+ case op_i_gc_bif1_jIsId:
+ case op_i_gc_bif2_jIIssd:
+ case op_i_gc_bif3_jIIssd:
+ {
+ const ErtsGcBif* p;
+ BifFunction gcf = (BifFunction) *ap;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->gc_bif == gcf) {
+ print_bif_name(to, to_arg, p->bif);
+ break;
+ }
+ }
+ if (p->bif == 0) {
+ erts_print(to, to_arg, "%d", (Uint)gcf);
+ }
+ break;
+ }
+ default:
+ erts_print(to, to_arg, "%d", *ap);
+ }
ap++;
break;
case 'f': /* Destination label */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
+ ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) {
erts_print(to, to_arg, "f(" HEXF ")", *ap);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
}
ap++;
}
break;
case 'p': /* Pointer (to label) */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
+ ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) {
erts_print(to, to_arg, "p(" HEXF ")", *ap);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
}
ap++;
}
@@ -553,26 +579,16 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
{
Export* ex = (Export *) *ap;
erts_print(to, to_arg,
- "%T:%T/%bpu", (Eterm) ex->code[0], (Eterm) ex->code[1], ex->code[2]);
+ "%T:%T/%bpu", (Eterm) ex->info.mfa.module,
+ (Eterm) ex->info.mfa.function,
+ ex->info.mfa.arity);
ap++;
}
break;
case 'F': /* Function definition */
break;
case 'b':
- for (i = 0; i < BIF_SIZE; i++) {
- BifFunction bif = (BifFunction) *ap;
- if (bif == bif_table[i].f) {
- break;
- }
- }
- if (i == BIF_SIZE) {
- erts_print(to, to_arg, "b(%d)", (Uint) *ap);
- } else {
- Eterm name = bif_table[i].name;
- unsigned arity = bif_table[i].arity;
- erts_print(to, to_arg, "%T/%u", name, arity);
- }
+ print_bif_name(to, to_arg, (BifFunction) *ap);
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
@@ -731,3 +747,427 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
return size;
}
+
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (bif == bif_table[i].f) {
+ break;
+ }
+ }
+ if (i == BIF_SIZE) {
+ erts_print(to, to_arg, "b(%d)", (Uint) bif);
+ } else {
+ Eterm name = bif_table[i].name;
+ unsigned arity = bif_table[i].arity;
+ erts_print(to, to_arg, "%T/%u", name, arity);
+ }
+}
+
+/*
+ * Dirty BIF testing.
+ *
+ * The erts_debug:dirty_cpu/2, erts_debug:dirty_io/2, and
+ * erts_debug:dirty/3 BIFs are used by the dirty_bif_SUITE
+ * test suite.
+ */
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+static int ms_wait(Process *c_p, Eterm etimeout, int busy);
+static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
+#endif
+static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
+
+/*
+ * erts_debug:dirty_cpu/2 is statically determined to execute on
+ * a dirty CPU scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_cpu_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_cpu, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty_io/2 is statically determined to execute on
+ * a dirty I/O scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_io_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_io, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty/3 executes on a normal scheduler.
+ */
+BIF_RETTYPE
+erts_debug_dirty_3(BIF_ALIST_3)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Eterm argv[2];
+ switch (BIF_ARG_1) {
+ case am_normal:
+ return dirty_test(BIF_P, am_normal, BIF_ARG_2, BIF_ARG_3, BIF_I);
+ case am_dirty_cpu:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ case am_dirty_io:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ default:
+ BIF_ERROR(BIF_P, EXC_BADARG);
+ }
+#else
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#endif
+}
+
+
+static BIF_RETTYPE
+dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
+{
+ BIF_RETTYPE ret;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (am_scheduler == arg1) {
+ ErtsSchedulerData *esdp;
+ if (arg2 != am_type)
+ goto badarg;
+ esdp = erts_proc_sched_data(c_p);
+ if (!esdp)
+ goto scheduler_type_error;
+
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ ERTS_BIF_PREP_RET(ret, am_normal);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ERTS_BIF_PREP_RET(ret, am_dirty_io);
+ break;
+ default:
+ scheduler_type_error:
+ ERTS_BIF_PREP_RET(ret, am_error);
+ break;
+ }
+ }
+ else if (am_error == arg1) {
+ switch (arg2) {
+ case am_notsup:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
+ break;
+ case am_undef:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+ break;
+ case am_badarith:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
+ break;
+ case am_noproc:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
+ break;
+ case am_system_limit:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case am_badarg:
+ default:
+ goto badarg;
+ }
+ }
+ else if (am_copy == arg1) {
+ int i;
+ Eterm res;
+
+ for (res = NIL, i = 0; i < 1000; i++) {
+ Eterm *hp, sz;
+ Eterm cpy;
+ /* We do not want this to be optimized,
+ but rather the opposite... */
+ sz = size_object(arg2);
+ hp = HAlloc(c_p, sz);
+ cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
+ hp = HAlloc(c_p, 2);
+ res = CONS(hp, cpy, res);
+ }
+
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (am_send == arg1) {
+ dirty_send_message(c_p, arg2, am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("wait", arg1)) {
+ if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
+ goto badarg;
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
+ /*
+ * Reschedule the operation, decrementing by two, until we reach
+ * zero. Switch between dirty scheduler types when 'n' is
+ * evenly divisible by 4. If the initial value wasn't evenly
+ * divisible by 2, throw a badarg exception.
+ */
+ Eterm next_type;
+ Sint n;
+ if (!term_to_Sint(arg2, &n) || n < 0)
+ goto badarg;
+ if (n == 0)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm argv[3];
+ Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
+ if (n % 4 != 0)
+ next_type = type;
+ else {
+ switch (type) {
+ case am_dirty_cpu: next_type = am_dirty_io; break;
+ case am_dirty_io: next_type = am_normal; break;
+ case am_normal: next_type = am_dirty_cpu; break;
+ default: goto badarg;
+ }
+ }
+ switch (next_type) {
+ case am_dirty_io:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ break;
+ case am_dirty_cpu:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ break;
+ case am_normal:
+ argv[0] = am_normal;
+ argv[1] = arg1;
+ argv[2] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_3,
+ ERTS_SCHED_NORMAL,
+ am_erts_debug,
+ am_dirty,
+ 3);
+ break;
+ default:
+ goto badarg;
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
+ ERTS_DECL_AM(ready);
+ ERTS_DECL_AM(done);
+ dirty_send_message(c_p, arg2, AM_ready);
+ ms_wait(c_p, make_small(6000), 0);
+ dirty_send_message(c_p, arg2, AM_done);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
+ Process *real_c_p = erts_proc_shadow2real(c_p);
+ Eterm *hp, *hp2;
+ Uint sz;
+ int i;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;
+
+ if (ERTS_PROC_IS_EXITING(real_c_p))
+ goto badarg;
+ dirty_send_message(c_p, arg2, am_alive);
+
+ /* Wait until dead */
+ while (!ERTS_PROC_IS_EXITING(real_c_p)) {
+ if (dirty_io)
+ ms_wait(c_p, make_small(100), 0);
+ else
+ erts_thr_yield();
+ }
+
+ ms_wait(c_p, make_small(1000), 0);
+
+ /* Should still be able to allocate memory */
+ hp = HAlloc(c_p, 3); /* Likely on heap */
+ sz = 10000;
+ hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
+ *hp2 = make_pos_bignum_header(sz);
+ for (i = 1; i < sz; i++)
+ hp2[i] = (Eterm) 4711;
+ ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ }
+#else
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+#endif
+ return ret;
+}
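[As a concrete trace of the "reschedule" logic above: starting on a dirty
CPU scheduler with n = 8 runs dirty_cpu(8) -> dirty_io(6) -> dirty_io(4)
-> normal(2) -> normal(0) -> ok, switching scheduler type exactly when n
is divisible by 4; an odd starting value steps past zero to a negative n
and fails the term_to_Sint/n < 0 check with badarg.]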
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static int
+dirty_send_message(Process *c_p, Eterm to, Eterm tag)
+{
+ ErtsProcLocks c_p_locks, rp_locks;
+ Process *rp, *real_c_p;
+ Eterm msg, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ ASSERT(is_immed(tag));
+
+ real_c_p = erts_proc_shadow2real(c_p);
+ if (real_c_p != c_p)
+ c_p_locks = 0;
+ else
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+
+ ASSERT(real_c_p->common.id == c_p->common.id);
+
+ rp = erts_pid2proc_opt(real_c_p, c_p_locks,
+ to, 0,
+ ERTS_P2P_FLG_INC_REFC);
+
+ if (!rp)
+ return 0;
+
+ rp_locks = 0;
+ mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
+
+ msg = TUPLE2(hp, tag, c_p->common.id);
+ erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+
+ if (rp == real_c_p)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ erts_proc_dec_refc(rp);
+
+ return 1;
+}
+
+static int
+ms_wait(Process *c_p, Eterm etimeout, int busy)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsMonotonicTime time, timeout_time;
+ Sint64 ms;
+
+ if (!term_to_Sint64(etimeout, &ms))
+ return 0;
+
+ time = erts_get_monotonic_time(esdp);
+
+ if (ms < 0)
+ timeout_time = time;
+ else
+ timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);
+
+ while (time < timeout_time) {
+ if (busy)
+ erts_thr_yield();
+ else {
+ ErtsMonotonicTime timeout = timeout_time - time;
+
+#ifdef __WIN32__
+ Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
+#else
+ {
+ ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
+ struct timeval tv;
+
+ tv.tv_sec = (long) to / (1000*1000);
+ tv.tv_usec = (long) to % (1000*1000);
+
+ select(0, NULL, NULL, NULL, &tv);
+ }
+#endif
+ }
+
+ time = erts_get_monotonic_time(esdp);
+ }
+ return 1;
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+#ifdef ERTS_SMP
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+#else
+# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit)
+#endif
+
+/*
+ * The functions below are for testing the stack
+ * limit functionality. They are intentionally
+ * written body-recursively in order to prevent
+ * last-call optimization...
+ */
+
+UWord
+erts_check_stack_recursion_downwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_below_limit(&c, limit + 1024))
+ return (char *) erts_ptr_id(start_c) - (char *) erts_ptr_id(&c);
+ res = erts_check_stack_recursion_downwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+UWord
+erts_check_stack_recursion_upwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_above_limit(&c, limit - 1024))
+ return (char *) erts_ptr_id(&c) - (char *) erts_ptr_id(start_c);
+ res = erts_check_stack_recursion_upwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+int
+erts_is_above_stack_limit(char *ptr)
+{
+ return (char *) ptr > ERTS_STACK_LIMIT;
+}
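[The two recursion checkers above probe how close the C stack may grow to
the scheduler's stack limit; erts_check_below_limit/erts_check_above_limit
presumably just compare the address of a local against the limit. A
standalone analogue of the downward probe -- illustration only, not ERTS
code:

    /* opaque_id() lives in another translation unit so the optimizer
     * cannot see through it -- the same role erts_ptr_id() plays above. */
    void *opaque_id(void *p);

    unsigned long probe_down(char *start, char *limit)
    {
        char c;                        /* lives on this call's frame */
        unsigned long res;
        if (&c < limit + 1024)         /* within 1 KB of the limit */
            return (char *) opaque_id(start) - (char *) opaque_id(&c);
        res = probe_down(start, limit);
        opaque_id(&c);                 /* keep the frame observable: this
                                        * blocks last-call optimization */
        return res;
    }
]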
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 1ad13c32e3..bc83699951 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -38,6 +38,7 @@
#include "beam_bp.h"
#include "beam_catches.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
@@ -116,16 +117,16 @@ do { \
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
-#define GET_BIF_MODULE(p) ((Eterm) (((Export *) p)->code[0]))
-#define GET_BIF_FUNCTION(p) ((Eterm) (((Export *) p)->code[1]))
-#define GET_BIF_ARITY(p) ((Eterm) (((Export *) p)->code[2]))
-#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+#define GET_BIF_MODULE(p) (p->info.mfa.module)
+#define GET_BIF_FUNCTION(p) (p->info.mfa.function)
+#define GET_BIF_ARITY(p) (p->info.mfa.arity)
+#define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
/*
* We reuse some of the fields in the save area in the process structure.
- * This is safe to do, since this space is only activly used when
+ * This is safe to do, since this space is only actively used when
* the process is switched out.
*/
#define REDS_IN(p) ((p)->def_arg_reg[5])
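[The reworked GET_BIF_* macros reflect the corresponding reshaping of the
export entry: the loose code[] words become an embedded ErtsCodeInfo plus
a short beam[] array. Roughly -- a sketch inferred from ep->info, ep->beam
and the "I == ep->beam" test in this diff; export.h is authoritative:

    typedef struct export {
        void *addressv[ERTS_NUM_CODE_IX]; /* active code pointers */
        ErtsCodeInfo info;                /* was code[0..2] + bp slot */
        BeamInstr beam[2];                /* beam[0]: apply_bif et al.,
                                           * beam[1]: BIF C function,
                                           * was code[3] and code[4] */
    } Export;
]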
@@ -157,7 +158,9 @@ do { \
/*
* Register target (X or Y register).
*/
-#define REG_TARGET(Target) (*(((Target) & 1) ? &yb(Target-1) : &xb(Target)))
+
+#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb(Target-1) : &xb(Target))
+#define REG_TARGET(Target) (*REG_TARGET_PTR(Target))
/*
* Store a result into a register given a destination descriptor.
@@ -171,8 +174,6 @@ do { \
REG_TARGET(stb_reg) = (Result); \
} while (0)
-#define StoreSimpleDest(Src, Dest) Dest = (Src)
-
/*
* Store a result into a register and execute the next instruction.
* Dst points to the word with a destination descriptor, which MUST
@@ -214,11 +215,12 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
BeamInstr* em_call_nif;
+BeamInstr* em_call_bif_e;
/* NOTE These should be the only variables containing trace instructions.
** Sometimes tests are from the instruction value, and sometimes
-** for the refering variable (one of these), and rouge references
+** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
@@ -418,7 +420,7 @@ void** beam_ops;
#define TestHeapPutList(Need, Reg) \
do { \
TestHeap((Need), 1); \
- PutList(Reg, r(0), r(0), StoreSimpleDest); \
+ PutList(Reg, r(0), r(0)); \
CHECK_TERM(r(0)); \
} while (0)
@@ -545,11 +547,11 @@ void** beam_ops;
GetR((N)+1, Dst2); \
} while (0)
-#define PutList(H, T, Dst, Store) \
- do { \
- HTOP[0] = (H); HTOP[1] = (T); \
- Store(make_list(HTOP), Dst); \
- HTOP += 2; \
+#define PutList(H, T, Dst) \
+ do { \
+ HTOP[0] = (H); HTOP[1] = (T); \
+ Dst = make_list(HTOP); \
+ HTOP += 2; \
} while (0)
#define Swap(R1, R2) \
@@ -566,11 +568,7 @@ void** beam_ops;
R2 = Tmp = V; \
} while (0)
-#define Move(Src, Dst, Store) \
- do { \
- Eterm term = (Src); \
- Store(term, Dst); \
- } while (0)
+#define Move(Src, Dst) Dst = (Src)
#define Move2Par(S1, D1, S2, D2) \
do { \
@@ -633,21 +631,34 @@ void** beam_ops;
y[4] = xt4; \
} while (0)
+#define DispatchReturn \
+do { \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(*I); \
+ } \
+ else { \
+ c_p->current = NULL; \
+ c_p->arity = 1; \
+ goto context_switch3; \
+ } \
+} while (0)
+
#define MoveReturn(Src) \
x(0) = (Src); \
I = c_p->cp; \
ASSERT(VALID_INSTR(*c_p->cp)); \
c_p->cp = 0; \
CHECK_TERM(r(0)); \
- Goto(*I)
+ DispatchReturn
#define DeallocateReturn(Deallocate) \
do { \
int words_to_pop = (Deallocate); \
- SET_I((BeamInstr *) cp_val(*E)); \
+ SET_I((BeamInstr *) cp_val(*E)); \
E = ADD_BYTE_OFFSET(E, words_to_pop); \
CHECK_TERM(r(0)); \
- Goto(*I); \
+ DispatchReturn; \
} while (0)
#define MoveDeallocateReturn(Src, Deallocate) \
@@ -835,6 +846,15 @@ void** beam_ops;
} while (0)
#endif
+#define IsTaggedTuple(Src,Arityval,Tag,Fail) \
+ do { \
+ if (!(is_tuple(Src) && \
+ (tuple_val(Src))[0] == Arityval && \
+ (tuple_val(Src))[1] == Tag)) { \
+ Fail; \
+ } \
+ } while (0)
+
#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
#define IsBinary(Src, Fail) \
@@ -887,7 +907,7 @@ void** beam_ops;
Target = _uint_size * Unit; \
} while (0)
-#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Fail) \
do { \
ErlBinMatchBuffer *_mb; \
Eterm _result; Sint _size; \
@@ -898,12 +918,12 @@ void** beam_ops;
LIGHT_SWAPOUT; \
_result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
+ HEAP_SPACE_VERIFIED(0); \
if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
+ else { Dst = _result; } \
} while (0)
-#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Fail) \
do { \
ErlBinMatchBuffer *_mb; \
Eterm _result; \
@@ -912,12 +932,12 @@ void** beam_ops;
LIGHT_SWAPOUT; \
_result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
+ HEAP_SPACE_VERIFIED(0); \
if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
+ else { Dst = _result; } \
} while (0)
-#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Fail) \
do { \
ErlBinMatchBuffer *_mb; \
Eterm _result; Uint _size; \
@@ -927,27 +947,27 @@ void** beam_ops;
LIGHT_SWAPOUT; \
_result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
+ HEAP_SPACE_VERIFIED(0); \
if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
+ else { Dst = _result; } \
} while (0)
-#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; \
- TestHeap(ERL_SUB_BIN_SIZE, Live); \
- _mb = ms_matchbuffer(Ms); \
- if (((_mb->size - _mb->offset) % Unit) == 0) { \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_all_2(c_p, _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- ASSERT(is_value(_result)); \
- Store(_result, Dst); \
- } else { \
- HEAP_SPACE_VERIFIED(0); \
- Fail; } \
+#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ Eterm _result; \
+ TestHeap(ERL_SUB_BIN_SIZE, Live); \
+ _mb = ms_matchbuffer(Ms); \
+ if (((_mb->size - _mb->offset) % Unit) == 0) { \
+ LIGHT_SWAPOUT; \
+ _result = erts_bs_get_binary_all_2(c_p, _mb); \
+ LIGHT_SWAPIN; \
+ HEAP_SPACE_VERIFIED(0); \
+ ASSERT(is_value(_result)); \
+ Dst = _result; \
+ } else { \
+ HEAP_SPACE_VERIFIED(0); \
+ Fail; } \
} while (0)
#define BsSkipBits2(Ms, Bits, Unit, Fail) \
@@ -1042,10 +1062,11 @@ void** beam_ops;
* The following functions are called directly by process_main().
* Don't inline them.
*/
-static BifFunction translate_gc_bif(void* gcf) NOINLINE;
+static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE;
+static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
- Eterm* reg, BifFunction bf) NOINLINE;
-static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
+ Eterm* reg, ErtsCodeMFA* bif_mfa) NOINLINE;
+static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
Eterm* reg, Eterm func) NOINLINE;
static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
BeamInstr *I, Uint offs) NOINLINE;
@@ -1073,14 +1094,14 @@ static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
- BifFunction bf, Eterm args);
+ ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
void
init_emulator(void)
{
- process_main();
+ process_main(0, 0);
}
/*
@@ -1108,98 +1129,91 @@ init_emulator(void)
#ifdef USE_VM_CALL_PROBES
-#define DTRACE_LOCAL_CALL(p, m, f, a) \
+#define DTRACE_LOCAL_CALL(p, mfa) \
if (DTRACE_ENABLED(local_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(local_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_GLOBAL_CALL(p, m, f, a) \
+#define DTRACE_GLOBAL_CALL(p, mfa) \
if (DTRACE_ENABLED(global_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(global_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_RETURN(p, m, f, a) \
+#define DTRACE_RETURN(p, mfa) \
if (DTRACE_ENABLED(function_return)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(function_return, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(function_return, process_name, mfa_buf, depth); \
}
-#define DTRACE_BIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(bif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_entry, process_name, mfa); \
+#define DTRACE_BIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(bif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_entry, process_name, mfa_buf); \
}
-#define DTRACE_BIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(bif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_return, process_name, mfa); \
+#define DTRACE_BIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(bif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_return, process_name, mfa_buf); \
}
-#define DTRACE_NIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(nif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_entry, process_name, mfa); \
+#define DTRACE_NIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(nif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_entry, process_name, mfa_buf); \
}
-#define DTRACE_NIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(nif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_return, process_name, mfa); \
+#define DTRACE_NIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(nif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_return, process_name, mfa_buf); \
}
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
do { \
if (DTRACE_ENABLED(global_function_entry)) { \
BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
- DTRACE_GLOBAL_CALL((p), (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); \
+ DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
} \
} while(0)
#define DTRACE_RETURN_FROM_PC(p) \
do { \
- BeamInstr* fp; \
- if (DTRACE_ENABLED(function_return) && (fp = find_function_from_pc((p)->cp))) { \
- DTRACE_RETURN((p), (Eterm)fp[0], (Eterm)fp[1], (Uint)fp[2]); \
+ ErtsCodeMFA* cmfa; \
+ if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
+ DTRACE_RETURN((p), cmfa); \
} \
} while(0)
#else /* USE_VM_PROBES */
-#define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
-#define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
+#define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
+#define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
-#define DTRACE_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_RETURN(p, mfa) do {} while (0)
#define DTRACE_RETURN_FROM_PC(p) do {} while (0)
-#define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
+#define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
#endif /* USE_VM_PROBES */
#ifdef DEBUG
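[All of the probe macros above now hand dtrace_fun_decode() a single
ErtsCodeMFA pointer instead of three loose module/function/arity values.
Its new shape, as inferred from the call sites in this hunk and the
NULL process-name call further down, is presumably:

    void dtrace_fun_decode(Process *p, ErtsCodeMFA *mfa,
                           char *process_buf,  /* may be NULL */
                           char *mfa_buf);     /* "module:function/arity" */
]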
@@ -1227,7 +1241,7 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
-void process_main(void)
+void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
Process* c_p = NULL;
@@ -1239,7 +1253,7 @@ void process_main(void)
/* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
* in all other cases x0 is used.
*/
- register Eterm* reg REG_xregs = NULL;
+ register Eterm* reg REG_xregs = x_reg_array;
/*
* Top of heap (next free location); grows upwards.
@@ -1266,7 +1280,7 @@ void process_main(void)
* X registers and floating point registers are located in
* scheduler specific data.
*/
- register FloatDef *freg;
+ register FloatDef *freg = f_reg_array;
/*
* For keeping the negative old value of 'reds' when call saving is active.
@@ -1315,6 +1329,7 @@ void process_main(void)
goto do_schedule1;
do_schedule:
+ ASSERT(c_p->arity < 6);
ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
reds_used = REDS_IN(c_p) - FCALLS;
@@ -1326,8 +1341,8 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
- BeamInstr *inptr = find_function_from_pc(start_time_i);
- BeamInstr *outptr = find_function_from_pc(c_p->i);
+ ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
+ ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
}
}
@@ -1352,8 +1367,6 @@ void process_main(void)
start_time_i = c_p->i;
}
- reg = erts_proc_sched_data(c_p)->x_reg_array;
- freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
@@ -1403,10 +1416,9 @@ void process_main(void)
if (ERTS_PROC_IS_EXITING(c_p)) {
strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa,
NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
@@ -1587,7 +1599,7 @@ void process_main(void)
/* FALL THROUGH */
OpCase(i_call_only_f): {
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1599,7 +1611,7 @@ void process_main(void)
RESTORE_CP(E);
E = ADD_BYTE_OFFSET(E, Arg(1));
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1611,7 +1623,7 @@ void process_main(void)
OpCase(i_call_f): {
SET_CP(c_p, I+2);
SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
Dispatch();
}
@@ -1691,7 +1703,7 @@ void process_main(void)
c_p->cp = 0;
CHECK_TERM(r(0));
HEAP_SPACE_VERIFIED(0);
- Goto(*I);
+ DispatchReturn;
}
/*
@@ -1798,6 +1810,7 @@ void process_main(void)
c_p->catches--;
make_blank(yb(Arg(0)));
if (is_non_value(r(0))) {
+ c_p->fvalue = NIL;
if (x(1) == am_throw) {
r(0) = x(2);
} else {
@@ -1827,6 +1840,7 @@ void process_main(void)
c_p->catches--;
make_blank(yb(Arg(0)));
if (is_non_value(r(0))) {
+ c_p->fvalue = NIL;
r(0) = x(1);
x(1) = x(2);
x(2) = x(3);
@@ -1918,6 +1932,7 @@ void process_main(void)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
SWAPOUT;
c_p->flags &= ~F_DELAY_GC;
+ c_p->arity = 0;
goto do_schedule; /* Will be rescheduled for exit */
}
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
@@ -2160,6 +2175,7 @@ void process_main(void)
* in limbo forever.
*/
SWAPOUT;
+ c_p->arity = 0;
goto do_schedule;
}
#endif
@@ -2578,7 +2594,7 @@ do { \
OpCase(bif1_fbsd):
{
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm tmp_reg[1];
Eterm result;
@@ -2589,7 +2605,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2610,19 +2626,19 @@ do { \
OpCase(bif1_body_bsd):
{
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm tmp_reg[1];
Eterm result;
GetArg1(1, tmp_reg[0]);
- bf = (BifFunction) Arg(0);
+ bf = (ErtsBifFunc) Arg(0);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2635,7 +2651,7 @@ do { \
}
reg[0] = tmp_reg[0];
SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2671,7 +2687,7 @@ do { \
Goto(*I);
}
x(0) = x(live);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2716,7 +2732,7 @@ do { \
live--;
x(0) = x(live);
x(1) = x(live+1);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2762,7 +2778,7 @@ do { \
x(0) = x(live);
x(1) = x(live+1);
x(2) = x(live+2);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2772,17 +2788,17 @@ do { \
OpCase(i_bif2_fbssd):
{
Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm result;
GetArg2(2, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(1);
+ bf = (ErtsBifFunc) Arg(1);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2803,15 +2819,15 @@ do { \
OpCase(i_bif2_body_bssd):
{
Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
+ ErtsBifFunc bf;
Eterm result;
GetArg2(1, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(0);
+ bf = (ErtsBifFunc) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
+ result = (*bf)(c_p, tmp_reg, I);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -2824,7 +2840,7 @@ do { \
reg[0] = tmp_reg[0];
reg[1] = tmp_reg[1];
SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
goto post_error_handling;
}
@@ -2834,29 +2850,31 @@ do { \
*/
OpCase(call_bif_e):
{
- Eterm (*bf)(Process*, Eterm*, BeamInstr*);
+ ErtsBifFunc bf;
Eterm result;
BeamInstr *next;
ErlHeapFragment *live_hf_end;
+ Export *export = (Export*)Arg(0);
if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
/* If we have run out of reductions, we do a context
switch before calling the bif */
- c_p->arity = ((Export *)Arg(0))->code[2];
- c_p->current = ((Export *)Arg(0))->code;
+ c_p->arity = GET_BIF_ARITY(export);
+ c_p->current = &export->info.mfa;
goto context_switch3;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(Arg(0)), GET_BIF_ADDRESS(Arg(0)));
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(
+ GET_BIF_MODULE(export), GET_BIF_ADDRESS(export));
- bf = GET_BIF_ADDRESS(Arg(0));
+ bf = GET_BIF_ADDRESS(export);
PRE_BIF_SWAPOUT(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS - 1;
if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
+ save_calls(c_p, export);
}
PreFetch(1, next);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -2870,7 +2888,7 @@ do { \
ERTS_HOLE_CHECK(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (ERTS_IS_GC_DESIRED(c_p)) {
- Uint arity = ((Export *)Arg(0))->code[2];
+ Uint arity = GET_BIF_ARITY(export);
result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity);
E = c_p->stop;
}
@@ -2899,7 +2917,7 @@ do { \
* Error handling. SWAPOUT is not needed because it was done above.
*/
ASSERT(c_p->stop == E);
- I = handle_error(c_p, I, reg, bf);
+ I = handle_error(c_p, I, reg, &export->info.mfa);
goto post_error_handling;
}
@@ -2985,7 +3003,7 @@ do { \
}
/*
- * An error occured in an arithmetic operation or test that could
+ * An error occurred in an arithmetic operation or test that could
* appear either in a head or in a body.
* In a head, execution should continue at failure address in Arg(0).
* In a body, Arg(0) == 0 and an exception should be raised.
@@ -3024,10 +3042,12 @@ do { \
GetArg2(2, Op1, Op2);
if (is_both_small(Op1, Op2)) {
/*
- * We could extract the tag from one argument, but a tag extraction
- * could mean a shift. Therefore, play it safe here.
+ * TAG ^ TAG == 0.
+ *
+ * Therefore, we perform the XOR operation on the tagged values,
+ * and OR in the tag bits.
*/
- Eterm result = make_small(signed_val(Op1) ^ signed_val(Op2));
+ Eterm result = (Op1 ^ Op2) | make_small(0);
StoreBifResult(4, result);
}
DO_OUTLINED_ARITH_2(bxor, Op1, Op2);
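[The rewritten fast path works because both small operands carry identical
tag bits: XOR cancels the tags, and OR-ing in make_small(0) restores them,
avoiding the untag shifts. A self-contained demonstration with an assumed
4-bit immediate tag -- the real encoding is defined in erl_term.h:

    #include <assert.h>
    typedef unsigned long Eterm;

    #define SMALL_TAG      0xFul                       /* assumed value */
    #define make_small(v)  (((Eterm)(v) << 4) | SMALL_TAG)
    #define signed_val(t)  ((long)(t) >> 4)

    int main(void)
    {
        Eterm a = make_small(12), b = make_small(10);
        /* tag ^ tag == 0, so the tag bits vanish; OR restores them: */
        Eterm fast = (a ^ b) | make_small(0);
        Eterm slow = make_small(signed_val(a) ^ signed_val(b));
        assert(fast == slow);      /* both encode make_small(12 ^ 10) */
        return 0;
    }
]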
@@ -3203,7 +3223,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3218,7 +3238,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3231,7 +3251,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3246,7 +3266,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3262,7 +3282,7 @@ do { \
SET_I(next);
Dispatch();
}
- I = handle_error(c_p, I, reg, apply_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
goto post_error_handling;
}
@@ -3363,14 +3383,15 @@ do { \
* called from I[-3], I[-2], and I[-1] respectively.
*/
context_switch_fun:
- c_p->arity = I[-1] + 1;
+ /* Add one for the environment of the fun */
+ c_p->arity = erts_code_to_codemfa(I)->arity + 1;
goto context_switch2;
context_switch:
- c_p->arity = I[-1];
+ c_p->arity = erts_code_to_codemfa(I)->arity;
- context_switch2: /* Entry for fun calls. */
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ context_switch2: /* Entry for fun calls. */
+ c_p->current = erts_code_to_codemfa(I);
context_switch3:
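The context-switch entries above show the pattern used throughout this patch: raw I[-1]/I[-3] offset peeking is replaced by erts_code_to_codemfa()/erts_code_to_codeinfo(). Note also that context_switch_fun adds one to the arity because the fun term itself, which carries the environment, is live in an extra register (consistent with apply_fun() later in this file placing the fun in reg[arity]). A self-contained sketch of what these helpers imply -- the authoritative definitions live in code_ix.h, and the union members are inferred from the ci->u.ncallee and ci->u.hcc uses below:

#include <stdint.h>

typedef uintptr_t UWord;
typedef UWord     BeamInstr;
typedef UWord     Eterm;
typedef UWord     Uint;

typedef struct {
    Eterm module;
    Eterm function;
    Uint  arity;
} ErtsCodeMFA;

typedef struct {
    BeamInstr op;                    /* OpCode(i_func_info_IaaI) */
    union {
        void (*ncallee)(void);       /* HiPE native callee */
        struct hipe_call_count *hcc; /* HiPE call counter   */
    } u;
    ErtsCodeMFA mfa;
} ErtsCodeInfo;

/* The info record sits immediately before a function's first
 * instruction, so pointer arithmetic recovers it from I. */
static ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
{
    return ((ErtsCodeInfo *)I) - 1;
}

static ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
{
    return &erts_code_to_codeinfo(I)->mfa;
}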
@@ -3511,7 +3532,8 @@ do { \
* code[4]: Not used
*/
HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_undefined_function);
+ I = call_error_handler(c_p, erts_code_to_codemfa(I),
+ reg, am_undefined_function);
HEAVY_SWAPIN;
if (I) {
Goto(*I);
@@ -3548,17 +3570,25 @@ do { \
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
* I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
*/
BifFunction vbf;
ErlHeapFragment *live_hf_end;
+ ErtsCodeMFA *codemfa;
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
+ codemfa = erts_code_to_codemfa(I);
+
+ c_p->current = codemfa; /* current and vbf set to please handle_error */
+
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+
HEAVY_SWAPOUT;
+
PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
+ bif_nif_arity = codemfa->arity;
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -3585,19 +3615,19 @@ do { \
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_NIF_RETURN(c_p, codemfa);
goto apply_bif_or_nif_epilogue;
OpCase(apply_bif):
/*
- * At this point, I points to the code[3] in the export entry for
+ * At this point, I points to the code[0] in the export entry for
* the BIF:
*
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&apply_bif
- * code[4]: Function pointer to BIF function
+ * code[-3]: Module
+ * code[-2]: Function
+ * code[-1]: Arity
+ * code[0]: &&apply_bif
+ * code[1]: Function pointer to BIF function
*/
if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
@@ -3606,26 +3636,30 @@ do { \
goto context_switch;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X((Eterm)I[-3], (BifFunction)Arg(0));
+ codemfa = erts_code_to_codemfa(I);
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0));
- c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
+
+ /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->current = codemfa;
c_p->i = I; /* In case we apply check_process_code/2. */
c_p->arity = 0; /* To allow garbage collection on ourselves
* (check_process_code/2).
*/
- DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_BIF_ENTRY(c_p, codemfa);
SWAPOUT;
ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
c_p->fcalls = FCALLS - 1;
vbf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
+ bif_nif_arity = codemfa->arity;
ASSERT(bif_nif_arity <= 4);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
{
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
+ ErtsBifFunc bf = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
@@ -3642,7 +3676,7 @@ do { \
if (ERTS_MSACC_IS_ENABLED_CACHED_X())
ERTS_MSACC_UPDATE_CACHE_X();
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ DTRACE_BIF_RETURN(c_p, codemfa);
apply_bif_or_nif_epilogue:
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
@@ -3669,7 +3703,7 @@ do { \
}
Dispatch();
}
- I = handle_error(c_p, c_p->cp, reg, vbf);
+ I = handle_error(c_p, c_p->cp, reg, c_p->current);
goto post_error_handling;
}
}
@@ -3709,8 +3743,9 @@ do { \
goto find_func_info;
OpCase(i_func_info_IaaI): {
+ ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
c_p->freason = EXC_FUNCTION_CLAUSE;
- c_p->current = I + 2;
+ c_p->current = &ci->mfa;
goto handle_error;
}
@@ -3850,7 +3885,6 @@ do { \
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(num_bytes);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -3945,7 +3979,6 @@ do { \
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(BsOp1);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -4701,11 +4734,11 @@ do { \
*/
OpCase(return_trace): {
- BeamInstr* code = (BeamInstr *) (UWord) E[0];
+ ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]);
SWAPOUT; /* Needed for shared heap */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
+ erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
c_p->cp = NULL;
@@ -4716,9 +4749,8 @@ do { \
OpCase(i_generic_breakpoint): {
BeamInstr real_I;
- ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
HEAVY_SWAPOUT;
- real_I = erts_generic_breakpoint(c_p, I, reg);
+ real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg);
HEAVY_SWAPIN;
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
@@ -4727,7 +4759,7 @@ do { \
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_return(c_p, pc);
+ erts_trace_time_return(c_p, erts_code_to_codeinfo(pc));
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4912,16 +4944,18 @@ do { \
* I[ 0]: &&lb_hipe_trap_call
* ... remainder of original BEAM code
*/
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.u.ncallee = ci->u.ncallee;
++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8));
+ HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8));
}
OpCase(hipe_trap_call_closure): {
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.u.ncallee = ci->u.ncallee;
++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8));
+ HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8));
}
OpCase(hipe_trap_return): {
HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN);
@@ -4990,8 +5024,9 @@ do { \
* I[ 0]: &&lb_hipe_call_count
* ... remainder of original BEAM code
*/
- struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
+ ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
+ struct hipe_call_count *hcc = ci->u.hcc;
+ ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
ASSERT(hcc != NULL);
ASSERT(VALID_INSTR(hcc->opcode));
++(hcc->count);
@@ -5021,7 +5056,7 @@ do { \
goto do_schedule;
} else {
HEAVY_SWAPIN;
- I = handle_error(c_p, I, reg, hibernate_3);
+ I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa);
goto post_error_handling;
}
}
@@ -5040,7 +5075,7 @@ do { \
} else {
TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0);
r(0) = make_big(HTOP);
-#if defined(ARCH_32) || HALFWORD_HEAP
+#if defined(ARCH_32)
if (ts >= (((Uint64) 1) << 32)) {
*HTOP = make_pos_bignum_header(2);
BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
@@ -5060,7 +5095,7 @@ do { \
OpCase(i_debug_breakpoint): {
HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_breakpoint);
+ I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint);
HEAVY_SWAPIN;
if (I) {
Goto(*I);
@@ -5115,6 +5150,7 @@ do { \
em_call_error_handler = OpCode(call_error_handler);
em_apply_bif = OpCode(apply_bif);
em_call_nif = OpCode(call_nif);
+ em_call_bif_e = OpCode(call_bif_e);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
beam_apply[1] = (BeamInstr) OpCode(normal_exit);
@@ -5133,10 +5169,10 @@ do { \
bif_table[i].name,
bif_table[i].arity);
bif_export[i] = ep;
- ep->code[3] = (BeamInstr) OpCode(apply_bif);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ep->beam[0] = (BeamInstr) OpCode(apply_bif);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
/* XXX: set func info for bifs */
- ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
}
return;
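The BIF setup loop above reflects the reshaped export entry: the old five-slot code[] array is split into an ErtsCodeInfo header plus a short beam[] trampoline. A sketch of the layout implied by the ep->info and ep->beam accesses in this patch, reusing the ErtsCodeInfo sketch above (export.h is authoritative; the addressv dimension is an assumption):

typedef struct export {
    void *addressv[ERTS_NUM_CODE_IX]; /* active code pointer per code index */
    ErtsCodeInfo info;                /* was code[0..2]: op + {M,F,A}       */
    BeamInstr beam[2];                /* was code[3..4]:
                                       *   beam[0]: &&apply_bif, breakpoint, ...
                                       *   beam[1]: BIF C function / saved address */
} Export;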
@@ -5216,8 +5252,8 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
goto do_dirty_schedule;
context_switch:
- c_p->arity = I[-1];
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
+ c_p->arity = c_p->current->arity;
{
int reds_used;
@@ -5301,10 +5337,25 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_MSACC_UPDATE_CACHE_X();
- reg = esdp->x_reg_array;
- {
+ /*
+ * Set fcalls even though we ignore it, so we don't
+ * confuse code accessing it...
+ */
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ c_p->fcalls = 0;
+ else
+ c_p->fcalls = CONTEXT_REDS;
+
+ if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
+ erts_execute_dirty_system_task(c_p);
+ goto do_dirty_schedule;
+ }
+ else {
+ ErtsCodeMFA *codemfa;
Eterm* argp;
- int i;
+ int i, exiting;
+
+ reg = esdp->x_reg_array;
argp = c_p->arg_reg;
for (i = c_p->arity - 1; i >= 0; i--) {
@@ -5320,17 +5371,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
I = c_p->i;
- ASSERT(em_call_nif == (BeamInstr *) *I);
-
- /*
- * Set fcalls even though we ignore it, so we don't
- * confuse code accessing it...
- */
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
- c_p->fcalls = 0;
- else
- c_p->fcalls = CONTEXT_REDS;
-
SWAPIN;
#ifdef USE_VM_PROBES
@@ -5342,11 +5382,9 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
if (ERTS_PROC_IS_EXITING(c_p)) {
strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
- NULL, fun_buf);
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
"<unknown/%p>", *I);
@@ -5356,107 +5394,81 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
DTRACE2(process_scheduled, process_buf, fun_buf);
}
#endif
- }
-
- {
-#ifdef DEBUG
- Eterm result;
-#endif
- Eterm arity;
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- * I[3]: Function pointer to dirty NIF
- */
- BifFunction vbf;
-
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
-
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- arity = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
- ASSERT(!c_p->scheduler_data);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- erts_pre_dirty_nif(esdp, &env, c_p,
- (struct erl_module_nif*)I[2]);
+ codemfa = erts_code_to_codemfa(I);
-#ifdef DEBUG
- result =
-#else
- (void)
-#endif
- (*fp)(&env, arity, reg);
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+ c_p->current = codemfa;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_post_dirty_nif(&env);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if (em_apply_bif == (BeamInstr *) *I) {
+ exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
+ }
+ else {
+ ASSERT(em_call_nif == (BeamInstr *) *I);
+ exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
+ }
- ASSERT(!is_value(result));
- ASSERT(c_p->freason == TRAP);
- ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (env.exiting)
- goto do_dirty_schedule;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- }
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (exiting)
+ goto do_dirty_schedule;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- ERTS_HOLE_CHECK(c_p);
- SWAPIN;
- I = c_p->i;
- goto context_switch;
- }
+ DTRACE_NIF_RETURN(c_p, codemfa);
+ ERTS_HOLE_CHECK(c_p);
+ SWAPIN;
+ I = c_p->i;
+ goto context_switch;
}
#endif /* ERTS_DIRTY_SCHEDULERS */
}
-static BifFunction
-translate_gc_bif(void* gcf)
+static ErtsCodeMFA *
+gcbif2mfa(void* gcf)
{
- if (gcf == erts_gc_length_1) {
- return length_1;
- } else if (gcf == erts_gc_size_1) {
- return size_1;
- } else if (gcf == erts_gc_bit_size_1) {
- return bit_size_1;
- } else if (gcf == erts_gc_byte_size_1) {
- return byte_size_1;
- } else if (gcf == erts_gc_map_size_1) {
- return map_size_1;
- } else if (gcf == erts_gc_abs_1) {
- return abs_1;
- } else if (gcf == erts_gc_float_1) {
- return float_1;
- } else if (gcf == erts_gc_round_1) {
- return round_1;
- } else if (gcf == erts_gc_trunc_1) {
- return round_1;
- } else if (gcf == erts_gc_binary_part_2) {
- return binary_part_2;
- } else if (gcf == erts_gc_binary_part_3) {
- return binary_part_3;
- } else {
- erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ int i;
+ for (i = 0; erts_gc_bifs[i].bif; i++) {
+ if (erts_gc_bifs[i].gc_bif == gcf)
+ return &bif_export[erts_gc_bifs[i].exp_ix]->info.mfa;
}
+ erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ return NULL;
+}
+
+static ErtsCodeMFA *
+ubif2mfa(void* uf)
+{
+ int i;
+ for (i = 0; erts_u_bifs[i].bif; i++) {
+ if (erts_u_bifs[i].bif == uf)
+ return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
+ }
+ erts_exit(ERTS_ERROR_EXIT, "bad u bif");
+ return NULL;
}
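Both replacement helpers walk shared tables instead of the old hand-written ladder, which the loader duplicated and which had drifted (note the removed branch mapping erts_gc_trunc_1 back to round_1). A plausible entry shape, inferred from the .bif/.gc_bif/.exp_ix uses here and in translate_gc_bif() in beam_load.c below; the actual definition is an assumption:

typedef struct {
    BifFunction bif;     /* guard BIF as called from Erlang, e.g. length/1 */
    BifFunction gc_bif;  /* GC-capable twin, e.g. erts_gc_length_1         */
    int         exp_ix;  /* index into bif_export[], e.g. BIF_length_1     */
} ErtsGcBif;

/* Assumed convention, matching the lookup loops above: a zero .bif
 * entry terminates the table. */
extern const ErtsGcBif erts_gc_bifs[];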
/*
@@ -5515,15 +5527,27 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
*/
static BeamInstr*
-handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
+handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
Eterm* hp;
Eterm Value = c_p->fvalue;
Eterm Args = am_true;
- c_p->i = pc; /* In case we call erts_exit(). */
ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
+ if (c_p->freason & EXF_RESTORE_NIF)
+ erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);
+
+#ifdef DEBUG
+ if (bif_mfa) {
+ /* Verify that bif_mfa does not point into our nif export */
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
+ }
+#endif
+
+ c_p->i = pc; /* In case we call erts_exit(). */
+
/*
* Check if we have an arglist for the top level call. If so, this
* is encoded in Value, so we have to dig out the real Value as well
@@ -5546,7 +5570,7 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
* more modular.
*/
if (c_p->freason & EXF_SAVETRACE) {
- save_stacktrace(c_p, pc, reg, bf, Args);
+ save_stacktrace(c_p, pc, reg, bif_mfa, Args);
}
/*
@@ -5616,7 +5640,8 @@ next_catch(Process* c_p, Eterm *reg) {
/* Can not follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
@@ -5644,7 +5669,8 @@ next_catch(Process* c_p, Eterm *reg) {
if (is_catch(*ptr) && active_catches) goto found_catch;
}
if (cp_val(*prev) == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
}
@@ -5819,11 +5845,12 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
*/
static void
-save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
- Eterm args) {
+save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
+ ErtsCodeMFA *bif_mfa, Eterm args) {
struct StackTrace* s;
int sz;
int depth = erts_backtrace_depth; /* max depth (never negative) */
+
if (depth > 0) {
/* There will always be a current function */
depth --;
@@ -5840,33 +5867,29 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
/*
* If the failure was in a BIF other than 'error/1', 'error/2',
- * 'exit/1' or 'throw/1', find the bif-table index and save the
- * argument registers by consing up an arglist.
+ * 'exit/1' or 'throw/1', save the BIF's MFA and the argument
+ * registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
- && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
- && bf != wrap_exit_1 && bf != wrap_throw_1) {
- int i;
- int a = 0;
- for (i = 0; i < BIF_SIZE; i++) {
- if (bf == bif_table[i].f || bf == bif_table[i].traced) {
- Export *ep = bif_export[i];
- s->current = ep->code;
- a = bif_table[i].arity;
+ if (bif_mfa) {
+ if (bif_mfa->module == am_erlang) {
+ switch (bif_mfa->function) {
+ case am_error:
+ if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
+ goto non_bif_stacktrace;
+ break;
+ case am_exit:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ case am_throw:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ default:
break;
}
}
- if (i >= BIF_SIZE) {
- /*
- * The Bif does not really exist (no BIF entry). It is a
- * TRAP and traps are called through apply_bif, which also
- * sets c_p->current (luckily).
- * OR it is a NIF called by call_nif where current is also set.
- */
- ASSERT(c_p->current);
- s->current = c_p->current;
- a = s->current[2];
- }
+ s->current = bif_mfa;
/* Save first stack entry */
ASSERT(pc);
if (depth > 0) {
@@ -5879,8 +5902,11 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
depth--;
}
s->pc = NULL;
- args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
} else {
+
+ non_bif_stacktrace:
+
s->current = c_p->current;
/*
* For a function_clause error, the arguments are in the beam
@@ -5890,7 +5916,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
(GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
int a;
ASSERT(s->current);
- a = s->current[2];
+ a = s->current->arity;
args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
/* Save first stack entry */
ASSERT(c_p->cp);
@@ -6079,7 +6105,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_lookup_function_info(&fi, s->pc, 1);
} else if (GET_EXC_INDEX(s->freason) ==
GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
- erts_lookup_function_info(&fi, s->current, 1);
+ erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
} else {
erts_set_current_function(&fi, s->current);
}
@@ -6090,9 +6116,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
if (depth <= 0)
- erts_set_current_function(&fi, c_p->u.initial);
+ erts_set_current_function(&fi, &c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6104,10 +6130,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
*/
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.current ? fi.needed + 2 : 0;
+ heap_size = fi.mfa ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -6123,7 +6149,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- if (fi.current) {
+ if (fi.mfa) {
hp = erts_build_mfa_item(&fi, hp, args, &mfa);
res = CONS(hp, mfa, res);
}
@@ -6133,7 +6159,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
}
static BeamInstr*
-call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
+call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
Eterm* hp;
Export* ep;
@@ -6142,13 +6168,14 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
Uint sz;
int i;
+ DBG_TRACE_MFA_P(mfa, "call_error_handler");
/*
* Search for the error_handler module.
*/
ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
erts_active_code_ix());
if (ep == NULL) { /* No error handler */
- p->current = fi;
+ p->current = mfa;
p->freason = EXC_UNDEF;
return 0;
}
@@ -6157,7 +6184,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
* Create a list with all arguments in the x registers.
*/
- arity = fi[2];
+ arity = mfa->arity;
sz = 2 * arity;
if (HeapWordsLeft(p) < sz) {
erts_garbage_collect(p, sz, reg, arity);
@@ -6173,8 +6200,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Set up registers for call to error_handler:<func>/3.
*/
- reg[0] = fi[0];
- reg[1] = fi[1];
+ reg[0] = mfa->module;
+ reg[1] = mfa->function;
reg[2] = args;
return ep->addressv[erts_active_code_ix()];
}
@@ -6234,7 +6261,7 @@ apply_bif_error_adjustment(Process *p, Export *ep,
* and apply_last_IP.
*/
if (I
- && ep->code[3] == (BeamInstr) em_apply_bif
+ && ep->beam[0] == (BeamInstr) em_apply_bif
&& (ep == bif_export[BIF_error_1]
|| ep == bif_export[BIF_error_2]
|| ep == bif_export[BIF_exit_1]
@@ -6251,7 +6278,7 @@ apply_bif_error_adjustment(Process *p, Export *ep,
* stackframe correct. Without the following adjustment,
* 'p->cp' will point into the function that called
* current function when handling the error. We add a
- * dummy stackframe in order to achive this.
+ * dummy stackframe in order to achieve this.
*
* Note that these BIFs unconditionally will cause
* an exception to be raised. That is, our modifications
@@ -6558,11 +6585,11 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_hibernate)) {
+ ErtsCodeMFA cmfa = { module, function, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(c_p, module, function, arity,
- process_name, mfa);
- DTRACE2(process_hibernate, process_name, mfa);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_hibernate, process_name, mfa_buf);
}
#endif
/*
@@ -6595,7 +6622,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifndef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p)) {
/*
- * See comment in the begining of the function...
+ * See comment in the beginning of the function...
*
* This second test is needed since gc might be traced.
*/
@@ -6609,7 +6636,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- c_p->current = bif_export[BIF_hibernate_3]->code;
+ c_p->current = &bif_export[BIF_hibernate_3]->info.mfa;
c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
return 1;
}
@@ -6632,21 +6659,15 @@ call_fun(Process* p, /* Current process. */
if (is_fun_header(hdr)) {
ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
- ErlFunEntry* fe;
- BeamInstr* code_ptr;
+ ErlFunEntry* fe = funp->fe;
+ BeamInstr* code_ptr = fe->address;
Eterm* var_ptr;
- int actual_arity;
- unsigned num_free;
-
- fe = funp->fe;
- num_free = funp->num_free;
- code_ptr = fe->address;
- actual_arity = (int) code_ptr[-1];
+ unsigned num_free = funp->num_free;
+ ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
+ int actual_arity = mfa->arity;
if (actual_arity == arity+num_free) {
- DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
- (Eterm)code_ptr[-2],
- code_ptr[-1]);
+ DTRACE_LOCAL_CALL(p, mfa);
if (num_free == 0) {
return code_ptr;
} else {
@@ -6751,10 +6772,10 @@ call_fun(Process* p, /* Current process. */
int actual_arity;
ep = *((Export **) (export_val(fun) + 1));
- actual_arity = (int) ep->code[2];
+ actual_arity = ep->info.mfa.arity;
if (arity == actual_arity) {
- DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
+ DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
return ep->addressv[erts_active_code_ix()];
} else {
/*
@@ -6808,7 +6829,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
if (is_not_nil(tmp)) { /* Must be well-formed list */
- p->freason = EXC_UNDEF;
+ p->freason = EXC_BADARG;
return NULL;
}
reg[arity] = fun;
@@ -6842,9 +6863,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
funp->fe = fe;
funp->num_free = num_free;
funp->creator = p->common.id;
-#ifdef HIPE
- funp->native_address = fe->native_address;
-#endif
funp->arity = (int)fe->address[-1] - num_free;
for (i = 0; i < num_free; i++) {
*hp++ = reg[i];
@@ -7375,15 +7393,15 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
return 1;
}
- e.code[0] = Mod;
- e.code[1] = Name;
- e.code[2] = arity;
+ e.info.mfa.module = Mod;
+ e.info.mfa.function = Name;
+ e.info.mfa.arity = arity;
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->addressv[erts_active_code_ix()] == ep->code+3
- && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->beam
+ && (ep->beam[0] == (BeamInstr) em_apply_bif);
}
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 2bad3ab4c5..23258dbe9c 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -39,11 +39,13 @@
#include "erl_binary.h"
#include "erl_zlib.h"
#include "erl_map.h"
+#include "erl_process_dict.h"
#ifdef HIPE
#include "hipe_bif0.h"
#include "hipe_mode_switch.h"
#include "hipe_arch.h"
+#include "hipe_load.h"
#endif
ErlDrvBinary* erts_gzinflate_buffer(char*, int);
@@ -155,13 +157,15 @@ typedef struct {
#define STR_CHUNK 2
#define IMP_CHUNK 3
#define EXP_CHUNK 4
-#define NUM_MANDATORY 5
+#define MIN_MANDATORY 1
+#define MAX_MANDATORY 5
#define LAMBDA_CHUNK 5
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
#define LINE_CHUNK 9
+#define UTF8_ATOM_CHUNK 10
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -171,9 +175,13 @@ typedef struct {
static Uint chunk_types[] = {
/*
- * Mandatory chunk types -- these MUST be present.
+ * Atom chunk types -- Atom or AtU8 MUST be present.
*/
MakeIffId('A', 't', 'o', 'm'), /* 0 */
+
+ /*
+ * Mandatory chunk types -- these MUST be present.
+ */
MakeIffId('C', 'o', 'd', 'e'), /* 1 */
MakeIffId('S', 't', 'r', 'T'), /* 2 */
MakeIffId('I', 'm', 'p', 'T'), /* 3 */
@@ -187,6 +195,7 @@ static Uint chunk_types[] = {
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
MakeIffId('L', 'i', 'n', 'e'), /* 9 */
+ MakeIffId('A', 't', 'U', '8'), /* 10 */
};
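The new 'AtU8' entry extends the IFF id table for UTF-8 encoded atom chunks. For reference, MakeIffId packs the four chunk characters into a single word -- a sketch of the assumed definition (beam_load.c defines the real macro):

#define MakeIffId(a, b, c, d) \
    (((Uint)(a) << 24) | ((Uint)(b) << 16) | ((Uint)(c) << 8) | (Uint)(d))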
/*
@@ -479,15 +488,18 @@ typedef struct LoaderState {
static void free_loader_state(Binary* magic);
static ErlHeapFragment* new_literal_fragment(Uint size);
static void free_literal_fragment(ErlHeapFragment*);
-static void loader_state_dtor(Binary* magic);
+static int loader_state_dtor(Binary* magic);
+#ifdef HIPE
static Eterm stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code, Uint size);
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code);
+#endif
static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
- Uint num_types, Uint num_mandatory);
+ Uint num_types);
static int verify_chunks(LoaderState* stp);
-static int load_atom_table(LoaderState* stp);
+static int load_atom_table(LoaderState* stp, ErtsAtomEncoding enc);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int is_bif(Eterm mod, Eterm func, unsigned arity);
@@ -537,8 +549,6 @@ static Eterm compilation_info_for_module(Process* p, BeamCodeHeader*);
static Eterm md5_of_module(Process* p, BeamCodeHeader*);
static Eterm has_native(BeamCodeHeader*);
static Eterm native_addresses(Process* p, BeamCodeHeader*);
-int patch_funentries(Eterm Patchlist);
-int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
static int must_swap_floats;
@@ -626,7 +636,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
CHKALLOC();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
if (!init_iff_file(stp, code, unloaded_size) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto load_error;
}
@@ -671,9 +681,16 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto load_error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto load_error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto load_error;
+ }
}
/*
@@ -806,21 +823,21 @@ erts_finish_loading(Binary* magic, Process* c_p,
num_exps = export_list_size(code_ix);
for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep == NULL || ep->code[0] != module) {
+ if (ep == NULL || ep->info.mfa.module != module) {
continue;
}
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (ep->beam[0] == (BeamInstr) em_apply_bif) {
continue;
- } else if (ep->code[3] ==
+ } else if (ep->beam[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(mod_tab_p->curr.num_traced_exports > 0);
- erts_clear_export_break(mod_tab_p, ep->code+3);
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
- ep->code[4] = 0;
+ erts_clear_export_break(mod_tab_p, &ep->info);
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
+ ep->beam[1] = 0;
}
- ASSERT(ep->code[4] == 0);
+ ASSERT(ep->beam[1] == 0);
}
}
ASSERT(mod_tab_p->curr.num_breakpoints == 0);
@@ -842,9 +859,7 @@ erts_finish_loading(Binary* magic, Process* c_p,
erts_alloc(ERTS_ALC_T_PREPARED_CODE,
sizeof(struct erl_module_instance));
inst_p = mod_tab_p->on_load;
- inst_p->nif = 0;
- inst_p->num_breakpoints = 0;
- inst_p->num_traced_exports = 0;
+ erts_module_instance_init(inst_p);
}
inst_p->code_hdr = stp->hdr;
@@ -898,7 +913,7 @@ erts_alloc_loader_state(void)
magic = erts_create_magic_binary(sizeof(LoaderState),
loader_state_dtor);
- erts_refc_inc(&magic->refc, 1);
+ erts_refc_inc(&magic->intern.refc, 1);
stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
@@ -943,6 +958,13 @@ erts_module_for_prepared_code(Binary* magic)
LoaderState* stp;
if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+#ifdef HIPE
+ HipeLoaderState *hipe_stp;
+ if ((hipe_stp = hipe_get_loader_state(magic))
+ && hipe_stp->text_segment != 0) {
+ return hipe_stp->module;
+ }
+#endif
return NIL;
}
stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -974,9 +996,7 @@ static void
free_loader_state(Binary* magic)
{
loader_state_dtor(magic);
- if (erts_refc_dectest(&magic->refc, 0) == 0) {
- erts_bin_free(magic);
- }
+ erts_bin_release(magic);
}
static ErlHeapFragment* new_literal_fragment(Uint size)
@@ -1004,7 +1024,7 @@ static void free_literal_fragment(ErlHeapFragment* bp)
/*
* This destructor function can safely be called multiple times.
*/
-static void
+static int
loader_state_dtor(Binary* magic)
{
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -1089,12 +1109,15 @@ loader_state_dtor(Binary* magic)
*/
ASSERT(stp->genop_blocks == 0);
+ return 1;
}
+#ifdef HIPE
static Eterm
stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code_hdr, Uint size)
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code)
{
Module* modp;
Eterm retval;
@@ -1117,6 +1140,9 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modp->curr.code_hdr = code_hdr;
modp->curr.code_length = size;
modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "insert_new_code "
+ "first_hipe_ref = %p", hipe_code->first_hipe_ref);
+ modp->curr.hipe_code = hipe_code;
/*
* Update ranges (used for finding a function from a PC value).
@@ -1125,6 +1151,7 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_update_ranges((BeamInstr*)modp->curr.code_hdr, size);
return NIL;
}
+#endif
static int
init_iff_file(LoaderState* stp, byte* code, Uint size)
@@ -1198,7 +1225,7 @@ init_iff_file(LoaderState* stp, byte* code, Uint size)
* Scan the IFF file. The header should have been verified by init_iff_file().
*/
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types)
{
Uint count;
Uint id;
@@ -1277,7 +1304,16 @@ verify_chunks(LoaderState* stp)
MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < NUM_MANDATORY; i++) {
+
+ if (stp->chunks[UTF8_ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[UTF8_ATOM_CHUNK].start, stp->chunks[UTF8_ATOM_CHUNK].size);
+ } else if (stp->chunks[ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[ATOM_CHUNK].start, stp->chunks[ATOM_CHUNK].size);
+ } else {
+ LoadError0(stp, "mandatory chunk of type 'Atom' or 'AtU8' not found\n");
+ }
+
+ for (i = MIN_MANDATORY; i < MAX_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -1338,7 +1374,7 @@ verify_chunks(LoaderState* stp)
}
static int
-load_atom_table(LoaderState* stp)
+load_atom_table(LoaderState* stp, ErtsAtomEncoding enc)
{
unsigned int i;
@@ -1357,7 +1393,7 @@ load_atom_table(LoaderState* stp)
GetByte(stp, n);
GetString(stp, atom, n);
- stp->atom[i] = erts_atom_put(atom, n, ERTS_ATOM_ENC_LATIN1, 1);
+ stp->atom[i] = erts_atom_put(atom, n, enc, 1);
}
/*
@@ -1420,8 +1456,8 @@ load_import_table(LoaderState* stp)
* the BIF function.
*/
if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- stp->import[i].bf = (BifFunction) e->code[4];
+ if (e->beam[0] == (BeamInstr) em_apply_bif) {
+ stp->import[i].bf = (BifFunction) e->beam[1];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
}
@@ -1514,7 +1550,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity)
if (e == NULL) {
return 0;
}
- if (e->code[3] != (BeamInstr) em_apply_bif) {
+ if (e->beam[0] != (BeamInstr) em_apply_bif) {
return 0;
}
if (mod == am_erlang && func == am_apply && arity == 3) {
@@ -1893,7 +1929,7 @@ load_code(LoaderState* stp)
* by both the nif functionality and line instructions.
*/
enum {
- FUNC_INFO_SZ = 5
+ FUNC_INFO_SZ = sizeof(ErtsCodeInfo) / sizeof(Eterm)
};
code = stp->codev;
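For reference, the new expression still evaluates to the old literal: with the ErtsCodeInfo layout sketched earlier on a 64-bit HiPE-enabled build, op (1 word) + u (1 word) + mfa (3 words) gives FUNC_INFO_SZ == 5. The assert added further down guards exactly this agreement between the struct and the size of the ops.tab i_func_info instruction.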
@@ -2532,15 +2568,10 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
-#ifdef ERTS_DIRTY_SCHEDULERS
- enum { MIN_FUNC_SZ = 4 };
-#else
- enum { MIN_FUNC_SZ = 3 };
-#endif
- if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
+ if (finfo_ix - last_func_start < BEAM_NIF_MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
- int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
- ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
+ int pad = BEAM_NIF_MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < BEAM_NIF_MIN_FUNC_SZ);
CodeNeed(pad);
sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
FUNC_INFO_SZ*sizeof(BeamInstr));
@@ -2565,8 +2596,11 @@ load_code(LoaderState* stp)
stp->function = code[ci-2];
stp->arity = code[ci-1];
+ /* When this assert is triggered, it is normally a sign that the
+ size of the ops.tab i_func_info instruction does not match
+ FUNC_INFO_SZ. */
ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
- stp->hdr->functions[function_number] = (BeamInstr*) stp->labels[last_label].patches;
+ stp->hdr->functions[function_number] = (ErtsCodeInfo*) stp->labels[last_label].patches;
offset = function_number;
stp->labels[last_label].patches = offset;
function_number++;
@@ -4031,60 +4065,52 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
op->next = NULL;
return op;
}
+
+static GenOp*
+translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
+{
+ const ErtsGcBif* p;
+ BifFunction bf;
+
+ bf = stp->import[Bif.val].bf;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->bif == bf) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = (BeamInstr) p->gc_bif;
+ return op;
+ }
+ }
+
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
+}
+
/*
- * Rewrite gc_bifs with one parameter (the common case). Utilized
- * in ops.tab to rewrite instructions calling bif's in guards
- * to use a garbage collecting implementation.
+ * Rewrite gc_bifs with one parameter (the common case).
*/
static GenOp*
gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg Src, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == length_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_length_1;
- } else if (bf == size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_size_1;
- } else if (bf == bit_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
- } else if (bf == byte_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
- } else if (bf == map_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1;
- } else if (bf == abs_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
- } else if (bf == float_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_float_1;
- } else if (bf == round_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_round_1;
- } else if (bf == trunc_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif1_5;
op->arity = 5;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4095,35 +4121,18 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_2) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif2_6;
op->arity = 6;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4134,37 +4143,19 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_3) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_ii_gc_bif3_7;
op->arity = 7;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = S3;
op->a[6] = Dst;
- op->next = NULL;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
static GenOp*
@@ -4386,22 +4377,25 @@ gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int
hash_internal_genop_arg(LoaderState* stp, GenOpArg Key, Uint32* hx)
{
+ Eterm key_term;
switch (Key.type) {
case TAG_a:
- *hx = atom_tab(atom_val(Key.val))->slot.bucket.hvalue;
- return 1;
+ key_term = Key.val;
+ break;
case TAG_i:
- *hx = Key.val;
- return 1;
+ key_term = make_small(Key.val);
+ break;
case TAG_n:
- *hx = make_internal_hash(NIL);
- return 1;
+ key_term = NIL;
+ break;
case TAG_q:
- *hx = make_internal_hash(stp->literals[Key.val].term);
- return 1;
+ key_term = stp->literals[Key.val].term;
+ break;
default:
return 0;
}
+ *hx = erts_pd_make_hx(key_term);
+ return 1;
}
@@ -4556,7 +4550,7 @@ freeze_code(LoaderState* stp)
* function table in the beginning of the file.
*/
- code_hdr->functions[stp->num_functions] = (codev + stp->ci - 1);
+ code_hdr->functions[stp->num_functions] = (ErtsCodeInfo*)(codev + stp->ci - 1);
CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
@@ -4816,16 +4810,16 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
ep = erts_export_put(stp->module, stp->export[i].function,
stp->export[i].arity);
- if (!on_load) {
- ep->addressv[erts_staging_code_ix()] = address;
- } else {
+ if (on_load) {
/*
* on_load: Don't make any of the exported functions
* callable yet. Keep any function in the current
* code callable.
*/
- ep->code[4] = (BeamInstr) address;
+ ep->beam[1] = (BeamInstr) address;
}
+ else
+ ep->addressv[erts_staging_code_ix()] = address;
}
/*
@@ -4871,7 +4865,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
fe->address = code_ptr;
#ifdef HIPE
- hipe_set_closure_stub(fe, stp->lambdas[i].num_free);
+ hipe_set_closure_stub(fe);
#endif
}
}
@@ -4882,7 +4876,7 @@ transform_engine(LoaderState* st)
{
Uint op;
int ap; /* Current argument. */
- Uint* restart; /* Where to restart if current match fails. */
+ const Uint* restart; /* Where to restart if current match fails. */
GenOpArg var[TE_MAX_VARS]; /* Buffer for variables. */
GenOpArg* rest_args = NULL;
int num_rest_args = 0;
@@ -4891,7 +4885,7 @@ transform_engine(LoaderState* st)
GenOp* instr;
GenOp* first = st->genop;
GenOp* keep = NULL;
- Uint* pc;
+ const Uint* pc;
static Uint restart_fail[1] = {TOP_fail};
ASSERT(gen_opc[first->op].transform != -1);
@@ -5002,7 +4996,7 @@ transform_engine(LoaderState* st)
if (i >= st->num_imports || st->import[i].bf == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->code[4] != (BeamInstr) st->import[i].bf) {
+ bif_export[bif_number]->beam[1] != (BeamInstr) st->import[i].bf) {
goto restart;
}
}
@@ -5627,18 +5621,16 @@ functions_in_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo* ci = code_hdr->functions[i];
Eterm tuple;
/*
* If the function name is [], this entry is a stub for
* a BIF that should be ignored.
*/
- ASSERT(is_atom(name) || is_nil(name));
- if (is_atom(name)) {
- tuple = TUPLE2(hp, name, make_small(arity));
+ ASSERT(is_atom(ci->mfa.function) || is_nil(ci->mfa.function));
+ if (is_atom(ci->mfa.function)) {
+ tuple = TUPLE2(hp, ci->mfa.function, make_small(ci->mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5678,9 +5670,7 @@ erts_release_literal_area(ErtsLiteralArea* literal_area)
Binary* bptr;
ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
bptr = ((ProcBin*)oh)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
oh = oh->next;
}
erts_free(ERTS_ALC_T_LITERAL, literal_area);
@@ -5695,17 +5685,28 @@ erts_is_module_native(BeamCodeHeader* code_hdr)
if (code_hdr != NULL) {
num_functions = code_hdr->num_functions;
for (i=0; i<num_functions; i++) {
- BeamInstr* func_info = (BeamInstr *) code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- if (is_atom(name)) {
- return func_info[1] != 0;
+ ErtsCodeInfo* ci = code_hdr->functions[i];
+ if (is_atom(ci->mfa.function)) {
+ return erts_is_function_native(ci);
}
- else ASSERT(is_nil(name)); /* ignore BIF stubs */
+ else ASSERT(is_nil(ci->mfa.function)); /* ignore BIF stubs */
}
}
return 0;
}
+int
+erts_is_function_native(ErtsCodeInfo *ci)
+{
+#ifdef HIPE
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ return erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
+ || erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
+#else
+ return 0;
+#endif
+}
+
/*
* Builds a list of all functions including native addresses.
* [{Name,Arity,NativeAddress},...]
@@ -5714,33 +5715,35 @@ erts_is_module_native(BeamCodeHeader* code_hdr)
static Eterm
native_addresses(Process* p, BeamCodeHeader* code_hdr)
{
+ Eterm result = NIL;
+#ifdef HIPE
int i;
Eterm* hp;
Uint num_functions;
Uint need;
Eterm* hp_end;
- Eterm result = NIL;
num_functions = code_hdr->num_functions;
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo *ci = code_hdr->functions[i];
Eterm tuple;
- ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
- if (func_info[1] != 0) {
- Eterm addr;
- ASSERT(is_atom(name));
- addr = erts_bld_uint(&hp, NULL, func_info[1]);
- tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
+ ASSERT(is_atom(ci->mfa.function)
+ || is_nil(ci->mfa.function)); /* [] if BIF stub */
+ if (ci->u.ncallee != NULL) {
+ Eterm addr;
+ ASSERT(is_atom(ci->mfa.function));
+ addr = erts_bld_uint(&hp, NULL, (Uint)ci->u.ncallee);
+ tuple = erts_bld_tuple(&hp, NULL, 3, ci->mfa.function,
+ make_small(ci->mfa.arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
}
HRelease(p, hp_end, hp);
+#endif
return result;
}
@@ -5763,11 +5766,11 @@ exported_from_module(Process* p, /* Process whose heap to use. */
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
- if (ep->code[0] == mod) {
+ if (ep->info.mfa.module == mod) {
Eterm tuple;
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_call_error_handler) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ ep->beam[0] == (BeamInstr) em_call_error_handler) {
/* There is a call to the function, but it does not exist. */
continue;
}
@@ -5777,7 +5780,8 @@ exported_from_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hend = hp + need;
}
- tuple = TUPLE2(hp, ep->code[1], make_small(ep->code[2]));
+ tuple = TUPLE2(hp, ep->info.mfa.function,
+ make_small(ep->info.mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5851,7 +5855,6 @@ md5_of_module(Process* p, /* Process whose heap to use. */
Eterm*
erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
{
- BeamInstr* current = fi->current;
Eterm loc = NIL;
if (fi->loc != LINE_INVALID_LOCATION) {
@@ -5861,7 +5864,7 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
Eterm file_term = NIL;
if (file == 0) {
- Atom* ap = atom_tab(atom_val(fi->current[0]));
+ Atom* ap = atom_tab(atom_val(fi->mfa->module));
file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
} else {
@@ -5880,10 +5883,12 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
}
if (is_list(args) || is_nil(args)) {
- *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ args, loc);
} else {
- Eterm arity = make_small(current[2]);
- *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ Eterm arity = make_small(fi->mfa->arity);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ arity, loc);
}
return hp + 5;
}
@@ -5894,9 +5899,9 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
* the function.
*/
void
-erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa)
{
- fi->current = current;
+ fi->mfa = mfa;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
}
@@ -5905,13 +5910,13 @@ erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
/*
* Returns a pointer to {module, function, arity}, or NULL if not found.
*/
-BeamInstr*
+ErtsCodeMFA*
find_function_from_pc(BeamInstr* pc)
{
FunctionInfo fi;
erts_lookup_function_info(&fi, pc, 0);
- return fi.current;
+ return fi.mfa;
}
/*
@@ -5966,7 +5971,7 @@ code_get_chunk_2(BIF_ALIST_2)
goto error;
}
if (!init_iff_file(stp, start, binary_size(Bin)) ||
- !scan_iff_file(stp, &chunk, 1, 1) ||
+ !scan_iff_file(stp, &chunk, 1) ||
stp->chunks[0].start == NULL) {
res = am_undefined;
goto done;
@@ -6015,7 +6020,7 @@ code_module_md5_1(BIF_ALIST_1)
}
stp->module = THE_NON_VALUE; /* Suppress diagnostics */
if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
res = am_undefined;
goto done;
@@ -6028,24 +6033,21 @@ code_module_md5_1(BIF_ALIST_1)
return res;
}
-#define WORDS_PER_FUNCTION 6
+#ifdef HIPE
+#define WORDS_PER_FUNCTION (sizeof(ErtsCodeInfo) / sizeof(UWord) + 1)
static BeamInstr*
-make_stub(BeamInstr* fp, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
+make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
{
- fp[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- fp[1] = native;
- fp[2] = mod;
- fp[3] = func;
- fp[4] = arity;
-#ifdef HIPE
- if (native) {
- fp[5] = BeamOpCode(op_move_return_n);
- hipe_mfa_save_orig_beam_op(mod, func, arity, fp+5);
- }
-#endif
- fp[5] = OpCode;
- return fp + WORDS_PER_FUNCTION;
+ DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info));
+ ASSERT(WORDS_PER_FUNCTION == 6);
+ info->op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ info->u.ncallee = (void (*)(void)) native;
+ info->mfa.module = mod;
+ info->mfa.function = func;
+ info->mfa.arity = arity;
+ erts_codeinfo_to_code(info)[0] = OpCode;
+ return erts_codeinfo_to_code(info)+1;
}
static byte*
@@ -6104,22 +6106,17 @@ stub_read_export_table(LoaderState* stp)
}
static void
-stub_final_touch(LoaderState* stp, BeamInstr* fp)
+stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci)
{
unsigned int i;
unsigned int n = stp->num_exps;
- Eterm mod = fp[2];
- Eterm function = fp[3];
- int arity = fp[4];
-#ifdef HIPE
Lambda* lp;
-#endif
- if (is_bif(mod, function, arity)) {
- fp[1] = 0;
- fp[2] = 0;
- fp[3] = 0;
- fp[4] = 0;
+ if (is_bif(ci->mfa.module, ci->mfa.function, ci->mfa.arity)) {
+ ci->u.ncallee = NULL;
+ ci->mfa.module = 0;
+ ci->mfa.function = 0;
+ ci->mfa.arity = 0;
return;
}
@@ -6128,9 +6125,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
*/
for (i = 0; i < n; i++) {
- if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(mod, function, arity);
- ep->addressv[erts_staging_code_ix()] = fp+5;
+ if (stp->export[i].function == ci->mfa.function &&
+ stp->export[i].arity == ci->mfa.arity) {
+ Export* ep = erts_export_put(ci->mfa.module,
+ ci->mfa.function,
+ ci->mfa.arity);
+ ep->addressv[erts_staging_code_ix()] = erts_codeinfo_to_code(ci);
+ DBG_TRACE_MFA_P(&ci->mfa,"set beam stub at %p in export at %p (code_ix=%d)",
+ erts_codeinfo_to_code(ci), ep, erts_staging_code_ix());
return;
}
}
@@ -6140,16 +6142,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
* Search the lambda table to find out which.
*/
-#ifdef HIPE
n = stp->num_lambdas;
for (i = 0, lp = stp->lambdas; i < n; i++, lp++) {
ErlFunEntry* fe = stp->lambdas[i].fe;
- if (lp->function == function && lp->arity == arity) {
- fp[5] = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
- fe->address = &(fp[5]);
+ if (lp->function == ci->mfa.function && lp->arity == ci->mfa.arity) {
+ *erts_codeinfo_to_code(ci) = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
+ fe->address = erts_codeinfo_to_code(ci);
}
}
-#endif
return;
}
@@ -6158,10 +6158,9 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
[{Adr, Patchtype} | Addresses]
and the address of a fun_entry.
*/
-int
+static int
patch(Eterm Addresses, Uint fe)
{
-#ifdef HIPE
Eterm* listp;
Eterm tuple;
Eterm* tp;
@@ -6197,15 +6196,13 @@ patch(Eterm Addresses, Uint fe)
}
-#endif
return 1;
}
-int
+static int
patch_funentries(Eterm Patchlist)
{
-#ifdef HIPE
while (!is_nil(Patchlist)) {
Eterm Info;
Eterm MFA;
@@ -6279,38 +6276,29 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- /* Deliberate MEMORY LEAK of native fun entries!!!
- *
- * Uncomment line below when hipe code upgrade and purging works correctly.
- * Today we may get cases when old (leaked) native code of a purged module
- * gets called and tries to create instances of a deleted fun entry.
- *
- * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
- *
- * erts_smp_refc_dec(&fe->refc, 1);
- */
+ erts_smp_refc_dec(&fe->refc, 1);
if (!patch(Addresses, (Uint) fe))
return 0;
}
-#endif
return 1; /* Signal that all went well */
}
-
/*
* Do a dummy load of a module. No threaded code will be loaded.
* Used for loading native code.
* Will also patch all references to fun_entries to point to
* the new fun_entries created.
*/
-
Eterm
-erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
+erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info)
{
Binary* magic;
+ Binary* hipe_magic;
LoaderState* stp;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
BeamInstr Funcs;
BeamInstr Patchlist;
Eterm MD5Bin;
@@ -6333,8 +6321,12 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
magic = erts_alloc_loader_state();
stp = ERTS_MAGIC_BIN_DATA(magic);
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
- if (is_not_atom(Mod)) {
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
goto error;
}
if (is_not_tuple(Info)) {
@@ -6362,13 +6354,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Scan the Beam binary and read the interesting sections.
*/
- stp->module = Mod;
+ stp->module = hipe_stp->module;
stp->group_leader = p->group_leader;
stp->num_functions = n;
if (!init_iff_file(stp, bytes, size)) {
goto error;
}
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto error;
}
@@ -6376,9 +6368,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (!read_code_header(stp)) {
goto error;
}
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto error;
+ }
}
define_file(stp, "export table", EXP_CHUNK);
if (!stub_read_export_table(stp)) {
@@ -6470,20 +6469,17 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Set the pointer and make the stub. Put a return instruction
* as the body until we know what kind of trap we should put there.
*/
- code_hdr->functions[i] = fp;
-#ifdef HIPE
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
-#else
- op = (Eterm) BeamOpCode(op_move_return_n);
-#endif
- fp = make_stub(fp, Mod, func, arity, (Uint)native_address, op);
+ fp = make_stub((ErtsCodeInfo*)fp, hipe_stp->module, func, arity,
+ (Uint)native_address, op);
}
/*
* Insert the last pointer and the int_code_end instruction.
*/
- code_hdr->functions[i] = fp;
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
*fp++ = (BeamInstr) BeamOp(op_int_code_end);
/*
@@ -6516,11 +6512,20 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
}
/*
+ * Initialise HiPE module
+ */
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ /*
* Insert the module in the module table.
*/
- rval = stub_insert_new_code(p, 0, p->group_leader, Mod,
- code_hdr, code_size);
+ rval = stub_insert_new_code(p, 0, p->group_leader, hipe_stp->module,
+ code_hdr, code_size, hipe_code);
if (rval != NIL) {
goto error;
}
@@ -6531,23 +6536,74 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
fp = code_base;
for (i = 0; i < n; i++) {
- stub_final_touch(stp, fp);
+ stub_final_touch(stp, (ErtsCodeInfo*)fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
+ Eterm mod = hipe_stp->module;
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
- return Mod;
+ hipe_free_loader_state(hipe_stp);
+
+ return mod;
}
error:
+ erts_free(ERTS_ALC_T_HIPE_LL, hipe_code);
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin)
+{
+ Binary* hipe_magic;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
+ Module* modp;
+
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
+ return 0;
+ }
+
+ modp = erts_get_module(hipe_stp->module, erts_active_code_ix());
+ if (!modp)
+ return 0;
+
+ /*
+ * Initialise HiPE module
+ */
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ modp->curr.hipe_code = hipe_code;
+
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
+ return 1;
+}
+
#undef WORDS_PER_FUNCTION
+#endif /* HIPE */
+
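Both erts_make_stub_module() and erts_commit_hipe_patch_load() above end with the same ownership-transfer idiom: once the text/data segments and the hipe_ref/sdesc lists have been handed over to the HipeModule, the corresponding loader-state pointers are nulled so that hipe_free_loader_state() cannot free memory that the committed module now owns. A standalone sketch of the idiom (the struct and names are illustrative, not the real ERTS types):

    struct segments { void *text, *data; };

    /* Move ownership of the loaded segments from loader to module.
     * Clearing the source pointers is what the "Prevent code from
     * being freed" comments mean: the loader's cleanup path sees
     * NULL and leaves the memory alone. */
    static void commit_segments(struct segments *module,
                                struct segments *loader)
    {
        module->text = loader->text;
        module->data = loader->data;
        loader->text = NULL;
        loader->data = NULL;
    }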
static int safe_mul(UWord a, UWord b, UWord* resp)
{
@@ -6561,3 +6617,46 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
}
}
+#ifdef ENABLE_DBG_TRACE_MFA
+
+#define MFA_MAX 10
+Eterm dbg_trace_m[MFA_MAX];
+Eterm dbg_trace_f[MFA_MAX];
+Uint dbg_trace_a[MFA_MAX];
+unsigned int dbg_trace_ix = 0;
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a)
+{
+ unsigned i = dbg_trace_ix++;
+ ASSERT(i < MFA_MAX);
+ dbg_trace_m[i] = am_atom_put(m, strlen(m));
+ dbg_trace_f[i] = am_atom_put(f, strlen(f));
+ dbg_trace_a[i] = a;
+}
+
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a)
+{
+ unsigned int i;
+ for (i = 0; i < dbg_trace_ix; ++i) {
+ if (m == dbg_trace_m[i] &&
+ (!f || (f == dbg_trace_f[i] && a == dbg_trace_a[i]))) {
+
+ return i+1;
+ }
+ }
+ return 0;
+}
+
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...)
+{
+ va_list arglist;
+ va_start(arglist, format);
+ ASSERT(--ix < MFA_MAX);
+ erts_fprintf(stderr, "MFA TRACE %T:%T/%u: ",
+ dbg_trace_m[ix], dbg_trace_f[ix], (int)dbg_trace_a[ix]);
+
+ erts_vfprintf(stderr, format, arglist);
+ va_end(arglist);
+}
+
+#endif /* ENABLE_DBG_TRACE_MFA */
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 9be5e14e40..c088bdb751 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,15 +35,7 @@ typedef struct gen_op_entry {
int transform;
} GenOpEntry;
-extern GenOpEntry gen_opc[];
-
-#ifdef NO_JUMP_TABLE
-#define BeamOp(Op) (Op)
-#else
-extern void** beam_ops;
-#define BeamOp(Op) beam_ops[(Op)]
-#endif
-
+extern const GenOpEntry gen_opc[];
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
@@ -115,12 +107,19 @@ typedef struct beam_code_header {
* The actual loaded code (for the first function) start just beyond
* this table.
*/
- BeamInstr* functions[1];
+ ErtsCodeInfo* functions[1];
}BeamCodeHeader;
+#ifdef ERTS_DIRTY_SCHEDULERS
+# define BEAM_NIF_MIN_FUNC_SZ 4
+#else
+# define BEAM_NIF_MIN_FUNC_SZ 3
+#endif
+
void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
int erts_is_module_native(BeamCodeHeader* code);
+int erts_is_function_native(ErtsCodeInfo*);
void erts_beam_bif_load_init(void);
struct erl_fun_entry;
void erts_purge_state_add_fun(struct erl_fun_entry *fe);
@@ -152,4 +151,34 @@ struct BeamCodeLineTab_ {
#define LOC_FILE(Loc) ((Loc) >> 24)
#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
+/*
+ * MFA event debug "tracing" usage:
+ *
+ * #define ENABLE_DBG_TRACE_MFA
+ * call dbg_set_traced_mfa("mymod","myfunc",arity)
+ * for the function(s) to trace, in some init function.
+ *
+ * Run and get stderr printouts when interesting things happen to your MFA.
+ */
+#ifdef ENABLE_DBG_TRACE_MFA
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a);
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a);
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...);
+#define DBG_TRACE_MFA(M,F,A,FMT, ...) do {\
+ unsigned ix;\
+ if ((ix=dbg_is_traced_mfa(M,F,A))) \
+ dbg_vtrace_mfa(ix, FMT"\n", ##__VA_ARGS__);\
+ }while(0)
+
+#define DBG_TRACE_MFA_P(MFA, FMT, ...) \
+ DBG_TRACE_MFA((MFA)->module, (MFA)->function, (MFA)->arity, FMT, ##__VA_ARGS__)
+
+#else
+# define dbg_set_traced_mfa(M,F,A)
+# define DBG_TRACE_MFA(M,F,A,FMT, ...)
+# define DBG_TRACE_MFA_P(MFA,FMT, ...)
+#endif /* ENABLE_DBG_TRACE_MFA */
+
#endif /* _BEAM_LOAD_H */
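To make the usage comment above concrete, a hypothetical session might look like the fragment below (the traced module/function and the format strings are made up; the API is as declared above):

    /* In some init function, before the module of interest is loaded: */
    dbg_set_traced_mfa("lists", "reverse", 2);

    /* At any code path where an MFA or an ErtsCodeMFA* is at hand: */
    DBG_TRACE_MFA(mod, func, arity, "stub created at %p", fp);
    DBG_TRACE_MFA_P(&ci->mfa, "breakpoint %s", "installed");

When ENABLE_DBG_TRACE_MFA is undefined, all three macros expand to nothing, so such call sites can remain in the code permanently.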
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 55342a38c6..9b0335e83d 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -221,13 +221,13 @@ erts_ranges_sz(void)
void
erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
- BeamInstr** low;
- BeamInstr** high;
- BeamInstr** mid;
+ ErtsCodeInfo** low;
+ ErtsCodeInfo** high;
+ ErtsCodeInfo** mid;
Range* rp;
BeamCodeHeader* hdr;
- fi->current = NULL;
+ fi->mfa = NULL;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
rp = find_range(pc);
@@ -240,12 +240,12 @@ erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
high = low + hdr->num_functions;
while (low < high) {
mid = low + (high-low) / 2;
- if (pc < mid[0]) {
+ if (pc < (BeamInstr*)(mid[0])) {
high = mid;
- } else if (pc < mid[1]) {
- fi->current = mid[0]+2;
+ } else if (pc < (BeamInstr*)(mid[1])) {
+ fi->mfa = &mid[0]->mfa;
if (full_info) {
- BeamInstr** fp = hdr->functions;
+ ErtsCodeInfo** fp = hdr->functions;
int idx = mid - fp;
lookup_loc(fi, pc, hdr, idx);
}
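The search above works because hdr->functions[] holds num_functions+1 sorted pointers, the extra one addressing the int_code_end marker (see the stub-module code earlier in this patch), so function k occupies the half-open range [functions[k], functions[k+1]). A standalone analogue of the lookup, independent of the ERTS types:

    typedef unsigned long Instr;

    /* funcs has n+1 entries; funcs[n] is the end-of-module sentinel.
     * Returns the index of the function containing pc, or -1. */
    static int find_function(Instr * const *funcs, int n, const Instr *pc)
    {
        int lo = 0, hi = n;
        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (pc < funcs[mid])
                hi = mid;              /* strictly before this function */
            else if (pc < funcs[mid + 1])
                return mid;            /* in [funcs[mid], funcs[mid+1]) */
            else
                lo = mid + 1;
        }
        return -1;                     /* pc is not in this module */
    }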
@@ -316,7 +316,7 @@ lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
file = LOC_FILE(fi->loc);
if (file == 0) {
/* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ Atom* mod_atom = atom_tab(atom_val(fi->mfa->module));
fi->needed += 2*(mod_atom->len+4);
} else {
Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index d9048065c8..b6595d2a5d 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -62,9 +62,6 @@ static erts_smp_atomic32_t msacc;
static Export *await_sched_wall_time_mod_trap;
static erts_smp_atomic32_t sched_wall_time;
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/*
@@ -199,7 +196,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
goto res_no_proc;
case ERTS_PORT_OP_SCHEDULED:
if (refp) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
default:
@@ -361,7 +358,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
c_p->common.id,
(mon->name != NIL
? mon->name
- : mon->pid),
+ : mon->u.pid),
ref,
0);
res = (code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true);
@@ -467,7 +464,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target)
}
/* Can return atom true, false, yield, internal_error, badarg or
- * THE_NON_VALUE if error occured or trap has been set up
+ * THE_NON_VALUE if error occurred or trap has been set up
*/
static
BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
@@ -498,7 +495,7 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
res = am_true;
break;
case MON_ORIGIN:
- to = mon->pid;
+ to = mon->u.pid;
*multip = am_false;
if (is_atom(to)) {
/* Monitoring a name at node to */
@@ -597,7 +594,7 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
case THE_NON_VALUE:
- /* If other error occured or trap has been set up - pass through */
+ /* If other error occurred or trap has been set up - pass through */
BIF_RET(THE_NON_VALUE);
case am_false:
if (info)
@@ -782,7 +779,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name)
case ERTS_PORT_OP_DONE:
return ret;
case ERTS_PORT_OP_SCHEDULED: { /* Scheduled a signal */
- ASSERT(is_internal_ref(ret));
+ ASSERT(is_internal_ordinary_ref(ret));
BIF_TRAP3(await_port_send_result_trap, self,
ret, am_true, ret);
/* bif_trap returns */
@@ -1180,7 +1177,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
}
@@ -1586,7 +1583,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
ERTS_BIF_CHK_EXITED(BIF_P);
if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
}
@@ -2047,8 +2044,6 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_YIELD_CONTINUE (-8)
-Sint do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext*);
-
static Sint remote_send(Process *p, DistEntry *dep,
Eterm to, Eterm full_to, Eterm msg,
ErtsSendContext* ctx)
@@ -2102,8 +2097,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
return res;
}
-Sint
-do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
+static Sint
+do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
{
Eterm portid;
Port *pt;
@@ -2221,7 +2216,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
/* Fall through */
case ERTS_PORT_OP_SCHEDULED:
if (is_not_nil(*refp)) {
- ASSERT(is_internal_ref(*refp));
+ ASSERT(is_internal_ordinary_ref(*refp));
ret_val = SEND_AWAIT_RESULT;
}
break;
@@ -2409,7 +2404,7 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, am_ok);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval, await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
break;
case SEND_BADARG:
@@ -2446,7 +2441,7 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
{
- Binary* bin = ((ProcBin*) binary_val(BIF_ARG_1))->val;
+ Binary* bin = erts_magic_ref2bin(BIF_ARG_1);
ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
int result;
@@ -2526,7 +2521,7 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, msg);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval,
await_port_send_result_trap, p, ref, msg, msg);
break;
@@ -3022,18 +3017,18 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
{
Eterm res;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
- Sint i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
-
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = erts_list_length(BIF_ARG_1);
- if (i > MAX_ATOM_CHARACTERS) {
+ if (i == -2) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
BIF_ERROR(BIF_P, BADARG);
}
- res = erts_atom_put((byte *) buf, i, ERTS_ATOM_ENC_LATIN1, 1);
+ res = erts_atom_put(buf, written, ERTS_ATOM_ENC_UTF8, 1);
ASSERT(is_atom(res));
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(res);
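Note the asymmetry above: the buffer is MAX_ATOM_SZ_LIMIT bytes while the length limit passed in is MAX_ATOM_CHARACTERS, presumably because erts_unicode_list_to_buf() emits UTF-8 and one code point can need up to four bytes (halt/2 below makes the same factor explicit with its 4*HALT_MSG_SIZE+1 buffer). A standalone check of that sizing assumption:

    #include <assert.h>

    /* Bytes a Unicode code point occupies when encoded as UTF-8. */
    static int utf8_width(unsigned cp)
    {
        return cp < 0x80 ? 1 : cp < 0x800 ? 2 : cp < 0x10000 ? 3 : 4;
    }

    int main(void)
    {
        assert(utf8_width(0x10FFFF) == 4);  /* worst case: 4 bytes/char */
        return 0;
    }

The visible error convention: a negative return means failure, with -2 (too many characters) mapped to system_limit and the rest to badarg.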
@@ -3043,17 +3038,18 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
{
- Sint i;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
-
- if ((i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS)) < 0) {
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
+ if (i < 0) {
error:
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
} else {
Eterm a;
- if (erts_atom_get(buf, i, &a, ERTS_ATOM_ENC_LATIN1)) {
+ if (erts_atom_get((char *) buf, written, &a, ERTS_ATOM_ENC_UTF8)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(a);
} else {
@@ -3109,7 +3105,7 @@ BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
* On error returns: {error,not_a_list}, or {error, no_integer}
*/
-BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_integer_1(BIF_ALIST_1)
{
Eterm res;
Eterm tail;
@@ -3295,7 +3291,7 @@ BIF_RETTYPE float_to_binary_2(BIF_ALIST_2)
#define LOAD_E(xi,xim,xl,xlm) ((xi)=(xim), (xl)=(xlm))
#define STRING_TO_FLOAT_BUF_INC_SZ (128)
-BIF_RETTYPE string_to_float_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_float_1(BIF_ALIST_1)
{
Eterm orig = BIF_ARG_1;
Eterm list = orig;
@@ -3865,11 +3861,21 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
+/*
+ * Pass atom 'minor' for a relaxed generational GC run. This is only a
+ * recommendation; a major run may still be chosen by the VM.
+ * Pass atom 'major' for the default behaviour - a major GC run (fullsweep).
+ */
+BIF_RETTYPE
+erts_internal_garbage_collect_1(BIF_ALIST_1)
{
- FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ switch (BIF_ARG_1) {
+ case am_minor: break;
+ case am_major: FLAGS(BIF_P) |= F_NEED_FULLSWEEP; break;
+ default: BIF_ERROR(BIF_P, BADARG);
+ }
erts_garbage_collect(BIF_P, 0, NULL, 0);
- BIF_RET(am_true);
+ return am_true;
}
/**********************************************************************/
@@ -3938,15 +3944,18 @@ BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm string = BIF_ARG_1;
- Sint len = is_string(string);
- char *str;
+ Sint len = erts_unicode_list_to_buf_len(string);
+ Sint written;
+ byte *str;
+ int res;
- if (len <= 0) {
+ if (len < 0) {
BIF_ERROR(p, BADARG);
}
- str = (char *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
- if (intlist_to_buf(string, str, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ str = (byte *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
+ res = erts_unicode_list_to_buf(string, str, len, &written);
+ if (res != 0 || written != len)
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error (%d)\n", __FILE__, __LINE__, res);
str[len] = '\0';
erts_fprintf(stderr, "%s", str);
erts_free(ERTS_ALC_T_TMP, (void *) str);
@@ -3962,9 +3971,6 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE+1];
-
/* stop the system with exit code and flags */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
@@ -4014,16 +4020,17 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_exit(ERTS_ABORT_EXIT, "");
}
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- Sint i;
+ else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+# define HALT_MSG_SIZE 200
+ static byte halt_msg[4*HALT_MSG_SIZE+1];
+ Sint written;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE)) == -1) {
+ if (erts_unicode_list_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE,
+ &written) == -1) {
goto error;
}
- if (i == -2) /* truncated string */
- i = HALT_MSG_SIZE;
- ASSERT(i >= 0 && i <= HALT_MSG_SIZE);
- halt_msg[i] = '\0';
+ ASSERT(written >= 0 && written < sizeof(halt_msg));
+ halt_msg[written] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -4099,6 +4106,7 @@ BIF_RETTYPE ref_to_list_1(BIF_ALIST_1)
{
if (is_not_ref(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
+ erts_magic_ref_save_bin(BIF_ARG_1);
BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
}
@@ -4243,6 +4251,201 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE list_to_port_1(BIF_ALIST_1)
+{
+ /*
+ * A valid port identifier is of the form
+ * "#Port<N.P>", where N is the node number and P is
+ * the port id. Both N and P are of type Uint32.
+ */
+ Uint32 n, p;
+ char* cp;
+ int i;
+ DistEntry *dep = NULL;
+ char buf[6 /* #Port< */
+ + (2)*(10 + 1) /* N.P> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (strncmp("#Port<", cp, 6) != 0)
+ goto bad;
+
+ cp += 6; /* strlen("#Port<") */
+
+ if (sscanf(cp, "%u.%u>", &n, &p) < 2)
+ goto bad;
+
+ if (p > ERTS_MAX_PORT_NUMBER)
+ goto bad;
+
+ dep = erts_channel_no_to_dist_entry(n);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_internal_port(p));
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ etp->header = make_external_port_header(1);
+ etp->next = MSO(BIF_P).first;
+ etp->node = enp;
+ etp->data.ui[0] = p;
+
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_external_port(etp));
+ }
+
+ bad:
+ if (dep)
+ erts_deref_dist_entry(dep);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
+{
+ /*
+ * A valid reference is of the form
+ * "#Ref<N.X.Y.Z>" where N, X, Y, and Z are
+ * 32-bit integers (i.e., max 10 characters).
+ */
+ Eterm *hp;
+ Eterm res;
+ Uint32 refn[ERTS_MAX_REF_NUMBERS];
+ int n = 0;
+ Uint ints[1 + ERTS_MAX_REF_NUMBERS] = {0};
+ char* cp;
+ Sint i;
+ DistEntry *dep = NULL;
+ char buf[5 /* #Ref< */
+ + (1 + ERTS_MAX_REF_NUMBERS)*(10 + 1) /* N.X.Y.Z> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (*cp++ != '#') goto bad;
+ if (*cp++ != 'R') goto bad;
+ if (*cp++ != 'e') goto bad;
+ if (*cp++ != 'f') goto bad;
+ if (*cp++ != '<') goto bad;
+
+ for (i = 0; i < sizeof(ints)/sizeof(Uint); i++) {
+ if (*cp < '0' || *cp > '9') goto bad;
+
+ while (*cp >= '0' && *cp <= '9') {
+ ints[i] = 10*ints[i] + (*cp - '0');
+ cp++;
+ }
+
+ n++;
+ if (ints[i] > ~((Uint32) 0)) goto bad;
+ if (*cp == '>') break;
+ if (*cp++ != '.') goto bad;
+ }
+
+ if (*cp++ != '>') goto bad;
+ if (*cp != '\0') goto bad;
+
+ if (n < 2) goto bad;
+
+ for (n = 0; i > 0; i--)
+ refn[n++] = (Uint32) ints[i];
+
+ ASSERT(n <= ERTS_MAX_REF_NUMBERS);
+
+ dep = erts_channel_no_to_dist_entry(ints[0]);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ ErtsMagicBinary *mb;
+ Uint32 sid;
+ if (refn[0] > MAX_REFERENCE) goto bad;
+ if (n != ERTS_REF_NUMBERS) goto bad;
+ sid = erts_get_ref_numbers_thr_id(refn);
+ if (sid > erts_no_schedulers) goto bad;
+ mb = erts_magic_ref_lookup_bin(refn);
+ if (mb) {
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &BIF_P->off_heap,
+ (Binary *) mb);
+ }
+ else {
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ res = make_internal_ref(hp);
+ }
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+ Uint hsz;
+ int j;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ hsz = EXTERNAL_THING_HEAD_SIZE;
+#if defined(ARCH_64)
+ hsz += n/2 + 1;
+#else
+ hsz += n;
+#endif
+
+ etp = (ExternalThing *) HAlloc(BIF_P, hsz);
+ etp->header = make_external_ref_header(n/2);
+ etp->next = BIF_P->off_heap.first;
+ etp->node = enp;
+ i = 0;
+#if defined(ARCH_64)
+ etp->data.ui32[i] = n;
+ i++;
+#endif
+ for (j = 0; j < n; j++) {
+ etp->data.ui32[i] = refn[j];
+ i++;
+ }
+
+ BIF_P->off_heap.first = (struct erl_off_heap_header*) etp;
+ res = make_external_ref(etp);
+ }
+
+ erts_deref_dist_entry(dep);
+ BIF_RET(res);
+
+ bad:
+ if (dep)
+ erts_deref_dist_entry(dep);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
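A standalone analogue of the "#Ref<...>" scanner above, using strtoul instead of the hand-rolled digit loop and reducing error handling to a component count (0 on malformed input):

    #include <stdlib.h>
    #include <string.h>

    static int parse_ref(const char *s, unsigned long nums[4])
    {
        int n = 0;
        if (strncmp(s, "#Ref<", 5) != 0)
            return 0;
        s += 5;
        for (;;) {
            char *end;
            if (*s < '0' || *s > '9' || n == 4)
                return 0;               /* each component starts with a digit */
            nums[n++] = strtoul(s, &end, 10);
            s = end;
            if (*s == '>')
                break;
            if (*s++ != '.')
                return 0;
        }
        return s[1] == '\0' ? n : 0;    /* nothing may follow '>' */
    }

The real BIF then goes further: local refs are rebuilt either as ordinary refs or, when erts_magic_ref_lookup_bin() finds a live magic binary for the numbers, as magic refs; external refs get an ExternalThing allocated on the heap.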
/**********************************************************************/
BIF_RETTYPE group_leader_0(BIF_ALIST_0)
@@ -4307,7 +4510,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
if (new_member == BIF_P
|| !(erts_smp_atomic32_read_nob(&new_member->state)
- & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
new_member->group_leader = STORE_NC_IN_PROC(new_member,
BIF_ARG_1);
}
@@ -4369,41 +4572,37 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
|| BIF_ARG_2 == am_block_normal);
int normal = (BIF_ARG_2 == am_block_normal
|| BIF_ARG_2 == am_unblock_normal);
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else {
- switch (erts_block_multi_scheduling(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- block,
- normal,
- 0)) {
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- BIF_RET(am_blocked);
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- BIF_RET(am_blocked_normal);
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_DONE:
- BIF_RET(am_enabled);
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP2(bif_export[BIF_system_flag_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_EINVAL:
- goto error;
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- break;
- }
- }
+ switch (erts_block_multi_scheduling(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ block,
+ normal,
+ 0)) {
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ BIF_RET(am_blocked);
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ BIF_RET(am_blocked_normal);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(am_enabled);
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
#endif
}
} else if (BIF_ARG_1 == am_schedulers_online) {
@@ -4591,7 +4790,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0;
erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time,
new);
- Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new);
+ Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0);
ASSERT(is_value(ref));
BIF_TRAP2(await_sched_wall_time_mod_trap,
BIF_P,
@@ -4713,25 +4912,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
/**********************************************************************/
-BIF_RETTYPE hash_2(BIF_ALIST_2)
-{
- Uint32 hash;
- Sint range;
-
- if (is_not_small(BIF_ARG_2)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
- BIF_ERROR(BIF_P, BADARG);
- }
-#if defined(ARCH_64)
- if (range > ((1L << 27) - 1))
- BIF_ERROR(BIF_P, BADARG);
-#endif
- hash = make_broken_hash(BIF_ARG_1);
- BIF_RET(make_small(1 + (hash % range))); /* [1..range] */
-}
-
BIF_RETTYPE phash_2(BIF_ALIST_2)
{
Uint32 hash;
@@ -4941,25 +5121,22 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Export bif_return_trap_export;
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(BIF_ALIST_0))
+ Eterm (*bif)(BIF_ALIST))
{
int i;
sys_memset((void *) ep, 0, sizeof(Export));
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->addressv[i] = &ep->code[3];
+ ep->addressv[i] = ep->beam;
}
- ep->code[0] = m;
- ep->code[1] = f;
- ep->code[2] = a;
- ep->code[3] = (BeamInstr) em_apply_bif;
- ep->code[4] = (BeamInstr) bif;
+ ep->info.mfa.module = m;
+ ep->info.mfa.function = f;
+ ep->info.mfa.arity = a;
+ ep->beam[0] = (BeamInstr) em_apply_bif;
+ ep->beam[1] = (BeamInstr) bif;
}
void erts_init_bif(void)
{
- erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
-
/*
* bif_return_trap/2 is a hidden BIF that bifs that need to
* yield the calling process traps to.
@@ -5002,6 +5179,314 @@ void erts_init_bif(void)
erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
}
+/*
+ * Scheduling of BIFs via NifExport...
+ */
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
+
+#define ERTS_SCHED_BIF_TRAP_MARKER ((void *) (UWord) 1)
+
+static ERTS_INLINE void
+schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ ErtsBifFunc dfunc, void *ifunc,
+ Eterm module, Eterm function,
+ int argc, Eterm *argv)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ (void) erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ mfa, pc, (BeamInstr) em_apply_bif,
+ dfunc, ifunc,
+ module, function,
+ argc, argv);
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+ erts_nif_export_restore(BIF_P, nep, BIF_ARG_1);
+ BIF_RET(BIF_ARG_1);
+}
+
+static BIF_RETTYPE dirty_bif_trap(BIF_ALIST)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+
+ /*
+ * Arity and argument registers are already set
+ * correctly by the call to dirty_bif_trap()...
+ */
+
+ ASSERT(BIF_P->arity == nep->exp.info.mfa.arity);
+
+ erts_nif_export_restore(BIF_P, nep, THE_NON_VALUE);
+
+ BIF_P->i = (BeamInstr *) nep->func;
+ BIF_P->freason = TRAP;
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2)
+{
+ Eterm freason;
+
+ ASSERT(is_small(BIF_ARG_1));
+
+ freason = signed_val(BIF_ARG_1);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ freason |= EXF_RESTORE_NIF;
+
+ BIF_P->fvalue = BIF_ARG_2;
+
+ BIF_ERROR(BIF_P, freason);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+extern BeamInstr* em_call_bif_e;
+static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc bif,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc)
+{
+ Process *c_p, *dirty_shadow_proc;
+ ErtsCodeMFA *mfa;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ dirty_shadow_proc = proc;
+ c_p = proc->next;
+ ASSERT(c_p->common.id == dirty_shadow_proc->common.id);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else
+#endif
+ {
+ dirty_shadow_proc = NULL;
+ c_p = proc;
+ }
+
+ if (!ERTS_PROC_IS_EXITING(c_p)) {
+ Export *exp;
+ BifFunction dbif, ibif;
+ BeamInstr *pc;
+
+ /*
+ * dbif - direct bif
+ * ibif - indirect bif
+ */
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_aint32_t set, mask;
+ mask = (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC);
+ switch (sched_type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ set = ERTS_PSFLG_DIRTY_CPU_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ set = ERTS_PSFLG_DIRTY_IO_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_NORMAL:
+ default:
+ set = 0;
+ dbif = call_bif;
+ ibif = bif;
+ break;
+ }
+
+ (void) erts_smp_atomic32_read_bset_nob(&c_p->state, mask, set);
+#else
+ dbif = call_bif;
+ ibif = bif;
+#endif
+
+ if (i == NULL) {
+ ERTS_INTERNAL_ERROR("Missing instruction pointer");
+ }
+#ifdef HIPE
+ else if (proc->flags & F_HIPE_MODE) {
+ /* Pointer to bif export in i */
+ exp = (Export *) i;
+ pc = c_p->cp;
+ mfa = &exp->info.mfa;
+ }
+#endif
+ else if (em_call_bif_e == (BeamInstr *) *i) {
+ /* Pointer to bif export in i+1 */
+ exp = (Export *) i[1];
+ pc = i;
+ mfa = &exp->info.mfa;
+ }
+ else if (em_apply_bif == (BeamInstr *) *i) {
+ /* Pointer to bif in i+1, and mfa in i-3 */
+ pc = c_p->cp;
+ mfa = erts_code_to_codemfa(i);
+ }
+ else {
+ ERTS_INTERNAL_ERROR("erts_schedule_bif() called "
+ "from unexpected instruction");
+ }
+ ASSERT(bif);
+
+ if (argc < 0) { /* reschedule original call */
+ mod = mfa->module;
+ func = mfa->function;
+ argc = (int) mfa->arity;
+ }
+
+ schedule(c_p, dirty_shadow_proc, mfa, pc, dbif, ibif,
+ mod, func, argc, argv);
+ }
+
+ if (dirty_shadow_proc)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE
+call_bif(Process *c_p, Eterm *reg, BeamInstr *I)
+{
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsBifFunc bif = (ErtsBifFunc) nep->func;
+ BIF_RETTYPE ret;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ ASSERT(bif);
+
+ ret = (*bif)(c_p, reg, I);
+
+ if (is_value(ret))
+ erts_nif_export_restore(c_p, nep, ret);
+ else if (c_p->freason != TRAP)
+ c_p->freason |= EXF_RESTORE_NIF; /* restore in handle_error() */
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* BIF did an ordinary trap... */
+ erts_nif_export_restore(c_p, nep, ret);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+
+ return ret;
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+int
+erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ BIF_RETTYPE result;
+ int exiting;
+ Process *dirty_shadow_proc;
+ ErtsBifFunc bf;
+ NifExport *nep;
+#ifdef DEBUG
+ Eterm *c_p_htop;
+ erts_aint32_t state;
+
+ ASSERT(!c_p->scheduler_data);
+ state = erts_smp_atomic32_read_nob(&c_p->state);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+#endif
+
+ nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ bf = (ErtsBifFunc) I[1];
+
+ erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ dirty_shadow_proc->freason = c_p->freason;
+ dirty_shadow_proc->fvalue = c_p->fvalue;
+ dirty_shadow_proc->ftrace = c_p->ftrace;
+ dirty_shadow_proc->cp = c_p->cp;
+ dirty_shadow_proc->i = c_p->i;
+
+#ifdef DEBUG
+ c_p_htop = c_p->htop;
+#endif
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*bf)(dirty_shadow_proc, reg, I);
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p_htop == c_p->htop);
+ ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(dirty_shadow_proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (is_value(result))
+ schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_result,
+ NULL, am_erts_internal, am_dirty_bif_result, 1, &result);
+ else if (dirty_shadow_proc->freason != TRAP) {
+ Eterm argv[2];
+ ASSERT(dirty_shadow_proc->freason <= MAX_SMALL);
+ argv[0] = make_small(dirty_shadow_proc->freason);
+ argv[1] = dirty_shadow_proc->fvalue;
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_exception, NULL, am_erts_internal,
+ am_dirty_bif_exception, 2, argv);
+ }
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* Dirty BIF did an ordinary trap... */
+ ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state)
+ & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)));
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_trap, (void *) dirty_shadow_proc->i,
+ am_erts_internal, am_dirty_bif_trap,
+ dirty_shadow_proc->arity, reg);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+ c_p->freason = dirty_shadow_proc->freason;
+ c_p->fvalue = dirty_shadow_proc->fvalue;
+ c_p->ftrace = dirty_shadow_proc->ftrace;
+ c_p->cp = dirty_shadow_proc->cp;
+ c_p->i = dirty_shadow_proc->i;
+ c_p->arity = dirty_shadow_proc->arity;
+ }
+
+ erts_flush_dirty_shadow_proc(dirty_shadow_proc);
+
+ return exiting;
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
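The ERTS_SCHED_BIF_TRAP_MARKER handling in call_bif() and erts_call_dirty_bif() above is a sentinel idiom: the dispatcher plants a marker in nep->func before invoking the BIF; erts_schedule_bif() overwrites that slot when a BIF reschedules itself, so finding the marker untouched afterwards proves the BIF merely returned or did an ordinary trap, and the NifExport state can be restored. A minimal standalone sketch of the pattern:

    #define TRAP_MARKER ((void *) 1)

    struct slot { void *func; };

    /* A callee that wants to be re-run later installs a continuation: */
    static void reschedule(struct slot *s, void *continuation)
    {
        s->func = continuation;
    }

    static void dispatch(struct slot *s, void (*callee)(struct slot *))
    {
        s->func = TRAP_MARKER;          /* plant the sentinel */
        callee(s);                      /* may or may not call reschedule() */
        if (s->func == TRAP_MARKER) {
            /* callee did not reschedule: restore dispatcher state here */
        }
        /* else: a continuation was installed; leave the slot alone */
    }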
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
@@ -5256,12 +5741,10 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
}
}
-#else
+#else
if (BIF_ARG_1 != am_true) {
BIF_ERROR(BIF_P,BADARG);
}
#endif
BIF_RET(am_true);
}
-
-
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 2203182a0d..01cca90a7a 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -29,17 +29,35 @@ extern Export *erts_convert_time_unit_trap;
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_4 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST Process* A__p, Eterm* BIF__ARGS, BeamInstr *A__I
+#define BIF_CALL_ARGS A__p, BIF__ARGS, A__I
+
+#define BIF_ALIST_0 BIF_ALIST
+#define BIF_ALIST_1 BIF_ALIST
+#define BIF_ALIST_2 BIF_ALIST
+#define BIF_ALIST_3 BIF_ALIST
+#define BIF_ALIST_4 BIF_ALIST
#define BIF_ARG_1 (BIF__ARGS[0])
#define BIF_ARG_2 (BIF__ARGS[1])
#define BIF_ARG_3 (BIF__ARGS[2])
#define BIF_ARG_4 (BIF__ARGS[3])
+#define BIF_I A__I
+
+/* NBIF_* is for bif calls from native code... */
+
+#define NBIF_ALIST Process* A__p, Eterm* BIF__ARGS
+#define NBIF_CALL_ARGS A__p, BIF__ARGS
+
+#define NBIF_ALIST_0 NBIF_ALIST
+#define NBIF_ALIST_1 NBIF_ALIST
+#define NBIF_ALIST_2 NBIF_ALIST
+#define NBIF_ALIST_3 NBIF_ALIST
+#define NBIF_ALIST_4 NBIF_ALIST
+
+typedef BIF_RETTYPE (*ErtsBifFunc)(BIF_ALIST);
+
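With the extended convention every BIF now receives the instruction pointer as a hidden third argument, which is exactly what erts_schedule_bif()/erts_reschedule_bif() (declared further down in this patch) need in order to suspend and later resume the call. A hypothetical BIF written against these macros (the BIF itself is made up; the macros and the reduction check are the ones defined in this header):

    BIF_RETTYPE my_example_2(BIF_ALIST_2)   /* sketch, not a real ERTS BIF */
    {
        if (is_not_atom(BIF_ARG_1))
            BIF_ERROR(BIF_P, BADARG);

        if (ERTS_IS_PROC_OUT_OF_REDS(BIF_P)) /* out of reductions: retry later */
            return erts_reschedule_bif(BIF_P, BIF__ARGS, BIF_I,
                                       my_example_2, ERTS_SCHED_NORMAL);

        BIF_RET(BIF_ARG_2);
    }

Native (HiPE) calls keep the old two-argument shape, which is why the separate NBIF_* family exists.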
#define ERTS_IS_PROC_OUT_OF_REDS(p) \
((p)->fcalls > 0 \
? 0 \
@@ -159,7 +177,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
return THE_NON_VALUE; \
} while (0)
@@ -167,7 +185,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
return THE_NON_VALUE; \
} while (0)
@@ -176,7 +194,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
return THE_NON_VALUE; \
@@ -186,7 +204,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -202,7 +220,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -210,7 +228,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -219,7 +237,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
(Ret) = THE_NON_VALUE; \
@@ -229,7 +247,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -480,6 +498,43 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+#endif
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc);
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type)
+{
+ return erts_schedule_bif(proc, argv, i, dbf, sched_type,
+ THE_NON_VALUE, THE_NON_VALUE, -1);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#ifdef ERL_WANT_HIPE_BIF_WRAPPER__
#ifndef HIPE
@@ -510,16 +565,16 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args); \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args) \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST); \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST) \
{ \
BIF_RETTYPE res; \
- hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
- res = BIF_NAME ## _ ## ARITY (c_p, args); \
- if (is_value(res) || c_p->freason != TRAP) { \
- hipe_unreserve_beam_trap_frame(c_p); \
+ hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, ARITY); \
+ res = nbif_impl_ ## BIF_NAME ## _ ## ARITY (NBIF_CALL_ARGS); \
+ if (is_value(res) || BIF_P->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(BIF_P); \
} \
return res; \
}
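The rewritten wrapper reserves a BEAM trap frame before calling into the BIF implementation and releases it unless the BIF actually trapped. Expanded by hand for a hypothetical BIF foo/1, HIPE_WRAPPER_BIF_DISABLE_GC(foo, 1) now produces roughly:

    BIF_RETTYPE
    nbif_impl_hipe_wrapper_foo_1(NBIF_ALIST);
    BIF_RETTYPE
    nbif_impl_hipe_wrapper_foo_1(NBIF_ALIST)
    {
        BIF_RETTYPE res;
        hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1);
        res = nbif_impl_foo_1(NBIF_CALL_ARGS);
        if (is_value(res) || BIF_P->freason != TRAP) {
            hipe_unreserve_beam_trap_frame(BIF_P);
        }
        return res;
    }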
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 80db4eb6ff..962b00ae7b 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2017. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,22 +23,24 @@
#
# Lines starting with '#' are ignored.
#
-# <bif-decl> ::= "bif" <bif> <C-name>* | "ubif" <bif> <C-name>*
+# <bif-decl> ::= "bif" <bif> <C-name>* |
+# "ubif" <bif> <C-name>* |
+# "gcbif" <bif> <C-name>*
# <bif> ::= <module> ":" <name> "/" <arity>
#
-# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper,
-# or rather; the trace entry point in the export entry is the same
-# as the normal entry point, and no trace wrapper is generated.
+# ubif: Use for operators and guard BIFs that never build anything
+# on the heap (such as tuple_size/1).
#
-# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
+# gcbif: Use for guard BIFs that may build on the heap (such as abs/1).
+#
+# bif: Use for all other BIFs.
#
# Add new BIFs to the end of the file.
#
-# Note: Guards BIFs require special support in the compiler (to be able to actually
-# call them from within a guard).
+# Note: Guard BIFs usually require special support in the compiler.
#
-ubif erlang:abs/1
+gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
bif erlang:adler32_combine/3
@@ -62,11 +64,11 @@ bif erlang:exit/1
bif erlang:exit/2
bif erlang:external_size/1
bif erlang:external_size/2
-ubif erlang:float/1
+gcbif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
-bif erlang:garbage_collect/0
+bif erts_internal:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
@@ -79,13 +81,15 @@ bif erlang:phash2/2
ubif erlang:hd/1
bif erlang:integer_to_list/1
bif erlang:is_alive/0
-ubif erlang:length/1
+gcbif erlang:length/1
bif erlang:link/1
bif erlang:list_to_atom/1
bif erlang:list_to_binary/1
bif erlang:list_to_float/1
bif erlang:list_to_integer/1
bif erlang:list_to_pid/1
+bif erlang:list_to_port/1
+bif erlang:list_to_ref/1
bif erlang:list_to_tuple/1
bif erlang:loaded/0
bif erlang:localtime/0
@@ -126,10 +130,10 @@ bif erlang:processes/0
bif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
-ubif erlang:round/1
+gcbif erlang:round/1
ubif erlang:self/0
bif erlang:setelement/3
-ubif erlang:size/1
+gcbif erlang:size/1
bif erlang:spawn/3
bif erlang:spawn_link/3
bif erlang:split_binary/2
@@ -139,7 +143,7 @@ bif erlang:term_to_binary/2
bif erlang:throw/1
bif erlang:time/0
ubif erlang:tl/1
-ubif erlang:trunc/1
+gcbif erlang:trunc/1
bif erlang:tuple_to_list/1
bif erlang:universaltime/0
bif erlang:universaltime_to_localtime/1
@@ -162,7 +166,7 @@ bif erts_internal:port_connect/2
bif erts_internal:request_system_task/3
bif erts_internal:request_system_task/4
-bif erts_internal:check_process_code/2
+bif erts_internal:check_process_code/1
bif erts_internal:map_to_tuple_keys/1
bif erts_internal:term_type/1
@@ -322,7 +326,7 @@ bif erlang:match_spec_test/3
# Bifs in ets module.
#
-bif ets:all/0
+bif ets:internal_request_all/0
bif ets:new/2
bif ets:delete/1
bif ets:delete/2
@@ -358,6 +362,7 @@ bif ets:select_reverse/1
bif ets:select_reverse/2
bif ets:select_reverse/3
bif ets:select_delete/2
+bif ets:select_replace/2
bif ets:match_spec_compile/1
bif ets:match_spec_run_r/3
@@ -389,6 +394,7 @@ bif erl_ddll:demonitor/1
#
# Bifs in the re module
#
+bif re:version/0
bif re:compile/1
bif re:compile/2
bif re:run/2
@@ -417,6 +423,9 @@ bif erts_debug:set_internal_state/2
bif erts_debug:display/1
bif erts_debug:dist_ext_to_term/2
bif erts_debug:instructions/0
+bif erts_debug:dirty_cpu/2
+bif erts_debug:dirty_io/2
+bif erts_debug:dirty/3
#
# Monitor testing bif's...
@@ -428,7 +437,10 @@ bif erts_debug:dump_links/1
#
# Lock counter bif's
#
-bif erts_debug:lock_counters/1
+bif erts_debug:lcnt_control/2
+bif erts_debug:lcnt_control/1
+bif erts_debug:lcnt_collect/0
+bif erts_debug:lcnt_clear/0
#
# New Bifs in R8.
@@ -452,8 +464,8 @@ bif error_logger:warning_map/0
bif erlang:get_module_info/1
bif erlang:get_module_info/2
ubif erlang:is_boolean/1
-bif string:to_integer/1
-bif string:to_float/1
+bif string:list_to_integer/1
+bif string:list_to_float/1
bif erlang:make_fun/3
bif erlang:iolist_size/1
bif erlang:iolist_to_binary/1
@@ -464,8 +476,8 @@ bif erlang:list_to_existing_atom/1
#
ubif erlang:is_bitstring/1
ubif erlang:tuple_size/1
-ubif erlang:byte_size/1
-ubif erlang:bit_size/1
+gcbif erlang:byte_size/1
+gcbif erlang:bit_size/1
bif erlang:list_to_bitstring/1
bif erlang:bitstring_to_list/1
@@ -517,8 +529,8 @@ bif erlang:binary_to_term/2
#
# The searching/splitting/substituting thingies
#
-ubif erlang:binary_part/2
-ubif erlang:binary_part/3
+gcbif erlang:binary_part/2
+gcbif erlang:binary_part/3
bif binary:compile_pattern/1
bif binary:match/2
@@ -610,7 +622,7 @@ bif os:unsetenv/1
bif re:inspect/2
ubif erlang:is_map/1
-ubif erlang:map_size/1
+gcbif erlang:map_size/1
bif maps:to_list/1
bif maps:find/2
bif maps:get/2
@@ -657,7 +669,13 @@ bif erlang:has_prepared_code_on_load/1
bif maps:take/2
#
-# Obsolete
+# New in 20.0
#
-bif erlang:hash/2
+gcbif erlang:floor/1
+gcbif erlang:ceil/1
+bif math:floor/1
+bif math:ceil/1
+bif math:fmod/2
+bif os:set_signal/2
+bif erts_internal:maps_to_list/2
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 4baee7900b..5eaf262cd8 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1293,8 +1293,11 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c ^ *y++;
x++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
}
else {
ErtsDigit b1, b2;
@@ -1312,7 +1315,9 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
x++; y++;
}
while(xl--) {
- *r++ = *x++;
+ DSUBb(*x,0,b1,c1);
+ *r++ = c1;
+ x++;
}
}
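The I_bxor fix above concerns the tail of the longer operand when an operand is negative: bignums are stored sign-magnitude, so a negative operand must be viewed through its two's complement, and the borrow carried by DSUBb has to keep propagating through the remaining digits instead of those digits being copied through untouched. The digit identity ~(x - b) == ~x + b is what makes subtract-then-invert equivalent to invert-then-add-carry. A standalone sketch of the digit-wise two's complement being emulated:

    #include <assert.h>
    #include <stdint.h>

    /* Two's complement of an n-digit magnitude (little-endian digits):
     * invert every digit and add 1, the carry rippling exactly while
     * the original digits are zero. */
    static void twos_complement(const uint32_t *x, int n, uint32_t *r)
    {
        unsigned carry = 1;
        for (int i = 0; i < n; i++) {
            r[i] = (uint32_t)~x[i] + carry;
            carry = carry && x[i] == 0;   /* overflow iff ~x[i] was all ones */
        }
    }

    int main(void)
    {
        uint32_t x[2] = { 0, 1 }, r[2];            /* value 2^32 */
        twos_complement(x, 2, r);
        assert(r[0] == 0 && r[1] == 0xFFFFFFFFu);  /* 2^64 - 2^32 */
        return 0;
    }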
}
@@ -2266,21 +2271,6 @@ Eterm big_minus(Eterm x, Eterm y, Eterm *r)
}
/*
-** Subtract a digit from big number
-*/
-Eterm big_minus_small(Eterm x, Eterm y, Eterm *r)
-{
- Eterm* xp = big_val(x);
-
- if (BIG_SIGN(xp))
- return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
- else
- return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
-}
-
-/*
** Multiply smallnums
*/
@@ -2412,16 +2402,6 @@ Eterm big_rem(Eterm x, Eterm y, Eterm *r)
}
}
-Eterm big_neg(Eterm x, Eterm *r)
-{
- Eterm* xp = big_val(x);
- dsize_t xsz = BIG_SIZE(xp);
- short xsgn = BIG_SIGN(xp);
-
- MOVE_DIGITS(BIG_V(r), BIG_V(xp), xsz);
- return big_norm(r, xsz, (short) !xsgn);
-}
-
Eterm big_band(Eterm x, Eterm y, Eterm *r)
{
Eterm* xp = big_val(x);
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 4a96d971c3..48efce20e7 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -118,9 +118,7 @@ Eterm big_minus(Eterm, Eterm, Eterm*);
Eterm big_times(Eterm, Eterm, Eterm*);
Eterm big_div(Eterm, Eterm, Eterm*);
Eterm big_rem(Eterm, Eterm, Eterm*);
-Eterm big_neg(Eterm, Eterm*);
-Eterm big_minus_small(Eterm, Uint, Eterm*);
Eterm big_plus_small(Eterm, Uint, Eterm*);
Eterm big_times_small(Eterm, Uint, Eterm*);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 071a356260..ca3e48e205 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -47,13 +47,8 @@ void
erts_init_binary(void)
{
/* Verify Binary alignment... */
- if ((((UWord) &((Binary *) 0)->orig_bytes[0]) % ((UWord) 8)) != 0) {
- /* I assume that any compiler should be able to optimize this
- away. If not, this test is not very expensive... */
- erts_exit(ERTS_ABORT_EXIT,
- "Internal error: Address of orig_bytes[0] of a Binary"
- " is *not* 8-byte aligned\n");
- }
+ ERTS_CT_ASSERT((offsetof(Binary,orig_bytes) % 8) == 0);
+ ERTS_CT_ASSERT((offsetof(ErtsMagicBinary,u.aligned.data) % 8) == 0);
erts_init_trap_export(&binary_to_list_continue_export,
am_erts_internal, am_binary_to_list_continue, 1,
@@ -89,7 +84,6 @@ new_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
@@ -107,7 +101,7 @@ new_binary(Process *p, byte *buf, Uint len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -126,7 +120,6 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
@@ -144,7 +137,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
pb->flags = 0;
/*
- * Miscellanous updates. Return the tagged binary.
+ * Miscellaneous updates. Return the tagged binary.
*/
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
@@ -355,9 +348,10 @@ typedef struct {
Uint bitoffs;
} ErtsB2LState;
-static void b2l_state_destructor(Binary *mbp)
+static int b2l_state_destructor(Binary *mbp)
{
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+ return 1;
}
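The magic-binary destructor signature changes from void to int here; judging from these call sites, returning 1 tells the runtime that destruction is complete and the binary may be deallocated, while a destructor that wanted to defer cleanup would presumably return 0. A sketch of a stateful destructor under that assumed contract (MyState and the cleanup helper are hypothetical):

    static int my_state_destructor(Binary *mbp)
    {
        MyState *sp = ERTS_MAGIC_BIN_DATA(mbp);
        ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == my_state_destructor);
        release_my_resources(sp);   /* hypothetical cleanup */
        return 1;                   /* binary can be freed immediately */
    }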
static BIF_RETTYPE
@@ -445,18 +439,17 @@ binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes,
sp->size = size;
sp->bitoffs = bitoffs;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
}
}
static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
-
ASSERT(BIF_P->flags & F_DISABLE_GC);
return binary_to_list_chunk(BIF_P,
@@ -729,12 +722,13 @@ list_to_binary_engine(ErtsL2BState *sp)
}
}
-static void
+static int
l2b_state_destructor(Binary *mbp)
{
ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+ return 1;
}
static ERTS_INLINE Eterm
@@ -792,8 +786,8 @@ list_to_binary_chunk(Eterm mb_eterm,
ERTS_L2B_STATE_MOVE(new_sp, sp);
sp = new_sp;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb_eterm = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ASSERT(is_value(mb_eterm));
@@ -839,9 +833,9 @@ list_to_binary_chunk(Eterm mb_eterm,
static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
ASSERT(BIF_P->flags & F_DISABLE_GC);
return list_to_binary_chunk(BIF_ARG_1,
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 74a8a3b852..76a0c5c716 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -178,19 +178,28 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
prefix = "";
}
- if (mon->type == MON_ORIGIN) {
- if (is_atom(mon->pid)) { /* dist by name */
- ASSERT(is_node_name_atom(mon->pid));
+ switch (mon->type) {
+ case MON_ORIGIN:
+ if (is_atom(mon->u.pid)) { /* dist by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
- mon->pid, mon->ref);
+ mon->u.pid, mon->ref);
} else if (is_atom(mon->name)){ /* local by name */
erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
erts_this_dist_entry->sysname, mon->ref);
} else { /* local and distributed by pid */
- erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->pid, mon->ref);
+ erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->u.pid, mon->ref);
}
- } else { /* MON_TARGET */
- erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->pid, mon->ref);
+ break;
+ case MON_TARGET:
+ erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->u.pid, mon->ref);
+ break;
+ case MON_NIF_TARGET: {
+ ErtsResource* rsrc = mon->u.resource;
+ erts_print(to, to_arg, "%s{from,{%T,%T},%T}", prefix, rsrc->type->module,
+ rsrc->type->name, mon->ref);
+ break;
+ }
}
}
@@ -230,9 +239,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
* Display the initial function name
*/
erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
- p->u.initial[INITIAL_MOD],
- p->u.initial[INITIAL_FUN],
- p->u.initial[INITIAL_ARI]);
+ p->u.initial.module,
+ p->u.initial.function,
+ p->u.initial.arity);
if (p->current != NULL) {
if (running) {
@@ -241,9 +250,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Current call: ");
}
erts_print(to, to_arg, "%T:%T/%bpu\n",
- p->current[0],
- p->current[1],
- p->current[2]);
+ p->current->module,
+ p->current->function,
+ p->current->arity);
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
@@ -290,9 +299,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "timeout");
else
erts_print(to, to_arg, "%T:%T/%bpu\n",
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- scb->ct[j]->code[2]);
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ scb->ct[j]->info.mfa.arity);
}
erts_print(to, to_arg, "\n");
}
@@ -503,6 +512,8 @@ do_break(void)
erts_free_read_env(mode);
#endif /* __WIN32__ */
+ ASSERT(erts_smp_thr_progress_is_blocking());
+
erts_printf("\n"
"BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n"
" (v)ersion (k)ill (D)b-tables (d)istribution\n");
@@ -579,7 +590,7 @@ do_break(void)
#endif
#ifdef DEBUG
case 't':
- erts_p_slpq();
+ /* erts_p_slpq(); */
return;
case 'b':
bin_check();
@@ -649,7 +660,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_refc_read(&bp->val->refc, 1));
+ erts_refc_read(&bp->val->intern.refc, 1));
}
}
if (printed) {
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ec6267711b..8a3d1b20b4 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -57,7 +57,8 @@ void erts_code_ix_init(void)
*/
erts_smp_atomic32_init_nob(&the_active_code_index, 0);
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
- erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+ erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_key_create(&has_code_write_permission,
"erts_has_code_write_permission");
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index 584a605771..a28b0cd36e 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,12 +56,55 @@
# endif
# include "sys.h"
#endif
+
+#include "beam_opcodes.h"
+
struct process;
#define ERTS_NUM_CODE_IX 3
typedef unsigned ErtsCodeIndex;
+typedef struct ErtsCodeMFA_ {
+ Eterm module;
+ Eterm function;
+ Uint arity;
+} ErtsCodeMFA;
+
+/*
+ * The ErtsCodeInfo structure is used both in the Export entry
+ * and in the code as the function header.
+ */
+
+/* If you change the size of this, you also have to update the code
+ in ops.tab to reflect the new func_info size */
+typedef struct ErtsCodeInfo_ {
+ BeamInstr op; /* OpCode(i_func_info) */
+ union {
+ struct generic_bp* gen_bp; /* Trace breakpoint */
+#ifdef HIPE
+ void (*ncallee)(void);
+ struct hipe_call_count* hcc;
+#endif
+ }u;
+ ErtsCodeMFA mfa;
+} ErtsCodeInfo;
+
+/* Get the code associated with an ErtsCodeInfo ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci);
+
+/* Get the ErtsCodeInfo from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I);
+
+/* Get the code associated with an ErtsCodeMFA ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa);
+
+/* Get the ErtsCodeMFA from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I);
/* Called once at emulator initialization.
*/
@@ -121,10 +164,47 @@ void erts_abort_staging_code_ix(void);
int erts_has_code_write_permission(void);
#endif
-
+/* module/function/arity can be NIL/NIL/-1 when the MFA is pointing to some
+ invalid code, for instance unloaded_fun. */
+#define ASSERT_MFA(MFA) \
+ ASSERT((is_atom((MFA)->module) || is_nil((MFA)->module)) && \
+ (is_atom((MFA)->function) || is_nil((MFA)->function)) && \
+ (((MFA)->arity >= 0 && (MFA)->arity < 1024) || (MFA)->arity == -1))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci)
+{
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return (BeamInstr*)(ci + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
+{
+ ErtsCodeInfo *ci = ((ErtsCodeInfo *)(((char *)(I)) - sizeof(ErtsCodeInfo)));
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return ci;
+}
+
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa)
+{
+ ASSERT_MFA(mfa);
+ return (BeamInstr*)(mfa + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
+{
+ ErtsCodeMFA *mfa = ((ErtsCodeMFA *)(((char *)(I)) - sizeof(ErtsCodeMFA)));
+ ASSERT_MFA(mfa);
+ return mfa;
+}
+
extern erts_smp_atomic32_t the_active_code_index;
extern erts_smp_atomic32_t the_staging_code_index;
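
The four accessors declared and defined above are pure pointer arithmetic over a fixed layout: the ErtsCodeInfo header is stored immediately before a function's first BeamInstr, with the ErtsCodeMFA at the tail of the header. A self-contained sketch of the round trips, using stand-in types rather than the emulator's (no union member, no op assertions):

    #include <assert.h>

    typedef unsigned long Instr;                      /* stand-in for BeamInstr */
    typedef struct { Instr op; Instr mfa[3]; } Info;  /* stand-in ErtsCodeInfo  */

    int main(void)
    {
        static Instr area[8];                 /* header + code, contiguous      */
        Info  *ci   = (Info *) area;
        Instr *code = (Instr *)(ci + 1);      /* erts_codeinfo_to_code()        */
        Instr *mfa  = ci->mfa;                /* the MFA sits at header's tail  */

        assert((Info *)((char *)code - sizeof(Info)) == ci); /* code_to_codeinfo */
        assert(mfa + 3 == code);                              /* codemfa_to_code */
        return 0;
    }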
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index de68adbeb6..fefde256d7 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -40,7 +40,8 @@ static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
/*
* Copy object "obj" to process p.
*/
-Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
+Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
+{
if (!is_immed(obj)) {
Uint size = size_object(obj);
Eterm* hp = HAllocX(to, size, extra);
@@ -70,33 +71,46 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
* Return the "flat" size of the object.
*/
-Uint size_object(Eterm obj)
+#define in_literal_purge_area(PTR) \
+ (lit_purge_ptr && ( \
+ (lit_purge_ptr <= (PTR) && \
+ (PTR) < (lit_purge_ptr + lit_purge_sz))))
+
+Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
Uint sum = 0;
Eterm* ptr;
int arity;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
-
DECLARE_ESTACK(s);
-
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));
for (;;) {
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
- sum += 2;
ptr = list_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ sum += 2;
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
{
- Eterm hdr = *boxed_val(obj);
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
ASSERT(is_header(hdr));
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
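
The in_literal_purge_area() test introduced above is a plain half-open range check: literals are normally skipped when sizing (and later when copying), unless a purge is in progress and the term's pointer falls inside [lit_purge_ptr, lit_purge_ptr + lit_purge_sz). A standalone sketch of the same predicate:

    #include <stddef.h>

    typedef unsigned long Word;     /* stand-in for Eterm */

    /* Half-open bounds test; a NULL area means no purge is in progress,
     * so nothing can be inside it. */
    static int in_purge_area(const Word *ptr, const Word *area, size_t sz)
    {
        return area != NULL && area <= ptr && ptr < area + sz;
    }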
@@ -272,17 +286,8 @@ do { \
(dst) = result; \
} while(0)
-#define BOXED_VISITED_MASK ((Eterm) 3)
-#define BOXED_VISITED ((Eterm) 1)
-#define BOXED_SHARED_UNPROCESSED ((Eterm) 2)
-#define BOXED_SHARED_PROCESSED ((Eterm) 3)
-
#define COUNT_OFF_HEAP (0)
-#define IN_LITERAL_PURGE_AREA(info, ptr) \
- ((info)->range_ptr && ( \
- (info)->range_ptr <= (ptr) && \
- (ptr) < ((info)->range_ptr + (info)->range_sz)))
/*
* Return the real size of an object and find sharing information
* This currently returns the same as erts_debug:size/1.
@@ -599,7 +604,7 @@ cleanup:
/*
* Copy a structure to a heap.
*/
-Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz)
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
{
char* hstart;
Uint hsize;
@@ -616,6 +621,8 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
Eterm hdr;
Eterm *hend;
int i;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm org_obj = obj;
Uint org_sz = sz;
@@ -651,7 +658,6 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy:
while (hp != htop) {
obj = *hp;
-
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1:
hp++;
@@ -667,6 +673,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_list:
tailp = argp;
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
for (;;) {
tp = tailp;
elem = CAR(objp);
@@ -674,18 +684,23 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
hbot -= 2;
CAR(hbot) = elem;
tailp = &CDR(hbot);
- }
- else {
+ } else {
CAR(htop) = elem;
tailp = &CDR(htop);
htop += 2;
}
*tp = make_list(tailp - 1);
obj = CDR(objp);
+
if (!is_list(obj)) {
break;
}
objp = list_val(obj);
+
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
}
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
@@ -695,7 +710,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
-
+
case TAG_PRIMARY_BOXED:
if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
hp++;
@@ -705,6 +720,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_boxed:
objp = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *argp = obj;
+ break;
+ }
hdr = *objp;
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -742,7 +761,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
}
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -765,7 +784,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
extra_bytes = 1;
} else {
extra_bytes = 0;
- }
+ }
real_size = size+extra_bytes;
objp = binary_val(real_bin);
if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
@@ -780,7 +799,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
} else {
ProcBin* from = (ProcBin *) objp;
ProcBin* to;
-
+
ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
if (from->flags) {
erts_emasculate_writable_binary(from);
@@ -790,7 +809,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -834,20 +853,24 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing *etp = (ExternalThing *) htop;
-
+ ExternalThing *etp = (ExternalThing *) objp;
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ L_off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) htop;
i = thing_arityval(hdr) + 1;
+ *argp = make_boxed(htop);
tp = htop;
while (i--) {
*htop++ = *objp++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)etp;
- erts_smp_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
- *argp = make_external(tp);
}
break;
case MAP_SUBTAG:
@@ -875,6 +898,13 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case BIN_MATCHSTATE_SUBTAG:
erts_exit(ERTS_ABORT_EXIT,
"copy_struct: matchstate term not allowed");
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(objp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto L_off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
i = thing_arityval(hdr)+1;
hbot -= i;
@@ -900,6 +930,12 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
*bsz = hend - hbot;
} else {
#ifdef DEBUG
+ if (!eq(org_obj, res)) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " not equal to copy %T\n",
+ org_obj, res);
+ }
if (htop != hbot)
erts_exit(ERTS_ABORT_EXIT,
"Internal error in copy_struct() when copying %T:"
@@ -1036,6 +1072,8 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Uint e;
unsigned sz;
Eterm* ptr;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1081,7 +1119,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1132,7 +1170,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1298,6 +1336,8 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
Eterm* resp;
Eterm *hbot, *hend;
unsigned remaining;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1347,11 +1387,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1415,11 +1455,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!in_literal_purge_area(ptr)) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1545,7 +1585,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
while (sz-- > 0) {
*hp++ = *ptr++;
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -1604,7 +1644,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -1615,20 +1655,33 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
}
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG: {
- ExternalThing *etp = (ExternalThing *) hp;
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing *etp = (ExternalThing *) ptr;
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) hp;
sz = thing_arityval(hdr);
- *resp = make_external(hp);
+ *resp = make_boxed(hp);
*hp++ = hdr;
ptr++;
while (sz-- > 0) {
*hp++ = *ptr++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*) etp;
- erts_smp_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
goto cleanup_next;
}
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(ptr)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
sz = thing_arityval(hdr);
*resp = make_boxed(hp);
@@ -1794,7 +1847,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case REFC_BINARY_SUBTAG:
{
ProcBin* pb = (ProcBin *) (tp-1);
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
goto off_heap_common;
@@ -1825,6 +1878,15 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
off_heap->first = ohh;
}
break;
+ case REF_SUBTAG: {
+ ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
+ if (is_magic_ref_thing(rtp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_common;
+ }
+ /* Fall through... */
+ }
default:
{
int tari = header_arity(val);
@@ -1923,8 +1985,11 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
if (is_header(val)) {
struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ move_boxed(&ptr, val, &hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hdr))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -1937,7 +2002,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, hp, &dummy_ref);
+ move_cons(&ptr, val, &hp, &dummy_ref);
ptr += 2;
}
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 3611eeee02..09fdb897f5 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -25,6 +25,7 @@
/* define this to get a lot of debug output */
/* #define ERTS_DIST_MSG_DBG */
+/* #define ERTS_RAW_DIST_MSG_DBG */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -258,12 +259,12 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (!rp)
goto done;
if (mon->type == MON_ORIGIN) {
- /* local pid is beeing monitored */
+ /* local pid is being monitored */
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); nope, can happen during process exit */
if (rmon != NULL) {
@@ -277,10 +278,11 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
if (rmon != NULL) {
+ ASSERT(rmon->type == MON_ORIGIN);
ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
watched = (is_atom(rmon->name)
? TUPLE2(lhp, rmon->name, dep->sysname)
- : rmon->pid);
+ : rmon->u.pid);
#ifdef ERTS_SMP
rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
@@ -627,7 +629,6 @@ alloc_dist_obuf(Uint size)
ErtsDistOutputBuf *obuf;
Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
Binary *bin = erts_bin_drv_alloc(obuf_size);
- erts_refc_init(&bin->refc, 1);
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
@@ -641,8 +642,7 @@ free_dist_obuf(ErtsDistOutputBuf *obuf)
{
Binary *bin = ErtsDistOutputBuf2Binary(obuf);
ASSERT(obuf->dbg_pattern == ERTS_DIST_OUTPUT_BUF_DBG_PATTERN);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
static ERTS_INLINE Sint
@@ -712,7 +712,7 @@ static void clear_dist_entry(DistEntry *dep)
}
}
-void erts_dsend_context_dtor(Binary* ctx_bin)
+int erts_dsend_context_dtor(Binary* ctx_bin)
{
ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
switch (ctx->dss.phase) {
@@ -729,6 +729,8 @@ void erts_dsend_context_dtor(Binary* ctx_bin)
}
if (ctx->dep_to_deref)
erts_deref_dist_entry(ctx->dep_to_deref);
+
+ return 1;
}
Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
@@ -740,7 +742,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
erts_dsend_context_dtor);
struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
- Eterm* hp = HAlloc(p, PROC_BIN_SIZE);
+ Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
@@ -749,7 +751,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
dst->ctx.dss.acmp = &dst->acm;
}
- return erts_mk_magic_binary_term(&hp, &MSO(p), ctx_bin);
+ return erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
}
@@ -794,7 +796,7 @@ erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
}
-/* A local process that's beeing monitored by a remote one exits. We send:
+/* A local process that's being monitored by a remote one exits. We send:
{DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason},
which is rather sad as only the ref is needed, no pid's... */
int
@@ -1388,7 +1390,7 @@ int erts_net_message(Port *prt,
if (mon == NULL) {
break;
}
- watched = mon->pid;
+ watched = mon->u.pid;
erts_destroy_monitor(mon);
rp = erts_pid2proc_opt(NULL, 0,
watched, ERTS_PROC_LOCK_LINK,
@@ -1546,7 +1548,7 @@ int erts_net_message(Port *prt,
if (mon == NULL) {
break;
}
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
erts_destroy_monitor(mon);
if (rp == NULL) {
@@ -1563,7 +1565,7 @@ int erts_net_message(Port *prt,
watched = (is_not_nil(mon->name)
? TUPLE2(&lhp[0], mon->name, sysname)
- : mon->pid);
+ : mon->u.pid);
erts_queue_monitor_message(rp, &rp_locks,
ref, am_process, watched, reason);
@@ -2412,21 +2414,21 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
void *arg = ((struct print_to_data *) vptdp)->arg;
Process *rp;
ErtsMonitor *rmon;
- rp = erts_proc_lookup(mon->pid);
+ rp = erts_proc_lookup(mon->u.pid);
if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
- erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
+ erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->u.pid);
} else if (mon->type == MON_ORIGIN) {
/* Local pid is being monitored */
erts_print(to, arg, "Remotely monitored by: %T %T\n",
- mon->pid, rmon->pid);
+ mon->u.pid, rmon->u.pid);
} else {
- erts_print(to, arg, "Remote monitoring: %T ", mon->pid);
- if (is_not_atom(rmon->pid))
- erts_print(to, arg, "%T\n", rmon->pid);
+ erts_print(to, arg, "Remote monitoring: %T ", mon->u.pid);
+ if (is_not_atom(rmon->u.pid))
+ erts_print(to, arg, "%T\n", rmon->u.pid);
else
erts_print(to, arg, "{%T, %T}\n",
rmon->name,
- rmon->pid); /* which in this case is the
+ rmon->u.pid); /* which in this case is the
remote system name... */
}
}
@@ -3221,7 +3223,8 @@ static ErtsNodesMonitor *nodes_monitors_end;
static void
init_nodes_monitors(void)
{
- erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
nodes_monitors = NULL;
nodes_monitors_end = NULL;
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index e82b416286..3e17645997 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -115,8 +115,8 @@ extern int erts_is_alive;
* erts_dsig_prepare() prepares a send of a distributed signal.
* One of the values defined below are returned. If the returned
* value is another than ERTS_DSIG_PREP_CONNECTED, the
- * distributed signal cannot be sent before apropriate actions
- * have been taken. Apropriate actions would typically be setting
+ * distributed signal cannot be sent before appropriate actions
+ * have been taken. Appropriate actions would typically be setting
* up the connection.
*/
@@ -375,7 +375,7 @@ extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
-extern void erts_dsend_context_dtor(Binary*);
+extern int erts_dsend_context_dtor(Binary*);
extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
extern int erts_dist_command(Port *prt, int reds);
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
index 6f70d5961e..15ea182976 100644
--- a/erts/emulator/beam/dtrace-wrapper.h
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2016.
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2017.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,7 +74,7 @@
#if defined(_SDT_PROBE) && !defined(STAP_PROBE11)
/* SLF: This is Ubuntu 11-style SystemTap hackery */
-/* work arround for missing STAP macro */
+/* workaround for missing STAP macro */
#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
_SDT_PROBE(provider, name, 11, \
(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 214fb1f2af..c7ab444c96 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,9 +44,11 @@
#include "erl_hl_timer.h"
#include "erl_cpu_topology.h"
#include "erl_thr_queue.h"
+#include "erl_nfunc_sched.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
+#include "erl_bif_unique.h"
#define GET_ERL_GF_ALLOC_IMPL
#include "erl_goodfit_alloc.h"
@@ -151,8 +153,7 @@ typedef struct {
int internal;
Uint req_sched;
Process *proc;
- Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ ErtsIRefStorage iref;
int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
} ErtsAllocInfoReq;
@@ -373,10 +374,16 @@ set_default_exec_alloc_opts(struct au_init *ip)
ip->init.util.rmbcmt = 0;
ip->init.util.acul = 0;
+# ifdef ERTS_HAVE_EXEC_MMAPPER
ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
ip->init.util.mseg_mmapper = &erts_exec_mmapper;
+# else
+ ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
+# endif
}
#endif /* ERTS_ALC_A_EXEC */
@@ -657,6 +664,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= sizeof(ErtsDrvEventDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
= sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
+ = sizeof(ErtsNifSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
= sizeof(ErtsMessageRef);
#ifdef ERTS_SMP
@@ -669,10 +678,12 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
- = erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
-#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
+ = sizeof(NifExportTrace);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
+ = sizeof(ErtsNSchedMagicRefTableEntry);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
+ = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
#ifdef HARD_DEBUG
hdbg_init();
@@ -1571,7 +1582,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
break;
case 'X':
if (has_prefix("scs", argv[i]+3)) {
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
init->mseg.exec_mmap.scs =
#endif
get_mb_value(argv[i]+6, argv, &i);
@@ -2425,12 +2436,10 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
add_fix_values(&size.processes,
&size.processes_used,
fi,
- ERTS_ALC_T_ABIF_TIMER);
-#endif
+ ERTS_ALC_T_NIF_EXP_TRACE);
}
if (want.atom || want.atom_used) {
@@ -2852,7 +2861,7 @@ erts_allocator_info(fmtfn_t to, void *arg)
erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
#endif
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n");
erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis);
#endif
@@ -3010,7 +3019,7 @@ erts_allocator_options(void *proc)
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
terms[length++] = ERTS_MAKE_AM("literal_mmap");
#endif
-#ifdef ERTS_ALC_A_EXEC
+#ifdef ERTS_HAVE_EXEC_MMAPPER
terms[length++] = ERTS_MAKE_AM("exec_mmap");
#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
@@ -3102,7 +3111,7 @@ reply_alloc_info(void *vair)
# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
struct erts_mmap_info_struct mmap_info_literal;
# endif
-# ifdef ERTS_ALC_A_EXEC
+# ifdef ERTS_HAVE_EXEC_MMAPPER
struct erts_mmap_info_struct mmap_info_exec;
# endif
#endif
@@ -3126,9 +3135,10 @@ reply_alloc_info(void *vair)
while (1) {
if (hpp)
- ref_copy = STORE_NC(hpp, ohp, air->ref);
+ ref_copy = erts_iref_storage_make_ref(&air->iref,
+ hpp, ohp, 0);
else
- *szp += REF_THING_SIZE;
+ *szp += erts_iref_storage_heap_size(&air->iref);
ai_list = NIL;
for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
@@ -3232,7 +3242,7 @@ reply_alloc_info(void *vair)
erts_bld_atom(hpp,szp,"literal_mmap"),
ainfo);
# endif
-# ifdef ERTS_ALC_A_EXEC
+# ifdef ERTS_HAVE_EXEC_MMAPPER
ai_list = erts_bld_cons(hpp, szp,
ainfo, ai_list);
ainfo = (air->only_sz ? NIL :
@@ -3357,8 +3367,10 @@ reply_alloc_info(void *vair)
erts_smp_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0) {
+ erts_iref_storage_clean(&air->iref);
aireq_free(air);
+ }
}
int
@@ -3371,7 +3383,6 @@ erts_request_alloc_info(struct process *c_p,
ErtsAllocInfoReq *air = aireq_alloc();
Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
Eterm alist;
- Eterm *hp;
int airix = 0, ai;
air->req_sched = erts_get_scheduler_id();
@@ -3385,8 +3396,7 @@ erts_request_alloc_info(struct process *c_p,
if (is_not_internal_ref(ref))
return 0;
- hp = &air->ref_heap[0];
- air->ref = STORE_NC(&hp, NULL, ref);
+ erts_iref_storage_save(&air->iref, ref);
if (is_not_list(allocs))
return 0;
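
Taken together, the hunks above replace the request's fixed ref heap with the new ErtsIRefStorage API. Based only on the calls visible in this diff, the lifecycle is: save the ref when the request is posted, account for it in the reply's sizing pass, materialise it again when the reply heap is built, and clean the storage once the last holder is done without building a reply. Assembled as a sketch (fragments, not a compilable unit):

    ErtsIRefStorage iref;

    erts_iref_storage_save(&iref, ref);               /* at request time       */

    *szp += erts_iref_storage_heap_size(&iref);       /* reply sizing pass     */

    ref_copy = erts_iref_storage_make_ref(&iref,      /* reply build pass      */
                                          hpp, ohp, 0);

    erts_iref_storage_clean(&iref);                   /* last holder, no reply */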
@@ -3502,28 +3512,6 @@ void erts_allctr_wrapper_pre_unlock(void)
}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Deprecated functions *
- * *
- * These functions are still defined since "non-OTP linked in drivers" may *
- * contain (illegal) calls to them. *
-\* */
-
-/* --- DO *NOT* USE THESE FUNCTIONS --- */
-
-void *sys_alloc(Uint sz)
-{ return erts_alloc_fnf(ERTS_ALC_T_UNDEF, sz); }
-void *sys_realloc(void *ptr, Uint sz)
-{ return erts_realloc_fnf(ERTS_ALC_T_UNDEF, ptr, sz); }
-void sys_free(void *ptr)
-{ erts_free(ERTS_ALC_T_UNDEF, ptr); }
-void *safe_alloc(Uint sz)
-{ return erts_alloc(ERTS_ALC_T_UNDEF, sz); }
-void *safe_realloc(void *ptr, Uint sz)
-{ return erts_realloc(ERTS_ALC_T_UNDEF, ptr, sz); }
-
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_alc_test() is only supposed to be used for testing. *
* *
@@ -3831,7 +3819,8 @@ hdbg_init(void)
hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
free_hdbg_mblks = &hdbg_mblks[0];
used_hdbg_mblks = NULL;
- erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
static void *check_memory_fence(void *ptr,
@@ -4129,12 +4118,20 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *dptr;
Uint size;
+ int free_pattern = n;
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
- sys_memset((void *) dptr, n, size + FENCE_SZ);
+#ifdef ERTS_ALC_A_EXEC
+# if defined(__i386__) || defined(__x86_64__)
+ if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
+ free_pattern = 0x0f; /* Illegal instruction */
+ }
+# endif
+#endif
+ sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
(*real_af->free)(n, real_af->extra, dptr);
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 56a3b73bf9..97a1cf1308 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -173,13 +173,6 @@ __decl_noreturn void erts_realloc_n_enomem(ErtsAlcType_t,void*,Uint)
__decl_noreturn void erts_alc_fatal_error(int,int,ErtsAlcType_t,...)
__noreturn;
-/* --- DO *NOT* USE THESE DEPRECATED FUNCTIONS --- Instead use: */
-void *safe_alloc(Uint) __deprecated; /* erts_alloc() */
-void *safe_realloc(void *, Uint) __deprecated; /* erts_realloc() */
-void sys_free(void *) __deprecated; /* erts_free() */
-void *sys_alloc(Uint ) __deprecated; /* erts_alloc_fnf() */
-void *sys_realloc(void *, Uint) __deprecated; /* erts_realloc_fnf() */
-
#undef ERTS_HAVE_IS_IN_LITERAL_RANGE
#if defined(ARCH_32) || defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
# define ERTS_HAVE_IS_IN_LITERAL_RANGE
@@ -351,7 +344,8 @@ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_smp_spinlock_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_smp_spin_lock(&NAME##_lck), \
erts_smp_spin_unlock(&NAME##_lck))
@@ -365,7 +359,8 @@ ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_mtx_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock"), \
+ erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_mtx_lock(&NAME##_lck), \
erts_mtx_unlock(&NAME##_lck))
@@ -378,7 +373,8 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
static erts_spinlock_t NAME##_lck; \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
- erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
@@ -455,7 +451,7 @@ NAME##_free(TYPE *p) \
}
#ifdef DEBUG
-#define ERTS_PRE_ALLOC_SIZE(SZ) 2
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
#else
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
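
A worked example of the new DEBUG formula: SZ = 100 gives 100/10 + 10 = 20 pre-allocated entries, SZ = 500 gives 60, and any SZ of 1000 or more is capped at 100, where the old definition pinned every debug pool at 2 entries regardless of SZ.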
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6e8710eb8a..50a1d97dd5 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2016. All Rights Reserved.
+# Copyright Ericsson AB 2003-2017. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -65,7 +65,7 @@
# --- Allocator declarations -------------------------------------------------
#
-# If, and only if, the same thread performes *all* allocations,
+# If, and only if, the same thread performs *all* allocations,
# reallocations and deallocations of all memory types that are handled
# by a specific allocator (<ALLOCATOR> in type declaration), set
# <MULTI_THREAD> for this specific allocator to false; otherwise, set
@@ -168,7 +168,6 @@ type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
-# type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
@@ -227,6 +226,7 @@ type DB_DMC_ERROR ETS ETS db_dmc_error
type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
type DB_TERM ETS ETS db_term
type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
+type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
type INSTR_INFO LONG_LIVED SYSTEM instr_info
type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
@@ -280,6 +280,11 @@ type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
type DIRTY_START STANDARD PROCESSES dirty_start
type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived
+type MREF_NSCHED_ENT FIXED_SIZE SYSTEM nsched_magic_ref_entry
+type MREF_ENT STANDARD SYSTEM magic_ref_entry
+type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets
+type MREF_TAB LONG_LIVED SYSTEM magic_ref_table
+type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
@@ -345,8 +350,9 @@ type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths
+if hipe
-# Currently most hipe code use this type.
-type HIPE SYSTEM SYSTEM hipe_data
+type HIPE_LL LONG_LIVED SYSTEM hipe_long_lived
+type HIPE_SL SHORT_LIVED SYSTEM hipe_short_lived
+type HIPE_STK STANDARD SYSTEM hipe_nstack
+if exec_alloc
type HIPE_EXEC EXEC CODE hipe_code
@@ -362,6 +368,13 @@ type SSB SHORT_LIVED PROCESSES ssb
+endif
++if lcnt
+
+type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
+type LCNT_VECTOR SHORT_LIVED SYSTEM lcnt_sample_vector
+
++endif
+
type DEBUG SHORT_LIVED SYSTEM debugging
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
@@ -376,7 +389,8 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
-type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
+type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
+type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
@@ -396,6 +410,7 @@ type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
+type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
type POLLSET LONG_LIVED SYSTEM pollset
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 230ca6ccbb..af86ad0548 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -821,7 +821,7 @@ static void clear_literal_range(void* start, Uint size)
#if HAVE_ERTS_MSEG
-void*
+static void*
erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
@@ -832,7 +832,7 @@ erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
return res;
}
-void*
+static void*
erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
Uint old_size, Uint *new_size_p)
{
@@ -845,7 +845,7 @@ erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
return res;
}
-void
+static void
erts_alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
{
erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
@@ -907,7 +907,9 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
#elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
-/* Used by literal allocator that has its own mmapper (super carrier) */
+/* For allocators that have their own mmapper (super carrier),
+ * like literal_alloc and exec_alloc on amd64
+ */
void*
erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
@@ -948,9 +950,53 @@ erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
}
#endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
-#endif /* HAVE_ERTS_MSEG */
+#if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+
+/*
+ * For exec_alloc on non-amd64 that just needs memory with PROT_EXEC
+ */
+void*
+erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
+
+ if (res) {
+ int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
void*
+erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
+{
+ void *res;
+
+ if (seg && old_size) {
+ int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
+ if (res) {
+ int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void
+erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
+{
+ int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ erts_alcu_mseg_dealloc(allctr, seg, size, flags);
+}
+#endif /* ERTS_ALC_A_EXEC && !ERTS_HAVE_EXEC_MMAPPER */
+
+#endif /* HAVE_ERTS_MSEG */
+
+static void*
erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
{
void *res;
@@ -967,7 +1013,7 @@ erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
return res;
}
-void*
+static void*
erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign)
{
void *res;
@@ -989,7 +1035,7 @@ erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size,
return res;
}
-void
+static void
erts_alcu_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
{
#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
@@ -1361,6 +1407,7 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
res = fix->list;
if (res) {
@@ -1372,8 +1419,6 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
fix_cpool_check_shrink(allctr, type, fix, NULL);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (size >= allctr->sbc_threshold) {
Block_t *blk;
blk = create_carrier(allctr, size, CFLG_SBC);
@@ -1493,6 +1538,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used++;
@@ -1515,8 +1561,6 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (fix->u.nocpool.limit < fix->u.nocpool.used)
fix->u.nocpool.limit = fix->u.nocpool.used;
if (fix->u.nocpool.max_used < fix->u.nocpool.used)
@@ -5142,7 +5186,7 @@ erts_alcu_sz_info(Allctr_t *allctr,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -5228,7 +5272,7 @@ erts_alcu_info(Allctr_t *allctr,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -6039,7 +6083,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(FreeBlkFtr_t));
-#if ERTS_SMP
+#ifdef ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
sz += (init->fix ?
@@ -6090,18 +6134,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
#ifdef USE_THREADS
if (init->ts) {
allctr->thread_safe = 1;
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC,1);
-#else
- erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),1);
-#endif /*ERTS_ENABLE_LOCK_COUNT*/
-
+
+ erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+
#ifdef DEBUG
allctr->debug.saved_tid = 0;
#endif
@@ -6280,7 +6316,8 @@ erts_alcu_init(AlcUInit_t *init)
carrier_alignment = sizeof(Unit_t);
#endif
- erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
atoms_initialized = 0;
initialized = 1;
@@ -6548,3 +6585,45 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
+ if(!allocator->thread_safe) {
+ return;
+ }
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
+ "alcu_allocator", make_small(allocator->alloc_no),
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ } else {
+ erts_lcnt_uninstall(&allocator->mutex.lcnt);
+ }
+}
+
+static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
+ if(tspec->enabled) {
+ int i;
+
+ for(i = 0; i < tspec->size; i++) {
+ lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
+ }
+ }
+}
+
+void erts_lcnt_update_allocator_locks(int enable) {
+ int i;
+
+ for(i = ERTS_ALC_A_MIN; i < ERTS_ALC_A_MAX; i++) {
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
+
+ if(ai->enabled && ai->alloc_util) {
+ if(ai->thr_spec) {
+ lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
+ } else {
+ lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
+ }
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
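
erts_lcnt_update_allocator_locks() above walks every enabled alloc_util allocator, including each instance of the thread-specific ones, and installs or uninstalls lock-counting info on its mutex. Only the definition is visible in this diff, so the intended call sites are an assumption; presumably it is invoked when lock counting is toggled at runtime:

    erts_lcnt_update_allocator_locks(1);    /* start counting allocator locks */
    erts_lcnt_update_allocator_locks(0);    /* stop counting and uninstall    */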
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 81180382af..73c467aa0a 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -195,10 +195,6 @@ extern UWord erts_literal_vspace_map[];
# define ERTS_VSPACE_WORD_BITS (sizeof(UWord)*8)
#endif
-void* erts_alcu_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
-void* erts_alcu_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
-void erts_alcu_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
-
#if HAVE_ERTS_MSEG
# if defined(ARCH_32)
void* erts_alcu_literal_32_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
@@ -210,17 +206,24 @@ void* erts_alcu_mmapper_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
void* erts_alcu_mmapper_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
void erts_alcu_mmapper_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
# endif
+
+# if defined(ERTS_ALC_A_EXEC) && !defined(ERTS_HAVE_EXEC_MMAPPER)
+void* erts_alcu_exec_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_exec_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_exec_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+# endif
#endif /* HAVE_ERTS_MSEG */
-void* erts_alcu_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
-void* erts_alcu_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint old_size, int superalign);
-void erts_alcu_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#ifdef ARCH_32
void* erts_alcu_literal_32_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
void* erts_alcu_literal_32_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint old_size, int superalign);
void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_allocator_locks(int enable);
+#endif
+
#endif /* !ERL_ALLOC_UTIL__ */
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
@@ -674,7 +677,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 84254af0c2..9a93034fcb 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -194,7 +194,8 @@ erts_init_async(void)
ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
async->init.data.no_initialized = 0;
- erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&async->init.data.cnd);
erts_atomic_init_nob(&async->init.data.id, 0);
@@ -213,7 +214,8 @@ erts_init_async(void)
for (i = 1; i <= erts_no_schedulers; i++) {
ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
- erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
qinit.arg = (void *) (SWord) i;
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
index 473c7686e5..4b470e7679 100644
--- a/erts/emulator/beam/erl_async.h
+++ b/erts/emulator/beam/erl_async.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,7 +27,6 @@ extern int erts_async_max_threads;
#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
extern int erts_async_thread_suggested_stack_size;
-#ifdef USE_THREADS
#ifdef ERTS_SMP
/*
@@ -47,6 +46,10 @@ extern int erts_async_thread_suggested_stack_size;
# define ERTS_USE_ASYNC_READY_Q 0
#endif
+#ifndef USE_THREADS
+# undef ERTS_USE_ASYNC_READY_Q
+# define ERTS_USE_ASYNC_READY_Q 0
+#endif /* !USE_THREADS */
#if ERTS_USE_ASYNC_READY_Q
int erts_check_async_ready(void *);
int erts_async_ready_clean(void *, void *);
@@ -58,10 +61,7 @@ void *erts_get_async_ready_queue(Uint sched_id);
#endif
#endif /* ERTS_USE_ASYNC_READY_Q */
-#endif /* USE_THREADS */
-
void erts_init_async(void);
void erts_exit_flush_async(void);
-
#endif /* ERL_ASYNC_H__ */
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 6e10980b6b..756c7dce05 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -239,13 +239,13 @@ static void dump_ac_node(ACNode *node, int indent, int ch);
/*
* Callback for the magic binary
*/
-static void cleanup_my_data_ac(Binary *bp)
+static int cleanup_my_data_ac(Binary *bp)
{
- return;
+ return 1;
}
-static void cleanup_my_data_bm(Binary *bp)
+static int cleanup_my_data_bm(Binary *bp)
{
- return;
+ return 1;
}
/*
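
Magic-binary destructors change signature from void to int in this commit; every destructor updated in this file now returns 1. A minimal destructor in the new style (reading the return value as "destruction complete, the binary may be freed" is an assumption; only the signatures are visible here, and MyState/release_resource are hypothetical):

    typedef struct { void *resource; } MyState;     /* hypothetical state */

    static int my_magic_dtor(Binary *bp)
    {
        MyState *st = ERTS_MAGIC_BIN_DATA(bp);
        release_resource(st->resource);             /* hypothetical cleanup */
        return 1;                                   /* done; binary may go  */
    }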
@@ -380,7 +380,7 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
qbuff[qt++] = child;
/* Search for correct failure function, follow the parent's
failure function until you find a similar transition
- funtion to this child's */
+ function to this child's */
r = parent->h;
while (r != NULL && r->g[i] == NULL) {
r = r->h;
@@ -1010,8 +1010,8 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1)
if (do_binary_match_compile(BIF_ARG_1,&tag,&bin)) {
BIF_ERROR(BIF_P,BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE+3);
- ret = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE+3);
+ ret = erts_mk_magic_ref(&hp, &MSO(BIF_P), bin);
ret = TUPLE2(hp, tag, ret);
BIF_RET(ret);
}
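
This hunk is the producer half of the magic-ref migration: instead of exposing the compiled pattern as a magic ProcBin term, the BIF now reserves an ERTS_MAGIC_REF_THING_SIZE heap slot and wraps the Binary in a magic ref. The consumer half appears in the hunks below; the paired pattern, using only calls from this diff, is:

    /* producer: hand a magic Binary to Erlang code as an opaque ref */
    Eterm *hp  = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
    Eterm  ref = erts_mk_magic_ref(&hp, &MSO(p), bin);

    /* consumer: validate, then resolve back to the Binary */
    if (is_internal_magic_ref(ref))
        bin = erts_magic_ref2bin(ref);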
@@ -1426,11 +1426,11 @@ binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
goto badarg;
}
if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ !is_internal_magic_ref(tp[2])) {
goto badarg;
}
bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
+ bin = erts_magic_ref2bin(tp[2]);
if (bfs.type == am_bm &&
ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
goto badarg;
@@ -1448,8 +1448,8 @@ binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
bfs.global_result = &do_match_global_result;
runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
+ Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1512,11 +1512,11 @@ binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
goto badarg;
}
if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ !is_internal_magic_ref(tp[2])) {
goto badarg;
}
bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
+ bin = erts_magic_ref2bin(tp[2]);
if (bfs.type == am_bm &&
ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
goto badarg;
@@ -1534,8 +1534,8 @@ binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
bfs.global_result = &do_split_global_result;
runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
+ Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1767,7 +1767,8 @@ static BIF_RETTYPE binary_find_trap(BIF_ALIST_3)
{
int runres;
Eterm result;
- Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ Binary *bin = erts_magic_ref2bin(BIF_ARG_3);
+
runres = do_binary_find(BIF_P, BIF_ARG_1, NULL, bin, BIF_ARG_2, &result);
if (runres == DO_BIN_MATCH_OK) {
BIF_RET(result);
@@ -2066,7 +2067,7 @@ static int do_search_backward(CommonData *cd, Uint *posp, Uint *redsp)
}
}
-static void cleanup_common_data(Binary *bp)
+static int cleanup_common_data(Binary *bp)
{
int i;
CommonData *cd;
@@ -2083,7 +2084,7 @@ static void cleanup_common_data(Binary *bp)
break;
}
}
- return;
+ return 1;
}
static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
@@ -2186,8 +2187,8 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
cd[i].type = CL_TYPE_HEAP;
}
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP3(trapper, p, bin_term, epos,list);
}
@@ -2214,8 +2215,7 @@ static BIF_RETTYPE do_longest_common_trap(Process *p, Eterm bin_term, Eterm curr
#else
term_to_Uint(current_pos, &pos);
#endif
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin_term));
- bin = ((ProcBin *) binary_val(bin_term))->val;
+ bin = erts_magic_ref2bin(bin_term);
cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bin);
if (direction == DIRECTION_PREFIX) {
trapper = &binary_longest_prefix_trap_export;
@@ -2563,7 +2563,7 @@ typedef struct {
#define BINARY_COPY_LOOP_FACTOR 100
-static void cleanup_copy_bin_state(Binary *bp)
+static int cleanup_copy_bin_state(Binary *bp)
{
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(bp);
if (cbs->result != NULL) {
@@ -2583,6 +2583,7 @@ static void cleanup_copy_bin_state(Binary *bp)
break;
}
cbs->source_type = BC_TYPE_EMPTY;
+ return 1;
}
/*
@@ -2668,7 +2669,6 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
if trapping */
- erts_refc_init(&(cbs->result->refc), 1);
t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
pos = 0;
i = 0;
@@ -2680,8 +2680,8 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
cbs->source_size = size;
cbs->result_pos = pos;
cbs->times_left = n-i;
- hp = HAlloc(p,PROC_BIN_SIZE);
- trap_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ trap_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP2(&binary_copy_trap_export, p, bin, trap_term);
} else {
@@ -2716,7 +2716,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
Uint reds = get_reds(BIF_P, BINARY_COPY_LOOP_FACTOR);
byte *t;
Uint pos;
- Binary *mb = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ Binary *mb = erts_magic_ref2bin(BIF_ARG_2);
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(mb);
Uint opos;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 4948975851..e9bfb39035 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1099,7 +1099,7 @@ void erts_ddll_increment_port_count(DE_Handle *dh)
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
-#if DEBUG
+#ifdef DEBUG
ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
#else
erts_smp_atomic32_dec_nob(&dh->port_count);
@@ -1476,8 +1476,10 @@ static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
static Eterm copy_ref(Eterm ref, Eterm *hp)
{
- RefThing *ptr = ref_thing_ptr(ref);
- memcpy(hp, ptr, sizeof(RefThing));
+ ErtsORefThing *ptr;
+ ASSERT(is_internal_ordinary_ref(ref));
+ ptr = ordinary_ref_thing_ptr(ref);
+ memcpy(hp, ptr, sizeof(ErtsORefThing));
return (make_internal_ref(hp));
}
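Internal references now come in two heap layouts, ordinary (ErtsORefThing) and magic (ErtsMRefThing), so a blind memcpy of "the" RefThing is no longer safe; the rewritten copy_ref asserts ordinariness first. The matching allocation in sketch form, sized in heap words with ERTS_REF_THING_SIZE (which corresponds to sizeof(ErtsORefThing) in bytes):

    Eterm *hp = HAlloc(p, ERTS_REF_THING_SIZE);   /* heap words */
    ASSERT(is_internal_ordinary_ref(ref));
    memcpy(hp, ordinary_ref_thing_ptr(ref), sizeof(ErtsORefThing));
    ref_copy = make_internal_ref(hp);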
@@ -1720,10 +1722,10 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
Eterm e;
mp = erts_alloc_message_heap(proc, &rp_locks,
(6 /* tuple */ + 3 /* Error tuple */ +
- REF_THING_SIZE + need),
+ ERTS_REF_THING_SIZE + need),
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
e = build_load_error_hp(hp, errcode);
hp += need;
mess = TUPLE2(hp,tag,e);
@@ -1731,10 +1733,10 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
mess = TUPLE5(hp,type,r,am_driver,driver_name,mess);
} else {
mp = erts_alloc_message_heap(proc, &rp_locks,
- 6 /* tuple */ + REF_THING_SIZE,
+ 6 /* tuple */ + ERTS_REF_THING_SIZE,
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
erts_queue_message(proc, rp_locks, mp, mess, am_system);
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index ea508bd1c4..8a5c6ada6c 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -141,6 +141,39 @@ BIF_RETTYPE trunc_1(BIF_ALIST_1)
BIF_RET(res);
}
+BIF_RETTYPE floor_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ GET_DOUBLE(BIF_ARG_1, f);
+ res = double_to_integer(BIF_P, floor(f.fd));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE ceil_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ res = double_to_integer(BIF_P, ceil(f.fd));
+ BIF_RET(res);
+}
+
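floor_1 and ceil_1 are the C entry points for the new erlang:floor/1 and erlang:ceil/1 BIFs: integers pass through unchanged, floats are rounded toward minus and plus infinity respectively and converted to an integer term. A standalone illustration of the libm semantics they lean on (compile with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%.1f %.1f\n", floor(-1.5), ceil(-1.5)); /* -2.0 -1.0 */
        printf("%.1f %.1f\n", floor(1.5),  ceil(1.5));  /*  1.0  2.0 */
        return 0;
    }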
BIF_RETTYPE round_1(BIF_ALIST_1)
{
Eterm res;
@@ -620,6 +653,38 @@ Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
reg, live);
}
+Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, floor(f.fd), reg, live);
+}
+
+Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, ceil(f.fd), reg, live);
+}
+
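The erts_gc_* twins exist for call sites where the argument arrives in the X register array rather than as a C parameter: reading reg[live] and threading reg/live into gc_double_to_integer lets a garbage collection triggered by bignum allocation treat the live registers as roots. A sketch of the assumed calling convention (my_guard_bif is hypothetical; gc_double_to_integer is the static helper defined at the end of this hunk):

    Eterm erts_gc_my_guard_bif_1(Process* p, Eterm* reg, Uint live)
    {
        Eterm arg = reg[live];   /* argument lives in an X register */
        FloatDef f;
        if (is_not_float(arg))
            BIF_ERROR(p, BADARG);
        GET_DOUBLE(arg, f);
        /* reg/live are passed on so a GC can rootscan the registers */
        return gc_double_to_integer(p, floor(f.fd), reg, live);
    }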
static Eterm
gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
{
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 7bd45916f5..e5d7efcc72 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -94,6 +94,9 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
#endif
+#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
+ " [dirty-schedulers-TEST]"
+#endif
" [async-threads:%d]"
#endif
#ifdef HIPE
@@ -174,7 +177,33 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_refc_read(&pb->val->refc, 1);
+ Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ }
+ return res;
+}
+
+static Eterm
+bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+{
+ struct erl_off_heap_header* ohh;
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (is_ref_thing_header((*((Eterm *) ohh)))) {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
+ Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -199,8 +228,10 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
- *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
+ *psz += NC_HEAP_SIZE(mon->ref);
+ *psz += (mon->type == MON_NIF_TARGET ?
+ erts_resource_ref_size(mon->u.resource) :
+ (is_immed(mon->u.pid) ? 0 : NC_HEAP_SIZE(mon->u.pid)));
*psz += 8; /* CONS + 5-tuple */
}
@@ -215,12 +246,11 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
MonListContext *pmlc = vpmlc;
Eterm tup;
- Eterm r = (IS_CONST(mon->ref)
- ? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
- Eterm p = (IS_CONST(mon->pid)
- ? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
+ Eterm r = STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref);
+ Eterm p = (mon->type == MON_NIF_TARGET ?
+ erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->u.resource)
+ : (is_immed(mon->u.pid) ? mon->u.pid
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->u.pid)));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -259,7 +289,7 @@ make_monitor_list(Process *p, ErtsMonitor *root)
static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
+ *psz += is_immed(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
/* Node links use this pointer as ref counter... */
erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
@@ -279,7 +309,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
LnkListContext *pllc = vpllc;
Eterm tup;
Eterm old_res, targets = NIL;
- Eterm p = (IS_CONST(lnk->pid)
+ Eterm p = (is_immed(lnk->pid)
? lnk->pid
: STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
@@ -363,8 +393,12 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
typedef struct {
/* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
* {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
- * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name */
- Eterm entity;
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
+ * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
+ union {
+ Eterm term;
+ ErtsResource* resource;
+ }entity;
Eterm node;
/* pid is actual target being monitored, no matter pid/port or name */
Eterm pid;
@@ -410,7 +444,7 @@ static void collect_one_link(ErtsLink *lnk, void *vmicp)
if (!(lnk->type == LINK_PID)) {
return;
}
- micp->mi[micp->mi_i].entity = lnk->pid;
+ micp->mi[micp->mi_i].entity.term = lnk->pid;
micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
micp->mi_i++;
}
@@ -423,20 +457,20 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
return;
}
EXTEND_MONITOR_INFOS(micp);
- if (is_atom(mon->pid)) { /* external by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
+ if (is_atom(mon->u.pid)) { /* external by name */
+ micp->mi[micp->mi_i].entity.term = mon->name;
+ micp->mi[micp->mi_i].node = mon->u.pid;
micp->sz += 3; /* need one 2-tuple */
- } else if (is_external_pid(mon->pid)) { /* external by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
+ } else if (is_external_pid(mon->u.pid)) { /* external by pid */
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
} else if (!is_nil(mon->name)) { /* internal by name */
- micp->mi[micp->mi_i].entity = mon->name;
+ micp->mi[micp->mi_i].entity.term = mon->name;
micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
micp->sz += 3; /* need one 2-tuple */
} else { /* internal by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
micp->mi[micp->mi_i].node = NIL;
/* no additional heap space needed */
}
@@ -444,7 +478,7 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
/* have always pid at hand, to assist with figuring out if its a port or
* a process, when we monitored by name and process_info is requested.
* See: erl_bif_info.c:process_info_aux section for am_monitors */
- micp->mi[micp->mi_i].pid = mon->pid;
+ micp->mi[micp->mi_i].pid = mon->u.pid;
micp->mi_i++;
micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
@@ -454,15 +488,24 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_TARGET) {
- return;
+ if (mon->type != MON_TARGET && mon->type != MON_NIF_TARGET) {
+ return;
}
EXTEND_MONITOR_INFOS(micp);
- micp->mi[micp->mi_i].node = NIL;
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
+
+ if (mon->type == MON_NIF_TARGET) {
+ micp->mi[micp->mi_i].entity.resource = mon->u.resource;
+ micp->mi[micp->mi_i].node = make_small(MON_NIF_TARGET);
+ micp->sz += erts_resource_ref_size(mon->u.resource);
+ }
+ else {
+ micp->mi[micp->mi_i].entity.term = mon->u.pid;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->u.pid);
+ }
+ micp->sz += 2; /* cons */
micp->mi_i++;
}
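With NIF monitors (MON_NIF_TARGET) the monitored entity is an ErtsResource pointer rather than an Eterm, hence the term/resource union in MonitorInfo; the node field doubles as the discriminant, set to make_small(MON_NIF_TARGET) for the resource case. The consuming side, as it appears later in process_info_aux, branches on that tag:

    if (mic.mi[i].node == make_small(MON_NIF_TARGET))
        item = erts_bld_resource_ref(&hp, &MSO(BIF_P), mic.mi[i].entity.resource);
    else
        item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);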
@@ -610,7 +653,8 @@ static Eterm pi_args[] = {
am_current_location,
am_current_stacktrace,
am_message_queue_data,
- am_garbage_collection_info
+ am_garbage_collection_info,
+ am_magic_ref
};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -661,6 +705,7 @@ pi_arg2ix(Eterm arg)
case am_current_stacktrace: return 31;
case am_message_queue_data: return 32;
case am_garbage_collection_info: return 33;
+ case am_magic_ref: return 34;
default: return -1;
}
}
@@ -1114,9 +1159,9 @@ process_info_aux(Process *BIF_P,
case am_initial_call:
hp = HAlloc(BIF_P, 3+4);
res = TUPLE3(hp,
- rp->u.initial[INITIAL_MOD],
- rp->u.initial[INITIAL_FUN],
- make_small(rp->u.initial[INITIAL_ARI]));
+ rp->u.initial.module,
+ rp->u.initial.function,
+ make_small(rp->u.initial.arity));
hp += 4;
break;
@@ -1191,7 +1236,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1209,7 +1254,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- if (is_atom(mic.mi[i].entity)) {
+ if (is_atom(mic.mi[i].entity.term)) {
/* Monitor by name.
* Build {process|port, {Name, Node}} and cons it.
*/
@@ -1221,7 +1266,7 @@ process_info_aux(Process *BIF_P,
|| is_port(mic.mi[i].pid)
|| is_atom(mic.mi[i].pid));
- t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
t2 = TUPLE2(hp, m_type, t1);
hp += 3;
@@ -1231,7 +1276,7 @@ process_info_aux(Process *BIF_P,
else {
/* Monitor by pid. Build {process|port, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
ASSERT(is_pid(mic.mi[i].pid)
@@ -1258,7 +1303,12 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ if (mic.mi[i].node == make_small(MON_NIF_TARGET)) {
+ item = erts_bld_resource_ref(&hp, &MSO(BIF_P), mic.mi[i].entity.resource);
+ }
+ else {
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+ }
res = CONS(hp, item, res);
hp += 2;
}
@@ -1563,9 +1613,9 @@ process_info_aux(Process *BIF_P,
term = am_timeout;
else {
term = TUPLE3(hp,
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- make_small(scb->ct[j]->code[2]));
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ make_small(scb->ct[j]->info.mfa.arity));
hp += 4;
}
list = CONS(hp, term, list);
@@ -1596,6 +1646,14 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3);
break;
+ case am_magic_ref: {
+ Uint sz = 3;
+ (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
+ hp = HAlloc(BIF_P, sz);
+ res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
+ break;
+ }
+
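The new am_magic_ref case follows the standard two-pass build idiom used throughout erl_bif_info.c: the builder is called once with only a size pointer to measure, the heap is allocated, then the builder runs again with only a heap pointer to emit. A minimal builder obeying the hpp/szp convention:

    static Eterm bld_pair(Eterm **hpp, Uint *szp, Eterm a, Eterm b)
    {
        Eterm res = THE_NON_VALUE;
        if (szp)
            *szp += 3;              /* 2-tuple: header word + 2 elements */
        if (hpp) {
            res = TUPLE2(*hpp, a, b);
            *hpp += 3;
        }
        return res;
    }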
default:
return THE_NON_VALUE; /* will produce badarg */
@@ -1614,10 +1672,10 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
if (rp->current == NULL) {
erts_lookup_function_info(&fi, rp->i, full_info);
- rp->current = fi.current;
+ rp->current = fi.mfa;
} else if (full_info) {
erts_lookup_function_info(&fi, rp->i, full_info);
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
/* Use the current function without location info */
erts_set_current_function(&fi, rp->current);
}
@@ -1633,9 +1691,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* instead if it can be looked up.
*/
erts_lookup_function_info(&fi2, rp->cp, full_info);
- if (fi2.current) {
+ if (fi2.mfa) {
fi = fi2;
- rp->current = fi2.current;
+ rp->current = fi2.mfa;
}
}
@@ -1650,8 +1708,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
hp = erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
+ res = TUPLE3(hp, rp->current->module,
+ rp->current->function,
+ make_small(rp->current->arity));
hp += 4;
}
*hpp = hp;
@@ -1692,7 +1751,7 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
heap_size = 3;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -2391,7 +2450,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ERTS_ATOM_ENC_LATIN1,
1),
erts_bld_uint(hpp, hszp,
- opc[i].count)),
+ erts_instr_count[i])),
res);
}
@@ -2676,20 +2735,30 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(active));
#endif
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
+#else
+ dirty_cpu = 0;
+#endif
BIF_RET(make_small(dirty_cpu));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
Uint dirty_cpu_onln;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
+#else
+ dirty_cpu_onln = 0;
+#endif
BIF_RET(make_small(dirty_cpu_onln));
} else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
Uint dirty_io;
+#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
- BIF_RET(make_small(dirty_io));
+#else
+ dirty_io = 0;
#endif
+ BIF_RET(make_small(dirty_io));
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2859,6 +2928,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_get_atom_limit()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
+ BIF_RET(make_small(atom_table_size()));
+ }
else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
if (erts_has_time_correction()
&& erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
@@ -2883,27 +2958,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(AM_tag);
#endif
}
- else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) {
- Eterm terms[3];
- Sint length = 1;
- Uint sz = 0;
- Eterm *hp, res;
- DECL_AM(direct_references);
-
- terms[0] = AM_direct_references;
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- {
- DECL_AM(indirect_references);
- terms[1] = AM_indirect_references;
- terms[2] = am_copy_literals;
- length = 3;
- }
-#endif
- erts_bld_list(NULL, &sz, length, terms);
- hp = HAlloc(BIF_P, sz);
- res = erts_bld_list(&hp, NULL, length, terms);
- BIF_RET(res);
- }
BIF_ERROR(BIF_P, BADARG);
}
@@ -2951,7 +3005,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -2982,7 +3036,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
Eterm t;
Eterm m_type;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
m_type = is_port(item) ? am_port : am_process;
t = TUPLE2(*hpp, m_type, item);
*hpp += 3;
@@ -3011,7 +3065,8 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ ASSERT(mic.mi[i].node == NIL);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -3248,7 +3303,7 @@ fun_info_2(BIF_ALIST_2)
break;
case am_module:
hp = HAlloc(p, 3);
- val = exp->code[0];
+ val = exp->info.mfa.module;
break;
case am_new_index:
hp = HAlloc(p, 3);
@@ -3276,11 +3331,11 @@ fun_info_2(BIF_ALIST_2)
break;
case am_arity:
hp = HAlloc(p, 3);
- val = make_small(exp->code[2]);
+ val = make_small(exp->info.mfa.arity);
break;
case am_name:
hp = HAlloc(p, 3);
- val = exp->code[1];
+ val = exp->info.mfa.function;
break;
default:
goto error;
@@ -3306,7 +3361,9 @@ fun_info_mfa_1(BIF_ALIST_1)
} else if (is_export(fun)) {
Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
hp = HAlloc(p, 4);
- BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ BIF_RET(TUPLE3(hp,exp->info.mfa.module,
+ exp->info.mfa.function,
+ make_small(exp->info.mfa.arity)));
}
BIF_ERROR(p, BADARG);
}
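These fun_info hunks show the Export-table migration applied across the whole patch: the untyped code[] array is replaced by a structured header, with code[0..2] becoming info.mfa.{module,function,arity} (an ErtsCodeMFA) and the two trampoline words code[3..4] becoming beam[0..1]. In sketch form, old access on the left, new on the right:

    mod = exp->code[0];        /* -> */  mod = exp->info.mfa.module;
    fun = exp->code[1];        /* -> */  fun = exp->info.mfa.function;
    ari = exp->code[2];        /* -> */  ari = exp->info.mfa.arity;
    pc  = exp->code + 3;       /* -> */  pc  = exp->beam;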
@@ -3384,13 +3441,24 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm* hp;
if (BIF_ARG_1 == am_scheduler_wall_time) {
- res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
- } else if (BIF_ARG_1 == am_total_active_tasks
- || BIF_ARG_1 == am_total_run_queue_lengths) {
- Uint no = erts_run_queues_len(NULL, 0, BIF_ARG_1 == am_total_active_tasks);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_run_queue_lengths)
+ | (BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
+ Uint no = erts_run_queues_len(NULL, 0,
+ ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_active_tasks_all)),
+ ((BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
if (IS_USMALL(0, no))
res = make_small(no);
else {
@@ -3398,13 +3466,21 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = uint_to_big(no, hp);
}
BIF_RET(res);
- } else if (BIF_ARG_1 == am_active_tasks
- || BIF_ARG_1 == am_run_queue_lengths) {
+ } else if ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_run_queue_lengths)
+ | (BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all));
+ int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
+ (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, BIF_ARG_1 == am_active_tasks);
+ (void) erts_run_queues_len(qszs, 0,
+ ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_active_tasks_all)),
+ incl_dirty_io);
sz = 0;
szp = &sz;
hpp = NULL;
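erts_run_queues_len grows two trailing flags, inferred here from the call sites in this patch: whether to count active tasks instead of queued work, and whether to also include the dirty IO run queues (dirty CPU queues are now always part of the per-queue variants, per the no_qs computation above). A hedged sketch of the updated call, with the second argument keeping its pre-existing meaning:

    Uint no = erts_run_queues_len(NULL, 0,
                                  1,  /* active tasks, not queue lengths */
                                  1); /* include dirty IO run queues */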
@@ -3468,24 +3544,32 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ elapsed_time_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
- res = erts_run_queues_len(NULL, 1, 0);
+ res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
+ Uint hsz;
wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
@@ -3495,9 +3579,9 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, 0);
+ (void) erts_run_queues_len(qszs, 0, 0, 1);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3525,6 +3609,11 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
static erts_smp_atomic_t available_internal_state;
+static int empty_magic_ref_destructor(Binary *bin)
+{
+ return 1;
+}
+
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
/*
@@ -3563,7 +3652,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
- BIF_RET(make_small((Uint) words));
+ Eterm* hp = HAlloc(BIF_P, 3);
+ BIF_RET(TUPLE2(hp, make_small((Uint) words),
+ erts_ets_hash_sizeof_ext_segtab()));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
/* Used by driver_SUITE (emulator) */
@@ -3674,6 +3765,21 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_sint64_to_big(value, &hp));
}
}
+ else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
+ UWord size;
+ char c;
+ if (erts_is_above_stack_limit(&c))
+ size = erts_check_stack_recursion_downwards(&c);
+ else
+ size = erts_check_stack_recursion_upwards(&c);
+ if (IS_SSMALL(size))
+ BIF_RET(make_small(size));
+ else {
+ Uint hsz = BIG_UWORD_HEAP_SIZE(size);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(uword_to_big(size, hp));
+ }
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3866,7 +3972,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
}
else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
- Uint hash = (Uint) make_internal_hash(tp[2]);
+ Uint hash = (Uint) make_internal_hash(tp[2], 0);
Uint hsz = 0;
Eterm* hp;
erts_bld_uint(NULL, &hsz, hash);
@@ -3884,6 +3990,34 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
return make_atom(ix);
}
+ else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
+ Binary *bin;
+ UWord bin_addr, refc;
+ Eterm bin_addr_term, refc_term, test_type;
+ Uint sz;
+ Eterm *hp;
+ if (!is_internal_magic_ref(tp[2])) {
+ if (is_internal_ordinary_ref(tp[2])) {
+ ErtsORefThing *rtp;
+ rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
+ if (erts_is_ref_numbers_magic(rtp->num))
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+ }
+ bin = erts_magic_ref2bin(tp[2]);
+ refc = erts_refc_read(&bin->intern.refc, 1);
+ bin_addr = (UWord) bin;
+ sz = 4;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ erts_bld_uword(NULL, &sz, refc);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ refc_term = erts_bld_uword(&hp, NULL, refc);
+ test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
+ ? am_true : am_false);
+ BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
+ }
break;
}
@@ -3972,7 +4106,6 @@ static void broken_halt_test(Eterm bif_arg_2)
erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
-
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -4298,54 +4431,141 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
BIF_RET(am_ok);
}
+ else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
+ Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
+ UWord bin_addr = (UWord) bin;
+ Eterm bin_addr_term, magic_ref, res;
+ Eterm *hp;
+ Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
+ res = TUPLE2(hp, magic_ref, bin_addr_term);
+ BIF_RET(res);
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
}
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+ /* info->location_count may increase between size calculation and term
+ * building, so we cap it at the value sampled in lcnt_build_sample_vector.
+ *
+ * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and may cross over into bignum territory between size calc
+ * and the actual build. This takes care of that through always assuming the
+ * worst, but needs to be fixed up with HRelease once the final term has been
+ * built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
+
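bld_unstable_uint64 exists for the window between the two build passes: a counter that fits a small during sizing may have grown into a bignum by the time the term is emitted, so the size pass always reserves the 64-bit worst case and the surplus is trimmed afterwards. The pairing, shown in full in erts_debug_lcnt_collect_0 below (build_result stands in for lcnt_build_result_term):

    Uint size = 0;
    build_result(NULL, &size);              /* pass 1: worst-case size */
    start = HAlloc(p, size);
    end = start;
    term = build_result(&end, NULL);        /* pass 2: may emit fewer words */
    HRelease(p, start + size, end);         /* return the unused tail */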
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
unsigned int i;
-
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
-
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
- * { .. histogram .. }]
- */
+ * [{{file, line},
+ {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }] */
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
- vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
}
+
thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
@@ -4354,185 +4574,266 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+ * progress is delayed since we may be running on a dirty scheduler. */
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
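The registered-name lookup reads another process's table entry without taking its lock, so it is bracketed by the unmanaged thread-progress delay, which (per the comment) also works on dirty schedulers where ordinary thread progress is not managed. The bracket in isolation:

    ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
    /* lock-free reads that must not race with deallocation */
    erts_thr_progress_unmanaged_continue(dhndl);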
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
- res = erts_bld_cons( hpp, szp, t, res);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
+
adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
return res;
-}
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
+
+ return res;
+}
+
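lcnt_category_map is a NULL-terminated name/flag table used in both directions: lcnt_atom_to_lock_category folds a list of atoms into a bit mask, and lcnt_build_category_list expands a mask back into atoms. A standalone analogue of the forward direction:

    #include <stdio.h>
    #include <string.h>

    static const struct { const char *name; unsigned flag; } category_map[] = {
        {"allocator", 1u << 0}, {"db", 1u << 1}, {"io", 1u << 2}, {NULL, 0}
    };

    static unsigned to_mask(const char **names, int n)
    {
        unsigned mask = 0;
        for (int i = 0; i < n; i++)
            for (int j = 0; category_map[j].name; j++)
                if (!strcmp(names[i], category_map[j].name))
                    mask |= category_map[j].flag;
        return mask;
    }

    int main(void)
    {
        const char *cats[] = {"db", "io"};
        printf("mask=%#x\n", to_mask(cats, 2)); /* mask=0x6 */
        return 0;
    }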
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
-#endif
- }
-#ifdef ERTS_ENABLE_LOCK_COUNT
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ data = erts_lcnt_get_data();
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
- /* calculate size */
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
- /* alloc and build */
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
- hp = HAlloc(BIF_P, hsize);
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ BIF_RET(result);
+#endif
+}
- erts_lcnt_clear_counters();
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* ptr = tuple_val(BIF_ARG_1);
-
- if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) {
- int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0;
- if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false;
- else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false;
-
-#ifdef ERTS_SMP
- if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(enable);
- } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(enable);
- }
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
+
+ BIF_RET(am_false);
+ }
#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
+
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
}
- }
-#endif
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
+
+ category = lcnt_atom_to_lock_category(CAR(cell));
+
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
+
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
+ }
+
+ category_mask |= category;
+ categories = CDR(cell);
+ }
+
+ erts_lcnt_set_category_mask(category_mask);
+
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
+
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index aecb8bf0c1..a594ec1493 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -260,7 +260,7 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
} else if (is_export(arg1)) {
Export* exp = (Export *) (export_val(arg1)[1]);
- if (exp->code[2] == (Uint) arity) {
+ if (exp->info.mfa.arity == (Uint) arity) {
BIF_RET(am_true);
}
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 46777d3aa5..5cd172c26f 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -203,3 +203,17 @@ BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
}
BIF_RET(am_true);
}
+
+BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) {
+ if (is_atom(BIF_ARG_1) && ((BIF_ARG_2 == am_ignore) ||
+ (BIF_ARG_2 == am_default) ||
+ (BIF_ARG_2 == am_handle))) {
+ if (!erts_set_signal(BIF_ARG_1, BIF_ARG_2))
+ goto error;
+
+ BIF_RET(am_ok);
+ }
+
+error:
+ BIF_ERROR(BIF_P, BADARG);
+}
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 90e78a9b0b..ff03151619 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -214,7 +214,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
/* Fall through... */
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_RET(res, ref);
break;
case ERTS_PORT_OP_DONE:
@@ -260,7 +260,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -310,7 +310,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -356,7 +356,7 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
case ERTS_PORT_OP_DONE:
BIF_RET(am_true);
@@ -389,7 +389,7 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
break;
case ERTS_PORT_OP_DONE:
@@ -428,7 +428,7 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -467,7 +467,7 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index ff7746ce1d..ad124fd979 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -64,12 +64,47 @@ static void erts_erts_pcre_stack_free(void *ptr) {
erts_free(ERTS_ALC_T_RE_STACK,ptr);
}
+#define ERTS_PCRE_STACK_MARGIN (10*1024)
+
+#ifdef ERTS_SMP
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+#else
+# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit)
+#endif
+
+static int
+stack_guard_downwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_below_limit(&c, limit + ERTS_PCRE_STACK_MARGIN);
+}
+
+static int
+stack_guard_upwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_above_limit(&c, limit - ERTS_PCRE_STACK_MARGIN);
+}
+
void erts_init_bif_re(void)
{
+ char c;
erts_pcre_malloc = &erts_erts_pcre_malloc;
erts_pcre_free = &erts_erts_pcre_free;
erts_pcre_stack_malloc = &erts_erts_pcre_stack_malloc;
erts_pcre_stack_free = &erts_erts_pcre_stack_free;
+ if ((char *) erts_ptr_id(&c) > ERTS_STACK_LIMIT)
+ erts_pcre_stack_guard = stack_guard_downwards;
+ else
+ erts_pcre_stack_guard = stack_guard_upwards;
default_table = NULL; /* ISO8859-1 default, forced into pcre */
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
@@ -477,6 +512,17 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
* Compile BIFs
*/
+BIF_RETTYPE
+re_version_0(BIF_ALIST_0)
+{
+ Eterm ret;
+ size_t version_size = 0;
+ byte *version = (byte *) erts_pcre_version();
+ version_size = strlen((const char *) version);
+ ret = new_binary(BIF_P, version, version_size);
+ BIF_RET(ret);
+}
+
static BIF_RETTYPE
re_compile(Process* p, Eterm arg1, Eterm arg2)
{
@@ -600,10 +646,11 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
/*
@@ -1319,17 +1366,17 @@ handle_iolist:
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
cleanup_restart_context_bin);
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
- Eterm magic_bin;
+ Eterm magic_ref;
Eterm *hp;
memcpy(restartp,&restart,sizeof(RestartContext));
BUMP_ALL_REDS(p);
- hp = HAlloc(p, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
p,
arg1,
arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
- magic_bin);
+ magic_ref);
}
res = build_exec_return(p, rc, &restart, arg1);
@@ -1366,9 +1413,7 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
Uint loop_limit_tmp;
Eterm res;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_3));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_3);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 4fed09f085..45159c4392 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -125,7 +125,6 @@ erts_internal_trace_pattern_3(BIF_ALIST_3)
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
- DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
int matches = -1;
int specified = 0;
@@ -308,30 +307,30 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
matches = 0;
} else if (is_tuple(MFA)) {
+ ErtsCodeMFA mfa;
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+ if (specified == 3) {
+ mfa.arity = signed_val(tp[3]);
}
- matches = erts_set_trace_pattern(p, mfa, specified,
+ matches = erts_set_trace_pattern(p, &mfa, specified,
match_prog_set, match_prog_set,
on, flags, meta_tracer, 0);
} else if (is_atom(MFA)) {
@@ -343,7 +342,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
error:
MatchSetUnref(match_prog_set);
- UnUseTmpHeap(3,p);
ERTS_TRACER_CLEAR(&meta_tracer);
@@ -975,13 +973,14 @@ static int function_is_traced(Process *p,
Export e;
Export* ep;
BeamInstr* pc;
+ ErtsCodeInfo *ci;
/* First look for an export entry */
- e.code[0] = mfa[0];
- e.code[1] = mfa[1];
- e.code[2] = mfa[2];
+ e.info.mfa.module = mfa[0];
+ e.info.mfa.function = mfa[1];
+ e.info.mfa.arity = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- pc = ep->code+3;
+ pc = ep->beam;
if (ep->addressv[erts_active_code_ix()] == pc &&
*pc != (BeamInstr) em_call_error_handler) {
@@ -990,17 +989,17 @@ static int function_is_traced(Process *p,
ASSERT(*pc == (BeamInstr) em_apply_bif ||
*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- if (erts_is_trace_break(pc, ms, 0)) {
+ if (erts_is_trace_break(&ep->info, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (erts_is_trace_break(pc, ms, 1)) {
+ if (erts_is_trace_break(&ep->info, ms, 1)) {
r |= FUNC_TRACE_LOCAL_TRACE;
}
- if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ if (erts_is_mtrace_break(&ep->info, ms_meta, tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
- if (erts_is_time_break(p, pc, call_time)) {
+ if (erts_is_time_break(p, &ep->info, call_time)) {
r |= FUNC_TRACE_TIME_TRACE;
}
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1008,15 +1007,15 @@ static int function_is_traced(Process *p,
}
/* OK, now look for breakpoint tracing */
- if ((pc = erts_find_local_func(mfa)) != NULL) {
+ if ((ci = erts_find_local_func(&e.info.mfa)) != NULL) {
int r =
- (erts_is_trace_break(pc, ms, 1)
+ (erts_is_trace_break(ci, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(ci, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(pc, count)
+ | (erts_is_count_break(ci, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, pc, call_time)
+ | (erts_is_time_break(p, ci, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1357,7 +1356,7 @@ trace_info_event(Process* p, Eterm event, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
ErtsTracer meta_tracer, int is_blocking)
@@ -1377,22 +1376,23 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
n = finish_bp.e.matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ ErtsCodeInfo *ci = fp[i].ci;
+ BeamInstr* pc = erts_codeinfo_to_code(ci);
+ Export* ep = ErtsContainerStruct(ci, Export, info);
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
if (ep->addressv[code_ix] != pc) {
fp[i].mod->curr.num_traced_exports++;
#ifdef DEBUG
- pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
#endif
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
- pc[1] = (BeamInstr) ep->addressv[code_ix];
+ ep->beam[0] = (BeamInstr) BeamOp(op_jump_f);
+ ep->beam[1] = (BeamInstr) ep->addressv[code_ix];
}
- erts_set_call_trace_bif(pc, match_prog_set, 0);
+ erts_set_call_trace_bif(ci, match_prog_set, 0);
if (ep->addressv[code_ix] != pc) {
- pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ ep->beam[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
}
} else if (!on && flags.breakpoint) {
/* Turn off breakpoint tracing -- nothing to do here. */
@@ -1401,9 +1401,9 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
* Turn off global tracing, either explicitly or implicitly
* before turning on breakpoint tracing.
*/
- erts_clear_call_trace_bif(pc, 0);
- if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ erts_clear_call_trace_bif(ci, 0);
+ if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ ep->beam[0] = (BeamInstr) BeamOp(op_jump_f);
}
}
}
@@ -1413,68 +1413,76 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
*/
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- int j;
-
+
if (!ExportIsBuiltIn(ep)) {
continue;
}
-
+
if (bif_table[i].f == bif_table[i].traced) {
/* Trace wrapper same as regular function - untraceable */
continue;
}
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
- if (! flags.breakpoint) { /* Export entry call trace */
- if (on) {
- erts_clear_call_trace_bif(pc, 1);
- erts_clear_mtrace_bif(pc);
- erts_set_call_trace_bif(pc, match_prog_set, 0);
- } else { /* off */
- erts_clear_call_trace_bif(pc, 0);
- }
- matches++;
- } else { /* Breakpoint call trace */
- int m = 0;
-
- if (on) {
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 0);
- erts_set_call_trace_bif(pc, match_prog_set, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer);
- m = 1;
- }
- if (flags.call_time) {
- erts_set_time_trace_bif(pc, on);
- /* I don't want to remove any other tracers */
- m = 1;
- }
- } else { /* off */
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_clear_mtrace_bif(pc);
- m = 1;
- }
- if (flags.call_time) {
- erts_clear_time_trace_bif(pc);
- m = 1;
- }
- }
- matches += m;
- }
- }
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ if (! flags.breakpoint) { /* Export entry call trace */
+ if (on) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ erts_clear_mtrace_bif(&ep->info);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
+ } else { /* off */
+ erts_clear_call_trace_bif(&ep->info, 0);
+ }
+ matches++;
+ } else { /* Breakpoint call trace */
+ int m = 0;
+
+ if (on) {
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 0);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
+ meta_tracer);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_set_time_trace_bif(&ep->info, on);
+ /* I don't want to remove any other tracers */
+ m = 1;
+ }
+ } else { /* off */
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_clear_mtrace_bif(&ep->info);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(&ep->info);
+ m = 1;
+ }
+ }
+ matches += m;
+ }
}
/*
@@ -1676,10 +1684,9 @@ install_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- ep->addressv[code_ix] = pc;
+ ep->addressv[code_ix] = ep->beam;
}
}
@@ -1692,14 +1699,13 @@ uninstall_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] != pc) {
+ if (ep->addressv[code_ix] != ep->beam) {
continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
+ ASSERT(ep->beam[0] == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
}
}
@@ -1712,15 +1718,14 @@ clean_export_entries(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] == pc) {
+ if (ep->addressv[code_ix] == ep->beam) {
continue;
}
- if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
- ep->code[3] = (BeamInstr) 0;
- ep->code[4] = (BeamInstr) 0;
+ if (ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->beam[0] = (BeamInstr) 0;
+ ep->beam[1] = (BeamInstr) 0;
}
}
}
@@ -1732,11 +1737,11 @@ setup_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ GenericBp* g = ep->info.u.gen_bp;
if (g) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].traced;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].traced;
}
}
}
@@ -1750,12 +1755,11 @@ reset_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- BeamInstr* pc = ep->code+3;
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ep->info.u.gen_bp;
if (g && g->data[active].flags == 0) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
}
}
}
@@ -2359,7 +2363,7 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Eterm target;
erts_smp_atomic32_t refc;
} ErtsTraceDeliveredAll;
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 7c70217d8d..2f8adc87d5 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,12 +22,15 @@
# include "config.h"
#endif
+#define ERL_BIF_UNIQUE_C__
#include "sys.h"
#include "erl_vm.h"
#include "erl_alloc.h"
#include "export.h"
#include "bif.h"
#include "erl_bif_unique.h"
+#include "hash.h"
+#include "erl_binary.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Reference *
@@ -58,9 +61,20 @@ static union {
static Uint32 max_thr_id;
#endif
+static void init_magic_ref_tables(void);
+
+static Uint64 ref_init_value;
+
static void
init_reference(void)
{
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ ref_init_value = 0;
+ ref_init_value |= (Uint64) tv.tv_sec;
+ ref_init_value |= ((Uint64) tv.tv_usec) << 32;
+ ref_init_value *= (Uint64) 268438039;
+ ref_init_value += (Uint64) tv.tv_usec;
#ifdef DEBUG
max_thr_id = (Uint32) erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -68,11 +82,13 @@ init_reference(void)
max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
#endif
#endif
- erts_atomic64_init_nob(&global_reference.count, 0);
+ erts_atomic64_init_nob(&global_reference.count,
+ (erts_aint64_t) ref_init_value);
+ init_magic_ref_tables();
}
static ERTS_INLINE void
-global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -82,7 +98,7 @@ global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
static ERTS_INLINE void
-make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
if (esdp)
@@ -92,15 +108,23 @@ make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
void
-erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+}
+
+void
+erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
make_ref_in_array(ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
}
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -110,11 +134,11 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
Eterm erts_make_ref(Process *c_p)
{
Eterm* hp;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -122,6 +146,274 @@ Eterm erts_make_ref(Process *c_p)
return make_internal_ref(hp);
}
+/*
+ * Magic reference tables
+ */
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+ Uint32 thr_id;
+} ErtsMagicRefTableEntry;
+
+typedef struct {
+ erts_rwmtx_t rwmtx;
+ Hash hash;
+ char name[32];
+} ErtsMagicRefTable;
+
+typedef struct {
+ union {
+ ErtsMagicRefTable table;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMagicRefTable))];
+ } u;
+} ErtsAlignedMagicRefTable;
+
+ErtsAlignedMagicRefTable *magic_ref_table;
+
+ErtsMagicBinary *
+erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicBinary *mb;
+ ErtsMagicRefTable *tblp;
+
+ ASSERT(erts_is_ref_numbers_magic(refn));
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+ if (!tep)
+ mb = NULL;
+ else {
+ erts_aint_t refc;
+ mb = tep->mb;
+ refc = erts_refc_inc_unless(&mb->intern.refc, 0, 0);
+ if (refc == 0)
+ mb = NULL;
+ }
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ return mb;
+}
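
The guard in the lookup above is erts_refc_inc_unless(&mb->intern.refc, 0, 0): the reference count is bumped only if it is still nonzero, so a reader can never resurrect a magic binary whose last reference is concurrently being released. A minimal sketch of that increment-unless-zero primitive in portable C11 atomics (an illustrative stand-in, not the erts implementation):

#include <stdatomic.h>

/* Increment *refc unless it is zero. Returns the new count, or 0 if
 * the object is already dying and must be reported as not found. */
static long refc_inc_unless_zero(atomic_long *refc)
{
    long old = atomic_load_explicit(refc, memory_order_relaxed);
    while (old != 0) {
        if (atomic_compare_exchange_weak_explicit(
                refc, &old, old + 1,
                memory_order_acq_rel, memory_order_relaxed))
            return old + 1;   /* reference successfully taken */
        /* a failed CAS reloaded 'old'; loop and re-check for zero */
    }
    return 0;
}
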
+
+void
+erts_magic_ref_save_bin__(Eterm ref)
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMRefThing *mrtp;
+ ErtsMagicRefTable *tblp;
+ Uint32 *refn;
+
+ ASSERT(is_internal_magic_ref(ref));
+
+ mrtp = (ErtsMRefThing *) internal_ref_val(ref);
+ refn = mrtp->mb->refn;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (!tep) {
+ ErtsMagicRefTableEntry *used_tep;
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ if (tblp != &magic_ref_table[0].u.table) {
+ tep = erts_alloc(ERTS_ALC_T_MREF_NSCHED_ENT,
+ sizeof(ErtsNSchedMagicRefTableEntry));
+ }
+ else {
+ tep = erts_alloc(ERTS_ALC_T_MREF_ENT,
+ sizeof(ErtsMagicRefTableEntry));
+ tep->thr_id = tmpl.thr_id;
+ }
+
+ tep->value = tmpl.value;
+ tep->mb = mrtp->mb;
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ used_tep = hash_put(&tblp->hash, tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (used_tep != tep) {
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+ }
+}
+
+void
+erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicRefTable *tblp;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (tep) {
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ tep = hash_remove(&tblp->hash, &tmpl);
+ ASSERT(tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+}
+
+static int nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsNSchedMagicRefTableEntry *e1 = ve1;
+ ErtsNSchedMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value;
+}
+
+static int non_nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsMagicRefTableEntry *e1 = ve1;
+ ErtsMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value || e1->thr_id != e2->thr_id;
+}
+
+static HashValue nsched_mreft_hash(void *ve)
+{
+ ErtsNSchedMagicRefTableEntry *e = ve;
+ return (HashValue) e->value;
+}
+
+static HashValue non_nsched_mreft_hash(void *ve)
+{
+ ErtsMagicRefTableEntry *e = ve;
+ HashValue h;
+ h = (HashValue) e->thr_id;
+ h *= 268440163;
+ h += (HashValue) e->value;
+ return h;
+}
+
+static void *mreft_alloc(void *ve)
+{
+ /*
+ * We allocate the element before
+ * hash_put() and pass it in as the
+ * template, which this callback then
+ * receives and simply returns...
+ */
+ return ve;
+}
+
+static void mreft_free(void *ve)
+{
+ /*
+ * We free the element ourselves
+ * after hash_remove()...
+ */
+}
+
+static void *mreft_meta_alloc(int i, size_t size)
+{
+ return erts_alloc(ERTS_ALC_T_MREF_TAB_BKTS, size);
+}
+
+static void mreft_meta_free(int i, void *ptr)
+{
+ erts_free(ERTS_ALC_T_MREF_TAB_BKTS, ptr);
+}
+
+static void
+init_magic_ref_tables(void)
+{
+ HashFunctions hash_funcs;
+ int i;
+ ErtsMagicRefTable *tblp;
+
+ magic_ref_table = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MREF_TAB,
+ (sizeof(ErtsAlignedMagicRefTable)
+ * (erts_no_schedulers + 1)));
+
+ hash_funcs.hash = non_nsched_mreft_hash;
+ hash_funcs.cmp = non_nsched_mreft_cmp;
+
+ hash_funcs.alloc = mreft_alloc;
+ hash_funcs.free = mreft_free;
+ hash_funcs.meta_alloc = mreft_meta_alloc;
+ hash_funcs.meta_free = mreft_meta_free;
+ hash_funcs.meta_print = erts_print;
+
+ tblp = &magic_ref_table[0].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_0");
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ hash_funcs.hash = nsched_mreft_hash;
+ hash_funcs.cmp = nsched_mreft_cmp;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsMagicRefTable *tblp = &magic_ref_table[i].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_%d", i);
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ }
+}
+
+void erts_ref_bin_free(ErtsMagicBinary *mb)
+{
+ erts_bin_free((Binary *) mb);
+}
+
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Unique Integer *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -498,7 +790,7 @@ void
erts_sched_bif_unique_init(ErtsSchedulerData *esdp)
{
esdp->unique = (Uint64) 0;
- esdp->ref = (Uint64) 0;
+ esdp->ref = (Uint64) ref_init_value;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -513,7 +805,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
- hp = HAlloc(BIF_P, REF_THING_SIZE);
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index c6481864d0..9aa631fde9 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,16 +21,25 @@
#ifndef ERTS_BIF_UNIQUE_H__
#define ERTS_BIF_UNIQUE_H__
+#include "erl_term.h"
#include "erl_process.h"
#include "big.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
void erts_bif_unique_init(void);
void erts_sched_bif_unique_init(ErtsSchedulerData *esdp);
/* reference */
Eterm erts_make_ref(Process *);
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
-void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS]);
+void erts_magic_ref_save_bin__(Eterm ref);
+ErtsMagicBinary *erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS]);
+
/* strict monotonic counter */
@@ -67,19 +76,35 @@ Eterm erts_debug_make_unique_integer(Process *c_p,
Eterm etval1);
-ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS],
+ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS],
Uint32 thr_id, Uint64 value);
-ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE int erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE void erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE Eterm erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE]);
+ Eterm buffer[ERTS_REF_THING_SIZE]);
+ERTS_GLB_INLINE Eterm erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp);
+ERTS_GLB_INLINE Binary *erts_magic_ref2bin(Eterm mref);
+ERTS_GLB_INLINE void erts_magic_ref_save_bin(Eterm ref);
+ERTS_GLB_INLINE ErtsMagicBinary *erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS]);
+
+#define ERTS_REF1_MAGIC_MARKER_BIT_NO__ \
+ (_REF_NUM_SIZE-1)
+#define ERTS_REF1_MAGIC_MARKER_BIT__ \
+ (((Uint32) 1) << ERTS_REF1_MAGIC_MARKER_BIT_NO__)
+#define ERTS_REF1_THR_ID_MASK__ \
+ (ERTS_REF1_MAGIC_MARKER_BIT__-1)
+#define ERTS_REF1_NUM_MASK__ \
+ (~(ERTS_REF1_THR_ID_MASK__|ERTS_REF1_MAGIC_MARKER_BIT__))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 value)
+erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS], Uint32 thr_id, Uint64 value)
{
/*
* We cannot use thread id in the first 18-bit word since
@@ -87,30 +112,39 @@ erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 val
* we did, we would get really poor hash values. Instead
* we have to shuffle the bits a bit.
*/
- ASSERT(thr_id == (thr_id & ((Uint32) 0x3ffff)));
- ref[0] = (Uint32) (value & ((Uint64) 0x3ffff));
- ref[1] = (((Uint32) (value & ((Uint64) 0xfffc0000)))
- | (thr_id & ((Uint32) 0x3ffff)));
+ ASSERT(thr_id == (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
+ ref[0] = (Uint32) (value & ((Uint64) REF_MASK));
+ ref[1] = (((Uint32) (value & ((Uint64) ERTS_REF1_NUM_MASK__)))
+ | (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
ref[2] = (Uint32) ((value >> 32) & ((Uint64) 0xffffffff));
}
ERTS_GLB_INLINE Uint32
-erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS])
{
- return ref[1] & ((Uint32) 0x3ffff);
+ return ref[1] & ((Uint32) ERTS_REF1_THR_ID_MASK__);
+}
+
+ERTS_GLB_INLINE int
+erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ return !!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__);
}
ERTS_GLB_INLINE Uint64
-erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS])
{
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ | REF_MASK) == 0xffffffff);
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ & REF_MASK) == 0);
+
return (((((Uint64) ref[2]) & ((Uint64) 0xffffffff)) << 32)
- | (((Uint64) ref[1]) & ((Uint64) 0xfffc0000))
- | (((Uint64) ref[0]) & ((Uint64) 0x3ffff)));
+ | (((Uint64) ref[1]) & ((Uint64) ERTS_REF1_NUM_MASK__))
+ | (((Uint64) ref[0]) & ((Uint64) REF_MASK)));
}
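
Together with an assumed _REF_NUM_SIZE of 18 (so REF_MASK is 0x3ffff and the marker bit is bit 17 of ref[1], matching the 0x3ffff/0xfffc0000 literals removed above), the functions above pin down a fixed layout: bits 0-17 of the 64-bit counter live in ref[0], bits 18-31 in the high part of ref[1] beside the 17-bit thread id, and bits 32-63 in ref[2]. A worked round-trip under those assumed constants:

#include <assert.h>
#include <stdint.h>

#define REF_MASK_    UINT32_C(0x3ffff)   /* low 18 value bits -> ref[0] */
#define MARKER_BIT_  UINT32_C(0x20000)   /* bit 17 of ref[1]: magic ref */
#define THR_ID_MASK_ (MARKER_BIT_ - 1)   /* 17-bit thread id in ref[1]  */
#define NUM_MASK_    (~(THR_ID_MASK_ | MARKER_BIT_))   /* == 0xfffc0000 */

static void pack(uint32_t ref[3], uint32_t thr_id, uint64_t value)
{
    ref[0] = (uint32_t)(value & REF_MASK_);
    ref[1] = ((uint32_t)(value & NUM_MASK_)) | (thr_id & THR_ID_MASK_);
    ref[2] = (uint32_t)(value >> 32);
}

static uint64_t unpack(const uint32_t ref[3])
{
    return (((uint64_t)ref[2]) << 32)
        | (uint64_t)(ref[1] & NUM_MASK_)
        | (uint64_t)(ref[0] & REF_MASK_);
}

int main(void)
{
    uint32_t ref[3];
    uint64_t v = UINT64_C(0x0123456789abcdef);
    pack(ref, 5, v);
    assert(unpack(ref) == v);               /* all 64 bits survive */
    assert((ref[1] & THR_ID_MASK_) == 5);   /* thread id is recoverable */
    return 0;
}
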
ERTS_GLB_INLINE void
erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS])
+ Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -119,18 +153,288 @@ erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
erts_set_ref_numbers(ref, (Uint32) esdp->thr_id, value);
}
+ERTS_GLB_INLINE void
+erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS])
+{
+ erts_sched_make_ref_in_array(esdp, ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
ERTS_GLB_INLINE Eterm
erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE])
+ Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
erts_sched_make_ref_in_array(esdp, ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
return make_internal_ref(hp);
}
+ERTS_GLB_INLINE Eterm
+erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *bp)
+{
+ Eterm *hp = *hpp;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ write_magic_ref_thing(hp, ohp, (ErtsMagicBinary *) bp);
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ erts_refc_inc(&bp->intern.refc, 1);
+ OH_OVERHEAD(ohp, bp->orig_size / sizeof(Eterm));
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_magic_ref2bin(Eterm mref)
+{
+ ErtsMRefThing *mrtp;
+ ASSERT(is_internal_magic_ref(mref));
+ mrtp = (ErtsMRefThing *) internal_ref_val(mref);
+ return (Binary *) mrtp->mb;
+}
+
+/*
+ * Save the magic binary of a ref when the
+ * ref is exposed to the outside world...
+ */
+ERTS_GLB_INLINE void
+erts_magic_ref_save_bin(Eterm ref)
+{
+ if (is_internal_magic_ref(ref))
+ erts_magic_ref_save_bin__(ref);
+}
+
+/*
+ * Look up the magic binary of a magic ref
+ * when the ref comes from the outside world...
+ */
+ERTS_GLB_INLINE ErtsMagicBinary *
+erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ if (!erts_is_ref_numbers_magic(ref))
+ return NULL;
+ return erts_magic_ref_lookup_bin__(ref);
+}
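
The pairing above matters: erts_magic_ref_save_bin() is a no-op for ordinary refs and registers magic ones, while erts_magic_ref_lookup_bin() returns NULL both for ordinary ref numbers and for magic refs whose binary is already gone. A hedged usage sketch built only on the functions declared above:

/* Outbound path, e.g. before encoding a ref for the outside world: */
void export_ref(Eterm ref)
{
    erts_magic_ref_save_bin(ref);   /* no-op unless 'ref' is magic */
    /* ... encode the three ref numbers ... */
}

/* Inbound path, decoding three ref numbers back into a binary: */
ErtsMagicBinary *import_ref(Uint32 refn[ERTS_REF_NUMBERS])
{
    return erts_magic_ref_lookup_bin(refn);  /* NULL if not magic/alive */
}
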
+
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * Storage of internal refs in misc structures...
+ */
+
+#include "erl_message.h"
+
+#if ERTS_REF_NUMBERS != 3
+# error fix this...
+#endif
+
+ERTS_GLB_INLINE int erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS])
+{
+ if (num1[2] != num2[2])
+ return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ if (num1[1] != num2[1])
+ return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ if (num1[0] != num2[0])
+ return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return 0;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* Iref storage for all internal references... */
+typedef struct {
+ Uint32 is_magic;
+ union {
+ ErtsMagicBinary *mb;
+ Uint32 num[ERTS_REF_NUMBERS];
+ } u;
+} ErtsIRefStorage;
+
+void erts_ref_bin_free(ErtsMagicBinary *mb);
+
+ERTS_GLB_INLINE void erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref);
+ERTS_GLB_INLINE void erts_iref_storage_clean(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Uint erts_iref_storage_heap_size(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Eterm erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage);
+ERTS_GLB_INLINE int erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref)
+{
+ Eterm *hp;
+
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ref(ref));
+
+ hp = boxed_val(ref);
+
+ if (is_ordinary_ref_thing(hp)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) hp;
+ iref->is_magic = 0;
+ iref->u.num[0] = rtp->num[0];
+ iref->u.num[1] = rtp->num[1];
+ iref->u.num[2] = rtp->num[2];
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) hp;
+ ASSERT(is_magic_ref_thing(hp));
+ iref->is_magic = 1;
+ iref->u.mb = mrtp->mb;
+ erts_refc_inc(&mrtp->mb->intern.refc, 1);
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_iref_storage_clean(ErtsIRefStorage *iref)
+{
+ if (iref->is_magic && erts_refc_dectest(&iref->u.mb->intern.refc, 0) == 0)
+ erts_ref_bin_free(iref->u.mb);
+#ifdef DEBUG
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+}
+
+ERTS_GLB_INLINE Uint
+erts_iref_storage_heap_size(ErtsIRefStorage *iref)
+{
+ return iref->is_magic ? ERTS_MAGIC_REF_THING_SIZE : ERTS_REF_THING_SIZE;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage)
+{
+ Eterm *hp = *hpp;
+ if (!iref->is_magic) {
+ write_ref_thing(hp, iref->u.num[0], iref->u.num[1],
+ iref->u.num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ }
+ else {
+ write_magic_ref_thing(hp, ohp, iref->u.mb);
+ OH_OVERHEAD(ohp, iref->u.mb->orig_size / sizeof(Eterm));
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ /*
+ * If we clean storage, the term inherits the
+ * refc increment of the cleaned storage...
+ */
+ if (!clean_storage)
+ erts_refc_inc(&iref->u.mb->intern.refc, 1);
+ }
+
+#ifdef DEBUG
+ if (clean_storage)
+ memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2)
+{
+ Uint32 *num1 = iref1->is_magic ? iref1->u.mb->refn : iref1->u.num;
+ Uint32 *num2 = iref2->is_magic ? iref2->u.mb->refn : iref2->u.num;
+ return erts_internal_ref_number_cmp(num1, num2);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
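
The functions above form one protocol: save a ref out of a term, ask the storage how much heap the rebuilt term needs, then materialize it, optionally letting the new term inherit the storage's refc increment. A sketch of the intended call sequence (names as declared above; error handling elided):

Eterm materialize_saved_ref(Process *c_p, ErtsIRefStorage *stg)
{
    Uint sz = erts_iref_storage_heap_size(stg);
    Eterm *hp = HAlloc(c_p, sz);
    /* clean_storage = 1: the term takes over the storage's refc
     * increment and 'stg' must not be used again afterwards. */
    return erts_iref_storage_make_ref(stg, &hp, &c_p->off_heap, 1);
}
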
+
+
+/* OIref storage for ordinary internal references only... */
+typedef struct {
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsOIRefStorage;
+
+ERTS_GLB_INLINE void erts_oiref_storage_save(ErtsOIRefStorage *oiref,
+ Eterm ref);
+ERTS_GLB_INLINE Eterm erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref,
+ Eterm **hpp);
+ERTS_GLB_INLINE int erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_oiref_storage_save(ErtsOIRefStorage *oiref, Eterm ref)
+{
+ ErtsORefThing *rtp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ rtp = (ErtsORefThing *) internal_ref_val(ref);
+
+ oiref->num[0] = rtp->num[0];
+ oiref->num[1] = rtp->num[1];
+ oiref->num[2] = rtp->num[2];
+}
+
+ERTS_GLB_INLINE Eterm
+erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ write_ref_thing(hp, oiref->num[0], oiref->num[1], oiref->num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2)
+{
+ return erts_internal_ref_number_cmp(oiref1->num, oiref2->num);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
#endif /* ERTS_BIF_UNIQUE_H__ */
+
+#if (defined(ERTS_ALLOC_C__) || defined(ERL_BIF_UNIQUE_C__)) \
+ && !defined(ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__)
+#define ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__
+
+#include "hash.h"
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+} ErtsNSchedMagicRefTableEntry;
+
+#endif
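
The block after the include guard's #endif is deliberate: ERTS_BIF_UNIQUE_H__ protects the main body, while a second guard exposes ErtsNSchedMagicRefTableEntry only to the translation units that define ERTS_ALLOC_C__ or ERL_BIF_UNIQUE_C__. The pattern in miniature (a sketch with hypothetical names):

/* my_header.h */
#ifndef MY_HEADER_H__
#define MY_HEADER_H__
/* ... declarations every includer gets ... */
#endif /* MY_HEADER_H__ */

/* Extra section, visible only to selected .c files: */
#if (defined(MY_ALLOC_C__) || defined(MY_IMPL_C__)) \
    && !defined(MY_HEADER_H__PRIVATE_TYPES__)
#define MY_HEADER_H__PRIVATE_TYPES__
/* ... allocator-facing types ... */
#endif
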
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index fdc5efea98..b036b28dbf 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,34 +18,153 @@
* %CopyrightEnd%
*/
-#ifndef __ERL_BINARY_H
-#define __ERL_BINARY_H
+#ifndef ERL_BINARY_H__TYPES__
+#define ERL_BINARY_H__TYPES__
-#include "erl_threads.h"
-#include "bif.h"
+/*
+** Just like the driver binary but with initial flags
+** Note that the two structures Binary and ErlDrvBinary HAVE to
+** be equal except for extra fields in the beginning of the struct.
+** ErlDrvBinary is defined in erl_driver.h.
+** When driver_alloc_binary is called, a Binary is allocated, but
+** the pointer returned is to the address of the first element that
+** also occurs in the ErlDrvBinary struct (driver.*binary takes care of this).
+** The driver need never know about additions to the internal Binary of the
+** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
+** and Binary; the macros below can convert one type to the other, as the
+** two are in reality identical.
+*/
+
+#ifdef ARCH_32
+ /* *DO NOT USE* only for alignment. */
+#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
+#else
+#define ERTS_BINARY_STRUCT_ALIGNMENT
+#endif
+
+/* Add any new fields in binary_internals; otherwise the drivers crash */
+struct binary_internals {
+ UWord flags;
+ erts_refc_t refc;
+ ERTS_BINARY_STRUCT_ALIGNMENT
+};
+
+
+typedef struct binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ char orig_bytes[1]; /* to be continued */
+} Binary;
+
+#define ERTS_SIZEOF_Binary(Sz) \
+ (offsetof(Binary,orig_bytes) + (Sz))
+
+#if ERTS_REF_NUMBERS != 3
+#error "Update ErtsMagicBinary"
+#endif
+
+typedef struct magic_binary ErtsMagicBinary;
+struct magic_binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ int (*destructor)(Binary *);
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsAlcType_t alloc_type;
+ union {
+ struct {
+ ERTS_BINARY_STRUCT_ALIGNMENT
+ char data[1];
+ } aligned;
+ struct {
+ char data[1];
+ } unaligned;
+ } u;
+};
+
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - \
+ offsetof(ErtsMagicBinary,u.unaligned.data))
+
+typedef union {
+ Binary binary;
+ ErtsMagicBinary magic_binary;
+ struct {
+ struct binary_internals intern;
+ ErlDrvBinary binary;
+ } driver;
+} ErtsBinary;
/*
- * Maximum number of bytes to place in a heap binary.
+ * 'Binary' alignment:
+ * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
+ * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
+ * 32-bit architectures and 8 bytes on 64-bit architectures.
*/
-#define ERL_ONHEAP_BIN_LIMIT 64
+#define ERTS_MAGIC_BIN_REFN(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.refn
+#define ERTS_MAGIC_BIN_ATYPE(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.alloc_type
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.destructor
+#define ERTS_MAGIC_BIN_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
+#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.aligned.data)))
+
+/* On 32-bit arch these macro variants will save memory
+ by not forcing 8-byte alignment for the magic payload.
+*/
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
+#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
+
+
+#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
+#define ErlDrvBinary2Binary(D) ((Binary *) \
+ (((char *) (D)) \
+ - offsetof(ErtsBinary, driver.binary)))
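
ErlDrvBinary2Binary() is the container-of idiom: the driver-visible struct sits at a fixed offset inside ErtsBinary, so subtracting that offset from a pointer to the embedded member recovers the enclosing object. The same pattern in isolation:

#include <stddef.h>

struct inner { int x; };
struct outer {
    long hidden;            /* emulator-private fields come first */
    struct inner visible;   /* what the outside world points at */
};

/* Recover the enclosing struct from a pointer to the embedded one. */
#define INNER2OUTER(ip) \
    ((struct outer *)((char *)(ip) - offsetof(struct outer, visible)))
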
+
+/* A "magic" binary flag */
+#define BIN_FLAG_MAGIC 1
+#define BIN_FLAG_USR1 2 /* Reserved for use by different modules to mark */
+#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
+#define BIN_FLAG_DRV 8
+
+#endif /* ERL_BINARY_H__TYPES__ */
+
+#if !defined(ERL_BINARY_H__) && !defined(ERTS_BINARY_TYPES_ONLY__)
+#define ERL_BINARY_H__
+
+#include "erl_threads.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "erl_bits.h"
/*
- * This structure represents a SUB_BINARY.
- *
- * Note: The last field (orig) is not counted in arityval in the header.
- * This simplifies garbage collection.
+ * Maximum number of bytes to place in a heap binary.
*/
-typedef struct erl_sub_bin {
- Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
- Uint size; /* Binary size in bytes. */
- Uint offs; /* Offset into original binary. */
- byte bitsize;
- byte bitoffs;
- byte is_writable; /* The underlying binary is writable */
- Eterm orig; /* Original binary (REFC or HEAP binary). */
-} ErlSubBin;
+#define ERL_ONHEAP_BIN_LIMIT 64
#define ERL_SUB_BIN_SIZE (sizeof(ErlSubBin)/sizeof(Eterm))
#define HEADER_SUB_BIN _make_header(ERL_SUB_BIN_SIZE-2,_TAG_HEADER_SUB_BIN)
@@ -165,6 +284,17 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
+typedef union {
+ /*
+ * These two are almost always of
+ * the same size, but when fallback
+ * atomics are used they might
+ * differ in size.
+ */
+ erts_smp_atomic_t smp_atomic_word;
+ erts_atomic_t atomic_word;
+} ErtsMagicIndirectionWord;
+
#if defined(__i386__) || !defined(__GNUC__)
/*
* Doubles aren't required to be 8-byte aligned on intel x86.
@@ -188,11 +318,16 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE void erts_bin_release(Binary *bp);
ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
- void (*destructor)(Binary *),
+ int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned);
ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
- void (*destructor)(Binary *));
+ int (*destructor)(Binary *));
+ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *));
+ERTS_GLB_INLINE erts_smp_atomic_t *erts_smp_binary_to_magic_indirection(Binary *bp);
+ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -242,7 +377,8 @@ erts_bin_drv_alloc_fnf(Uint size)
ERTS_CHK_BIN_ALIGNMENT(res);
if (res) {
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
}
return res;
}
@@ -260,7 +396,8 @@ erts_bin_drv_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
@@ -278,7 +415,8 @@ erts_bin_nrml_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = 0;
+ res->intern.flags = 0;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
@@ -287,9 +425,9 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
return NULL;
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -304,9 +442,9 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
erts_realloc_enomem(type, bp, size);
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -320,39 +458,87 @@ erts_bin_realloc(Binary *bp, Uint size)
ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
- if (bp->flags & BIN_FLAG_MAGIC)
- ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp);
- if (bp->flags & BIN_FLAG_DRV)
+ if (bp->intern.flags & BIN_FLAG_MAGIC) {
+ if (!ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp)) {
+ /* Destructor took control of the deallocation */
+ return;
+ }
+ erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
+ erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+ }
+ else if (bp->intern.flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
else
erts_free(ERTS_ALC_T_BINARY, (void *) bp);
}
+ERTS_GLB_INLINE void
+erts_bin_release(Binary *bp)
+{
+ if (erts_refc_dectest(&bp->intern.refc, 0) == 0) {
+ erts_bin_free(bp);
+ }
+}
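
Note the changed destructor contract visible in erts_bin_free() above: a magic binary's destructor now returns int, and returning zero means it has taken over the deallocation (the refn removal and erts_free() are skipped). A hedged sketch of a destructor that defers cleanup; enqueue_for_async_cleanup() is a hypothetical helper:

/* Hypothetical: hands the binary to some deferred-cleanup mechanism. */
extern void enqueue_for_async_cleanup(Binary *bp);

static int my_magic_destructor(Binary *bp)
{
    int can_free_now = 1;   /* ... tear down owned resources ... */
    if (can_free_now)
        return 1;           /* nonzero: erts_bin_free() deallocates bp */
    enqueue_for_async_cleanup(bp);
    return 0;               /* zero: we now own bp and free it later */
}
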
+
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary_x(Uint size, void (*destructor)(Binary *),
+erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned)
{
Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size)
: ERTS_MAGIC_BIN_SIZE(size);
- Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ Binary* bptr = erts_alloc_fnf(alloc_type, bsize);
ASSERT(bsize > size);
if (!bptr)
- erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
+ erts_alloc_n_enomem(ERTS_ALC_T2N(alloc_type), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
- bptr->flags = BIN_FLAG_MAGIC;
+ bptr->intern.flags = BIN_FLAG_MAGIC;
bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
: ERTS_MAGIC_BIN_ORIG_SIZE(size);
- erts_refc_init(&bptr->refc, 0);
+ erts_refc_init(&bptr->intern.refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
+ ERTS_MAGIC_BIN_ATYPE(bptr) = alloc_type;
+ erts_make_magic_ref_in_array(ERTS_MAGIC_BIN_REFN(bptr));
return bptr;
}
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+erts_create_magic_binary(Uint size, int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(size, destructor,
+ ERTS_ALC_T_BINARY, 0);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_create_magic_indirection(int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(sizeof(ErtsMagicIndirectionWord),
+ destructor,
+ ERTS_ALC_T_MINDIRECTION,
+ 1); /* Not 64-bit aligned,
+ but word aligned */
+}
+
+ERTS_GLB_INLINE erts_smp_atomic_t *
+erts_smp_binary_to_magic_indirection(Binary *bp)
+{
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->smp_atomic_word;
+}
+
+ERTS_GLB_INLINE erts_atomic_t *
+erts_binary_to_magic_indirection(Binary *bp)
{
- return erts_create_magic_binary_x(size, destructor, 0);
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->atomic_word;
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* !__ERL_BINARY_H */
+#endif /* !ERL_BINARY_H__ */
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 6bf52fb303..b4e611f01b 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -110,9 +110,6 @@ erts_init_bits(void)
{
ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0);
ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0);
- ERTS_CT_ASSERT(ERTS_MAGIC_BIN_BYTES_TO_ALIGN ==
- (offsetof(ErtsMagicBinary,u.aligned.data)
- - offsetof(ErtsMagicBinary,u.unaligned.data)));
ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes)
== offsetof(Binary,orig_bytes));
@@ -1324,7 +1321,14 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
used_size_in_bits = erts_bin_offset + build_size_in_bits;
+
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
pb->flags |= PB_ACTIVE_WRITER;
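
The guard added here (and in the two sibling hunks below) is the standard overflow-safe form of an unsigned addition: a + b wraps exactly when b > MAX - a, so testing (ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset rejects the sum before it is ever computed. The same check in isolation:

#include <limits.h>

/* Nonzero iff a + b would wrap around for unsigned int. */
static int add_would_overflow(unsigned a, unsigned b)
{
    return (UINT_MAX - b) < a;
}
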
@@ -1398,16 +1402,27 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
- used_size_in_bits = erts_bin_offset + build_size_in_bits;
- used_size_in_bytes = NBYTES(used_size_in_bits);
- bin_size = 2*used_size_in_bytes;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ used_size_in_bytes = NBYTES(used_size_in_bits);
+
+ if(used_size_in_bits < (ERTS_UINT_MAX / 2)) {
+ bin_size = 2 * used_size_in_bytes;
+ } else {
+ bin_size = NBYTES(ERTS_UINT_MAX);
+ }
+
bin_size = (bin_size < 256) ? 256 : bin_size;
/*
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -1491,6 +1506,12 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* Calculate new size in bytes.
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
pb->size = (pos_in_bits_after_build+7) >> 3;
pb->flags |= PB_ACTIVE_WRITER;
@@ -1521,14 +1542,11 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* binary and copy the contents of the old binary into it.
*/
Binary* bptr = erts_bin_nrml_alloc(new_size);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
pb->val = bptr;
pb->bytes = (byte *) bptr->orig_bytes;
- if (erts_refc_dectest(&binp->refc, 0) == 0) {
- erts_bin_free(binp);
- }
+ erts_bin_release(binp);
}
}
erts_current_bin = pb->bytes;
@@ -1568,7 +1586,6 @@ erts_bs_init_writable(Process* p, Eterm sz)
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
/*
* Now allocate the ProcBin on the heap.
@@ -1644,7 +1661,7 @@ erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb)
return LSB[0] | (LSB[1]<<8) | (LSB[2]<<16) | (LSB[3]<<24);
}
-void
+static void
erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf)
{
Uint bits = mb->size - mb->offset;
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 4bd5b24157..5da2b28a89 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +22,23 @@
#define __ERL_BITS_H__
/*
+ * This structure represents a SUB_BINARY.
+ *
+ * Note: The last field (orig) is not counted in arityval in the header.
+ * This simplifies garbage collection.
+ */
+
+typedef struct erl_sub_bin {
+ Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Uint offs; /* Offset into original binary. */
+ byte bitsize;
+ byte bitoffs;
+ byte is_writable; /* The underlying binary is writable */
+ Eterm orig; /* Original binary (REFC or HEAP binary). */
+} ErlSubBin;
+
+/*
* This structure represents a binary to be matched.
*/
@@ -185,7 +202,6 @@ void erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes));
Uint erts_bits_bufs_size(void);
Uint32 erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb);
-void erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf);
Eterm erts_bs_get_utf8(ErlBinMatchBuffer* mb);
Eterm erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags);
Eterm erts_bs_append(Process* p, Eterm* reg, Uint live, Eterm build_size_term,
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 28aaeeb479..f8b2fa744f 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -827,17 +827,6 @@ erts_sched_bind_atfork_child(int unbind)
return 0;
}
-char *
-erts_sched_bind_atvfork_child(int unbind)
-{
- if (unbind) {
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
- || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
- return erts_get_unbind_from_cpu_str(cpuinfo);
- }
- return "false";
-}
-
void
erts_sched_bind_atfork_parent(int unbind)
{
@@ -1717,7 +1706,8 @@ erts_init_cpu_topology(void)
{
int ix;
- erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info");
+ erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
@@ -2254,45 +2244,6 @@ add_cpu_groups(int groups,
return cgm;
}
-static void
-remove_cpu_groups(erts_cpu_groups_callback_t callback, void *arg)
-{
- erts_cpu_groups_map_t *prev_cgm, *cgm;
- erts_cpu_groups_callback_list_t *prev_cgcl, *cgcl;
-
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
-
- no_cpu_groups_callbacks--;
-
- prev_cgm = NULL;
- for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) {
- prev_cgcl = NULL;
- for (cgcl = cgm->callback_list; cgcl; cgcl = cgcl->next) {
- if (cgcl->callback == callback && cgcl->arg == arg) {
- if (prev_cgcl)
- prev_cgcl->next = cgcl->next;
- else
- cgm->callback_list = cgcl->next;
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgcl);
- if (!cgm->callback_list) {
- if (prev_cgm)
- prev_cgm->next = cgm->next;
- else
- cpu_groups_maps = cgm->next;
- if (cgm->array)
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm->array);
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm);
- }
- return;
- }
- prev_cgcl = cgcl;
- }
- prev_cgm = cgm;
- }
-
- erts_exit(ERTS_ABORT_EXIT, "Cpu groups not found\n");
-}
-
static int
cpu_groups_lookup(erts_cpu_groups_map_t *map,
ErtsSchedulerData *esdp)
@@ -2332,21 +2283,3 @@ update_cpu_groups_maps(void)
for (cgm = cpu_groups_maps; cgm; cgm = cgm->next)
make_cpu_groups_map(cgm, 0);
}
-
-void
-erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- add_cpu_groups(groups, callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
-
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- remove_cpu_groups(callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h
index 45324ac4a0..c922214702 100644
--- a/erts/emulator/beam/erl_cpu_topology.h
+++ b/erts/emulator/beam/erl_cpu_topology.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -85,22 +85,14 @@ void erts_sched_bind_atthrcreate_parent(int unbind);
int erts_sched_bind_atfork_prepare(void);
int erts_sched_bind_atfork_child(int unbind);
-char *erts_sched_bind_atvfork_child(int unbind);
void erts_sched_bind_atfork_parent(int unbind);
Eterm erts_fake_scheduler_bindings(Process *p, Eterm how);
Eterm erts_debug_cpu_groups_map(Process *c_p, int groups);
-
typedef void (*erts_cpu_groups_callback_t)(int,
ErtsSchedulerData *,
int,
void *);
-void erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg);
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg);
-
#endif
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 23c8a5bae9..b83134c79c 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -19,9 +19,7 @@
*/
/*
- * This file contains the bif interface functions and
- * the handling of the "meta tables" ie the tables of
- * db tables.
+ * This file contains the 'ets' bif interface functions.
*/
/*
@@ -43,6 +41,7 @@
#include "erl_db.h"
#include "bif.h"
#include "big.h"
+#include "erl_binary.h"
erts_smp_atomic_t erts_ets_misc_mem_size;
@@ -74,62 +73,226 @@ enum DbIterSafety {
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
+/*
+ * "fixed_tabs": list of all fixed tables for a process
+ */
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix);
+#endif
-/*
-** The main meta table, containing all ets tables.
-*/
-#ifdef ERTS_SMP
+static void fixed_tabs_insert(Process* p, DbFixation* fix)
+{
+ DbFixation* first = erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
+
+ if (!first) {
+ fix->tabs.next = fix->tabs.prev = fix;
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix);
+ }
+ else {
+ ASSERT(!fixed_tabs_find(first, fix));
+ fix->tabs.prev = first->tabs.prev;
+ fix->tabs.next = first;
+ fix->tabs.prev->tabs.next = fix;
+ first->tabs.prev = fix;
+ }
+}
+
+static void fixed_tabs_delete(Process *p, DbFixation* fix)
+{
+ if (fix->tabs.next == fix) {
+ DbFixation* old;
+ ASSERT(fix->tabs.prev == fix);
+ old = erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, NULL);
+ ASSERT(old == fix); (void)old;
+ }
+ else {
+ DbFixation *first = (DbFixation*) erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
-#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
-#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
-#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+ ASSERT(fixed_tabs_find(first, fix));
+ fix->tabs.prev->tabs.next = fix->tabs.next;
+ fix->tabs.next->tabs.prev = fix->tabs.prev;
-typedef union {
- erts_smp_rwmtx_t rwmtx;
- byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
- sizeof(erts_smp_rwmtx_t))];
-} erts_meta_main_tab_lock_t;
+ if (fix == first)
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix->tabs.next);
+ }
+}
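
fixed_tabs_insert()/fixed_tabs_delete() above keep an intrusive circular doubly-linked list threaded through the fixations, with the process's PSD slot pointing at an arbitrary "first" element and NULL standing for the empty list. The structure reduced to its core (an illustrative sketch, not erts code):

struct node { struct node *next, *prev; };

static void list_insert(struct node **head, struct node *n)
{
    if (!*head) {
        n->next = n->prev = n;      /* singleton circle */
        *head = n;
    } else {
        n->prev = (*head)->prev;    /* link in before "first" */
        n->next = *head;
        n->prev->next = n;
        (*head)->prev = n;
    }
}

static void list_delete(struct node **head, struct node *n)
{
    if (n->next == n) {             /* last element */
        *head = NULL;
    } else {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        if (*head == n)             /* keep the head pointer valid */
            *head = n->next;
    }
}
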
-static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix)
+{
+ DbFixation* p;
+ if (!first) {
+ first = (DbFixation*) erts_psd_get(fix->procs.p, ERTS_PSD_ETS_FIXED_TABLES);
+ }
+ p = first;
+ do {
+ if (p == fix)
+ return 1;
+ ASSERT(p->procs.p == fix->procs.p);
+ ASSERT(p->tabs.next->tabs.prev == p);
+ p = p->tabs.next;
+ } while (p != first);
+ return 0;
+}
#endif
-static struct {
- union {
- DbTable *tb; /* Only directly readable if slot is ALIVE */
- UWord next_free; /* (index<<2)|1 if slot is FREE */
- }u;
-} *meta_main_tab;
-/* A slot in meta_main_tab can have three states:
- * FREE : Free to use for new table. Part of linked free-list.
- * ALIVE: Contains a table
- * DEAD : Contains a table that is being removed.
+
+/*
+ * fixing_procs: tree of all processes fixating a table
*/
-#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1)
-#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2)
-#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2)))
-#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2)
-#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1)
-#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
-#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
+#define ERTS_RBT_PREFIX fixing_procs
+#define ERTS_RBT_T DbFixation
+#define ERTS_RBT_KEY_T Process*
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->procs.parent = NULL; \
+ (T)->procs.right = NULL; \
+ (T)->procs.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->procs.is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!(T)->procs.is_red)
+#define ERTS_RBT_SET_BLACK(T) ((T)->procs.is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->procs.is_red = (F))
+#define ERTS_RBT_GET_PARENT(T) ((T)->procs.parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->procs.parent = (P))
+#define ERTS_RBT_GET_RIGHT(T) ((T)->procs.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->procs.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->procs.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->procs.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->procs.p)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#ifdef DEBUG
+# define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#ifdef HARDDEBUG
+# error Do something useful with CHECK_TABLES maybe
+#else
+# define CHECK_TABLES()
+#endif
+
-static ERTS_INLINE erts_smp_rwmtx_t *
-get_meta_main_tab_lock(unsigned slot)
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data);
+static void schedule_free_dbtable(DbTable* tb);
+static void delete_sched_table(Process *c_p, DbTable *tb);
+
+static void table_dec_refc(DbTable *tb, erts_aint_t min_val)
+{
+ if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0)
+ schedule_free_dbtable(tb);
+}
+
+static int
+db_table_tid_destructor(Binary *unused)
+{
+ return 1;
+}
+
+static ERTS_INLINE void
+make_btid(DbTable *tb)
+{
+ Binary *btid = erts_create_magic_indirection(db_table_tid_destructor);
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb);
+ tb->common.btid = btid;
+ /*
+ * Table and magic indirection refer to each other,
+ * and the table is referred to once by being alive...
+ */
+ erts_smp_refc_init(&tb->common.refc, 2);
+ erts_refc_inc(&btid->intern.refc, 1);
+}
+
+static ERTS_INLINE DbTable* btid2tab(Binary* btid)
+{
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ return (DbTable *) erts_smp_atomic_read_nob(tbref);
+}
+
+static DbTable *
+tid2tab(Eterm tid)
+{
+ DbTable *tb;
+ Binary *btid;
+ erts_smp_atomic_t *tbref;
+ if (!is_internal_magic_ref(tid))
+ return NULL;
+
+ btid = erts_magic_ref2bin(tid);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor)
+ return NULL;
+
+ tbref = erts_smp_binary_to_magic_indirection(btid);
+ tb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+
+ ASSERT(!tb || tb->common.btid == btid);
+
+ return tb;
+}
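
A table identifier is thus a magic ref whose binary contains nothing but one atomic word pointing at the table; deleting the table swaps that word to NULL (see tid_clear() below), so stale tids held elsewhere resolve to "no table" rather than dangling. The scheme reduced to a model with C11 atomics, reference counting omitted:

#include <stdatomic.h>
#include <stddef.h>

struct table;
struct tid_cell {                      /* lives inside the magic binary */
    _Atomic(struct table *) tab;
};

static struct table *deref_tid(struct tid_cell *c)
{
    return atomic_load(&c->tab);       /* NULL once the table is deleted */
}

static void clear_tid(struct tid_cell *c)
{
    /* mirrors the erts_smp_atomic_xchg_nob() in tid_clear() */
    struct table *old = atomic_exchange(&c->tab, NULL);
    (void)old;   /* the real code drops a table refc when old != NULL */
}
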
+
+static ERTS_INLINE int
+is_table_alive(DbTable *tb)
+{
+ erts_smp_atomic_t *tbref;
+ DbTable *rtb;
+
+ tbref = erts_smp_binary_to_magic_indirection(tb->common.btid);
+ rtb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+
+ ASSERT(!rtb || rtb == tb);
+
+ return !!rtb;
+}
+
+static ERTS_INLINE int
+is_table_named(DbTable *tb)
{
#ifdef ERTS_SMP
- return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+ return tb->common.type & DB_NAMED_TABLE;
#else
- return NULL;
+ return tb->common.status & DB_NAMED_TABLE;
#endif
}
-static erts_smp_spinlock_t meta_main_tab_main_lock;
-static Uint meta_main_tab_first_free; /* Index of first free slot */
-static int meta_main_tab_cnt; /* Number of active tables */
-static int meta_main_tab_top; /* Highest ever used slot + 1 */
-static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
-static Uint meta_main_tab_seq_incr;
-static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
+
+static ERTS_INLINE void
+tid_clear(Process *c_p, DbTable *tb)
+{
+ DbTable *rtb;
+ Binary *btid = tb->common.btid;
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
+ ASSERT(!rtb || tb == rtb);
+ if (rtb) {
+ table_dec_refc(tb, 1);
+ delete_sched_table(c_p, tb);
+ }
+}
+
+static ERTS_INLINE Eterm
+make_tid(Process *c_p, DbTable *tb)
+{
+ Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid);
+}
+
/*
** The meta hash table of all NAMED ets tables
@@ -181,8 +344,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */
-static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */
static Eterm ms_delete_all;
static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
of all objects */
@@ -195,15 +356,13 @@ static void fix_table_locked(Process* p, DbTable* tb);
static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
-static void free_fixations_locked(DbTable *tb);
+static SWord free_fixations_locked(Process* p, DbTable *tb);
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab);
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
@@ -218,6 +377,7 @@ static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
*/
Export ets_select_delete_continue_exp;
Export ets_select_count_continue_exp;
+Export ets_select_replace_continue_exp;
Export ets_select_continue_exp;
/*
@@ -235,23 +395,16 @@ free_dbtable(void *vtb)
erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
- erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
- tb->common.id);
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
-
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
#endif
#ifdef ERTS_SMP
erts_smp_rwmtx_destroy(&tb->common.rwlock);
erts_smp_mtx_destroy(&tb->common.fixlock);
#endif
ASSERT(is_immed(tb->common.heir_data));
+
+ if (tb->common.btid)
+ erts_bin_release(tb->common.btid);
+
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
}
@@ -266,15 +419,163 @@ static void schedule_free_dbtable(DbTable* tb)
* Caller is *not* allowed to access the specialized part
* (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_smp_refc_read(&tb->common.ref, 0) == 0);
+ ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0);
+ ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
sizeof(DbTable));
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
- char *rwname, char* fixname)
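+/*
+ * Link 'tb' into the circular list of tables created on the calling
+ * process' scheduler; this list is what ets:all() iterates over.
+ */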
+static ERTS_INLINE void
+save_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ DbTable *first;
+
+ ASSERT(esdp);
+ esdp->ets_tables.count++;
+ erts_smp_refc_inc(&tb->common.refc, 1);
+
+ first = esdp->ets_tables.clist;
+ if (!first) {
+ tb->common.all.next = tb->common.all.prev = tb;
+ esdp->ets_tables.clist = tb;
+ }
+ else {
+ tb->common.all.prev = first->common.all.prev;
+ tb->common.all.next = first;
+ tb->common.all.prev->common.all.next = tb;
+ first->common.all.prev = tb;
+ }
+}
+
+static ERTS_INLINE void
+remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
+{
+ ErtsEtsAllYieldData *eaydp;
+ ASSERT(esdp);
+ ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
+ == (Uint32) esdp->no);
+
+ ASSERT(esdp->ets_tables.count > 0);
+ esdp->ets_tables.count--;
+
+ eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ if (eaydp->ongoing) {
+ /* ets:all() op process list from last to first... */
+ if (eaydp->tab == tb) {
+ if (eaydp->tab == esdp->ets_tables.clist)
+ eaydp->tab = NULL;
+ else
+ eaydp->tab = tb->common.all.prev;
+ }
+ }
+
+ if (tb->common.all.next == tb) {
+ ASSERT(tb->common.all.prev == tb);
+ ASSERT(esdp->ets_tables.clist == tb);
+ esdp->ets_tables.clist = NULL;
+ }
+ else {
+#ifdef DEBUG
+ DbTable *tmp = esdp->ets_tables.clist;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.all.next;
+ } while (tmp != esdp->ets_tables.clist);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.all.prev->common.all.next = tb->common.all.next;
+ tb->common.all.next->common.all.prev = tb->common.all.prev;
+
+ if (esdp->ets_tables.clist == tb)
+ esdp->ets_tables.clist = tb->common.all.next;
+
+ }
+
+ table_dec_refc(tb, 0);
+}
+
+static void
+scheduled_remove_sched_table(void *vtb)
+{
+ remove_sched_table(erts_get_scheduler_data(), (DbTable *) vtb);
+}
+
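+/*
+ * Unlinking must happen on the scheduler whose list the table is on
+ * (identified by the thread id embedded in the tid's ref numbers);
+ * from any other scheduler the unlink is scheduled as aux work.
+ */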
+static void
+delete_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Uint32 sid;
+
+ ASSERT(esdp);
+
+ ASSERT(tb->common.btid);
+ sid = erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ if (sid == (Uint32) esdp->no)
+ remove_sched_table(esdp, tb);
+ else
+ erts_schedule_misc_aux_work((int) sid, scheduled_remove_sched_table, tb);
+}
+
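+/*
+ * Each process keeps a circular list of the tables it owns, stored
+ * in its process specific data (ERTS_PSD_ETS_OWNED_TABLES); it is
+ * consumed by erts_db_process_exiting() when the owner terminates.
+ */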
+static ERTS_INLINE void
+save_owned_table(Process *c_p, DbTable *tb)
+{
+ DbTable *first;
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+
+ erts_smp_refc_inc(&tb->common.refc, 1);
+
+ if (!first) {
+ tb->common.owned.next = tb->common.owned.prev = tb;
+ erts_psd_set(c_p, ERTS_PSD_ETS_OWNED_TABLES, tb);
+ }
+ else {
+ tb->common.owned.prev = first->common.owned.prev;
+ tb->common.owned.next = first;
+ tb->common.owned.prev->common.owned.next = tb;
+ first->common.owned.prev = tb;
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+}
+
+static ERTS_INLINE void
+delete_owned_table(Process *p, DbTable *tb)
+{
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (tb->common.owned.next == tb) {
+ DbTable* old;
+ ASSERT(tb->common.owned.prev == tb);
+ old = erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, NULL);
+ ASSERT(old == tb); (void)old;
+ }
+ else {
+ DbTable *first = (DbTable*) erts_psd_get(p, ERTS_PSD_ETS_OWNED_TABLES);
+#ifdef DEBUG
+ DbTable *tmp = first;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.owned.next;
+ } while (tmp != first);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.owned.prev->common.owned.next = tb->common.owned.next;
+ tb->common.owned.next->common.owned.prev = tb->common.owned.prev;
+
+ if (tb == first)
+ erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ table_dec_refc(tb, 1);
+}
+
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
#ifdef ERTS_SMP
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -284,9 +585,10 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
#endif
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
- rwname, tb->common.the_name);
- erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
}
@@ -294,7 +596,6 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
@@ -327,8 +628,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* to follow the tb pointer!
*/
#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
-
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
@@ -354,20 +653,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
#endif
}
-
-static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
- /* As long as we only lock for READ we don't have to lock at all. */
-}
-
-static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
-}
-
static ERTS_INLINE
DbTable* db_get_table_aux(Process *p,
Eterm id,
@@ -375,7 +660,7 @@ DbTable* db_get_table_aux(Process *p,
db_lock_kind_t kind,
int meta_already_locked)
{
- DbTable *tb = NULL;
+ DbTable *tb;
erts_smp_rwmtx_t *mtl = NULL;
/*
@@ -385,23 +670,7 @@ DbTable* db_get_table_aux(Process *p,
*/
ASSERT(erts_get_scheduler_data());
- if (is_small(id)) {
- Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- if (!meta_already_locked) {
- mtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rlock(mtl);
- }
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- else {
- erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
- || erts_lc_rwmtx_is_rwlocked(test_mtl));
- }
-#endif
- if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
- tb = meta_main_tab[slot].u.tb;
- }
- else if (is_atom(id)) {
+ if (is_atom(id)) {
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
erts_smp_rwmtx_rlock(mtl);
@@ -410,7 +679,7 @@ DbTable* db_get_table_aux(Process *p,
|| erts_lc_rwmtx_is_rwlocked(mtl));
mtl = NULL;
}
-
+ tb = NULL;
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id)
@@ -428,11 +697,13 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ else
+ tb = tid2tab(id);
+
if (tb) {
db_lock(tb, kind);
- if (tb->common.id != id
- || ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner)) {
+ if ((tb->common.status & what) == 0
+ && p->common.id != tb->common.owner) {
db_unlock(tb, kind);
tb = NULL;
}
@@ -451,18 +722,6 @@ DbTable* db_get_table(Process *p,
return db_get_table_aux(p, id, what, kind, 0);
}
-/* Requires meta_main_tab_locks[slot] locked.
-*/
-static ERTS_INLINE void free_slot(int slot)
-{
- ASSERT(!IS_SLOT_FREE(slot));
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free);
- meta_main_tab_first_free = slot;
- meta_main_tab_cnt--;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-}
-
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
@@ -527,9 +786,10 @@ static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
- Eterm name_atom = tb->common.id;
+ Eterm name_atom = tb->common.the_name;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
+ ASSERT(is_table_named(tb));
#ifdef ERTS_SMP
if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
@@ -600,11 +860,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_smp_refc_inc(&tb->common.ref, 1);
+ erts_smp_refc_inc(&tb->common.fix_count, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_smp_refc_dectest(&tb->common.ref, 0) == 0) {
+ if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -1244,6 +1504,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ Eterm old_name;
erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
@@ -1260,12 +1521,10 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
(void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
- if (is_small(BIF_ARG_1)) {
- Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
- lck2 = get_meta_main_tab_lock(slot);
- }
- else if (is_atom(BIF_ARG_1)) {
- (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (is_atom(BIF_ARG_1)) {
+ old_name = BIF_ARG_1;
+ named_tab:
+ (void) meta_name_tab_bucket(old_name, &lck2);
if (lck1 == lck2)
lck2 = NULL;
else if (lck1 > lck2) {
@@ -1275,7 +1534,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
}
}
else {
- BIF_ERROR(BIF_P, BADARG);
+ tb = tid2tab(BIF_ARG_1);
+ if (!tb)
+ BIF_ERROR(BIF_P, BADARG);
+ else {
+ if (is_table_named(tb)) {
+ old_name = tb->common.the_name;
+ goto named_tab;
+ }
+ lck2 = NULL;
+ }
}
erts_smp_rwmtx_rwlock(lck1);
@@ -1286,21 +1554,19 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
if (!tb)
goto badarg;
- if (is_not_atom(tb->common.id)) { /* Not a named table */
- tb->common.the_name = BIF_ARG_2;
- goto done;
- }
+ if (is_table_named(tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
+ goto badarg;
- if (!insert_named_tab(BIF_ARG_2, tb, 1))
- goto badarg;
-
- if (!remove_named_tab(tb, 1))
- erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.id);
-
- tb->common.id = tb->common.the_name = BIF_ARG_2;
+ if (!remove_named_tab(tb, 1))
+ erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.the_name);
+ ret = BIF_ARG_2;
+ }
+ else { /* Not a named table */
+ ret = BIF_ARG_1;
+ }
+ tb->common.the_name = BIF_ARG_2;
- done:
- ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
erts_smp_rwmtx_rwunlock(lck1);
if (lck2)
@@ -1324,7 +1590,6 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable* tb = NULL;
- int slot;
Eterm list;
Eterm val;
Eterm ret;
@@ -1339,9 +1604,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
#ifdef DEBUG
int cret;
#endif
- DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
- erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1350,7 +1613,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- status = DB_NORMAL | DB_SET | DB_PROTECTED;
+ status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
#ifdef ERTS_SMP
@@ -1433,6 +1696,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
else if (val == am_named_table) {
is_named = 1;
+ status |= DB_NAMED_TABLE;
}
else if (val == am_compressed) {
is_compressed = 1;
@@ -1487,16 +1751,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- erts_smp_refc_init(&tb->common.ref, 0);
- db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
- "db_tab", "db_tab_fix");
+ erts_smp_refc_init(&tb->common.fix_count, 0);
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
erts_smp_atomic_init_nob(&tb->common.nitems, 0);
- tb->common.fixations = NULL;
+ tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
#ifdef DEBUG
@@ -1505,87 +1768,36 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE);
- erts_smp_spin_lock(&meta_main_tab_main_lock);
+ make_btid(tb);
- if (meta_main_tab_cnt >= db_max_tabs) {
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
- erts_send_error_to_logger_str(BIF_P->group_leader,
- "** Too many db tables **\n");
- free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
- free_dbtable((void *) tb);
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
-
- slot = meta_main_tab_first_free;
- ASSERT(slot>=0 && slot<db_max_tabs);
- meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
- meta_main_tab_cnt++;
- if (slot >= meta_main_tab_top) {
- ASSERT(slot == meta_main_tab_top);
- meta_main_tab_top = slot + 1;
- }
-
- if (is_named) {
- ret = BIF_ARG_1;
- }
- else {
- ret = make_small(slot | meta_main_tab_seq_cnt);
- meta_main_tab_seq_cnt += meta_main_tab_seq_incr;
- ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot);
- }
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- tb->common.id = ret;
- tb->common.slot = slot; /* store slot for erase */
+ if (is_named)
+ ret = BIF_ARG_1;
+ else
+ ret = make_tid(BIF_P, tb);
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- meta_main_tab[slot].u.tb = tb;
- ASSERT(IS_SLOT_ALIVE(slot));
- erts_smp_rwmtx_rwunlock(mmtl);
+ save_sched_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- free_slot(slot);
- erts_smp_rwmtx_rwunlock(mmtl);
+ tid_clear(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
- schedule_free_dbtable(tb);
db_unlock(tb,LCK_WRITE);
+ table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
+ save_owned_table(BIF_P, tb);
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
- erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
#endif
- UseTmpHeap(3,BIF_P);
-
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(slot)),
- 0) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Could not update ets metadata.");
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
-
- UnUseTmpHeap(3,BIF_P);
-
BIF_RET(ret);
}
@@ -1690,9 +1902,9 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
- int trap;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
DbTable* tb;
- erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1715,7 +1927,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status |= DB_DELETE;
if (tb->common.owner != BIF_P->common.id) {
- DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
* The table is being deleted by a process other than its owner.
@@ -1723,50 +1934,33 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* current process will be killed (e.g. by an EXIT signal), we will
* now transfer the ownership to the current process.
*/
- UseTmpHeap(3,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
- BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->common.id;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(3,BIF_P);
- }
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
- db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(mmtl);
- db_lock(tb, LCK_WRITE);
+ Process *rp = erts_proc_lookup_raw(tb->common.owner);
+ /*
+ * Process 'rp' might be exiting, but our table lock prevents it
+ * from terminating as it cannot complete erts_db_process_exiting().
+ */
+ ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)));
+
+ delete_owned_table(rp, tb);
+ BIF_P->flags |= F_USING_DB;
+ tb->common.owner = BIF_P->common.id;
+ save_owned_table(BIF_P, tb);
}
-#endif
- /* We must keep the slot, to be found by db_proc_dead() if process dies */
- MARK_SLOT_DEAD(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
- if (is_atom(tb->common.id))
+
+ tid_clear(BIF_P, tb);
+
+ if (is_table_named(tb))
remove_named_tab(tb, 0);
/* disable inheritance */
free_heir_data(tb);
tb->common.heir = am_none;
- free_fixations_locked(tb);
-
- trap = free_table_cont(BIF_P, tb, 1, 1);
+ reds -= free_fixations_locked(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (trap) {
+
+ if (free_table_continue(BIF_P, tb, reds) < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
@@ -1776,9 +1970,11 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
Eterm *hp = HAlloc(BIF_P, 2);
hp[0] = make_pos_bignum_header(1);
hp[1] = (Eterm) tb;
+ BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
@@ -1790,7 +1986,6 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
{
Process* to_proc = NULL;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,BIF_P);
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
@@ -1812,26 +2007,14 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg; /* or should we be idempotent? return false maybe */
}
- UseTmpHeap(5,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
+ delete_owned_table(BIF_P, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ save_owned_table(to_proc, tb);
db_unlock(tb,LCK_WRITE);
- erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER,
- tb->common.id,
- from_pid,
- BIF_ARG_3),
- 0);
+ send_ets_transfer_message(BIF_P, to_proc, &to_locks,
+ tb, BIF_ARG_3);
erts_smp_proc_unlock(to_proc, to_locks);
- UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
@@ -2074,7 +2257,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -2101,46 +2284,254 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
return result;
}
-/*
-** Return a list of tables on this node
-*/
-BIF_RETTYPE ets_all_0(BIF_ALIST_0)
+/*
+ * ets:all/0
+ *
+ * ets:all() calls ets:internal_request_all/0 which
+ * requests information about all tables from
+ * each scheduler thread. Each scheduler replies
+ * to the calling process with information about
+ * existing tables created on that specific scheduler.
+ */
+
+struct ErtsEtsAllReq_ {
+ erts_smp_atomic32_t refc;
+ Process *proc;
+ ErtsOIRefStorage ref;
+ ErtsEtsAllReqList list[1]; /* one per scheduler */
+};
+
+#define ERTS_ETS_ALL_REQ_SIZE \
+ (sizeof(ErtsEtsAllReq) \
+ + (sizeof(ErtsEtsAllReqList) \
+ * (erts_no_schedulers - 1)))
+
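+/*
+ * The request block is over-allocated so that list[] holds one queue
+ * link per scheduler; 'refc' counts replies still pending, and the
+ * scheduler sending the last reply frees the block.
+ */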
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllData;
+
+/* Tables handled before yielding */
+#define ERTS_ETS_ALL_TB_YCNT 200
+/*
+ * Min yield count required before starting
+ * an operation that will require yield.
+ */
+#define ERTS_ETS_ALL_TB_YCNT_START 10
+
+#ifdef DEBUG
+/* Test yielding... */
+#undef ERTS_ETS_ALL_TB_YCNT
+#undef ERTS_ETS_ALL_TB_YCNT_START
+#define ERTS_ETS_ALL_TB_YCNT 10
+#define ERTS_ETS_ALL_TB_YCNT_START 1
+#endif
+
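+/*
+ * Build (a chunk of) this scheduler's reply to ets:all(). Returns 0
+ * when the reply has been sent and 1 when yielding; on yield the
+ * partially built list is parked in the heap fragment until the
+ * next call.
+ */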
+static int
+ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
+ ErlHeapFragment **hfragpp, DbTable **tablepp,
+ int *yield_count_p)
{
- DbTable* tb;
- Eterm previous;
- int i;
- Eterm* hp;
- Eterm* hendp;
- int t_tabs_cnt;
- int t_top;
-
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- t_tabs_cnt = meta_main_tab_cnt;
- t_top = meta_main_tab_top;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
-
- previous = NIL;
- for(i = 0; i < t_top; i++) {
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(i)) {
- if (hp == hendp) {
- /* Racing table creator, grab some more heap space */
- t_tabs_cnt = 10;
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
- }
- tb = meta_main_tab[i].u.tb;
- previous = CONS(hp, tb->common.id, previous);
- hp += 2;
- }
- erts_smp_rwmtx_runlock(mmtl);
+ ErtsEtsAllReq *reqp = *reqpp;
+ ErlHeapFragment *hfragp = *hfragpp;
+ int ycount = *yield_count_p;
+ DbTable *tb, *first;
+ Uint sz;
+ Eterm list, msg, ref, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ /*
+ * - save_sched_table() inserts at end of circular list.
+ *
+ * - This function scans from the end, so the number of
+ *   tables to scan won't grow even if we yield.
+ *
+ * - remove_sched_table() updates the table we yielded
+ * on if it removes it.
+ */
+
+ if (hfragp) {
+ /* Restart of a yielded operation... */
+ ASSERT(hfragp->used_size < hfragp->alloc_size);
+ ohp = &hfragp->off_heap;
+ hp = &hfragp->mem[hfragp->used_size];
+ list = *hp;
+ hfragp->used_size = hfragp->alloc_size;
+ first = esdp->ets_tables.clist;
+ tb = *tablepp;
+ }
+ else {
+ /* A new operation... */
+ ASSERT(!*tablepp);
+
+ /* Max heap size needed... */
+ sz = esdp->ets_tables.count;
+ sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
+ sz += 3 + ERTS_REF_THING_SIZE;
+ hfragp = new_message_buffer(sz);
+
+ hp = &hfragp->mem[0];
+ ohp = &hfragp->off_heap;
+ list = NIL;
+ first = esdp->ets_tables.clist;
+ tb = first ? first->common.all.prev : NULL;
+ }
+
+ if (tb) {
+ while (1) {
+ if (is_table_alive(tb)) {
+ Eterm tid;
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ list = CONS(hp, tid, list);
+ hp += 2;
+ }
+
+ if (tb == first)
+ break;
+
+ tb = tb->common.all.prev;
+
+ if (--ycount <= 0) {
+ sz = hp - &hfragp->mem[0];
+ ASSERT(hfragp->alloc_size > sz + 1);
+ *hp = list;
+ hfragp->used_size = sz;
+ *hfragpp = hfragp;
+ *reqpp = reqp;
+ *tablepp = tb;
+ *yield_count_p = 0;
+ return 1; /* Yield! */
+ }
+ }
}
- HRelease(BIF_P, hendp, hp);
- BIF_RET(previous);
+
+ ref = erts_oiref_storage_make_ref(&reqp->ref, &hp);
+ msg = TUPLE2(hp, ref, list);
+ hp += 3;
+
+ sz = hp - &hfragp->mem[0];
+ ASSERT(sz <= hfragp->alloc_size);
+
+ hfragp = erts_resize_message_buffer(hfragp, sz, &msg, 1);
+
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = hfragp;
+
+ erts_queue_message(reqp->proc, 0, mp, msg, am_system);
+
+ erts_proc_dec_refc(reqp->proc);
+
+ if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0)
+ erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp);
+
+ *reqpp = NULL;
+ *hfragpp = NULL;
+ *tablepp = NULL;
+ *yield_count_p = ycount;
+
+ return 0;
+}
+
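+/*
+ * Continue yielded ets:all() work on this scheduler: finish any
+ * ongoing reply, then drain the request queue. Returns 0 when all
+ * work is completed and 1 when yielding again.
+ */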
+int
+erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
+ ErtsEtsAllYieldData *eaydp)
+{
+ int ix = (int) esdp->no - 1;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+
+ while (1) {
+ if (!eaydp->ongoing) {
+ ErtsEtsAllReq *ongoing;
+
+ if (!eaydp->queue)
+ return 0; /* All work completed! */
+
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count)
+ return 1; /* Yield! */
+
+ eaydp->ongoing = ongoing = eaydp->queue;
+ if (ongoing->list[ix].next == ongoing)
+ eaydp->queue = NULL;
+ else {
+ ongoing->list[ix].next->list[ix].prev = ongoing->list[ix].prev;
+ ongoing->list[ix].prev->list[ix].next = ongoing->list[ix].next;
+ eaydp->queue = ongoing->list[ix].next;
+ }
+ ASSERT(!eaydp->hfrag);
+ ASSERT(!eaydp->tab);
+ }
+
+ if (ets_all_reply(esdp, &eaydp->ongoing, &eaydp->hfrag, &eaydp->tab, &yc))
+ return 1; /* Yield! */
+ }
+}
+
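+/*
+ * Per-scheduler entry point for an ets:internal_request_all/0
+ * request: reply at once if no operation is ongoing, otherwise
+ * enqueue the request on this scheduler's queue.
+ */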
+static void
+handle_ets_all_request(void *vreq)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsEtsAllYieldData *eayp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ ErtsEtsAllReq *req = (ErtsEtsAllReq *) vreq;
+
+ if (!eayp->ongoing && !eayp->queue) {
+ /* No ets:all() operations ongoing... */
+ ErlHeapFragment *hf = NULL;
+ DbTable *tb = NULL;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+ if (ets_all_reply(esdp, &req, &hf, &tb, &yc)) {
+ /* Yielded... */
+ ASSERT(hf);
+ eayp->ongoing = req;
+ eayp->hfrag = hf;
+ eayp->tab = tb;
+ erts_notify_new_aux_yield_work(esdp);
+ }
+ }
+ else {
+ /* Ongoing ets:all() operations; queue up this request... */
+ int ix = (int) esdp->no - 1;
+ if (!eayp->queue) {
+ req->list[ix].next = req;
+ req->list[ix].prev = req;
+ eayp->queue = req;
+ }
+ else {
+ req->list[ix].next = eayp->queue;
+ req->list[ix].prev = eayp->queue->list[ix].prev;
+ eayp->queue->list[ix].prev = req;
+ req->list[ix].prev->list[ix].next = req;
+ }
+ }
+}
+
+BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
+{
+ Eterm ref = erts_make_ref(BIF_P);
+ ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ,
+ ERTS_ETS_ALL_REQ_SIZE);
+ erts_smp_atomic32_init_nob(&req->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_oiref_storage_save(&req->ref, ref);
+ req->proc = BIF_P;
+ erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ handle_ets_all_request,
+ (void *) req);
+#endif
+
+ handle_ets_all_request((void *) req);
+ BIF_RET(ref);
}
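+
+/*
+ * Caller side, as a rough Erlang sketch (the real loop lives in
+ * ets.erl; recv_all/3 is illustrative only):
+ *
+ *   all() ->
+ *       Ref = ets:internal_request_all(),
+ *       recv_all(Ref, erlang:system_info(schedulers), []).
+ *   recv_all(_Ref, 0, Acc) -> Acc;
+ *   recv_all(Ref, N, Acc) ->
+ *       receive {Ref, Tabs} -> recv_all(Ref, N-1, Tabs ++ Acc) end.
+ */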
@@ -2245,7 +2636,7 @@ ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb,
+ cret = tb->common.meth->db_select_chunk(p, tb, arg1,
arg2, chunk_size,
0 /* not reversed */,
&ret);
@@ -2414,8 +2805,7 @@ ets_select2(Process* p, Eterm arg1, Eterm arg2)
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg2,
- 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2506,7 +2896,7 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -2532,6 +2922,103 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
return result;
}
+/*
+ ** This is for trapping, cannot be called directly.
+ */
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
+{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_WRITE_REC;
+
+ CHECK_TABLES();
+ ASSERT(is_tuple(a1));
+ tptr = tuple_val(a1);
+ ASSERT(arityval(*tptr) >= 1);
+
+ if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
+ BIF_ERROR(p,BADARG);
+ }
+
+ cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
+
+ if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+
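+/*
+ * ets:select_replace/2: match objects and replace them in place,
+ * analogous to ets:select_delete/2; traps to ets_select_replace_1
+ * above when the operation is too long for one slice.
+ */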
+BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (tb->common.status & DB_BAG) {
+        /* select_replace is not supported for bag tables; the bag
+           implementation presented both semantic-consistency and
+           performance issues */
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P,tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
{
@@ -2560,7 +3047,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P,tb,
+ cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1,
BIF_ARG_2, chunk_size,
1 /* reversed */, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2610,7 +3097,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2,
+ cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
1 /*reversed*/, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2701,7 +3188,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
*/
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2763,7 +3250,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
Eterm ret = THE_NON_VALUE;
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2779,7 +3266,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
BIF_RETTYPE ets_is_compiled_ms_1(BIF_ALIST_1)
{
- if (erts_db_is_compiled_ms(BIF_ARG_1)) {
+ if (erts_db_get_match_prog_binary(BIF_ARG_1)) {
BIF_RET(am_true);
} else {
BIF_RET(am_false);
@@ -2794,9 +3281,9 @@ BIF_RETTYPE ets_match_spec_compile_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
- BIF_RET(erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mp));
+ BIF_RET(erts_db_make_match_prog_ref(BIF_P, mp, &hp));
}
BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
@@ -2805,24 +3292,18 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
int i = 0;
Eterm *hp;
Eterm lst;
- ProcBin *bp;
Binary *mp;
Eterm res;
Uint32 dummy;
- if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) || !is_binary(BIF_ARG_2)) {
+ if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- bp = (ProcBin*) binary_val(BIF_ARG_2);
- if (thing_subtag(bp->thing_word) != REFC_BINARY_SUBTAG) {
+ mp = erts_db_get_match_prog_binary(BIF_ARG_2);
+ if (!mp)
goto error;
- }
- mp = bp->val;
- if (!IsMatchProgBinary(mp)) {
- goto error;
- }
if (BIF_ARG_1 == NIL) {
BIF_RET(BIF_ARG_3);
@@ -2859,7 +3340,6 @@ int erts_ets_rwmtx_spin_count = -1;
void init_db(ErtsDbSpinCount db_spin_count)
{
- DbTable init_tb;
int i;
Eterm *hp;
unsigned bits;
@@ -2908,19 +3388,10 @@ void init_db(ErtsDbSpinCount db_spin_count)
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- meta_main_tab_locks =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
- sizeof(erts_meta_main_tab_lock_t)
- * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
-
- for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
- "meta_main_tab_slot", make_small(i));
- }
- erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
- "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
#endif
@@ -2937,20 +3408,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
erts_exit(ERTS_ERROR_EXIT,"Max limit for ets tabled too high %u (max %u).",
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1;
- meta_main_tab_seq_incr = (((Uint)1)<<bits);
-
- size = sizeof(*meta_main_tab)*db_max_tabs;
- meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
- ERTS_ETS_MISC_MEM_ADD(size);
-
- meta_main_tab_cnt = 0;
- meta_main_tab_top = 0;
- for (i=1; i<db_max_tabs; i++) {
- SET_NEXT_FREE_SLOT(i-1,i);
- }
- SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
- meta_main_tab_first_free = 0;
meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
@@ -2965,70 +3422,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
- /*TT*/
- /* Create meta table invertion. */
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_tab->common.id = NIL;
- meta_pid_to_tab->common.the_name = am_true;
- meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_tab->common.type
- = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_tab->common.keypos = 1;
- meta_pid_to_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0);
- meta_pid_to_tab->common.slot = -1;
- meta_pid_to_tab->common.meth = &db_hash;
- meta_pid_to_tab->common.compress = 0;
-
- erts_smp_refc_init(&meta_pid_to_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
- erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_fixed_tab->common.id = NIL;
- meta_pid_to_fixed_tab->common.the_name = am_true;
- meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_fixed_tab->common.type
- = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_fixed_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_fixed_tab->common.keypos = 1;
- meta_pid_to_fixed_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0);
- meta_pid_to_fixed_tab->common.slot = -1;
- meta_pid_to_fixed_tab->common.meth = &db_hash;
- meta_pid_to_fixed_tab->common.compress = 0;
-
- erts_smp_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
am_ets, am_atom_put("delete_trap",11), 1,
@@ -3040,6 +3433,11 @@ void init_db(ErtsDbSpinCount db_spin_count)
&ets_select_count_1);
/* Non visual BIF to trap to. */
+ erts_init_trap_export(&ets_select_replace_continue_exp,
+ am_ets, am_atom_put("replace_trap",11), 1,
+ &ets_select_replace_1);
+
+ /* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_continue_exp,
am_ets, am_atom_put("select_trap",11), 1,
&ets_select_trap_1);
@@ -3057,81 +3455,18 @@ void init_db(ErtsDbSpinCount db_spin_count)
ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
-#define ARRAY_CHUNK 100
-
-typedef enum {
- ErtsDbProcCleanupProgressTables,
- ErtsDbProcCleanupProgressFixations,
- ErtsDbProcCleanupProgressDone,
-} ErtsDbProcCleanupProgress;
-
-typedef enum {
- ErtsDbProcCleanupOpGetTables,
- ErtsDbProcCleanupOpDeleteTables,
- ErtsDbProcCleanupOpGetFixations,
- ErtsDbProcCleanupOpDeleteFixations,
- ErtsDbProcCleanupOpDone
-} ErtsDbProcCleanupOperation;
-
-typedef struct {
- ErtsDbProcCleanupProgress progress;
- ErtsDbProcCleanupOperation op;
- struct {
- Eterm arr[ARRAY_CHUNK];
- int size;
- int ix;
- int clean_ix;
- } slots;
-} ErtsDbProcCleanupState;
-
-
-static void
-proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
+void
+erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
+ ErtsEtsAllYieldData *eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ eaydp->ongoing = NULL;
+ eaydp->hfrag = NULL;
+ eaydp->tab = NULL;
+ eaydp->queue = NULL;
+ esdp->ets_tables.clist = NULL;
+ esdp->ets_tables.count = 0;
}
-static void
-proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
-}
/* In: Table LCK_WRITE
** Return TRUE : ok, table not mine and NOT locked anymore.
@@ -3141,7 +3476,6 @@ static int give_away_to_heir(Process* p, DbTable* tb)
{
Process* to_proc;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,p);
Eterm to_pid;
UWord heir_data;
@@ -3185,19 +3519,12 @@ retry:
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
- UseTmpHeap(5,p);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
+ delete_owned_table(p, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(5,p);
+ save_owned_table(to_proc, tb);
+
db_unlock(tb,LCK_WRITE);
heir_data = tb->common.heir_data;
if (!is_immed(heir_data)) {
@@ -3205,17 +3532,98 @@ retry:
ASSERT(arityval(*tpv) == 1);
heir_data = tpv[1];
}
- erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf,
- am_ETS_TRANSFER,
- tb->common.id,
- p->common.id,
- heir_data),
- 0);
+ send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data);
erts_smp_proc_unlock(to_proc, to_locks);
return !0;
}
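+
+/*
+ * Build and send the {'ETS-TRANSFER', Tid, FromPid, HeirData}
+ * message to the new owner, copying a non-immediate HeirData onto
+ * the receiver's heap or into the message buffer.
+ */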
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data)
+{
+ Uint hsz, hd_sz;
+ ErtsMessage *mp;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Eterm tid, hd_copy, msg, sender;
+
+ hsz = 5;
+ if (!is_table_named(tb))
+ hsz += ERTS_MAGIC_REF_THING_SIZE;
+ if (is_immed(heir_data))
+ hd_sz = 0;
+ else {
+ hd_sz = size_object(heir_data);
+ hsz += hd_sz;
+ }
+
+ mp = erts_alloc_message_heap(proc, locks, hsz, &hp, &ohp);
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ if (!hd_sz)
+ hd_copy = heir_data;
+ else
+ hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
+ sender = c_p->common.id;
+ msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
+ erts_queue_message(proc, *locks, mp, msg, sender);
+}
+
+
+/* Auto-release fixation from exiting process */
+static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
+{
+ DbTable* tb = btid2tab(fix->tabs.btid);
+ SWord work = 0;
+
+ ASSERT(fix->procs.p == p); (void)p;
+ if (tb) {
+ db_lock(tb, LCK_WRITE_REC);
+ if (!(tb->common.status & DB_DELETE)) {
+ erts_aint_t diff;
+ #ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+ #endif
+
+ ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
+
+ diff = -((erts_aint_t) fix->counter);
+ erts_smp_refc_add(&tb->common.fix_count,diff,0);
+ fix->counter = 0;
+
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+
+ #ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+ #endif
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ work += db_unfix_table_hash(&(tb->hash));
+ }
+
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof(DbFixation), 0);
+ }
+ else {
+ ASSERT(fix->counter == 0);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+ }
+ else {
+ ASSERT(fix->counter == 0);
+ }
+
+ erts_bin_release(fix->tabs.btid);
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ++work;
+
+ return work;
+}
+
+
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
@@ -3229,276 +3637,160 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ typedef struct {
+ enum {
+ GET_OWNED_TABLE,
+ FREE_OWNED_TABLE,
+ UNFIX_TABLES,
+ }op;
+ DbTable *tb;
+ } CleanupState;
+ CleanupState *state = (CleanupState *) c_p->u.terminate;
Eterm pid = c_p->common.id;
- ErtsDbProcCleanupState default_state;
- int ret;
+ CleanupState default_state;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p);
+ SWord reds = initial_reds;
if (!state) {
state = &default_state;
- state->progress = ErtsDbProcCleanupProgressTables;
- state->op = ErtsDbProcCleanupOpGetTables;
+ state->op = GET_OWNED_TABLE;
+ state->tb = NULL;
}
- while (!0) {
+ do {
switch (state->op) {
- case ErtsDbProcCleanupOpGetTables:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_tab, LCK_READ);
- if (ret == DB_ERROR_BADKEY) {
- /* Done with tables; now fixations */
- state->progress = ErtsDbProcCleanupProgressFixations;
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata");
- }
-
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteTables;
- /* Fall through */
-
- case ErtsDbProcCleanupOpDeleteTables:
-
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (!IS_SLOT_FREE(ix)) {
- tb = GET_ANY_SLOT_TAB(ix);
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int do_yield;
- db_lock(tb, LCK_WRITE);
- /* Ownership may have changed since
- we looked up the table. */
- if (tb->common.owner != pid) {
- do_yield = 0;
- db_unlock(tb, LCK_WRITE);
- }
- else if (tb->common.heir != am_none
- && tb->common.heir != pid
- && give_away_to_heir(c_p, tb)) {
- do_yield = 0;
- }
- else {
- int first_call;
-#ifdef HARDDEBUG
- erts_fprintf(stderr,
- "erts_db_process_exiting(); Table: %T, "
- "Process: %T\n",
- tb->common.id, pid);
-#endif
- first_call = (tb->common.status & DB_DELETE) == 0;
- if (first_call) {
- /* Clear all access bits. */
- tb->common.status &= ~(DB_PROTECTED
- | DB_PUBLIC
- | DB_PRIVATE);
- tb->common.status |= DB_DELETE;
-
- if (is_atom(tb->common.id))
- remove_named_tab(tb, 0);
-
- free_heir_data(tb);
- free_fixations_locked(tb);
- }
-
- do_yield = free_table_cont(c_p, tb, first_call, 0);
- db_unlock(tb, LCK_WRITE);
- }
- if (do_yield)
- goto yield;
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ case GET_OWNED_TABLE: {
+ DbTable* tb;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!tb) {
+ /* Done with owned tables; now fixations */
+ state->op = UNFIX_TABLES;
+ break;
+ }
- proc_exit_cleanup_tables_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetTables;
- break;
+ ASSERT(tb != state->tb);
+ state->tb = tb;
+ db_lock(tb, LCK_WRITE);
+ /*
+ * Ownership may have changed since we looked up the table.
+ */
+ if (tb->common.owner != pid) {
+ db_unlock(tb, LCK_WRITE);
+ break;
+ }
+ if (tb->common.heir != am_none
+ && tb->common.heir != pid
+ && give_away_to_heir(c_p, tb)) {
+ break;
+ }
+ tid_clear(c_p, tb);
+ /* Clear all access bits. */
+ tb->common.status &= ~(DB_PROTECTED | DB_PUBLIC | DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ if (is_table_named(tb))
+ remove_named_tab(tb, 0);
+
+ free_heir_data(tb);
+ reds -= free_fixations_locked(c_p, tb);
+ db_unlock(tb, LCK_WRITE);
+ state->op = FREE_OWNED_TABLE;
+ break;
+ }
+ case FREE_OWNED_TABLE:
+ reds = free_table_continue(c_p, state->tb, reds);
+ if (reds < 0)
+ goto yield;
- case ErtsDbProcCleanupOpGetFixations:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_fixed_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ);
-
- if (ret == DB_ERROR_BADKEY) {
- /* Done */
- state->progress = ErtsDbProcCleanupProgressDone;
- state->op = ErtsDbProcCleanupOpDone;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata");
- }
+ state->op = GET_OWNED_TABLE;
+ break;
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteFixations;
- /* Fall through */
+ case UNFIX_TABLES: {
+ DbFixation* fix;
- case ErtsDbProcCleanupOpDeleteFixations:
+ fix = (DbFixation*) erts_psd_get(c_p, ERTS_PSD_ETS_FIXED_TABLES);
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(ix)) {
- tb = meta_main_tab[ix].u.tb;
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int reds = 0;
-
- db_lock(tb, LCK_WRITE_REC);
- if (!(tb->common.status & DB_DELETE)) {
- DbFixation** pp;
-
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
- }
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
- }
- }
- db_unlock(tb, LCK_WRITE_REC);
- BUMP_REDS(c_p, reds);
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ if (!fix) {
+ /* Done */
- proc_exit_cleanup_fixations_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
+ if (state != &default_state)
+ erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
+ c_p->u.terminate = NULL;
- case ErtsDbProcCleanupOpDone:
+ BUMP_REDS(c_p, (initial_reds - reds));
+ return 0;
+ }
- if (state != &default_state)
- erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.terminate = NULL;
- return 0;
+ fixed_tabs_delete(c_p, fix);
+ reds -= proc_cleanup_fixed_table(c_p, fix);
+ break;
+ }
default:
ERTS_DB_INTERNAL_ERROR("Bad internal state");
- }
- }
+ }
- yield:
+ } while (reds > 0);
- switch (state->progress) {
- case ErtsDbProcCleanupProgressTables:
- proc_exit_cleanup_tables_meta_data(pid, state);
- break;
- case ErtsDbProcCleanupProgressFixations:
- proc_exit_cleanup_fixations_meta_data(pid, state);
- break;
- default:
- break;
- }
-
- ASSERT(c_p->u.terminate == (void *) state
- || state == &default_state);
+ yield:
if (state == &default_state) {
c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
- sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.terminate,
- (void*) state,
- sizeof(ErtsDbProcCleanupState));
+ sizeof(CleanupState));
+ sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState));
}
+ else
+ ASSERT(state == c_p->u.terminate);
return !0;
}
+
/* SMP note: table only need to be LCK_READ locked */
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
- DeclareTmpHeap(meta_tuple,3,p);
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- erts_smp_refc_inc(&tb->common.ref,1);
- fix = tb->common.fixations;
+ erts_smp_refc_inc(&tb->common.fix_count,1);
+ fix = tb->common.fixing_procs;
if (fix == NULL) {
tb->common.time.monotonic
= erts_get_monotonic_time(erts_proc_sched_data(p));
tb->common.time.offset = erts_get_time_offset();
}
else {
- for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->common.id) {
- ++(fix->counter);
+ fix = fixing_procs_rbt_lookup(fix, p);
+ if (fix) {
+ ASSERT(fixed_tabs_find(NULL, fix));
+ ++(fix->counter);
+
#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
+ erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- return;
- }
+ return;
}
}
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->common.id;
+ fix->tabs.btid = tb->common.btid;
+ erts_refc_inc(&fix->tabs.btid->intern.refc, 2);
+ fix->procs.p = p;
fix->counter = 1;
- fix->next = tb->common.fixations;
- tb->common.fixations = fix;
+ fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
+
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- p->flags |= F_USING_DB;
- UseTmpHeap(3,p);
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple,
- p->common.id,
- make_small(tb->common.slot)),
- 0) != DB_ERROR_NONE) {
- UnUseTmpHeap(3,p);
- erts_exit(ERTS_ERROR_EXIT,"Could not insert ets metadata in safe_fixtable.");
- }
- UnUseTmpHeap(3,p);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ p->flags |= F_USING_DB;
+
+ fixed_tabs_insert(p, fix);
}
/* SMP note: May re-lock table
@@ -3506,28 +3798,26 @@ static void fix_table_locked(Process* p, DbTable* tb)
static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
- DbFixation** pp;
+ DbFixation* fix;
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->common.id) {
- DbFixation* fix = *pp;
- erts_smp_refc_dec(&tb->common.ref,0);
- --(fix->counter);
- ASSERT(fix->counter >= 0);
- if (fix->counter > 0) {
- break;
- }
- *pp = fix->next;
+ fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
+
+ if (fix) {
+ erts_smp_refc_dec(&tb->common.fix_count,0);
+ --(fix->counter);
+ ASSERT(fix->counter >= 0);
+ if (fix->counter == 0) {
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->common.id, make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ fixed_tabs_delete(p, fix);
+
+ erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
+
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
@@ -3554,29 +3844,84 @@ unlocked:
}
}
-/* Assume that tb is WRITE locked */
-static void free_fixations_locked(DbTable *tb)
+struct free_fixations_ctx
{
- DbFixation *fix;
- DbFixation *next_fix;
+ Process* p;
+ DbTable* tb;
+ SWord cnt;
+};
+
+static void free_fixations_op(DbFixation* fix, void* vctx)
+{
+ struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx;
+ erts_aint_t diff;
+#ifdef DEBUG
+ DbTable* dbg_tb = btid2tab(fix->tabs.btid);
+#endif
- fix = tb->common.fixations;
- while (fix != NULL) {
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&tb->common.ref,diff,0);
- next_fix = fix->next;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- fix->pid,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, (void *) fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ASSERT(!dbg_tb || dbg_tb == ctx->tb);
+ ASSERT(fix->counter > 0);
+ ASSERT(ctx->tb->common.status & DB_DELETE);
- fix = next_fix;
+ diff = -((erts_aint_t) fix->counter);
+ erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0);
+
+#ifdef ERTS_SMP
+ if (fix->procs.p != ctx->p) { /* Fixated by other process */
+ fix->counter = 0;
+
+ /* Fake memory stats for table */
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(ctx->tb, sizeof(DbFixation), 0);
+
+ erts_schedule_ets_free_fixation(fix->procs.p->common.id, fix);
+ /*
+         * Either the sys task runs and erts_db_execute_free_fixation()
+         * removes 'fix', or the process exits, drops the sys task, and
+         * proc_cleanup_fixed_table() removes 'fix'.
+ */
}
- tb->common.fixations = NULL;
+ else
+#endif
+ {
+ fixed_tabs_delete(fix->procs.p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ ctx->tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ }
+ ctx->cnt++;
+}
+
+#ifdef ERTS_SMP
+int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
+{
+ ASSERT(fix->counter == 0);
+ fixed_tabs_delete(p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ return 1;
+}
+#endif
+
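+/*
+ * Remove all fixations from a table being deleted (tb is WRITE
+ * locked). Fixations held by other processes are handed over to
+ * them as scheduled sys tasks. The returned count lets the caller
+ * charge reductions for the work done.
+ */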
+static SWord free_fixations_locked(Process* p, DbTable *tb)
+{
+ struct free_fixations_ctx ctx;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ ctx.p = p;
+ ctx.tb = tb;
+ ctx.cnt = 0;
+ fixing_procs_rbt_foreach_destroy(&tb->common.fixing_procs,
+ free_fixations_op, &ctx);
+ tb->common.fixing_procs = NULL;
+ return ctx.cnt;
}
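
The struct-threaded-through-a-callback shape above (free_fixations_ctx plus free_fixations_op) is what replaces the old open-coded fixation list walk. A minimal, self-contained sketch of the same idiom over a singly linked list — every name below is illustrative, none of it is an ERTS API:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node { struct node *next; int value; } node;
    typedef void (*node_op)(node *n, void *vctx);

    struct destroy_ctx { long cnt; };   /* plays the role of free_fixations_ctx */

    /* Visit every node, then free it; the container is empty afterwards,
     * just as fixing_procs_rbt_foreach_destroy leaves fixing_procs. */
    static void list_foreach_destroy(node **head, node_op op, void *vctx)
    {
        node *n = *head;
        while (n) {
            node *next = n->next;
            op(n, vctx);
            free(n);
            n = next;
        }
        *head = NULL;
    }

    static void count_op(node *n, void *vctx)
    {
        struct destroy_ctx *ctx = vctx;
        (void)n;
        ctx->cnt++;                     /* mirrors ctx->cnt++ in free_fixations_op */
    }

    int main(void)
    {
        node *head = NULL;
        for (int i = 0; i < 3; i++) {
            node *n = malloc(sizeof *n);
            n->value = i; n->next = head; head = n;
        }
        struct destroy_ctx ctx = {0};
        list_foreach_destroy(&head, count_op, &ctx);
        printf("destroyed %ld nodes\n", ctx.cnt);   /* prints 3 */
        return 0;
    }

The payoff is the same as in the patch: the traversal is written once, and each caller supplies only policy through the context struct.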
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
@@ -3640,55 +3985,40 @@ static void free_heir_data(DbTable* tb)
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
- Process *p = BIF_P;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
Eterm cont = BIF_ARG_1;
- int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
ASSERT(*ptr == make_pos_bignum_header(1));
- db_lock(tb, LCK_WRITE);
- trap = free_table_cont(p, tb, 0, 1);
- db_unlock(tb, LCK_WRITE);
-
- if (trap) {
- BIF_TRAP1(&ets_delete_continue_exp, p, cont);
+    reds = free_table_continue(BIF_P, tb, reds);
+    if (reds < 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
/*
- * free_table_cont() returns 0 when done and !0 when more work is needed.
+ * free_table_continue() returns the number of reductions left:
+ *     >= 0 means done,
+ *      < 0 means yield (call again).
*/
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab)
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds)
{
- Eterm result;
- erts_smp_rwmtx_t *mmtl;
-
-#ifdef HARDDEBUG
- if (!first) {
- erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n",
- tb->common.id);
- }
-#endif
+ reds = tb->common.meth->db_free_table_continue(tb, reds);
- result = tb->common.meth->db_free_table_continue(tb);
-
- if (result == 0) {
+ if (reds < 0) {
#ifdef HARDDEBUG
erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n",
tb->common.id);
#endif
/* More work to be done. Let other processes work and call us again. */
- BUMP_ALL_REDS(p);
- return !0;
}
else {
#ifdef HARDDEBUG
@@ -3696,27 +4026,28 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(mmtl);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
- }
-#endif
- free_slot(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
-
- if (clean_meta_tab) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- }
- schedule_free_dbtable(tb);
- BUMP_REDS(p, 100);
- return 0;
+ delete_owned_table(p, tb);
+ table_dec_refc(tb, 0);
}
+ return reds;
+}
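
free_table_continue() and ets_delete_trap() together form the emulator's reduction-budget yield protocol: work is metered against a signed budget, and a negative return means "trap and resume later". A self-contained sketch of that contract, with a fake workload standing in for db_free_table_continue (nothing below is ERTS code):

    #include <stdio.h>

    /* Delete up to 'reds' items; return the reductions left (>= 0 means done)
     * or a negative value to request a yield, like free_table_continue(). */
    static long free_items_continue(long *items, long reds)
    {
        while (*items > 0) {
            if (reds-- <= 0)
                return -1;              /* out of budget: caller must trap */
            (*items)--;                 /* one unit of work */
        }
        return reds;                    /* done; unspent budget is returned */
    }

    int main(void)
    {
        long items = 2500;
        long budget = 1000;             /* like ERTS_BIF_REDS_LEFT(BIF_P) */
        int traps = 0;

        /* The trap loop: where the BIF would BIF_TRAP1 and be re-entered
         * by the scheduler, we simply call again. */
        while (free_items_continue(&items, budget) < 0)
            traps++;

        printf("finished after %d traps\n", traps);  /* 2 for 2500/1000 */
        return 0;
    }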
+
+struct fixing_procs_info_ctx
+{
+ Process* p;
+ Eterm list;
+};
+
+static void fixing_procs_info_op(DbFixation* fix, void* vctx)
+{
+ struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx;
+ Eterm* hp;
+ Eterm tpl;
+
+ hp = HAllocX(ctx->p, 5, 100);
+ tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter));
+ hp += 3;
+ ctx->list = CONS(hp, tpl, ctx->list);
}
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
@@ -3765,7 +4096,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else if (What == am_node) {
ret = erts_this_dist_entry->sysname;
} else if (What == am_named_table) {
- ret = is_atom(tb->common.id) ? am_true : am_false;
+ ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
}
@@ -3790,9 +4121,9 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
- Eterm tpl, lst;
- DbFixation *fix;
+ Eterm time;
Sint64 mtime;
+ struct fixing_procs_info_ctx ctx;
need = 3;
if (use_monotonic) {
@@ -3805,19 +4136,15 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
mtime = 0;
need += 4;
}
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- need += 5;
- }
+ ctx.p = p;
+ ctx.list = NIL;
+ fixing_procs_rbt_foreach(tb->common.fixing_procs,
+ fixing_procs_info_op,
+ &ctx);
+
hp = HAlloc(p, need);
- lst = NIL;
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- tpl = TUPLE2(hp,fix->pid,make_small(fix->counter));
- hp += 3;
- lst = CONS(hp,tpl,lst);
- hp += 2;
- }
if (use_monotonic)
- tpl = (IS_SSMALL(mtime)
+ time = (IS_SSMALL(mtime)
? make_small(mtime)
: erts_sint64_to_big(mtime, &hp));
else {
@@ -3825,10 +4152,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
erts_make_timestamp_value(&ms, &s, &us,
tb->common.time.monotonic,
tb->common.time.offset);
- tpl = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ time = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
hp += 4;
}
- ret = TUPLE2(hp, tpl, lst);
+ ret = TUPLE2(hp, time, ctx.list);
} else {
ret = am_false;
}
@@ -3873,7 +4200,19 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
{
- erts_print(to, to_arg, "Table: %T\n", tb->common.id);
+ Eterm tid;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
+
+ if (is_table_named(tb)) {
+ tid = tb->common.the_name;
+ } else {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ write_magic_ref_thing(heap, &oh, (ErtsMagicBinary *) tb->common.btid);
+ tid = make_internal_ref(heap);
+ }
+
+ erts_print(to, to_arg, "Table: %T\n", tid);
erts_print(to, to_arg, "Name: %T\n", tb->common.the_name);
tb->common.meth->db_print(to, to_arg, show, tb);
@@ -3891,21 +4230,30 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
+typedef struct {
+ fmtfn_t to;
+ void *to_arg;
+ int show;
+} ErtsPrintDbInfo;
+
+static void
+db_info_print(DbTable *tb, void *vpdbip)
+{
+ ErtsPrintDbInfo *pdbip = (ErtsPrintDbInfo *) vpdbip;
+ erts_print(pdbip->to, pdbip->to_arg, "=ets:%T\n", tb->common.owner);
+ erts_print(pdbip->to, pdbip->to_arg, "Slot: %bpu\n", (Uint) tb);
+ print_table(pdbip->to, pdbip->to_arg, pdbip->show, tb);
+}
+
void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */
{
- int i;
- for (i=0; i < db_max_tabs; i++)
- if (IS_SLOT_ALIVE(i)) {
- erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner);
- erts_print(to, to_arg, "Slot: %d\n", i);
- print_table(to, to_arg, show, meta_main_tab[i].u.tb);
- }
-#ifdef DEBUG
- erts_print(to, to_arg, "=internal_ets: Process to table index\n");
- print_table(to, to_arg, show, meta_pid_to_tab);
- erts_print(to, to_arg, "=internal_ets: Process to fixation index\n");
- print_table(to, to_arg, show, meta_pid_to_fixed_tab);
-#endif
+ ErtsPrintDbInfo pdbi;
+
+ pdbi.to = to;
+ pdbi.to_arg = to_arg;
+ pdbi.show = show;
+
+ erts_db_foreach_table(db_info_print, &pdbi);
}
Uint
@@ -3920,15 +4268,22 @@ erts_get_ets_misc_mem_size(void)
void
erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
{
- int i, j;
- j = 0;
- for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) {
- if (IS_SLOT_ALIVE(i)) {
- j++;
- (*func)(meta_main_tab[i].u.tb, arg);
- }
+ int ix;
+
+ ASSERT(erts_smp_thr_progress_is_blocking());
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ DbTable *first = esdp->ets_tables.clist;
+ if (first) {
+ DbTable *tb = first;
+ do {
+ if (is_table_alive(tb))
+ (*func)(tb, arg);
+ tb = tb->common.all.next;
+ } while (tb != first);
+ }
}
- ASSERT(j == meta_main_tab_cnt);
}
/* SMP Note: May only be used when system is locked */
@@ -3978,23 +4333,47 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
-#ifdef HARDDEBUG /* Here comes some debug functions */
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_lcnt_install_new_lock_info(&tb->common.fixlock.lcnt, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(&tb->common.rwlock.lcnt);
+ erts_lcnt_uninstall(&tb->common.fixlock.lcnt);
+ }
-void db_check_tables(void)
-{
-#ifdef ERTS_SMP
- return;
-#else
- int i;
+ if(IS_HASH_TABLE(tb->common.status)) {
+ erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ }
+}
- for (i = 0; i < db_max_tabs; i++) {
- if (IS_SLOT_ALIVE(i)) {
- DbTable* tb = meta_main_tab[i].t;
- tb->common.meth->db_check_table(tb);
- }
+static void lcnt_update_db_locks_per_sched(void *enable) {
+ ErtsSchedulerData *esdp;
+ DbTable *head;
+
+ esdp = erts_get_scheduler_data();
+ head = esdp->ets_tables.clist;
+
+ if(head) {
+ DbTable *iterator = head;
+
+ do {
+ if(is_table_alive(iterator)) {
+ erts_lcnt_enable_db_lock_count(iterator, !!enable);
+ }
+
+ iterator = iterator->common.all.next;
+ } while (iterator != head);
}
-#endif
}
-#endif /* HARDDEBUG */
+void erts_lcnt_update_db_locks(int enable) {
+ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
+ &lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
+}
+
+#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index b0508f2e74..d83126b3a2 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -24,8 +24,37 @@
*
*/
-#ifndef __DB_H__
-#define __DB_H__
+#ifndef ERTS_DB_SCHED_SPEC_TYPES__
+#define ERTS_DB_SCHED_SPEC_TYPES__
+
+union db_table;
+typedef union db_table DbTable;
+
+typedef struct ErtsEtsAllReq_ ErtsEtsAllReq;
+
+typedef struct {
+ ErtsEtsAllReq *next;
+ ErtsEtsAllReq *prev;
+} ErtsEtsAllReqList;
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllYieldData;
+
+typedef struct {
+ Uint count;
+ DbTable *clist;
+} ErtsEtsTables;
+
+#endif /* ERTS_DB_SCHED_SPEC_TYPES__ */
+
+#ifndef ERTS_ONLY_SCHED_SPEC_ETS_DATA
+
+#ifndef ERL_DB_H__
+#define ERL_DB_H__
#include "sys.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -46,6 +75,12 @@ typedef struct {
ErtsThrPrgrLaterOp data;
} DbTableRelease;
+struct ErtsSchedulerData_;
+int erts_handle_yielded_ets_all_request(struct ErtsSchedulerData_ *esdp,
+ ErtsEtsAllYieldData *eadp);
+
+void erts_ets_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
/*
* So, the structure for a database table, NB this is only
* interesting in db.c.
@@ -74,6 +109,7 @@ typedef enum {
void init_db(ErtsDbSpinCount);
int erts_db_process_exiting(Process *, ErtsProcLocks);
+int erts_db_execute_free_fixation(Process*, DbFixation*);
void db_info(fmtfn_t, void *, int);
void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
void erts_db_foreach_offheap(DbTable *,
@@ -86,15 +122,20 @@ extern int erts_ets_realloc_always_moves; /* set in erl_init */
extern int erts_ets_always_compress; /* set in erl_init */
extern Export ets_select_delete_continue_exp;
extern Export ets_select_count_continue_exp;
+extern Export ets_select_replace_continue_exp;
extern Export ets_select_continue_exp;
extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
-
Uint erts_db_get_max_tabs(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
+void erts_lcnt_update_db_locks(int enable);
#endif
+#endif /* ERL_DB_H__ */
+
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
#define ERTS_HAVE_DB_INTERNAL__
@@ -267,7 +308,6 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#undef ERTS_DB_ALC_MEM_UPDATE_
-
#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
+#endif /* !ERTS_ONLY_SCHED_SPEC_ETS_DATA */
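
The reworked include guards split erl_db.h in two: the small ERTS_DB_SCHED_SPEC_TYPES__ block is always exposed, while the full DB interface is skipped whenever ERTS_ONLY_SCHED_SPEC_ETS_DATA is defined, so a header that merely embeds ErtsEtsTables (the scheduler data structure, presumably) avoids the heavy includes. The pattern in miniature, with made-up names:

    /* mini_db.h -- subset-exposing header, same shape as erl_db.h above */
    #ifndef MINI_DB_SCHED_SPEC_TYPES__
    #define MINI_DB_SCHED_SPEC_TYPES__

    /* Cheap POD types that other headers may embed. */
    typedef struct { int count; void *clist; } MiniEtsTables;

    #endif /* MINI_DB_SCHED_SPEC_TYPES__ */

    #ifndef MINI_ONLY_SCHED_SPEC_DATA
    #ifndef MINI_DB_H__
    #define MINI_DB_H__

    /* The full (expensive) interface lives here. */
    void mini_db_init(void);

    #endif /* MINI_DB_H__ */
    #endif /* !MINI_ONLY_SCHED_SPEC_DATA */

A consumer that only needs the embedded type defines MINI_ONLY_SCHED_SPEC_DATA before the include; everyone else includes the header normally and sees the whole interface.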
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 390369fdb9..ae9322dfd3 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -84,25 +84,28 @@
#include "erl_db_hash.h"
-#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */
-# define IF_DEBUG(x) x
-# define MY_ASSERT(x) ASSERT(x)
-#else
-# define IF_DEBUG(x)
-# define MY_ASSERT(x)
-#endif
-
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
-#define CHAIN_LEN 6 /* Medium bucket chain len */
+#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
+#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
+
+/*
+** We want the first mandatory segment to be small (to reduce the minimal
+** footprint) and the extra segments to be larger (to reduce the number of
+** alloc/free calls).
+*/
-/* Number of slots per segment */
-#define SEGSZ_EXP 8
-#define SEGSZ (1 << SEGSZ_EXP)
-#define SEGSZ_MASK (SEGSZ-1)
+/* Number of slots in first segment */
+#define FIRST_SEGSZ_EXP 8
+#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)
+#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1)
-#define NSEG_1 2 /* Size of first segment table (must be at least 2) */
+/* Number of slots per extra segment */
+#define EXT_SEGSZ_EXP 11
+#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)
+#define EXT_SEGSZ_MASK (EXT_SEGSZ-1)
+
+#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*))
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
@@ -123,7 +126,9 @@
#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
-#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
+#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
+
+#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK]
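
With FIRST_SEGSZ = 256 and EXT_SEGSZ = 2048, SLOT_IX_TO_SEG_IX biases the slot index so the small first segment and the uniform extra segments share one shift: slots 0..255 land in segment 0, slots 256..2303 in segment 1, and so on, while (i) & EXT_SEGSZ_MASK picks the bucket within a segment. A standalone check of the arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define FIRST_SEGSZ_EXP 8
    #define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)     /* 256 */
    #define EXT_SEGSZ_EXP 11
    #define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)         /* 2048 */

    #define SLOT_IX_TO_SEG_IX(i) (((i) + (EXT_SEGSZ - FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)

    int main(void)
    {
        assert(SLOT_IX_TO_SEG_IX(0)    == 0);  /* first slot, first segment    */
        assert(SLOT_IX_TO_SEG_IX(255)  == 0);  /* last of the 256 mandatory    */
        assert(SLOT_IX_TO_SEG_IX(256)  == 1);  /* first slot of first extra    */
        assert(SLOT_IX_TO_SEG_IX(2303) == 1);  /* 256 + 2048 - 1               */
        assert(SLOT_IX_TO_SEG_IX(2304) == 2);
        printf("slot -> segment mapping checks out\n");
        return 0;
    }

Note that within an extra segment the bucket index is i mod 2048, not i minus the segment base; because each extra segment covers exactly 2048 consecutive slots, every residue is still used exactly once per segment.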
/*
* When deleting a table, the number of records to delete.
@@ -184,11 +189,12 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_internal_hash(term)) % MAX_HASH)
+ make_internal_hash(term, 0)) % MAX_HASH)
#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
@@ -282,6 +288,9 @@ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
#endif
}
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
/*
* Some special binary flags
@@ -318,27 +327,23 @@ struct mp_info {
/* A table segment */
struct segment {
- HashDbTerm* buckets[SEGSZ];
-#ifdef MYDEBUG
- int is_ext_segment;
-#endif
+ HashDbTerm* buckets[1];
};
+#define SIZEOF_SEGMENT(N) \
+ (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N))
-/* A segment that also contains a segment table */
-struct ext_segment {
- struct segment s; /* The segment itself. Must be first */
-
+/* An extended segment table */
+struct ext_segtab {
+#ifdef ERTS_SMP
+ ErtsThrPrgrLaterOp lop;
+#endif
struct segment** prev_segtab; /* Used when table is shrinking */
- int nsegs; /* Size of segtab */
+ int prev_nsegs; /* Size of prev_segtab */
+ int nsegs; /* Size of this segtab */
struct segment* segtab[1]; /* The segment table */
};
-#define SIZEOF_EXTSEG(NSEGS) \
- (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-
-#if defined(DEBUG) || defined(VALGRIND)
-# define EXTSEG(SEGTAB_PTR) \
- ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
-#endif
+#define SIZEOF_EXT_SEGTAB(NSEGS) \
+ (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS))
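
SIZEOF_SEGMENT and SIZEOF_EXT_SEGTAB both size a struct whose last member is a one-element array — the classic pre-C99 flexible-array idiom: allocate offsetof(struct, tail) plus n elements and index past the declared bound. A sketch of the allocation pattern, with plain malloc standing in for erts_db_alloc:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct seg { void *buckets[1]; };   /* trailing array, length chosen at runtime */

    #define SIZEOF_SEG(N) \
        (offsetof(struct seg, buckets) + sizeof(void*) * (N))

    int main(void)
    {
        size_t nslots = 256;            /* e.g. FIRST_SEGSZ */
        struct seg *s = malloc(SIZEOF_SEG(nslots));
        if (!s)
            return 1;
        memset(s, 0, SIZEOF_SEG(nslots));    /* all buckets start empty */
        s->buckets[nslots - 1] = &nslots;    /* any index < nslots is backed */
        printf("segment with %zu buckets = %zu bytes\n", nslots, SIZEOF_SEG(nslots));
        free(s);
        return 0;
    }

Strictly speaking, indexing past a [1] array is undefined behavior in ISO C; the idiom predates C99 flexible array members and is exactly what the patched code relies on.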
static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
@@ -348,53 +353,28 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
else
erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
-#ifdef VALGRIND
- tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
-#endif
}
-
-/* How the table segments relate to each other:
-
- ext_segment: ext_segment: "plain" segment
- #=================# #================# #=============#
- | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] |
- | bucket[1] | | | | [257] | | | [513] |
- : : | | : : | : :
- | bucket[255] | | | | [511] | | | [767] |
- |-----------------| | | |----------------| | #=============#
- | prev_segtab=NULL| | | +--<---prev_segtab | |
- | nsegs = 2 | | | | | nsegs = 256 | |
-+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ |
-| | segtab[1] -->-----------+---|--<---segtab[1] | | |
-| #=================# | | segtab[2] -->-----|--+ ext_segment:
-| | : : | #================#
-+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]|
- #================# | | |
- | : :
- | |----------------|
- +----<---prev_segtab |
- : :
-*/
-
+/* Used by select_replace in analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
/*
** Forward decl's (static functions)
*/
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab);
-static int alloc_seg(DbTableHash *tb);
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
+static void alloc_seg(DbTableHash *tb);
static int free_seg(DbTableHash *tb, int free_records);
static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
-static void shrink(DbTableHash* tb, int nactive);
-static void grow(DbTableHash* tb, int nactive);
+static void shrink(DbTableHash* tb, int nitems);
+static void grow(DbTableHash* tb, int nitems);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
Uint sz, DbTableHash*);
-static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi);
+static int analyze_pattern(DbTableHash *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
/*
* Method interface functions
@@ -418,24 +398,29 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_hash(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret);
-static int db_select_hash(Process *p, DbTable *tbl,
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret);
-static int db_select_count_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-static int db_select_delete_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-
-static int db_select_continue_hash(Process *p, DbTable *tbl,
+static int db_select_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_count_continue_hash(Process *p, DbTable *tbl,
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_count_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+
static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
static void db_print_hash(fmtfn_t to,
void *to_arg,
@@ -443,7 +428,7 @@ static void db_print_hash(fmtfn_t to,
DbTable *tbl);
static int db_free_table_hash(DbTable *tbl);
-static int db_free_table_continue_hash(DbTable *tbl);
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
static void db_foreach_offheap_hash(DbTable *,
@@ -463,9 +448,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
int nactive = NACTIVE(tb);
- if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ int nitems = NITEMS(tb);
+ if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)
&& !IS_FIXED(tb)) {
- shrink(tb, nactive);
+ shrink(tb, nitems);
}
}
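
GROW_LIMIT and SHRINK_LIMIT replace the old CHAIN_LEN heuristic with a hysteresis band: grow once the item count exceeds the active slot count (average chain length above 1), shrink only when the table falls below half full. The gap between the thresholds keeps a workload hovering near one threshold from resizing back and forth. Worked numbers under those definitions:

    #include <stdio.h>

    #define GROW_LIMIT(NACTIVE)   ((NACTIVE) * 1)
    #define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)

    int main(void)
    {
        int nactive = 2048;             /* currently active slots */

        /* grow is considered when nitems > 2048, shrink when nitems < 1024;
         * anything in between leaves the table alone. */
        int probes[] = { 1023, 1024, 2048, 2049 };
        for (int i = 0; i < 4; i++) {
            int nitems = probes[i];
            printf("nitems=%4d  grow=%d  shrink=%d\n", nitems,
                   nitems > GROW_LIMIT(nactive),
                   nitems < SHRINK_LIMIT(nactive));
        }
        return 0;
    }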
@@ -547,17 +533,14 @@ DbTableMethod db_hash =
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
+ db_select_replace_hash,
+ db_select_replace_continue_hash,
db_take_hash,
db_delete_all_objects_hash,
db_free_table_hash,
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
-#ifdef HARDDEBUG
- db_check_table_hash,
-#else
- NULL,
-#endif
db_lookup_dbterm_hash,
db_finalize_dbterm_hash
};
@@ -605,9 +588,10 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
** Table interface routines ie what's called by the bif's
*/
-void db_unfix_table_hash(DbTableHash *tb)
+SWord db_unfix_table_hash(DbTableHash *tb)
{
FixedDeletion* fixdel;
+ SWord work = 0;
ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
|| (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
@@ -628,7 +612,7 @@ restart:
if (!IS_FIXED(tb)) {
goto restart; /* unfixed again! */
}
- return;
+ return work;
}
if (ix < NACTIVE(tb)) {
bp = &BUCKET(tb, ix);
@@ -638,6 +622,7 @@ restart:
if (b->hvalue == INVALID_HASH) {
*bp = b->next;
free_term(tb, b);
+ work++;
b = *bp;
} else {
bp = &b->next;
@@ -653,25 +638,32 @@ restart:
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
+
+ return work;
}
int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
- erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
+ erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
+ erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
- SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
+ SET_SEGTAB(tb, tb->first_segtab);
tb->nsegs = NSEG_1;
- tb->nslots = SEGSZ;
+ tb->nslots = FIRST_SEGSZ;
+ tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(FIRST_SEGSZ));
+ sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
#ifdef ERTS_SMP
+ erts_smp_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
int i;
@@ -683,10 +675,10 @@ int db_create_hash(Process *p, DbTable *tbl)
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
- "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
- /* This important property is needed to guarantee that the buckets
+ /* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation it protected by the same lock:
*/
ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
@@ -862,11 +854,10 @@ Lnew:
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
}
- CHECK_TABLES();
return DB_ERROR_NONE;
Ldone:
@@ -891,7 +882,6 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
}
}
copy = build_term_list(p, b1, b2, sz, tb);
- CHECK_TABLES();
if (bend) {
*bend = b2;
}
@@ -923,70 +913,6 @@ done:
RUNLOCK_HASH(lck);
return DB_ERROR_NONE;
}
-
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm* b1;
- int num = 0;
- int retval;
- erts_smp_rwmtx_t* lck;
-
- ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */
-
- hval = MAKE_HASH(key);
- lck = RLOCK_HASH(tb, hval);
- ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- HashDbTerm* b;
- HashDbTerm* b2 = b1->next;
-
- while(b2 != NULL && has_live_key(tb,b2,key,hval)) {
- if (ndex > arityval(b2->dbterm.tpl[0])) {
- retval = DB_ERROR_BADITEM;
- goto done;
- }
- b2 = b2->next;
- }
-
- b = b1;
- while(b != b2) {
- if (num < *num_ret) {
- ret[num++] = b->dbterm.tpl[ndex];
- } else {
- retval = DB_ERROR_NONE;
- goto done;
- }
- b = b->next;
- }
- *num_ret = num;
- }
- else {
- ASSERT(*num_ret > 0);
- ret[0] = b1->dbterm.tpl[ndex];
- *num_ret = 1;
- }
- retval = DB_ERROR_NONE;
- goto done;
- }
- b1 = b1->next;
- }
- retval = DB_ERROR_BADKEY;
-done:
- RUNLOCK_HASH(lck);
- return retval;
-}
-
static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
@@ -1079,54 +1005,6 @@ done:
}
/*
- * Very internal interface, removes elements of arity two from
- * BAG. Used for the PID meta table
- */
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm** bp;
- HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
- int found = 0;
-
- hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- ASSERT(!IS_FIXED(tb));
- ASSERT((tb->common.status & DB_BAG));
- ASSERT(!tb->common.compress);
-
- while(b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- found = 1;
- if ((arityval(b->dbterm.tpl[0]) == 2) &&
- EQ(value, b->dbterm.tpl[2])) {
- *bp = b->next;
- free_term(tb, b);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- b = *bp;
- break;
- }
- } else if (found) {
- break;
- }
- bp = &b->next;
- b = b->next;
- }
- WUNLOCK_HASH(lck);
- if (found) {
- try_shrink(tb);
- }
- return DB_ERROR_NONE;
-}
-
-/*
** NB, this is for the db_erase/2 bif.
*/
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
@@ -1273,816 +1151,1104 @@ static BIF_RETTYPE bif_trap1(Export *bif,
{
BIF_TRAP1(bif, p, p1);
}
-
+
+
/*
- * Continue collecting select matches, this may happen either due to a trap
- * or when the user calls ets:select/1
+ * Match traversal callbacks
*/
-static int db_select_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
-{
- DbTableHash *tb = &tbl->hash;
- Sint slot_ix;
- Sint save_slot_ix;
- Sint chunk_size;
- int all_objects;
- Binary *mp;
- int num_left = 1000;
- HashDbTerm *current = 0;
- Eterm match_list;
- Eterm *hp;
- Eterm match_res;
- Sint got;
- Eterm *tptr;
- erts_smp_rwmtx_t* lck;
-#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
+/* Called when no match is possible.
+ * context_ptr: Pointer to context
+ * ret: Pointer to traversal function term return.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ */
+typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
+
+/* Called for each match result.
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * current_ptr_ptr: Triple pointer to either the bucket or the 'next' pointer in the previous element;
+ * can be (carefully) used to adjust iteration when deleting or replacing elements.
+ * match_res: The result of running the match program against the current term.
+ *
+ * Should return 1 for successful match, 0 otherwise.
+ */
+typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDbTerm*** current_ptr_ptr,
+ Eterm match_res);
+
+/* Called when either we've matched enough elements in this cycle or EOT was reached.
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * iterations_left: Number of intended iterations (down from an initial max.) left in this traversal cycle
+ * mpp: Double pointer to the compiled match program
+ * ret: Pointer to traversal function term return.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret);
+
+/* Called when it's time to trap
+ * context_ptr: Pointer to context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * mpp: Double pointer to the compiled match program
+ * ret: Pointer to traversal function term return.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+typedef int (*mtraversal_on_trap_t)(void* context_ptr, Sint slot_ix, Sint got, Binary** mpp, Eterm* ret);
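
The four callback types invert the half-dozen old copy-pasted traversal loops: one engine walks the buckets and the context supplies the policy (collect, count, delete, replace). A self-contained analogue over a plain array shows the shape; nothing below is an ERTS API, it is the same inversion of control in miniature:

    #include <stdio.h>

    typedef int (*on_match_t)(void *ctx, int value);
    typedef int (*on_done_t)(void *ctx, long got, int *ret);

    /* Generic engine: visits every element; the callbacks decide what
     * counts as a match and how the tally becomes a result. */
    static int traverse(const int *v, int n,
                        on_match_t on_match, on_done_t on_done,
                        void *ctx, int *ret)
    {
        long got = 0;
        for (int i = 0; i < n; i++)
            if (on_match(ctx, v[i]))
                got++;
        return on_done(ctx, got, ret);
    }

    struct count_ctx { int threshold; };

    static int count_match(void *vctx, int value)
    {
        struct count_ctx *ctx = vctx;
        return value > ctx->threshold;      /* policy lives in the callback */
    }

    static int count_done(void *vctx, long got, int *ret)
    {
        (void)vctx;
        *ret = (int)got;
        return 0;                           /* DB_ERROR_NONE analogue */
    }

    int main(void)
    {
        int data[] = { 1, 5, 9, 3, 7 };
        struct count_ctx ctx = { 4 };
        int ret;
        traverse(data, 5, count_match, count_done, &ctx, &ret);
        printf("matched %d elements\n", ret);   /* prints 3 */
        return 0;
    }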
- /* Decode continuation. We know it's a tuple but not the arity or anything else */
+/*
+ * Begin hash table match traversal
+ */
+static int match_traverse(Process* p, DbTableHash* tb,
+ Eterm pattern,
+ extra_match_validator_t extra_match_validator, /* Optional */
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ mtraversal_on_nothing_can_match_t on_nothing_can_match,
+ mtraversal_on_match_res_t on_match_res,
+ mtraversal_on_loop_ended_t on_loop_ended,
+ mtraversal_on_trap_t on_trap,
+ void* context_ptr, /* State for callbacks above */
+ Eterm* ret)
+{
+ Sint slot_ix; /* Slot index */
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
+ struct mp_info mpi;
+ unsigned current_list_pos = 0; /* Prefound buckets list index */
+ Eterm match_res;
+ Sint got = 0; /* Matched terms counter */
+ erts_smp_rwmtx_t* lck; /* Slot lock */
+ int ret_value;
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_smp_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+#else
+ #define lock_hash_function(tb, hval) NULL
+ #define unlock_hash_function(lck) ((void)lck)
+#endif
+ Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**)
+ = (lock_for_write ? next_slot_w : next_slot);
- tptr = tuple_val(continuation);
+ if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi))
+ != DB_ERROR_NONE)
+ {
+ *ret = NIL;
+ goto done;
+ }
- if (arityval(*tptr) != 6)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
-
- if (!is_small(tptr[2]) || !is_small(tptr[3]) || !is_binary(tptr[4]) ||
- !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if ((chunk_size = signed_val(tptr[3])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
- match_list = tptr[5];
- if ((got = signed_val(tptr[6])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ if (!mpi.something_can_match) {
+ /* Can't possibly match anything */
+ ret_value = on_nothing_can_match(context_ptr, ret);
+ goto done;
+ }
- slot_ix = signed_val(tptr[2]);
- if (slot_ix < 0 /* EOT */
- || (chunk_size && got >= chunk_size)) {
- goto done; /* Already got all or enough in the match_list */
+ if (mpi.all_objects) {
+ mpi.mp->intern.flags |= BIN_FLAG_ALL_OBJECTS;
}
- lck = RLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- RUNLOCK_HASH(lck);
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ /*
+ * Look for initial slot / bucket
+ */
+ if (!mpi.key_given) {
+        /* Run this code if pattern is a variable or GETKEY(pattern) is a variable */
+ slot_ix = 0;
+ lck = lock_hash_function(tb,slot_ix);
+ for (;;) {
+ ASSERT(slot_ix < NACTIVE(tb));
+ if (*(current_ptr = &BUCKET(tb,slot_ix)) != NULL) {
+ break;
+ }
+ slot_ix = next_slot_function(tb,slot_ix,&lck);
+ if (slot_ix == 0) {
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ goto done;
+ }
+ }
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(*current_ptr == BUCKET(tb,slot_ix));
+ ++current_list_pos;
}
- while ((current = BUCKET(tb,slot_ix)) == NULL) {
- slot_ix = next_slot(tb, slot_ix, &lck);
- if (slot_ix == 0) {
- slot_ix = -1; /* EOT */
- goto done;
- }
- }
+ /*
+ * Execute traversal cycle
+ */
for(;;) {
- if (current->hvalue != INVALID_HASH &&
- (match_res = db_match_dbterm(&tb->common, p, mp, all_objects,
- &current->dbterm, &hp, 2),
- is_value(match_res))) {
+ if (*current_ptr != NULL) {
+ if ((*current_ptr)->hvalue != INVALID_HASH) {
+ match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else if (mpi.key_given) { /* Key is bound */
+ unlock_hash_function(lck);
+ if (current_list_pos == mpi.num_lists) {
+ ret_value = on_loop_ended(context_ptr, -1, got, iterations_left, &mpi.mp, ret);
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ }
+ else { /* Key is variable */
+ if ((slot_ix = next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = on_trap(context_ptr, slot_ix, got, &mpi.mp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
+ }
+ }
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
- --num_left;
- save_slot_ix = slot_ix;
- if ((current = next(tb, (Uint*)&slot_ix, &lck, current)) == NULL) {
- slot_ix = -1; /* EOT */
- break;
- }
- if (slot_ix != save_slot_ix) {
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- }
- }
done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (got > chunk_size) { /* Cannot write destructively here,
- the list may have
- been in user space */
- rest = NIL;
- hp = HAlloc(p, (got - chunk_size) * 2);
- while (got-- > chunk_size) {
- rest = CONS(hp, CAR(list_val(match_list)), rest);
- hp += 2;
- match_list = CDR(list_val(match_list));
- ++rest_size;
- }
- }
- if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(p,3+7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix),
- tptr[3], tptr[4], rest,
- make_small(rest_size));
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else {
- if (match_list != NIL) {
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
+ /* We should only jump directly to this label if
+ * we've already called on_nothing_can_match / on_loop_ended / on_trap
+ */
+ if (mpi.mp != NULL) {
+ erts_bin_free(mpi.mp);
}
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-
-trap:
- BUMP_ALL_REDS(p);
-
- hp = HAlloc(p,7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), tptr[3],
- tptr[4], match_list, make_small(got));
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-
-#undef RET_TO_BIF
-
-}
+ if (mpi.lists != mpi.dlists) {
+ erts_free(ERTS_ALC_T_DB_SEL_LIST,
+ (void *) mpi.lists);
+ }
+ return ret_value;
-static int db_select_hash(Process *p, DbTable *tbl,
- Eterm pattern, int reverse,
- Eterm *ret)
-{
- return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret);
+#ifndef ERTS_SMP
+#undef lock_hash_function
+#undef unlock_hash_function
+#endif
}
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
- Eterm pattern, Sint chunk_size,
- int reverse, /* not used */
- Eterm *ret)
+/*
+ * Continue hash table match traversal
+ */
+static int match_traverse_continue(Process* p, DbTableHash* tb,
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ Sint slot_ix, /* Slot index to resume traversal from */
+ Sint got, /* Matched terms counter */
+ Binary** mpp, /* Existing match program */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ mtraversal_on_match_res_t on_match_res,
+ mtraversal_on_loop_ended_t on_loop_ended,
+ mtraversal_on_trap_t on_trap,
+ void* context_ptr, /* For callbacks */
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Sint slot_ix;
- HashDbTerm *current = 0;
- unsigned current_list_pos = 0;
- Eterm match_list;
+ int all_objects = (*mpp)->intern.flags & BIN_FLAG_ALL_OBJECTS;
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
Eterm match_res;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
- Eterm mpb;
erts_smp_rwmtx_t* lck;
+ int ret_value;
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_smp_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+#else
+ #define lock_hash_function(tb, hval) NULL
+ #define unlock_hash_function(lck) ((void)lck)
+#endif
+ Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr)
+ = (lock_for_write ? next_slot_w : next_slot);
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
-
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
+ if (got < 0) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
}
- if (!mpi.something_can_match) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL, DB_ERROR_NONE);
- /* can't possibly match anything */
+ if (slot_ix < 0 /* EOT */
+ || (chunk_size && got >= chunk_size))
+ {
+ /* Already got all or enough in the match_list */
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ goto done;
}
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- for (;;) {
- ASSERT(slot_ix < NACTIVE(tb));
- if ((current = BUCKET(tb,slot_ix)) != NULL) {
- break;
- }
- slot_ix = next_slot(tb,slot_ix,&lck);
- if (slot_ix == 0) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL,DB_ERROR_NONE);
- }
- }
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
+ lck = lock_hash_function(tb, slot_ix);
+ if (slot_ix >= NACTIVE(tb)) { /* Is this possible? */
+ unlock_hash_function(lck);
+ *ret = NIL;
+ ret_value = DB_ERROR_BADPARAM;
+ goto done;
}
- match_list = NIL;
-
+ /*
+ * Resume traversal cycle from where we left
+ */
+ current_ptr = &BUCKET(tb,slot_ix);
for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, &hp, 2);
- if (is_value(match_res)) {
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
- }
- current = current->next;
- }
- else if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- slot_ix = -1; /* EOT */
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else { /* Key is variable */
- --num_left;
-
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- slot_ix = -1;
- break;
- }
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
+ if (*current_ptr != NULL) {
+ if ((*current_ptr)->hvalue != INVALID_HASH) {
+ match_res = db_match_dbterm(&tb->common, p, *mpp, all_objects,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else {
+ if ((slot_ix=next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = on_trap(context_ptr, slot_ix, got, mpp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
}
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- if (got > chunk_size) { /* Split list in return value and 'rest' */
- Eterm tmp = match_list;
- rest = match_list;
- while (got-- > chunk_size + 1) {
- tmp = CDR(list_val(tmp));
- ++rest_size;
- }
- ++rest_size;
- match_list = CDR(list_val(tmp));
- CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
- been in 'user space' */
- }
- if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- hp = HAlloc(p,3+7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
- make_small(chunk_size),
- mpb, rest,
- make_small(rest_size));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else { /* All data is exhausted */
- if (match_list != NIL) { /* No more data to search but still a
- result to return to the caller */
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else { /* Reached the end of the ttable with no data to return */
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
- }
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- hp = HAlloc(p,7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
- make_small(chunk_size),
- mpb, match_list,
- make_small(got));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-#undef RET_TO_BIF
+ ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+
+done:
+ /* We should only jump directly to this label if
+ * we've already called on_loop_ended / on_trap
+ */
+ return ret_value;
+#ifndef ERTS_SMP
+#undef lock_hash_function
+#undef unlock_hash_function
+#endif
}
-static int db_select_count_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
+
+/*
+ * Common traversal trapping/continuation code;
+ * used by select_count, select_delete and select_replace,
+ * as well as their continuation-handling counterparts.
+ */
+
+static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
+ Process* p,
+ DbTableHash* tb,
+ Eterm tid,
+ Eterm* prev_continuation_tptr,
+ Sint slot_ix,
+ Sint got,
+ Binary** mpp,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm* current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
+ Eterm* hp;
Eterm egot;
Eterm mpb;
- erts_smp_rwmtx_t* lck;
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
-
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
-
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
- }
-
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- current = BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
+ Eterm continuation;
+ int is_first_trap = (prev_continuation_tptr == NULL);
+ size_t base_halloc_sz = (is_first_trap ? ERTS_MAGIC_REF_THING_SIZE : 0);
- for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, NULL,0) == am_true) {
- ++got;
- }
- --num_left;
- }
- current = current->next;
- }
- else { /* next bucket */
- if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else {
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
BUMP_ALL_REDS(p);
if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, base_halloc_sz + 5);
egot = make_small(got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ hp = HAlloc(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-#undef RET_TO_BIF
+ if (is_first_trap) {
+ mpb = erts_db_make_match_prog_ref(p, *mpp, &hp);
+ *mpp = NULL; /* otherwise the caller will destroy it */
+ }
+ else {
+ mpb = prev_continuation_tptr[3];
+ }
+
+ continuation = TUPLE4(
+ hp,
+ tid,
+ make_small(slot_ix),
+ mpb,
+ egot);
+ *ret = bif_trap1(trap_function, p, continuation);
+ return DB_ERROR_NONE;
}
-static int db_select_delete_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
+static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
+ Eterm** tptr_ptr,
+ Eterm* tid_ptr,
+ Sint* slot_ix_p,
+ Binary** mpp,
+ Sint* got_p)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm **current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
- Uint last_pseudo_delete = (Uint)-1;
- Eterm mpb;
- Eterm egot;
-#ifdef ERTS_SMP
- erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
-#else
- erts_aint_t fixated_by_me = 0;
-#endif
- erts_smp_rwmtx_t* lck;
-
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
-
+ Eterm* tptr;
+ ASSERT(is_tuple(continuation));
+ tptr = tuple_val(continuation);
+ if (arityval(*tptr) != 4)
+ return 1;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
+ if (! is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))) {
+ return 1;
}
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
+ *tptr_ptr = tptr;
+ *tid_ptr = tptr[1];
+ *slot_ix_p = unsigned_val(tptr[2]);
+ *mpp = erts_db_get_match_prog_binary_unchecked(tptr[3]);
+ if (is_big(tptr[4])) {
+ *got_p = big_to_uint32(tptr[4]);
}
+ else {
+ *got_p = unsigned_val(tptr[4]);
+ }
+ return 0;
+}
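
on_mtraversal_simple_trap and unpack_simple_mtraversal_continuation are a matched pair over the 4-tuple continuation {tid, slot_ix, match-prog ref, got}: the first trap allocates the magic reference once, and every later trap passes tptr[3] through unchanged. A plain-C round trip with the same layout, structs standing in for Eterm tuples (illustrative only):

    #include <assert.h>

    /* Stand-in for the TUPLE4 continuation {tid, slot_ix, mp, got}. */
    struct continuation {
        unsigned long tid;
        long          slot_ix;
        void         *mp;     /* compiled match program, shared across traps */
        long          got;
    };

    static void pack(struct continuation *c, unsigned long tid,
                     long slot_ix, void *mp, long got)
    {
        c->tid = tid; c->slot_ix = slot_ix; c->mp = mp; c->got = got;
    }

    /* Returns nonzero on a malformed continuation, like the unpack helper. */
    static int unpack(const struct continuation *c,
                      long *slot_ix, void **mp, long *got)
    {
        if (c->got < 0)
            return 1;         /* DB_ERROR_BADPARAM analogue */
        *slot_ix = c->slot_ix; *mp = c->mp; *got = c->got;
        return 0;
    }

    int main(void)
    {
        static int match_prog;               /* "allocated" once, on first trap */
        struct continuation c;
        long slot_ix, got;
        void *mp;

        pack(&c, 42u, 17, &match_prog, 1000);        /* first trap */
        assert(unpack(&c, &slot_ix, &mp, &got) == 0);
        pack(&c, c.tid, slot_ix + 1, mp, got + 5);   /* later traps reuse mp */
        assert(c.mp == &match_prog);
        return 0;
    }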
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- lck = WLOCK_HASH(tb,slot_ix);
- current = &BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos++].bucket;
- ASSERT(*current == BUCKET(tb,slot_ix));
+
+/*
+ *
+ * select / select_chunk match traversal
+ *
+ */
+
+#define MAX_SELECT_CHUNK_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Sint chunk_size;
+ Eterm match_list;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_chunk_context_t;
+
+static int mtraversal_select_chunk_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_chunk_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ if (is_value(match_res)) {
+ sc_context_ptr->match_list = CONS(sc_context_ptr->hp, match_res, sc_context_ptr->match_list);
+ return 1;
}
+ return 0;
+}
+static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm mpb;
- for(;;) {
- if ((*current) == NULL) {
- if (mpi.key_given) { /* Key is bound */
- WUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos].bucket;
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- } else {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- HashDbTerm *del;
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- if (!add_fixed_deletion(tb, slot_ix, fixated_by_me))
- goto do_erase;
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- do_erase:
- del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
+ if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) {
+ /* We didn't get to iterate a single time, which means EOT */
+ ASSERT(sc_context_ptr->match_list == NIL);
+ *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (got) {
- try_shrink(tb);
+ else {
+ ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (sc_context_ptr->chunk_size) {
+ Eterm continuation;
+ Eterm rest = NIL;
+ Sint rest_size = 0;
+
+ if (got > sc_context_ptr->chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = sc_context_ptr->match_list;
+ rest = sc_context_ptr->match_list;
+ while (got-- > sc_context_ptr->chunk_size + 1) {
+ tmp = CDR(list_val(tmp));
+ ++rest_size;
+ }
+ ++rest_size;
+ sc_context_ptr->match_list = CDR(list_val(tmp));
+ CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
+ been in 'user space' */
+ }
+ if (rest != NIL || slot_ix >= 0) { /* Need more calls */
+ sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3 + 7 + ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &sc_context_ptr->hp);
+ continuation = TUPLE6(
+ sc_context_ptr->hp,
+ sc_context_ptr->tid,
+ make_small(slot_ix),
+ make_small(sc_context_ptr->chunk_size),
+ mpb, rest,
+ make_small(rest_size));
+ *mpp = NULL; /* Otherwise the caller will destroy it */
+ sc_context_ptr->hp += 7;
+ *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else { /* All data is exhausted */
+ if (sc_context_ptr->match_list != NIL) { /* No more data to search but still a
+ result to return to the caller */
+ sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3);
+ *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, am_EOT);
+ return DB_ERROR_NONE;
+            } else { /* Reached the end of the table with no data to return */
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
+ }
+ *ret = sc_context_ptr->match_list;
+ return DB_ERROR_NONE;
}
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
- egot = make_small(got);
+}
+
+static int mtraversal_select_chunk_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm mpb;
+ Eterm continuation;
+ Eterm* hp;
+
+ BUMP_ALL_REDS(sc_context_ptr->p);
+
+ if (sc_context_ptr->prev_continuation_tptr == NULL) {
+ /* First time we're trapping */
+ hp = HAlloc(sc_context_ptr->p, 7 + ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &hp);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->tid,
+ make_small(slot_ix),
+ make_small(sc_context_ptr->chunk_size),
+ mpb,
+ sc_context_ptr->match_list,
+ make_small(got));
+ *mpp = NULL; /* otherwise the caller will destroy it */
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ /* Not the first time we're trapping; reuse continuation terms */
+ hp = HAlloc(sc_context_ptr->p, 7);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ sc_context_ptr->prev_continuation_tptr[3],
+ sc_context_ptr->prev_continuation_tptr[4],
+ sc_context_ptr->match_list,
+ make_small(got));
+ }
+ *ret = bif_trap1(&ets_select_continue_exp, sc_context_ptr->p, continuation);
+ return DB_ERROR_NONE;
+}
-#undef RET_TO_BIF
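+/* ets:select is a select_chunk with chunk_size 0: the callbacks above then
+ * return the whole match_list instead of splitting it into chunks. */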
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) {
+ return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
+}
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret)
+{
+ mtraversal_select_chunk_context_t sc_context;
+ sc_context.p = p;
+ sc_context.tb = &tbl->hash;
+ sc_context.tid = tid;
+ sc_context.hp = NULL;
+ sc_context.chunk_size = chunk_size;
+ sc_context.match_list = NIL;
+ sc_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ sc_context.p, sc_context.tb,
+ pattern, NULL,
+ sc_context.chunk_size,
+ MAX_SELECT_CHUNK_ITERATIONS,
+ &sc_context.hp, 0,
+ mtraversal_select_chunk_on_nothing_can_match,
+ mtraversal_select_chunk_on_match_res,
+ mtraversal_select_chunk_on_loop_ended,
+ mtraversal_select_chunk_on_trap,
+ &sc_context, ret);
}
+
/*
-** This is called when select_delete traps
-*/
-static int db_select_delete_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ *
+ * select_continue match traversal
+ *
+ */
+
+static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- Uint last_pseudo_delete = (Uint)-1;
- HashDbTerm **current = NULL;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- int fixated_by_me = ONLY_WRITER(p,tb) ? 0 : 1; /* ToDo: something nicer */
- erts_smp_rwmtx_t* lck;
+ mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ Eterm continuation;
+ Eterm rest = NIL;
+ Eterm* hp;
+
+ ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (sc_context_ptr->chunk_size) {
+ Sint rest_size = 0;
+ if (got > sc_context_ptr->chunk_size) {
+ /* Cannot write destructively here,
+ the list may have
+ been in user space */
+ hp = HAlloc(sc_context_ptr->p, (got - sc_context_ptr->chunk_size) * 2);
+ while (got-- > sc_context_ptr->chunk_size) {
+ rest = CONS(hp, CAR(list_val(sc_context_ptr->match_list)), rest);
+ hp += 2;
+ sc_context_ptr->match_list = CDR(list_val(sc_context_ptr->match_list));
+ ++rest_size;
+ }
+ }
+ if (rest != NIL || slot_ix >= 0) {
+ hp = HAlloc(sc_context_ptr->p, 3 + 7);
+ continuation = TUPLE6(
+ hp,
+ sc_context_ptr->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ sc_context_ptr->prev_continuation_tptr[3],
+ sc_context_ptr->prev_continuation_tptr[4],
+ rest,
+ make_small(rest_size));
+ hp += 7;
+ *ret = TUPLE2(hp, sc_context_ptr->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else {
+ if (sc_context_ptr->match_list != NIL) {
+ hp = HAlloc(sc_context_ptr->p, 3);
+ *ret = TUPLE2(hp, sc_context_ptr->match_list, am_EOT);
+ return DB_ERROR_NONE;
+ } else {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
+ }
+ *ret = sc_context_ptr->match_list;
+ return DB_ERROR_NONE;
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+/*
+ * This is called when select traps
+ */
+static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_chunk_context_t sc_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size;
+ Eterm match_list;
+ Sint iterations_left = MAX_SELECT_CHUNK_ITERATIONS;
-
+ /* Decode continuation. We know it's a tuple but not the arity or anything else */
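+    /* Expected layout, as built by the select_chunk trap and loop-ended
+     * callbacks above:
+     *   {TableId, SlotIx, ChunkSize, MatchProgRef, MatchList, Got} */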
+ ASSERT(is_tuple(continuation));
tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
- }
-
- lck = WLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- WUNLOCK_HASH(lck);
- goto done;
- }
- current = &BUCKET(tb,slot_ix);
- for(;;) {
- if ((*current) == NULL) {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- HashDbTerm *del;
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- if (!add_fixed_deletion(tb, slot_ix, fixated_by_me))
- goto do_erase;
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- do_erase:
- del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
-
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (got) {
- try_shrink(tb);
- }
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
- }
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ if (arityval(*tptr) != 6)
+ goto badparam;
-#undef RET_TO_BIF
+ if (!is_small(tptr[2]) || !is_small(tptr[3]) ||
+ !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
+ goto badparam;
+ if ((chunk_size = signed_val(tptr[3])) < 0)
+ goto badparam;
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (mp == NULL)
+ goto badparam;
+
+ if ((got = signed_val(tptr[6])) < 0)
+ goto badparam;
+
+ tid = tptr[1];
+ slot_ix = signed_val(tptr[2]);
+ match_list = tptr[5];
+
+ /* Proceed */
+ sc_context.p = p;
+ sc_context.tb = &tbl->hash;
+ sc_context.tid = tid;
+ sc_context.hp = NULL;
+ sc_context.chunk_size = chunk_size;
+ sc_context.match_list = match_list;
+ sc_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ sc_context.p, sc_context.tb, sc_context.chunk_size,
+ iterations_left, &sc_context.hp, slot_ix, got, &mp, 0,
+ mtraversal_select_chunk_on_match_res, /* Reuse callback */
+ mtraversal_select_chunk_continue_on_loop_ended,
+ mtraversal_select_chunk_on_trap, /* Reuse callback */
+ &sc_context, ret);
+
+badparam:
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
}
-
+
+#undef MAX_SELECT_CHUNK_ITERATIONS
+
+
/*
-** This is called when select_count traps
-*/
-static int db_select_count_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ *
+ * select_count match traversal
+ *
+ */
+
+#define MAX_SELECT_COUNT_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_count_context_t;
+
+static int mtraversal_select_count_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_count_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- HashDbTerm* current;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- erts_smp_rwmtx_t* lck;
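+    /* A nonzero return makes the traversal engine count this object in
+     * 'got'; select_count needs no other side effect per match. */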
+ return (match_res == am_true);
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+static int mtraversal_select_count_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
+ BUMP_REDS(scnt_context_ptr->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
+ *ret = erts_make_integer(got, scnt_context_ptr->p);
+ return DB_ERROR_NONE;
+}
-
- tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
- }
-
+static int mtraversal_select_count_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_count_continue_exp,
+ scnt_context_ptr->p,
+ scnt_context_ptr->tb,
+ scnt_context_ptr->tid,
+ scnt_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
- lck = RLOCK_HASH(tb, slot_ix);
- if (slot_ix >= NACTIVE(tb)) { /* Is this posible? */
- RUNLOCK_HASH(lck);
- goto done;
- }
- current = BUCKET(tb,slot_ix);
-
- for(;;) {
- if (current != NULL) {
- if (current->hvalue == INVALID_HASH) {
- current = current->next;
- continue;
- }
- if (db_match_dbterm(&tb->common, p, mp, 0, &current->dbterm,
- NULL, 0) == am_true) {
- ++got;
- }
- --num_left;
- current = current->next;
- }
- else { /* next bucket */
- if ((slot_ix = next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
+ mtraversal_select_count_context_t scnt_context = {0};
+ Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
+ Sint chunk_size = 0;
+
+ scnt_context.p = p;
+ scnt_context.tb = &tbl->hash;
+ scnt_context.tid = tid;
+ scnt_context.hp = NULL;
+ scnt_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ scnt_context.p, scnt_context.tb,
+ pattern, NULL,
+ chunk_size, iterations_left, NULL, 0,
+ mtraversal_select_count_on_nothing_can_match,
+ mtraversal_select_count_on_match_res,
+ mtraversal_select_count_on_loop_ended,
+ mtraversal_select_count_on_trap,
+ &scnt_context, ret);
+}
+
+/*
+ * This is called when select_count traps
+ */
+static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_count_context_t scnt_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
+
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ scnt_context.p = p;
+ scnt_context.tb = &tbl->hash;
+ scnt_context.tid = tid;
+ scnt_context.hp = NULL;
+ scnt_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ scnt_context.p, scnt_context.tb, chunk_size,
+ MAX_SELECT_COUNT_ITERATIONS,
+ NULL, slot_ix, got, &mp, 0,
+ mtraversal_select_count_on_match_res, /* Reuse callback */
+ mtraversal_select_count_on_loop_ended, /* Reuse callback */
+ mtraversal_select_count_on_trap, /* Reuse callback */
+ &scnt_context, ret);
+}
+
+#undef MAX_SELECT_COUNT_ITERATIONS
+
+
+/*
+ *
+ * select_delete match traversal
+ *
+ */
+
+#define MAX_SELECT_DELETE_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+ erts_aint_t fixated_by_me;
+ Uint last_pseudo_delete;
+} mtraversal_select_delete_context_t;
+
+static int mtraversal_select_delete_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ HashDbTerm** current_ptr = *current_ptr_ptr;
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ HashDbTerm* del;
+ if (match_res != am_true)
+ return 0;
+
+ if (NFIXED(sd_context_ptr->tb) > sd_context_ptr->fixated_by_me) { /* fixated by others? */
+ if (slot_ix != sd_context_ptr->last_pseudo_delete) {
+ if (!add_fixed_deletion(sd_context_ptr->tb, slot_ix, sd_context_ptr->fixated_by_me))
+ goto do_erase;
+ sd_context_ptr->last_pseudo_delete = slot_ix;
+ }
+ (*current_ptr)->hvalue = INVALID_HASH;
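+        /* Pseudo-delete: the chain is left intact for iterators over the
+         * fixed table; the slot was queued as a FixedDeletion above and the
+         * term is reclaimed when the table is unfixed. */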
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+ do_erase:
+ del = *current_ptr;
+        *current_ptr = (*current_ptr)->next; /* unlink the deleted term from the chain */
+ free_term(sd_context_ptr->tb, del);
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems);
-#undef RET_TO_BIF
+ return 1;
+}
+static int mtraversal_select_delete_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
+ BUMP_REDS(sd_context_ptr->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
+ if (got) {
+ try_shrink(sd_context_ptr->tb);
+ }
+ *ret = erts_make_integer(got, sd_context_ptr->p);
+ return DB_ERROR_NONE;
}
-
+
+static int mtraversal_select_delete_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_delete_continue_exp,
+ sd_context_ptr->p,
+ sd_context_ptr->tb,
+ sd_context_ptr->tid,
+ sd_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
+ mtraversal_select_delete_context_t sd_context = {0};
+ Sint chunk_size = 0;
+
+ sd_context.p = p;
+ sd_context.tb = &tbl->hash;
+ sd_context.tid = tid;
+ sd_context.hp = NULL;
+ sd_context.prev_continuation_tptr = NULL;
+#ifdef ERTS_SMP
+ sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+#else
+ sd_context.fixated_by_me = 0;
+#endif
+ sd_context.last_pseudo_delete = (Uint) -1;
+
+ return match_traverse(
+ sd_context.p, sd_context.tb,
+ pattern, NULL,
+ chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
+ mtraversal_select_delete_on_nothing_can_match,
+ mtraversal_select_delete_on_match_res,
+ mtraversal_select_delete_on_loop_ended,
+ mtraversal_select_delete_on_trap,
+ &sd_context, ret);
+}
+
+/*
+ * This is called when select_delete traps
+ */
+static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
+ mtraversal_select_delete_context_t sd_context = {0};
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ sd_context.p = p;
+ sd_context.tb = &tbl->hash;
+ sd_context.tid = tid;
+ sd_context.hp = NULL;
+ sd_context.prev_continuation_tptr = tptr;
+ sd_context.fixated_by_me = ONLY_WRITER(p, sd_context.tb) ? 0 : 1; /* TODO: something nicer */
+ sd_context.last_pseudo_delete = (Uint) -1;
+
+ return match_traverse_continue(
+ sd_context.p, sd_context.tb, chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ mtraversal_select_delete_on_match_res, /* Reuse callback */
+ mtraversal_select_delete_on_loop_ended, /* Reuse callback */
+ mtraversal_select_delete_on_trap, /* Reuse callback */
+ &sd_context, ret);
+}
+
+#undef MAX_SELECT_DELETE_ITERATIONS
+
+
+/*
+ *
+ * select_replace match traversal
+ *
+ */
+
+#define MAX_SELECT_REPLACE_ITERATIONS 1000
+
+typedef struct {
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Eterm* prev_continuation_tptr;
+} mtraversal_select_replace_context_t;
+
+static int mtraversal_select_replace_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ DbTableHash* tb = sr_context_ptr->tb;
+ HashDbTerm* new;
+ HashDbTerm* next;
+ HashValue hval;
+
+ if (is_value(match_res)) {
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, match_res);
+ ASSERT(is_value(key));
+ ASSERT(eq(key, GETKEY(tb, (**current_ptr_ptr)->dbterm.tpl)));
+#endif
+ next = (**current_ptr_ptr)->next;
+ hval = (**current_ptr_ptr)->hvalue;
+ new = new_dbterm(tb, match_res);
+ new->next = next;
+ new->hvalue = hval;
+ free_term(tb, **current_ptr_ptr);
+ **current_ptr_ptr = new; /* replace 'next' pointer in previous object */
+ *current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */
+ return 1;
+ }
+ return 0;
+}
+
+static int mtraversal_select_replace_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
+ /* the more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(sr_context_ptr->p,
+ MIN(MAX_SELECT_REPLACE_ITERATIONS * 2,
+ (MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got));
+ *ret = erts_make_integer(got, sr_context_ptr->p);
+ return DB_ERROR_NONE;
+}
+
+static int mtraversal_select_replace_on_trap(void* context_ptr, Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ return on_mtraversal_simple_trap(
+ &ets_select_replace_continue_exp,
+ sr_context_ptr->p,
+ sr_context_ptr->tb,
+ sr_context_ptr->tid,
+ sr_context_ptr->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
+{
+ mtraversal_select_replace_context_t sr_context = {0};
+ Sint chunk_size = 0;
+
+    /* The bag table type presented both semantic-consistency and performance
+     * issues for select_replace; it is unsupported for now.
+     */
+ ASSERT(!(tbl->hash.common.status & DB_BAG));
+
+ sr_context.p = p;
+ sr_context.tb = &tbl->hash;
+ sr_context.tid = tid;
+ sr_context.hp = NULL;
+ sr_context.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ sr_context.p, sr_context.tb,
+ pattern, db_match_keeps_key,
+ chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
+ mtraversal_select_replace_on_nothing_can_match,
+ mtraversal_select_replace_on_match_res,
+ mtraversal_select_replace_on_loop_ended,
+ mtraversal_select_replace_on_trap,
+ &sr_context, ret);
+}
+
+/*
+ * This is called when select_replace traps
+ */
+static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
+{
+ mtraversal_select_replace_context_t sr_context = {0};
+ Eterm* tptr;
+    Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
+
+ if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ /* Proceed */
+ sr_context.p = p;
+ sr_context.tb = &tbl->hash;
+ sr_context.tid = tid;
+ sr_context.hp = NULL;
+ sr_context.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ sr_context.p, sr_context.tb, chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ mtraversal_select_replace_on_match_res, /* Reuse callback */
+ mtraversal_select_replace_on_loop_ended, /* Reuse callback */
+ mtraversal_select_replace_on_trap, /* Reuse callback */
+ &sr_context, ret);
+}
+
+
static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
@@ -2123,6 +2289,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2219,19 +2386,17 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
/* release all memory occupied by a single table */
static int db_free_table_hash(DbTable *tbl)
{
- while (!db_free_table_continue_hash(tbl))
+ while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
}
-static int db_free_table_continue_hash(DbTable *tbl)
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
{
DbTableHash *tb = &tbl->hash;
- int done;
FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
- done = 0;
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
@@ -2241,22 +2406,21 @@ static int db_free_table_continue_hash(DbTable *tbl)
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- if (++done >= 2*DELETE_RECORD_LIMIT) {
+ if (--reds < 0) {
erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
- return 0; /* Not done */
+ return reds; /* Not done */
}
}
erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
- done /= 2;
while(tb->nslots != 0) {
- free_seg(tb, 1);
+ reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
- return 0; /* Not done */
+ if (reds < 0) {
+ return reds; /* Not done */
}
}
#ifdef ERTS_SMP
@@ -2271,7 +2435,7 @@ static int db_free_table_continue_hash(DbTable *tbl)
}
#endif
ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
- return 1; /* Done */
+ return reds; /* Done */
}
@@ -2284,7 +2448,8 @@ static int db_free_table_continue_hash(DbTable *tbl)
** slots should be searched. Also compiles the match program
*/
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi)
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm *ptpl;
Eterm lst, tpl, ttpl;
@@ -2322,7 +2487,10 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2337,9 +2505,17 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
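+    /* Optional per-operation hook validating each {Match,Guard,Body}
+     * triple; select_replace passes db_match_keeps_key() here so that match
+     * specs which could change a replaced object's key are rejected. */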
+    if (extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
mpi->all_objects = 0;
@@ -2414,90 +2590,81 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab)
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
{
- int nsegs;
- struct ext_segment* eseg;
+ struct segment** old_segtab = SEGTAB(tb);
+ int nsegs = 0;
+ struct ext_segtab* est;
+ ASSERT(seg_ix >= NSEG_1);
switch (seg_ix) {
- case 0: nsegs = NSEG_1; break;
- case 1: nsegs = NSEG_2; break;
- default: nsegs = seg_ix + NSEG_INC; break;
- }
- eseg = (struct ext_segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- SIZEOF_EXTSEG(nsegs));
- ASSERT(eseg != NULL);
- sys_memset(&eseg->s, 0, sizeof(struct segment));
- IF_DEBUG(eseg->s.is_ext_segment = 1);
- eseg->prev_segtab = old_segtab;
- eseg->nsegs = nsegs;
- if (old_segtab) {
- ASSERT(nsegs > tb->nsegs);
- sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
- }
+ case NSEG_1: nsegs = NSEG_2; break;
+ default: nsegs = seg_ix + NSEG_INC; break;
+ }
+ ASSERT(nsegs > tb->nsegs);
+ est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_EXT_SEGTAB(nsegs));
+ est->nsegs = nsegs;
+ est->prev_segtab = old_segtab;
+ est->prev_nsegs = tb->nsegs;
+ sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
#ifdef DEBUG
- sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
+ sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
#endif
- eseg->segtab[seg_ix] = &eseg->s;
- return eseg;
+ return est;
}
/* Extend table with one new segment
*/
-static int alloc_seg(DbTableHash *tb)
+static void alloc_seg(DbTableHash *tb)
{
- int seg_ix = tb->nslots >> SEGSZ_EXP;
-
- if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */
- struct segment** segtab = SEGTAB(tb);
- struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab);
- if (seg == NULL) return 0;
- segtab[seg_ix] = &seg->s;
- /* We don't use the new segtab until next call (see "shrink race") */
- }
- else { /* Just a new plain segment */
- struct segment** segtab;
- if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */
- struct ext_segment* eseg;
- eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
- MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- SET_SEGTAB(tb, eseg->segtab);
- tb->nsegs = eseg->nsegs;
- }
- ASSERT(seg_ix < tb->nsegs);
- segtab = SEGTAB(tb);
- ASSERT(segtab[seg_ix] == NULL);
- segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- sizeof(struct segment));
- if (segtab[seg_ix] == NULL) return 0;
- sys_memset(segtab[seg_ix], 0, sizeof(struct segment));
- }
- tb->nslots += SEGSZ;
- return 1;
+ int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
+ struct segment** segtab;
+
+ ASSERT(seg_ix > 0);
+ if (seg_ix == tb->nsegs) { /* New segtab needed */
+ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix);
+ SET_SEGTAB(tb, est->segtab);
+ tb->nsegs = est->nsegs;
+ }
+ ASSERT(seg_ix < tb->nsegs);
+ segtab = SEGTAB(tb);
+ segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(EXT_SEGSZ));
+ sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+ tb->nslots += EXT_SEGSZ;
}
+#ifdef ERTS_SMP
+static void dealloc_ext_segtab(void* lop_data)
+{
+ struct ext_segtab* est = (struct ext_segtab*) lop_data;
+
+ erts_free(ERTS_ALC_T_DB_SEG, est);
+}
+#endif
+
/* Shrink table by freeing the top segment
** free_records: 1=free any records in segment, 0=assume segment is empty
*/
static int free_seg(DbTableHash *tb, int free_records)
{
- const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1;
+ const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
struct segment** const segtab = SEGTAB(tb);
- struct ext_segment* const top = (struct ext_segment*) segtab[seg_ix];
- int bytes;
+ struct segment* const segp = segtab[seg_ix];
+ Uint seg_sz;
int nrecords = 0;
- ASSERT(top != NULL);
+ ASSERT(segp != NULL);
#ifndef DEBUG
if (free_records)
#endif
{
- int i;
- for (i=0; i<SEGSZ; ++i) {
- HashDbTerm* p = top->s.buckets[i];
+ int i = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ while (i--) {
+ HashDbTerm* p = segp->buckets[i];
while(p != 0) {
HashDbTerm* nxt = p->next;
ASSERT(free_records); /* segment not empty as assumed? */
@@ -2507,55 +2674,46 @@ static int free_seg(DbTableHash *tb, int free_records)
}
}
}
-
- /* The "shrink race":
- * We must avoid deallocating an extended segment while its segtab may
- * still be used by other threads.
- * The trick is to stop use a segtab one call earlier. That is, stop use
- * a segtab when the segment above it is deallocated. When the segtab is
- * later deallocated, it has not been used for a very long time.
- * It is even theoretically safe as we have by then rehashed the entire
- * segment, seizing *all* locks, so there cannot exist any retarded threads
- * still hanging in BUCKET macro with an old segtab pointer.
- * For this to work, we must of course allocate a new segtab one call
- * earlier in alloc_seg() as well. And this is also the reason why
- * the minimum size of the first segtab is 2 and not 1 (NSEG_1).
- */
- if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */
- MY_ASSERT(top->s.is_ext_segment);
- ASSERT(segtab != top->segtab || seg_ix==0);
- bytes = SIZEOF_EXTSEG(top->nsegs);
- }
- else { /* Dealloc plain segment */
- struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1];
- MY_ASSERT(!top->s.is_ext_segment);
-
- if (segtab == newtop->segtab) { /* New top segment is extended */
- MY_ASSERT(newtop->s.is_ext_segment);
- if (newtop->prev_segtab != NULL) {
- /* Time to use a smaller segtab */
- SET_SEGTAB(tb, newtop->prev_segtab);
- tb->nsegs = seg_ix;
- ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
- }
- else {
- ASSERT(NSEG_1 > 2 && seg_ix==1);
- }
- }
- bytes = sizeof(struct segment);
+ if (seg_ix >= NSEG_1) {
+ struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab);
+
+ if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */
+ ASSERT(est->prev_segtab != NULL);
+ SET_SEGTAB(tb, est->prev_segtab);
+ tb->nsegs = est->prev_nsegs;
+
+#ifdef ERTS_SMP
+ if (!tb->common.is_thread_safe) {
+ /*
+ * Table is doing a graceful shrink operation and we must avoid
+ * deallocating this segtab while it may still be read by other
+ * threads. Schedule deallocation with thread progress to make
+ * sure no lingering threads are still hanging in BUCKET macro
+ * with an old segtab pointer.
+ */
+ Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs);
+ ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0);
+ erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab,
+ est,
+ &est->lop,
+ sz);
+ }
+ else
+#endif
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
}
+ seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz));
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
- (void*)top, bytes);
#ifdef DEBUG
- if (seg_ix > 0) {
- segtab[seg_ix] = NULL;
- } else {
- SET_SEGTAB(tb, NULL);
- }
+ if (seg_ix < tb->nsegs)
+ SEGTAB(tb)[seg_ix] = NULL;
#endif
- tb->nslots -= SEGSZ;
+ tb->nslots -= seg_sz;
ASSERT(tb->nslots >= 0);
return nrecords;
}
@@ -2604,104 +2762,111 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
- else {
- if (erts_smp_atomic_read_nob(&tb->is_resizing))
- return 0;
- erts_smp_atomic_set_nob(&tb->is_resizing, 1);
- return 1;
- }
+ return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+#endif
+ return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
- else
- erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+ erts_atomic_set_relb(&tb->is_resizing, 0);
+#endif
}
-/* Grow table with one new bucket.
+/* Grow table with one or more new buckets.
** Allocate new segment if needed.
*/
-static void grow(DbTableHash* tb, int nactive)
+static void grow(DbTableHash* tb, int nitems)
{
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
erts_smp_rwmtx_t* lck;
- int from_ix;
+ int nactive;
+ int from_ix, to_ix;
int szm;
+ int loop_limit = 5;
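+    /* Bound the number of bucket splits per call so one writer never does
+     * an unbounded amount of rehash work in a single operation. */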
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) != nactive) {
- goto abort; /* already done (race) */
- }
-
- /* Ensure that the slot nactive exists */
- if (nactive == tb->nslots) {
- /* Time to get a new segment */
- ASSERT((nactive & SEGSZ_MASK) == 0);
- if (!alloc_seg(tb)) goto abort;
- }
- ASSERT(nactive < tb->nslots);
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (nitems <= GROW_LIMIT(nactive)) {
+ goto abort; /* already done (race) */
+ }
- szm = erts_smp_atomic_read_nob(&tb->szm);
- if (nactive <= szm) {
- from_ix = nactive & (szm >> 1);
- } else {
- ASSERT(nactive == szm+1);
- from_ix = 0;
- szm = (szm<<1) | 1;
- }
+ /* Ensure that the slot nactive exists */
+ if (nactive == tb->nslots) {
+ /* Time to get a new segment */
+ ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0);
+ alloc_seg(tb);
+ }
+ ASSERT(nactive < tb->nslots);
- lck = WLOCK_HASH(tb, from_ix);
- /* Now a final double check (with the from_ix lock held)
- * that we did not get raced by a table fixer.
- */
- if (IS_FIXED(tb)) {
- WUNLOCK_HASH(lck);
- goto abort;
- }
- erts_smp_atomic_inc_nob(&tb->nactive);
- if (from_ix == 0) {
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->szm, szm);
- else
- erts_smp_atomic_set_nob(&tb->szm, szm);
- }
- done_resizing(tb);
+ szm = erts_smp_atomic_read_nob(&tb->szm);
+ if (nactive <= szm) {
+ from_ix = nactive & (szm >> 1);
+ } else {
+ ASSERT(nactive == szm+1);
+ from_ix = 0;
+ szm = (szm<<1) | 1;
+ }
+ to_ix = nactive;
+
+ lck = WLOCK_HASH(tb, from_ix);
+ ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
+ /* Now a final double check (with the from_ix lock held)
+ * that we did not get raced by a table fixer.
+ */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+ erts_smp_atomic_set_nob(&tb->nactive, ++nactive);
+ if (from_ix == 0) {
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_relb(&tb->szm, szm);
+ else
+ erts_smp_atomic_set_nob(&tb->szm, szm);
+ }
+ done_resizing(tb);
+
+ /* Finally, let's split the bucket. We try to do it in a smart way
+ to keep link order and avoid unnecessary updates of next-pointers */
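+        /* Whenever an entry belongs to the other bucket than its
+         * predecessor, it is appended to that bucket's tail and the roles
+         * of "from" and "to" are swapped, so runs of same-destination
+         * entries keep their existing next-pointers. */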
+ pnext = &BUCKET(tb, from_ix);
+ p = *pnext;
+ to_pnext = &BUCKET(tb, to_ix);
+ while (p != NULL) {
+ if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ *pnext = p->next;
+ free_term(tb, p);
+ p = *pnext;
+ }
+ else {
+ int ix = p->hvalue & szm;
+ if (ix != from_ix) {
+ ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
+ *to_pnext = p;
+ /* Swap "from" and "to": */
+ from_ix = ix;
+ to_pnext = pnext;
+ }
+ pnext = &p->next;
+ p = *pnext;
+ }
+ }
+ *to_pnext = NULL;
+ WUNLOCK_HASH(lck);
- /* Finally, let's split the bucket. We try to do it in a smart way
- to keep link order and avoid unnecessary updates of next-pointers */
- pnext = &BUCKET(tb, from_ix);
- p = *pnext;
- to_pnext = &BUCKET(tb, nactive);
- while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
- *pnext = p->next;
- free_term(tb, p);
- p = *pnext;
- }
- else {
- int ix = p->hvalue & szm;
- if (ix != from_ix) {
- ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
- *to_pnext = p;
- /* Swap "from" and "to": */
- from_ix = ix;
- to_pnext = pnext;
- }
- pnext = &p->next;
- p = *pnext;
- }
- }
- *to_pnext = NULL;
+    } while (--loop_limit && nitems > GROW_LIMIT(nactive));
- WUNLOCK_HASH(lck);
return;
abort:
@@ -2712,60 +2877,78 @@ abort:
/* Shrink table by joining top bucket.
** Remove top segment if it gets empty.
*/
-static void shrink(DbTableHash* tb, int nactive)
-{
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) == nactive) {
- erts_smp_rwmtx_t* lck;
- int src_ix = nactive - 1;
- int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
- int dst_ix = src_ix & low_szm;
-
- ASSERT(dst_ix < src_ix);
- ASSERT(nactive > SEGSZ);
- lck = WLOCK_HASH(tb, dst_ix);
- /* Double check for racing table fixers */
- if (!IS_FIXED(tb)) {
- HashDbTerm** src_bp = &BUCKET(tb, src_ix);
- HashDbTerm** dst_bp = &BUCKET(tb, dst_ix);
- HashDbTerm** bp = src_bp;
-
- /* Q: Why join lists by appending "dst" at the end of "src"?
- A: Must step through "src" anyway to purge pseudo deleted. */
- while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
- HashDbTerm* deleted = *bp;
- *bp = deleted->next;
- free_term(tb, deleted);
- } else {
- bp = &(*bp)->next;
- }
- }
- *bp = *dst_bp;
- *dst_bp = *src_bp;
- *src_bp = NULL;
-
- erts_smp_atomic_set_nob(&tb->nactive, src_ix);
- if (dst_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, low_szm);
- }
- WUNLOCK_HASH(lck);
-
- if (tb->nslots - src_ix >= SEGSZ) {
- free_seg(tb, 0);
- }
- }
- else {
- WUNLOCK_HASH(lck);
- }
+static void shrink(DbTableHash* tb, int nitems)
+{
+ HashDbTerm** src_bp;
+ HashDbTerm** dst_bp;
+ HashDbTerm** bp;
+ erts_smp_rwmtx_t* lck;
+ int src_ix, dst_ix, low_szm;
+ int nactive;
+ int loop_limit = 5;
- }
- /*else already done */
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) {
+ goto abort; /* already done (race) */
+ }
+ src_ix = nactive - 1;
+ low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
+ dst_ix = src_ix & low_szm;
+
+ ASSERT(dst_ix < src_ix);
+ ASSERT(nactive > FIRST_SEGSZ);
+ lck = WLOCK_HASH(tb, dst_ix);
+ ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
+ /* Double check for racing table fixers */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+
+ src_bp = &BUCKET(tb, src_ix);
+ dst_bp = &BUCKET(tb, dst_ix);
+ bp = src_bp;
+
+ /*
+ * We join lists by appending "dst" at the end of "src"
+ * as we must step through "src" anyway to purge pseudo deleted.
+ */
+ while(*bp != NULL) {
+ if ((*bp)->hvalue == INVALID_HASH) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = *dst_bp;
+ *dst_bp = *src_bp;
+ *src_bp = NULL;
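+        /* All live entries from the top bucket now sit in front of the
+         * destination chain; the emptied top bucket's segment can be freed
+         * below once the whole segment is vacated. */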
+
+ nactive = src_ix;
+ erts_smp_atomic_set_nob(&tb->nactive, nactive);
+ if (dst_ix == 0) {
+ erts_smp_atomic_set_relb(&tb->szm, low_szm);
+ }
+ WUNLOCK_HASH(lck);
+
+ if (tb->nslots - src_ix >= EXT_SEGSZ) {
+ free_seg(tb, 0);
+ }
+ done_resizing(tb);
+
+ } while (--loop_limit
+ && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive));
+ return;
+
+abort:
done_resizing(tb);
}
-
/* Search a list of tuples for a matching key */
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
@@ -2931,8 +3114,8 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
WUNLOCK_HASH(lck);
nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
} else {
WUNLOCK_HASH(lck);
@@ -3016,24 +3199,30 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
stats->kept_items = kept_items;
}
-#ifdef HARDDEBUG
-void db_check_table_hash(DbTable *tbl)
+/* For testing only */
+Eterm erts_ets_hash_sizeof_ext_segtab(void)
{
- DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
- int j;
-
- for (j = 0; j < tb->nactive; j++) {
- if ((list = BUCKET(tb,j)) != 0) {
- while (list != 0) {
- if (!is_tuple(make_tuple(list->dbterm.tpl))) {
- erts_exit(ERTS_ERROR_EXIT, "Bad term in slot %d of ets table", j);
- }
- list = list->next;
- }
- }
- }
+ return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
-#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
+ int i;
+
+ if(tb->locks == NULL) {
+ return;
+ }
+
+ for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck.lcnt;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(ref);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e654363cd5..523ed7860e 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,23 +50,23 @@ typedef struct db_table_hash_fine_locks {
typedef struct db_table_hash {
DbTableCommon common;
- erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
+ /* SMP: szm and nactive are write-protected by is_resizing or table write lock */
erts_smp_atomic_t szm; /* current size mask. */
-
+ erts_smp_atomic_t nactive; /* Number of "active" slots */
+
+ erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
+ struct segment* first_segtab[1];
+
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
int nslots; /* Total number of slots */
int nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
- erts_smp_atomic_t nactive; /* Number of "active" slots */
- erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
#ifdef ERTS_SMP
+ erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
#endif
-#ifdef VALGRIND
- struct ext_segment* top_ptr_to_segment_with_active_segtab;
-#endif
} DbTableHash;
@@ -75,7 +75,7 @@ typedef struct db_table_hash {
** table types. The process is always an [in out] parameter.
*/
void db_initialize_hash(void);
-void db_unfix_table_hash(DbTableHash *tb /* [in out] */);
+SWord db_unfix_table_hash(DbTableHash *tb);
Uint db_kept_items_hash(DbTableHash *tb);
/* Interface for meta pid table */
@@ -88,14 +88,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret);
-
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value);
-
/* not yet in method table */
int db_mark_all_deleted_hash(DbTable *tbl);
@@ -109,5 +101,10 @@ typedef struct {
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
+Eterm erts_ets_hash_sizeof_ext_segtab(void);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
+#endif
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index dd9403e132..d7deadacf0 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -76,9 +76,18 @@
((Dtt->pos) ? \
(Dtt)->array[(Dtt)->pos - 1] : NULL)
-#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+#define TOPN_NODE(Dtt, Pos) \
+ (((Pos) < Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL)
+
+#define REPLACE_TOP_NODE(Dtt, Node) \
+ if ((Dtt)->pos) (Dtt)->array[(Dtt)->pos - 1] = (Node)
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
/* Obtain table static stack if available. NULL if not.
** Must be released with release_stack()
@@ -180,7 +189,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to);
static void check_slot_pos(DbTableTree *tb);
static void check_saved_stack(DbTableTree *tb);
-static int check_table_tree(DbTableTree* tb, TreeDbTerm *t);
#define TREE_DEBUG
#endif
@@ -226,9 +234,9 @@ struct mp_info {
Eterm most; /* The highest matching key (possibly
* partially bound expression) */
- TreeDbTerm *save_term; /* If the key is completely bound, this
- * will be the Tree node we're searching
- * for, otherwise it will be useless */
+ TreeDbTerm **save_term; /* If the key is completely bound, this
+ * will be the Tree node we're searching
+ * for, otherwise it will be useless */
Binary *mp; /* The compiled match program */
};
@@ -278,12 +286,30 @@ struct select_delete_context {
};
/*
+ * Used by doit_select_replace
+ */
+struct select_replace_context {
+ Process *p;
+ DbTableTree *tb;
+ Binary *mp;
+ Eterm end_condition;
+ Eterm *lastobj;
+ Sint32 max;
+ int keypos;
+ int all_objects;
+ Sint replaced;
+};
+
+/* Used by select_replace on analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
+
+/*
** Forward declarations
*/
static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
Eterm object);
-static int do_free_tree_cont(DbTableTree *tb, int num_left);
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
static void free_term(DbTableTree *tb, TreeDbTerm* p);
static int balance_left(TreeDbTerm **this);
static int balance_right(TreeDbTerm **this);
@@ -291,6 +317,7 @@ static int delsub(TreeDbTerm **this);
static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this);
static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
@@ -312,14 +339,23 @@ static void traverse_forward(DbTableTree *tb,
TreeDbTerm *,
void *,
int),
- void *context);
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+ void *context);
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack*,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *tb,
+ TreeDbTerm **, // out
+ void *,
+ int),
+ void *context);
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound_key);
static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi);
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
static int doit_select(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
@@ -336,6 +372,10 @@ static int doit_select_delete(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
int forward);
+static int doit_select_replace(DbTableTree *tb,
+ TreeDbTerm **this_ptr,
+ void *ptr,
+ int forward);
static int partly_bound_can_match_lesser(Eterm partly_bound_1,
Eterm partly_bound_2);
@@ -369,27 +409,31 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret);
static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_tree(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reversed, Eterm *ret);
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reversed, Eterm *ret);
static int db_select_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
static int db_select_count_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
static int db_free_table_tree(DbTable *tbl);
-static int db_free_table_continue_tree(DbTable *tbl);
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord);
static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
@@ -436,17 +480,14 @@ DbTableMethod db_tree =
db_select_delete_continue_tree,
db_select_count_tree,
db_select_count_continue_tree,
+ db_select_replace_tree,
+ db_select_replace_continue_tree,
db_take_tree,
db_delete_all_objects_tree,
db_free_table_tree,
db_free_table_continue_tree,
db_print_tree,
db_foreach_offheap_tree,
-#ifdef HARDDEBUG
- db_check_table_tree,
-#else
- NULL,
-#endif
db_lookup_dbterm_tree,
db_finalize_dbterm_tree
@@ -935,17 +976,15 @@ static int db_select_continue_tree(Process *p,
if (arityval(*tptr) != 8)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!is_small(tptr[4]) || !is_binary(tptr[5]) ||
+ if (!is_small(tptr[4]) ||
!(is_list(tptr[6]) || tptr[6] == NIL) || !is_small(tptr[7]) ||
!is_small(tptr[8]))
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[5])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[5]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[5]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
chunk_size = signed_val(tptr[4]);
@@ -956,7 +995,7 @@ static int db_select_continue_tree(Process *p,
sc.lastobj = NULL;
sc.max = 1000;
sc.keypos = tb->common.keypos;
- sc.all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
+ sc.all_objects = mp->intern.flags & BIN_FLAG_ALL_OBJECTS;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
@@ -1060,7 +1099,7 @@ static int db_select_continue_tree(Process *p,
}
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
@@ -1097,7 +1136,7 @@ static int db_select_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1111,7 +1150,7 @@ static int db_select_tree(Process *p, DbTable *tbl,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,mpi.save_term,&sc,0 /* direction doesn't matter */);
+ doit_select(tb,*(mpi.save_term),&sc,0 /* direction doesn't matter */);
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
@@ -1145,15 +1184,15 @@ static int db_select_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb=db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(0), /* Chunk size of zero means not chunked to the
@@ -1208,10 +1247,8 @@ static int db_select_count_continue_tree(Process *p,
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
sc.p = p;
@@ -1267,7 +1304,7 @@ static int db_select_count_continue_tree(Process *p,
}
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1302,7 +1339,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.got = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1316,7 +1353,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select_count(tb,mpi.save_term,&sc,0 /* dummy */);
+ doit_select_count(tb,*(mpi.save_term),&sc,0 /* dummy */);
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
}
@@ -1338,22 +1375,22 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
if (IS_USMALL(0, sc.got)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = make_small(sc.got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = uint_to_big(sc.got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1367,7 +1404,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse,
Eterm *ret)
@@ -1405,7 +1442,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = chunk_size;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1419,7 +1456,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,mpi.save_term,&sc, 0 /* direction doesn't matter */);
+ doit_select(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */);
if (sc.accum != NIL) {
hp=HAlloc(p, 3);
RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE);
@@ -1470,15 +1507,15 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program,
needn't be copied */
@@ -1495,15 +1532,15 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(chunk_size),
@@ -1558,7 +1595,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.erase_lastterm = 0; /* Before first RET_TO_BIF */
sc.lastterm = NULL;
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
sc.tb = tb;
if (is_big(tptr[5])) {
@@ -1609,7 +1646,7 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1647,7 +1684,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.tb = tb;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(0,errcode);
}
@@ -1660,7 +1697,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select_delete(tb,mpi.save_term,&sc, 0 /* direction doesn't
+ doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't
matter */);
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
@@ -1682,20 +1719,20 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = make_small(sc.accum);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = uint_to_big(sc.accum, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1712,6 +1749,208 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
+static int db_select_replace_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Eterm ereplaced;
+ Sint prev_replaced;
+
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0)
+
+    /* Decode the continuation. We know it is a tuple and can skip
+       further validation, as this is only called from our own code. */
+
+ /* continuation:
+ {Table, Lastkey, EndCondition, MatchProgBin, HowManyReplaced}*/
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 5)
+ erts_exit(ERTS_ERROR_EXIT,"Internal error in ets:select_replace/1");
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
+
+ sc.p = p;
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.lastobj = NULL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+ if (is_big(tptr[5])) {
+ sc.replaced = big_to_uint32(tptr[5]);
+ } else {
+ sc.replaced = unsigned_val(tptr[5]);
+ }
+ prev_replaced = sc.replaced;
+
+ stack = get_any_stack(tb);
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+
+ // the more objects we've replaced, the more reductions we've consumed
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced)));
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p), DB_ERROR_NONE);
+ }
+ key = GETKEY(tb, sc.lastobj);
+ if (end_condition != NIL &&
+ (cmp_partly_bound(end_condition,key) > 0)) {
+ /* done anyway */
+ RET_TO_BIF(make_small(sc.replaced),DB_ERROR_NONE);
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE5
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ ereplaced);
+ RET_TO_BIF(bif_trap1(&ets_select_replace_continue_exp, p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
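+
The trap is invisible from Erlang: a replace over more than the ~1000-object reduction budget (sc.max above) simply completes across several scheduler slices via the continuation decoded here. A hedged shell-level sketch (sizes are illustrative):

    T = ets:new(t, [ordered_set]),
    ets:insert(T, [{N, 0} || N <- lists:seq(1, 100000)]),
    100000 = ets:select_replace(T, [{{'$1', '$2'}, [], [{{'$1', 1}}]}]).
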
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = THE_NON_VALUE;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm ereplaced;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.tb = tb;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.replaced = 0;
+
+ if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0),DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+ sc.all_objects = mpi.all_objects;
+
+ stack = get_static_stack(tb);
+ if (!mpi.got_partial && mpi.some_limitation &&
+ CMP_EQ(mpi.least,mpi.most)) {
+ TreeDbTerm* term = *(mpi.save_term);
+ doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
+ if (stack != NULL) {
+ if (TOP_NODE(stack) == term)
+ // throw away potentially invalid reference
+ REPLACE_TOP_NODE(stack, *(mpi.save_term));
+ release_stack(tb, stack);
+ }
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ if (stack == NULL)
+ stack = get_any_stack(tb);
+
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+ // the more objects we've replaced, the more reductions we've consumed
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced));
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ if (mpi.all_objects)
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
+
+ continuation = TUPLE5
+ (hp,
+ tid,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ mpb,
+ ereplaced);
+
+ /* Don't free mpi.mp, so don't use macro */
+ *ret = bif_trap1(&ets_select_replace_continue_exp, p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
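+
For reference, the Erlang-level contract implemented here (ets:select_replace/2): the body of the match specification must reconstruct an object with the same key, and the return value is the number of objects replaced. A minimal sketch:

    T = ets:new(t, [ordered_set]),
    ets:insert(T, [{1, a}, {2, b}, {3, c}]),
    3 = ets:select_replace(T, [{{'$1', '$2'}, [], [{{'$1', x}}]}]),
    [{1, x}, {2, x}, {3, x}] = ets:tab2list(T).
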
static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1761,23 +2000,22 @@ static void db_print_tree(fmtfn_t to, void *to_arg,
/* release all memory occupied by a single table */
static int db_free_table_tree(DbTable *tbl)
{
- while (!db_free_table_continue_tree(tbl))
+ while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0)
;
return 1;
}
-static int db_free_table_continue_tree(DbTable *tbl)
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
{
DbTableTree *tb = &tbl->tree;
- int result;
if (!tb->deletion) {
tb->static_stack.pos = 0;
tb->deletion = 1;
PUSH_NODE(&tb->static_stack, tb->root);
}
- result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT);
- if (result) { /* Completely done. */
+ reds = do_free_tree_continue(tb, reds);
+ if (reds >= 0) { /* Completely done. */
erts_db_free(ERTS_ALC_T_DB_STK,
(DbTable *) tb,
(void *) tb->static_stack.array,
@@ -1785,7 +2023,7 @@ static int db_free_table_continue_tree(DbTable *tbl)
ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
== sizeof(DbTable));
}
- return result;
+ return reds;
}
static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
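
At the Erlang level, the reds argument means that dropping a huge ordered_set cannot monopolize a scheduler: ets:delete/1 frees the tree in reduction-bounded slices of do_free_tree_continue(), yielding in between. A sketch (size is illustrative):

    T = ets:new(t, [ordered_set]),
    ets:insert(T, [{N} || N <- lists:seq(1, 500000)]),
    true = ets:delete(T).
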
@@ -1958,8 +2196,9 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
** For the select functions, analyzes the pattern and determines which
** part of the tree should be searched. Also compiles the match program.
*/
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi)
+static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm lst, tpl, ttpl;
Eterm *matches,*guards, *bodies;
@@ -1997,7 +2236,10 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2012,9 +2254,17 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
+ if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
mpi->all_objects = 0;
@@ -2022,7 +2272,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
++i;
partly_bound = NIL;
- res = key_given(tb, tpl, &mpi->save_term, &partly_bound);
+ res = key_given(tb, tpl, &(mpi->save_term), &partly_bound);
if ( res >= 0 ) { /* Can match something */
key = 0;
mpi->something_can_match = 1;
@@ -2068,7 +2318,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static int do_free_tree_cont(DbTableTree *tb, int num_left)
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
{
TreeDbTerm *root;
TreeDbTerm *p;
@@ -2087,15 +2337,14 @@ static int do_free_tree_cont(DbTableTree *tb, int num_left)
root = p;
} else {
free_term(tb, root);
- if (--num_left > 0) {
- break;
- } else {
- return 0; /* Done enough for now */
- }
+ if (--reds < 0) {
+ return reds; /* Done enough for now */
+ }
+ break;
}
}
}
- return 1;
+ return reds;
}
/*
@@ -2526,6 +2775,58 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
return this;
}
+/*
+ * Find a node and return the address of the node pointer (NULL if not found).
+ * Tries to reuse the existing stack for performance.
+ */
+
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) {
+ Eterm key = GETKEY(tb, this->dbterm.tpl);
+ TreeDbTerm *tmp;
+ TreeDbTerm *parent;
+ Sint c;
+
+ if(( tmp = TOP_NODE(stack)) != NULL) {
+ if (!cmp_key_eq(tb,key,tmp)) {
+ /* Start from the beginning */
+ stack->pos = stack->slot = 0;
+ }
+ }
+ if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
+ if (( tmp = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, tmp);
+ if (( c = cmp_key(tb,key,tmp) ) < 0) {
+		if (tmp->left == NULL) /* We are at the in-order
+					  successor; the element
+					  does not exist */
+ break;
+ else
+ tmp = tmp->left;
+ } else if (c > 0) {
+ if (tmp->right == NULL) /* Done */
+ return NULL;
+ else
+ tmp = tmp->right;
+ } else
+ break;
+ }
+ }
+
+ if (TOP_NODE(stack) != this)
+ return NULL;
+
+ parent = TOPN_NODE(stack, 1);
+ if (parent == NULL)
+ return ((this != tb->root) ? NULL : &(tb->root));
+ if (parent->left == this)
+ return &(parent->left);
+ if (parent->right == this)
+ return &(parent->right);
+ return NULL;
+}
+
static int
db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
DbUpdateHandle* handle)
@@ -2667,13 +2968,60 @@ static void traverse_forward(DbTableTree *tb,
}
/*
+ * Traverse the tree with an update callback function, used by db_select_replace
+ */
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack* stack,
+ Eterm lastkey,
+ int (*doit)(DbTableTree*,
+ TreeDbTerm**,
+ void*,
+ int),
+ void* context)
+{
+ int res;
+ TreeDbTerm *this, *next, **this_ptr;
+
+ if (lastkey == THE_NON_VALUE) {
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL) {
+ return;
+ }
+ while (this != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ this = TOP_NODE(stack);
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ } else {
+ next = find_prev(tb, stack, lastkey);
+ }
+
+ while ((this = next) != NULL) {
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ }
+}
+
+/*
 * Returns 0 if the key is not given, 1 if it is given, and -1 if no match
 * is possible. If the key is given, *ret is set to point to the object concerned.
*/
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound)
{
- TreeDbTerm *this;
+ TreeDbTerm **this;
Eterm key;
ASSERT(ret != NULL);
@@ -2683,7 +3031,7 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
if (is_non_value(key))
return -1; /* can't possibly match anything */
if (!db_has_variable(key)) { /* Bound key */
- if (( this = find_node(tb, key) ) == NULL) {
+ if (( this = find_node2(tb, key) ) == NULL) {
return -1;
}
*ret = this;
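
With a fully bound key, the select family above collapses to a single find_node2() lookup instead of a tree traversal; e.g. (illustrative):

    T = ets:new(t, [ordered_set]),
    ets:insert(T, [{N, N} || N <- lists:seq(1, 100)]),
    1 = ets:select_count(T, [{{42, '_'}, [], [true]}]).
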
@@ -3106,6 +3454,46 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
+static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
+ int forward)
+{
+ struct select_replace_context *sc = (struct select_replace_context *) ptr;
+ Eterm ret;
+
+ sc->lastobj = (*this)->dbterm.tpl;
+
+ /* Always backwards traversing */
+ if (sc->end_condition != NIL &&
+ (cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
+ return 0;
+ }
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
+ &(*this)->dbterm, NULL, 0);
+
+ if (is_value(ret)) {
+ TreeDbTerm* new;
+ TreeDbTerm* old = *this;
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, ret);
+ ASSERT(is_value(key));
+ ASSERT(cmp_key(tb, key, old) == 0);
+#endif
+ new = new_dbterm(tb, ret);
+ new->left = old->left;
+ new->right = old->right;
+ new->balance = old->balance;
+ sc->lastobj = new->dbterm.tpl;
+ *this = new;
+ free_term(tb, old);
+ ++(sc->replaced);
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
+
#ifdef TREE_DEBUG
static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
@@ -3134,6 +3522,9 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
#ifdef HARDDEBUG
+/*
+ * Not called at the moment, but kept since it might come into use.
+ */
void db_check_table_tree(DbTable *tbl)
{
DbTableTree *tb = &tbl->tree;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 3c5bfb2552..13eacaa8a9 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1119,9 +1119,186 @@ error:
return NULL;
}
-/* This is used when tracing */
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr) {
- return db_match_set_lint(p, matchexpr, DCOMP_TRACE);
+/*
+ * Compare a matching term 'a' with a constructing term 'b' for equality.
+ *
+ * Returns true if 'b' is guaranteed to always construct
+ * the same term as 'a' has matched.
+ */
+static int db_match_eq_body(Eterm a, Eterm b, int const_mode)
+{
+ DECLARE_ESTACK(s);
+ Uint arity;
+ Eterm *ap, *bp;
+ const Eterm CONST_MODE_OFF = THE_NON_VALUE;
+
+ while (1) {
+ switch(b & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ if (!is_list(a))
+ return 0;
+ ESTACK_PUSH2(s, CDR(list_val(a)), CDR(list_val(b)));
+ a = CAR(list_val(a));
+ b = CAR(list_val(b));
+ continue; /* loop without pop */
+
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(b)) {
+ bp = tuple_val(b);
+ if (!const_mode) {
+ if (bp[0] == make_arityval(1) && is_tuple(bp[1])) {
+ b = bp[1]; /* double-tuple syntax */
+ }
+ else if (bp[0] == make_arityval(2) && bp[1] == am_const) {
+ ESTACK_PUSH(s, CONST_MODE_OFF);
+ const_mode = 1; /* {const, term()} syntax */
+ b = bp[2];
+ continue; /* loop without pop */
+ }
+ else
+ return 0; /* function call or invalid tuple syntax */
+ }
+ if (!is_tuple(a))
+ return 0;
+
+ ap = tuple_val(a);
+ bp = tuple_val(b);
+ if (ap[0] != bp[0])
+ return 0;
+ arity = arityval(ap[0]);
+ if (arity > 0) {
+ a = *(++ap);
+ b = *(++bp);
+ while(--arity) {
+ ESTACK_PUSH2(s, *(++ap), *(++bp));
+ }
+ continue; /* loop without pop */
+ }
+ }
+ else if (is_map(b)) {
+ /* We don't know what other pairs the matched map may contain */
+ return 0;
+ }
+ else if (!eq(a,b)) /* other boxed */
+ return 0;
+ break;
+
+ case TAG_PRIMARY_IMMED1:
+ if (a != b || a == am_Underscore || a == am_DollarDollar
+ || a == am_DollarUnderscore
+ || (const_mode && db_is_variable(a) >= 0)) {
+
+ return 0;
+ }
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "db_compare: "
+ "Bad object on ESTACK: 0x%bex\n", b);
+ }
+
+pop_next:
+ if (ESTACK_ISEMPTY(s))
+ break; /* done */
+
+ b = ESTACK_POP(s);
+ if (b == CONST_MODE_OFF) {
+ ASSERT(const_mode);
+ const_mode = 0;
+ goto pop_next;
+ }
+ a = ESTACK_POP(s);
+ }
+
+ DESTROY_ESTACK(s);
+ return 1;
+}
+
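+
In match-specification terms, db_match_eq_body() decides whether the body's key expression is guaranteed to rebuild the matched key. A shell-level illustration of an accepted {const, ...} body and a rejected variable swap (hedged; the exact error term may vary):

    T = ets:new(t, [ordered_set]),
    ets:insert(T, {1, a}),
    %% {const, 1} always reconstructs key 1, so the spec is accepted:
    1 = ets:select_replace(T, [{{1, '_'}, [], [{{{const, 1}, b}}]}]),
    %% A body keyed on a different variable may change the key -> badarg:
    {'EXIT', {badarg, _}} =
        (catch ets:select_replace(T, [{{'$1', '$2'}, [], [{{'$2', '$1'}}]}])).
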
+/* This is used by select_replace */
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body)
+{
+ Eterm match_key;
+ Eterm* body_list;
+ Eterm single_body_term;
+ Eterm* single_body_term_tpl;
+ Eterm single_body_subterm;
+ Eterm single_body_subterm_key;
+ Eterm* single_body_subterm_key_tpl;
+ int const_mode;
+
+ if (!is_list(body)) {
+ return 0;
+ }
+
+ body_list = list_val(body);
+ if (CDR(body_list) != NIL) {
+ return 0;
+ }
+
+ single_body_term = CAR(body_list);
+ if (single_body_term == am_DollarUnderscore) {
+ /* same tuple is returned */
+ return 1;
+ }
+
+ if (!is_tuple(single_body_term)) {
+ return 0;
+ }
+
+ match_key = db_getkey(keypos, match);
+ if (!is_value(match_key)) {
+ // can't get key out of match
+ return 0;
+ }
+
+ single_body_term_tpl = tuple_val(single_body_term);
+ if (single_body_term_tpl[0] == make_arityval(2) &&
+ single_body_term_tpl[1] == am_const) {
+ /* {const, {"ets-tuple constant"}} */
+ single_body_subterm = single_body_term_tpl[2];
+ const_mode = 1;
+ }
+ else if (*single_body_term_tpl == make_arityval(1)) {
+ /* {{"ets-tuple construction"}} */
+ single_body_subterm = single_body_term_tpl[1];
+ const_mode = 0;
+ }
+ else {
+ /* not a tuple construction */
+ return 0;
+ }
+
+ single_body_subterm_key = db_getkey(keypos, single_body_subterm);
+ if (!is_value(single_body_subterm_key)) {
+ // can't get key out of single body subterm
+ return 0;
+ }
+
+ if (db_match_eq_body(match_key, single_body_subterm_key, const_mode)) {
+ /* tuple with same key is returned */
+ return 1;
+ }
+
+ if (const_mode) {
+ /* constant key did not match */
+ return 0;
+ }
+
+ if (!is_tuple(single_body_subterm_key)) {
+ /* can't possibly be an element instruction */
+ return 0;
+ }
+
+ single_body_subterm_key_tpl = tuple_val(single_body_subterm_key);
+ if (single_body_subterm_key_tpl[0] == make_arityval(3) &&
+ single_body_subterm_key_tpl[1] == am_element &&
+ single_body_subterm_key_tpl[3] == am_DollarUnderscore &&
+ single_body_subterm_key_tpl[2] == make_small(keypos))
+ {
+ /* {element, KeyPos, '$_'} */
+ return 1;
+ }
+
+ return 0;
}
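
Summarized in match-specification syntax, for a table with keypos 1 the validator accepts these body shapes and rejects a key bound to a different variable (illustrative bindings only):

    KeepWhole = [{{'$1', '$2'}, [], ['$_']}],
    KeepVar   = [{{'$1', '$2'}, [], [{{'$1', new_val}}]}],
    KeepElem  = [{{'$1', '$2'}, [], [{{{element, 1, '$_'}, new_val}}]}],
    Rejected  = [{{'$1', '$2'}, [], [{{'$2', '$1'}}]}].
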
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
@@ -1702,17 +1879,18 @@ error: /* Here is where we land when compilation failed. */
/*
** Free a match program (in a binary)
*/
-void erts_db_match_prog_destructor(Binary *bprog)
+int erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
if (bprog == NULL)
- return;
+ return 1;
prog = Binary2MatchProg(bprog);
if (prog->term_save != NULL) {
free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
+ return 1;
}
void
@@ -1769,7 +1947,7 @@ Eterm db_prog_match(Process *c_p,
Eterm t;
Eterm *esp;
MatchVariable* variables;
- BeamInstr *cp;
+ ErtsCodeMFA *cp;
const UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
@@ -1834,7 +2012,8 @@ restart:
do_catch = 0;
fail_label = -1;
build_proc = psp;
- esdp->current_process = psp;
+ if (esdp)
+ esdp->current_process = psp;
#ifdef DEBUG
ASSERT(variables == mpsp->u.variables);
@@ -2171,7 +2350,7 @@ restart:
}
}
else {
- *esp = term;
+ *esp++ = term;
}
break;
case matchPushArrayAsList:
@@ -2408,9 +2587,9 @@ restart:
ehp = HAllocX(build_proc, 4, HEAP_XTRA);
*esp++ = make_tuple(ehp);
ehp[0] = make_arityval(3);
- ehp[1] = cp[0];
- ehp[2] = cp[1];
- ehp[3] = make_small((Uint) cp[2]);
+ ehp[1] = cp->module;
+ ehp[2] = cp->function;
+ ehp[3] = make_small((Uint) cp->arity);
}
break;
case matchSilent:
@@ -2503,7 +2682,8 @@ restart:
do_catch = 1;
if (in_flags & ERTS_PAM_COPY_RESULT) {
build_proc = c_p;
- esdp->current_process = c_p;
+ if (esdp)
+ esdp->current_process = c_p;
}
break;
case matchHalt:
@@ -2545,14 +2725,6 @@ success:
}
-/*
- * Convert a match program to a "magic" binary to return up to erlang
- */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp)
-{
- return erts_mk_magic_binary_term(hpp, &MSO(p), mp);
-}
-
DMCErrInfo *db_new_dmc_err_info(void)
{
DMCErrInfo *ret = erts_alloc(ERTS_ALC_T_DB_DMC_ERR_INFO,
@@ -3101,9 +3273,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
}
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
ASSERT(u.pb != &tmp);
@@ -3111,6 +3281,10 @@ void db_cleanup_offheap_comp(DbTerm* obj)
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
ASSERT(u.pb != &tmp);
@@ -3266,13 +3440,6 @@ int db_has_variable(Eterm node) {
return 0;
}
-int erts_db_is_compiled_ms(Eterm term)
-{
- return (is_binary(term)
- && (thing_subtag(*binary_val(term)) == REFC_BINARY_SUBTAG)
- && IsMatchProgBinary((((ProcBin *) binary_val(term))->val)));
-}
-
/*
** Local (static) utilities.
*/
@@ -5170,6 +5337,7 @@ void db_free_tmp_uncompressed(DbTerm* obj)
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
int all, DbTerm* obj, Eterm** hpp, Uint extra)
{
+ enum erts_pam_run_flags flags;
Uint32 dummy;
Eterm res;
@@ -5177,9 +5345,13 @@ Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
obj = db_alloc_tmp_uncompressed(tb, obj);
}
+ flags = (hpp ?
+ ERTS_PAM_COPY_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE :
+ ERTS_PAM_TMP_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE);
+
res = db_prog_match(c_p, c_p,
bprog, make_tuple(obj->tpl), NULL, 0,
- ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy);
+ flags, &dummy);
if (is_value(res) && hpp!=NULL) {
*hpp = HAlloc(c_p, extra);
@@ -5289,24 +5461,35 @@ void db_match_dis(Binary *bp)
case matchEqRef:
++t;
{
- RefThing *rt = (RefThing *) t;
+ Uint32 *num;
int ri;
- n = thing_arityval(rt->header);
- erts_printf("EqRef\t(%d) {", (int) n);
+
+ if (is_ordinary_ref_thing(t)) {
+ ErtsORefThing *rt = (ErtsORefThing *) t;
+ num = rt->num;
+ t += TermWords(ERTS_REF_THING_SIZE);
+ }
+ else {
+ ErtsMRefThing *mrt = (ErtsMRefThing *) t;
+ ASSERT(is_magic_ref_thing(t));
+ num = mrt->mb->refn;
+ t += TermWords(ERTS_MAGIC_REF_THING_SIZE);
+ }
+
+ erts_printf("EqRef\t(%d) {", (int) ERTS_REF_NUMBERS);
first = 1;
- for (ri = 0; ri < n; ++ri) {
+ for (ri = 0; ri < ERTS_REF_NUMBERS; ++ri) {
if (first)
first = 0;
else
erts_printf(", ");
#if defined(ARCH_64)
- erts_printf("0x%016bex", rt->data.ui[ri]);
+ erts_printf("0x%016bex", num[ri]);
#else
- erts_printf("0x%08bex", rt->data.ui[ri]);
+ erts_printf("0x%08bex", num[ri]);
#endif
}
}
- t += TermWords(REF_THING_SIZE);
erts_printf("}\n");
break;
case matchEqBig:
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index b30f0580dd..19055c6110 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -23,6 +23,7 @@
#include "global.h"
#include "erl_message.h"
+#include "erl_bif_unique.h"
/*#define HARDDEBUG 1*/
@@ -74,9 +75,6 @@ typedef struct db_term {
*/
} DbTerm;
-union db_table;
-typedef union db_table DbTable;
-
#define DB_MUST_RESIZE 1
#define DB_NEW_OBJECT 2
#define DB_INC_TRY_GROW 4
@@ -137,43 +135,56 @@ typedef struct db_table_method
Eterm slot,
Eterm* ret);
int (*db_select_chunk)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Sint chunk_size,
int reverse,
Eterm* ret);
int (*db_select)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
int reverse,
Eterm* ret);
int (*db_select_delete)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_delete_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_count)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_count_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
+ int (*db_select_replace)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm tid,
+ Eterm pattern,
+ Eterm* ret);
+ int (*db_select_replace_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
int (*db_delete_all_objects)(Process* p,
DbTable* db /* [in out] */ );
int (*db_free_table)(DbTable* db /* [in out] */ );
- int (*db_free_table_continue)(DbTable* db); /* [in out] */
+ SWord (*db_free_table_continue)(DbTable* db, SWord reds);
void (*db_print)(fmtfn_t to,
void* to_arg,
@@ -183,7 +194,6 @@ typedef struct db_table_method
void (*db_foreach_offheap)(DbTable* db, /* [in out] */
void (*func)(ErlOffHeap *, void *),
void *arg);
- void (*db_check_table)(DbTable* tb);
/* Lookup a dbterm for updating. Return false if not found. */
int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj,
@@ -197,11 +207,27 @@ typedef struct db_table_method
} DbTableMethod;
typedef struct db_fixation {
- Eterm pid;
+ /* Node in fixed_tabs list */
+ struct {
+ struct db_fixation *next, *prev;
+ Binary* btid;
+ } tabs;
+
+ /* Node in fixing_procs tree */
+ struct {
+ struct db_fixation *left, *right, *parent;
+ int is_red;
+ Process* p;
+ } procs;
+
Uint counter;
- struct db_fixation *next;
} DbFixation;
+typedef struct {
+ DbTable *next;
+ DbTable *prev;
+} DbTableList;
+
/*
* This structure contains data for all different types of database
* tables. Note that these fields must match the same fields
@@ -211,10 +237,13 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_smp_refc_t ref; /* fixation counter */
+ erts_smp_refc_t refc; /* reference count of table struct */
+ erts_smp_refc_t fix_count;/* fixation counter */
+ DbTableList all;
+ DbTableList owned;
#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
- erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
+ erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */
int is_thread_safe; /* No fine locking inside table needed */
Uint32 type; /* table type, *read only* after creation */
#endif
@@ -223,7 +252,7 @@ typedef struct db_table_common {
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
Uint64 heir_started_interval; /* To further identify the heir */
Eterm the_name; /* an atom */
- Eterm id; /* atom | integer */
+ Binary *btid;
DbTableMethod* meth; /* table methods */
erts_smp_atomic_t nitems; /* Total number of items in table */
erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
@@ -231,36 +260,35 @@ typedef struct db_table_common {
ErtsMonotonicTime monotonic;
ErtsMonotonicTime offset;
} time;
- DbFixation* fixations; /* List of processes who have done safe_fixtable,
+    DbFixation* fixing_procs;  /* Tree of processes that have done safe_fixtable,
"local" fixations not included. */
/* All 32-bit fields */
Uint32 status; /* bit masks defined below */
- int slot; /* slot index in meta_main_tab */
int keypos; /* defaults to 1 */
int compress;
} DbTableCommon;
/* These are status bit patterns */
-#define DB_NORMAL (1 << 0)
-#define DB_PRIVATE (1 << 1)
-#define DB_PROTECTED (1 << 2)
-#define DB_PUBLIC (1 << 3)
-#define DB_BAG (1 << 4)
-#define DB_SET (1 << 5)
-/*#define DB_LHASH (1 << 6)*/
-#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */
-#define DB_DUPLICATE_BAG (1 << 8)
-#define DB_ORDERED_SET (1 << 9)
-#define DB_DELETE (1 << 10) /* table is being deleted */
-#define DB_FREQ_READ (1 << 11)
-
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
+#define DB_FREQ_READ (1 << 9) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 10)
+
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET\
+ |DB_FINE_LOCKED|DB_FREQ_READ|DB_NAMED_TABLE)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_smp_refc_read(&(T)->common.ref,0))
+#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
/*
@@ -355,7 +383,8 @@ Eterm db_add_counter(Eterm** hpp, Wterm counter, Eterm incr);
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags);
Binary *db_match_set_compile(Process *p, Eterm matchexpr,
Uint flags);
-void erts_db_match_prog_destructor(Binary *);
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body);
+int erts_db_match_prog_destructor(Binary *);
typedef struct match_prog {
ErlHeapFragment *term_save; /* Only if needed, a list of message
@@ -456,29 +485,54 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei);
void db_free_dmc_err_info(DMCErrInfo *ei);
/* Completely free's an error info structure, including all recorded
errors */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp);
-/* Convert a match program to a erlang "magic" binary to be returned to userspace,
- increments the reference counter. */
-int erts_db_is_compiled_ms(Eterm term);
+
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary(Eterm term);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary_unchecked(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * Convert a match program to a "magic" ref to return up to erlang
+ */
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp)
+{
+ return erts_mk_magic_ref(hpp, &MSO(p), mp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary_unchecked(Eterm term)
+{
+ Binary *bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT((ERTS_MAGIC_BIN_DESTRUCTOR(bp) == erts_db_match_prog_destructor));
+ return bp;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary(Eterm term)
+{
+ Binary *bp;
+ if (!is_internal_magic_ref(term))
+ return NULL;
+ bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bp) != erts_db_match_prog_destructor)
+ return NULL;
+ return bp;
+}
+
+#endif
/*
** Convenience when compiling into Binary structures
*/
#define IsMatchProgBinary(BP) \
- (((BP)->flags & BIN_FLAG_MAGIC) \
+ (((BP)->intern.flags & BIN_FLAG_MAGIC) \
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
(ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
-/*
-** Debugging
-*/
-#ifdef HARDDEBUG
-void db_check_tables(void); /* in db.c */
-#define CHECK_TABLES() db_check_tables()
-#else
-#define CHECK_TABLES()
-#endif
#endif /* _DB_UTIL_H */
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 3526bb684d..bf8244564a 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -130,7 +130,7 @@ pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
Uint32 *ref_num;
erts_print(to, to_arg, "#Ref<%lu", ref_channel_no(obj));
ref_num = ref_numbers(obj);
- for (i = ref_no_of_numbers(obj)-1; i >= 0; i--)
+ for (i = ref_no_numbers(obj)-1; i >= 0; i--)
erts_print(to, to_arg, ",%lu", ref_num[i]);
erts_print(to, to_arg, ">");
break;
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
new file mode 100644
index 0000000000..10c76d2579
--- /dev/null
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -0,0 +1,87 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2016. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Static declaration of BIFs that should execute on dirty schedulers.
+#
+# <dirty-bif-decl> ::= <type> <bif>
+# <bif> ::= <module> ":" <name> "/" <arity>
+# <type> ::= dirty-cpu | dirty-io | dirty-cpu-test | dirty-io-test
+#
+# When dirty scheduler support is available, a BIF declared with the
+# 'dirty-cpu' type will unconditionally execute on a dirty CPU scheduler,
+# and a BIF declared with the type 'dirty-io' will unconditionally execute
+# on a dirty IO scheduler. When dirty scheduler support is not available
+# all BIFs will of course execute on normal schedulers.
+#
+# When the emulator has been configured with the debug option
+# '--enable-dirty-schedulers-test', BIFs with the types 'dirty-cpu-test',
+# and 'dirty-io-test' will unconditionally execute on dirty schedulers.
+# When this debug option has not been enabled, these BIFs will be executed
+# on normal schedulers.
+#
+# BIFs marked as 'ubif' in ./bif.tab will be ignored, i.e., will always
+# execute on normal schedulers.
+#
+
+# --- Dirty BIFs ---
+
+dirty-cpu erts_debug:dirty_cpu/2
+dirty-io erts_debug:dirty_io/2
+
+# lcnt_control/1 doesn't need to be dirty.
+dirty-cpu erts_debug:lcnt_control/2
+dirty-cpu erts_debug:lcnt_collect/0
+dirty-cpu erts_debug:lcnt_clear/0
+
+# --- TEST of Dirty BIF functionality ---
+# Functions below will execute on dirty schedulers when emulator has
+# been configured for testing dirty schedulers. This is used for test
+# and debug purposes only. We really do *not* want to execute these
+# on dirty schedulers on a real system.
+
+dirty-cpu-test erlang:'++'/2
+dirty-cpu-test erlang:append/2
+dirty-cpu-test erlang:'--'/2
+dirty-cpu-test erlang:subtract/2
+dirty-cpu-test erlang:iolist_size/1
+dirty-cpu-test erlang:make_tuple/2
+dirty-cpu-test erlang:make_tuple/3
+dirty-cpu-test erlang:append_element/2
+dirty-cpu-test erlang:insert_element/3
+dirty-cpu-test erlang:delete_element/2
+dirty-cpu-test erlang:atom_to_list/1
+dirty-cpu-test erlang:list_to_atom/1
+dirty-cpu-test erlang:list_to_existing_atom/1
+dirty-cpu-test erlang:integer_to_list/1
+dirty-cpu-test erlang:string_to_integer/1
+dirty-cpu-test erlang:list_to_integer/1
+dirty-cpu-test erlang:list_to_integer/2
+dirty-cpu-test erlang:float_to_list/1
+dirty-cpu-test erlang:float_to_list/2
+dirty-cpu-test erlang:float_to_binary/1
+dirty-cpu-test erlang:float_to_binary/2
+dirty-cpu-test erlang:string_to_float/1
+dirty-cpu-test erlang:list_to_float/1
+dirty-cpu-test erlang:binary_to_float/1
+dirty-cpu-test erlang:tuple_to_list/1
+dirty-cpu-test erlang:list_to_tuple/1
+dirty-cpu-test erlang:display/1
+dirty-cpu-test erlang:display_string/1
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 97a69140c3..0e8ebf0c98 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -76,11 +76,10 @@ typedef struct {
# endif
#endif
-/* Values for mode arg to driver_select() */
-#define ERL_DRV_READ (1 << 0)
-#define ERL_DRV_WRITE (1 << 1)
-#define ERL_DRV_USE (1 << 2)
-#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (1 << 3))
+#define ERL_DRV_READ ((int)ERL_NIF_SELECT_READ)
+#define ERL_DRV_WRITE ((int)ERL_NIF_SELECT_WRITE)
+#define ERL_DRV_USE ((int)ERL_NIF_SELECT_STOP)
+#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (ERL_DRV_USE << 1))
/* Old deprecated */
#define DO_READ ERL_DRV_READ
@@ -176,13 +175,6 @@ struct erl_drv_event_data {
#endif
typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-/*
- * A driver monitor
- */
-typedef struct {
- unsigned char data[sizeof(void *)*4];
-} ErlDrvMonitor;
-
typedef struct {
unsigned long megasecs;
unsigned long secs;
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 6ec5fbb895..f88138063e 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -49,6 +49,21 @@ typedef enum {
ERL_DIRTY_JOB_IO_BOUND = 2
} ErlDirtyJobFlags;
+/* Values for enif_select AND mode arg for driver_select() */
+enum ErlNifSelectFlags {
+ ERL_NIF_SELECT_READ = (1 << 0),
+ ERL_NIF_SELECT_WRITE = (1 << 1),
+ ERL_NIF_SELECT_STOP = (1 << 2)
+};
+
+/*
+ * A driver monitor
+ */
+typedef struct {
+ unsigned char data[sizeof(void *)*4];
+} ErlDrvMonitor;
+
+
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
# undef SIZEOF_CHAR
@@ -69,10 +84,6 @@ typedef enum {
# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
# undef SIZEOF_LONG_LONG
#endif
-#ifdef HALFWORD_HEAP_EMULATOR
-# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
-# undef HALFWORD_HEAP_EMULATOR
-#endif
#include "erl_int_sizes_config.h"
#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
# error SIZEOF_CHAR mismatch
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 92edce5176..742c428f2a 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -54,6 +54,9 @@ fatal_error(int err, char *func)
struct ErlDrvMutex_ {
ethr_mutex mtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -64,6 +67,9 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -140,7 +146,8 @@ void erl_drv_thr_init(void)
sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
used_tsd_keys[i] = NULL;
- erts_mtx_init(&tsd_mtx, "drv_tsd");
+ erts_mtx_init(&tsd_mtx, "drv_tsd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
/*
@@ -161,14 +168,18 @@ erl_drv_mutex_create(char *name)
opt.posix_compliant = 1;
if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
- dmtx = NULL;
+ return NULL;
}
- else if (!name)
- dmtx->name = no_name;
- else {
+ if (name) {
dmtx->name = ((char *) dmtx) + sizeof(ErlDrvMutex);
sys_strcpy(dmtx->name, name);
+ } else {
+ dmtx->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&dmtx->lcnt, dmtx->name, NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return dmtx;
#else
@@ -180,7 +191,11 @@ void
erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&dmtx->lcnt);
+#endif
+ res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_mutex_destroy()");
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
@@ -202,9 +217,14 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
+ int res;
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_trylock()");
- return ethr_mutex_trylock(&dmtx->mtx);
+ res = ethr_mutex_trylock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&dmtx->lcnt, res);
+#endif
+ return res;
#else
return 0;
#endif
@@ -216,8 +236,14 @@ erl_drv_mutex_lock(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_lock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+#endif
ethr_mutex_lock(&dmtx->mtx);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
}
void
@@ -226,6 +252,9 @@ erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -306,10 +335,18 @@ erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
if (!dcnd || !dmtx) {
fatal_error(EINVAL, "erl_drv_cond_wait()");
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
while (1) {
int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
- if (res == 0)
+ if (res == 0) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
break;
+ }
}
#endif
}
@@ -324,14 +361,18 @@ erl_drv_rwlock_create(char *name)
if (drwlck) {
if (ethr_rwmutex_init(&drwlck->rwmtx) != 0) {
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
- drwlck = NULL;
+ return NULL;
}
- else if (!name)
- drwlck->name = no_name;
- else {
+ if (name) {
drwlck->name = ((char *) drwlck) + sizeof(ErlDrvRWLock);
sys_strcpy(drwlck->name, name);
+ } else {
+ drwlck->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&drwlck->lcnt, drwlck->name, NIL,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return drwlck;
#else
@@ -343,7 +384,11 @@ void
erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&drwlck->lcnt);
+#endif
+ res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_rwlock_destroy()");
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
@@ -364,9 +409,14 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
- return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+ res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ);
+#endif
+ return res;
#else
return 0;
#endif
@@ -378,7 +428,13 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
+#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
+#endif
#endif
}
@@ -388,6 +444,9 @@ erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
+#endif
ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -396,9 +455,14 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
- return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+ res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
+#endif
+ return res;
#else
return 0;
#endif
@@ -410,7 +474,13 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
+#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
+#endif
#endif
}
@@ -420,6 +490,9 @@ erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
+#endif
ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 53a1801b72..535f677bb3 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -31,6 +31,9 @@
static Hash erts_fun_table;
#include "erl_smp.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
static erts_smp_rwmtx_t erts_fun_table_lock;
@@ -49,8 +52,8 @@ static void fun_free(ErlFunEntry* obj);
* to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
* as an illegal arity when attempting to call a fun.
*/
-static BeamInstr unloaded_fun_code[3] = {NIL, -1, 0};
-static BeamInstr* unloaded_fun = unloaded_fun_code + 2;
+static BeamInstr unloaded_fun_code[4] = {NIL, NIL, -1, 0};
+static BeamInstr* unloaded_fun = unloaded_fun_code + 3;
void
erts_init_fun_table(void)
@@ -60,7 +63,8 @@ erts_init_fun_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
@@ -219,6 +223,10 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end)
fe->pend_purge_address = addr;
ERTS_SMP_WRITE_MEMORY_BARRIER;
fe->address = unloaded_fun;
+#ifdef HIPE
+ fe->pend_purge_native_address = fe->native_address;
+ hipe_set_closure_stub(fe);
+#endif
erts_purge_state_add_fun(fe);
}
b = b->next;
@@ -234,8 +242,12 @@ erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
- if (fe->address == unloaded_fun)
+ if (fe->address == unloaded_fun) {
fe->address = fe->pend_purge_address;
+#ifdef HIPE
+ fe->native_address = fe->pend_purge_native_address;
+#endif
+ }
}
}
@@ -244,8 +256,12 @@ erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no)
{
Uint ix;
- for (ix = 0; ix < no; ix++)
+ for (ix = 0; ix < no; ix++) {
funs[ix]->pend_purge_address = NULL;
+#ifdef HIPE
+ funs[ix]->pend_purge_native_address = NULL;
+#endif
+ }
}
void
@@ -256,6 +272,9 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
fe->pend_purge_address = NULL;
+#ifdef HIPE
+ fe->pend_purge_native_address = NULL;
+#endif
if (erts_smp_refc_dectest(&fe->refc, 0) == 0)
erts_erase_fun_entry(fe);
}
@@ -324,6 +343,7 @@ fun_alloc(ErlFunEntry* template)
obj->pend_purge_address = NULL;
#ifdef HIPE
obj->native_address = NULL;
+ obj->pend_purge_native_address = NULL;
#endif
return obj;
}
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index b1d5cddb34..289d0d0b28 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -45,6 +45,9 @@ typedef struct erl_fun_entry {
erts_smp_refc_t refc; /* Reference count: One for code + one for each
fun object in each process. */
BeamInstr *pend_purge_address; /* address stored during a pending purge */
+#ifdef HIPE
+ UWord* pend_purge_native_address;
+#endif
} ErlFunEntry;
/*
@@ -57,9 +60,6 @@ typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
ErlFunEntry* fe; /* Pointer to fun entry. */
struct erl_off_heap_header* next;
-#ifdef HIPE
- UWord* native_address; /* Native code for the fun. */
-#endif
Uint arity; /* The arity of the fun. */
Uint num_free; /* Number of free variables (in env). */
/* -- The following may be compound Erlang terms ---------------------- */
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index bb36be0848..8cb977a7f3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -35,13 +35,14 @@
#include "error.h"
#include "big.h"
#include "erl_gc.h"
-#if HIPE
+#ifdef HIPE
#include "hipe_stack.h"
#include "hipe_mode_switch.h"
#endif
#include "dtrace-wrapper.h"
#include "erl_bif_unique.h"
#include "dist.h"
+#include "erl_nfunc_sched.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -120,11 +121,14 @@ static Eterm *full_sweep_heaps(Process *p,
char *oh, Uint oh_size,
Eterm *objv, int nobj);
static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls);
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage);
static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static void do_minor(Process *p, ErlHeapFragment *live_hf_end,
char *mature, Uint mature_size,
Uint new_sz, Eterm* objv, int nobj);
@@ -174,7 +178,7 @@ Uint erts_test_long_gc_sleep; /* Only used for testing... */
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
@@ -270,7 +274,8 @@ erts_init_gc(void)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
init_gc_info(&dirty_gc.info);
#endif
@@ -410,20 +415,20 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
if (is_non_value(result)) {
if (p->freason == TRAP) {
- #if HIPE
+#ifdef HIPE
if (regs == NULL) {
regs = erts_proc_sched_data(p)->x_reg_array;
}
- #endif
- cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls);
+#endif
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls, 0);
} else {
- cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls, 0);
}
} else {
Eterm val[1];
val[0] = result;
- cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls, 0);
result = val[0];
}
BUMP_REDS(p, cost);
@@ -435,7 +440,7 @@ Eterm
erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
{
return erts_gc_after_bif_call_lhf(p, ERTS_INVALID_HFRAG_PTR,
- result, regs, arity);
+ result, regs, arity);
}
static ERTS_INLINE void reset_active_writer(Process *p)
@@ -475,9 +480,15 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
p->live_hf_end = live_hf_end;
}
- if (need == 0)
+ if (need == 0) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)));
+ goto force_reschedule;
+ }
+#endif
return 1;
-
+ }
/*
* Satisfy need in a heap fragment...
*/
@@ -530,6 +541,10 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
p->heap_hfrag = hfrag;
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+force_reschedule:
+#endif
+
/* Make sure that we do a proper GC as soon as possible... */
p->flags |= F_FORCE_GC;
reds_left = ERTS_REDS_LEFT(p, fcalls);
@@ -601,6 +616,32 @@ young_gen_usage(Process *p)
} \
} while (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE void
+check_for_possibly_long_gc(Process *p, Uint ygen_usage)
+{
+ int major;
+ Uint sz;
+
+ major = (p->flags & F_NEED_FULLSWEEP) || GEN_GCS(p) >= MAX_GEN_GCS(p);
+
+ sz = ygen_usage;
+ sz += p->hend - p->stop;
+ if (p->flags & F_ON_HEAP_MSGQ)
+ sz += p->msg.len;
+ if (major)
+ sz += p->old_htop - p->old_heap;
+
+ if (sz >= ERTS_POTENTIALLY_LONG_GC_HSIZE) {
+ ASSERT(!(p->flags & (F_DISABLE_GC|F_DELAY_GC)));
+ p->flags |= major ? F_DIRTY_MAJOR_GC : F_DIRTY_MINOR_GC;
+ erts_schedule_dirty_sys_execution(p);
+ }
+}
+
+#endif
+
/*
* Garbage collect a process.
*
@@ -611,13 +652,15 @@ young_gen_usage(Process *p)
*/
static int
garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls)
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage)
{
Uint reclaimed_now = 0;
+ Uint ygen_usage;
Eterm gc_trace_end_tag;
int reds;
ErtsMonotonicTime start_time;
- ErtsSchedulerData *esdp;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
@@ -627,13 +670,26 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
- ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
- >= erts_proc_sched_data(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds);
state = erts_smp_atomic32_read_nob(&p->state);
- if (p->flags & (F_DISABLE_GC|F_DELAY_GC) || state & ERTS_PSFLG_EXITING)
+ if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ delay_gc_before_start:
+#endif
return delay_garbage_collection(p, live_hf_end, need, fcalls);
+ }
+
+ ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto delay_gc_before_start;
+ }
+#endif
if (p->abandoned_heap)
live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -642,8 +698,6 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
- esdp = erts_get_scheduler_data();
-
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
start_time = erts_get_monotonic_time(esdp);
@@ -670,14 +724,25 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
}
DTRACE2(gc_minor_start, pidbuf, need);
- reds = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = minor_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
if (reds == -1) {
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & F_DIRTY_MAJOR_GC)
+ goto delay_gc_after_start;
+ }
+#endif
goto do_major_collection;
}
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~F_DIRTY_MINOR_GC;
gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
@@ -686,7 +751,10 @@ do_major_collection:
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
DTRACE2(gc_major_start, pidbuf, need);
- reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = major_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
gc_trace_end_tag = am_gc_major_end;
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
@@ -716,6 +784,9 @@ do_major_collection:
am_kill, NIL, NULL, 0);
erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ delay_gc_after_start:
+#endif
/* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
we have to remove it after the signal is sent */
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
@@ -764,7 +835,7 @@ do_major_collection:
esdp->gc_info.reclaimed += reclaimed_now;
}
- FLAGS(p) &= ~F_FORCE_GC;
+ FLAGS(p) &= ~(F_FORCE_GC|F_HIBERNATED);
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
ERTS_MSACC_POP_STATE_M();
@@ -799,7 +870,7 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
@@ -810,7 +881,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
@@ -821,8 +892,8 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
 * Place all living data on the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
*/
-void
-erts_garbage_collect_hibernate(Process* p)
+static int
+garbage_collect_hibernate(Process* p, int check_long_gc)
{
Uint heap_size;
Eterm* heap;
@@ -836,6 +907,20 @@ erts_garbage_collect_hibernate(Process* p)
if (p->flags & F_DISABLE_GC)
ERTS_INTERNAL_ERROR("GC disabled");
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
+ else if (check_long_gc) {
+ Uint flags = p->flags;
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, (p->htop - p->heap) + p->mbuf_sz);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ p->flags = flags|F_DIRTY_GC_HIBERNATE;
+ return 1;
+ }
+ p->flags = flags;
+ }
+#endif
/*
* Preliminaries.
*/
@@ -847,7 +932,6 @@ erts_garbage_collect_hibernate(Process* p)
* Do it.
*/
-
heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
@@ -929,12 +1013,20 @@ erts_garbage_collect_hibernate(Process* p)
ErtsGcQuickSanityCheck(p);
+ p->flags |= F_HIBERNATED;
+
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
reds = gc_cost(actual_size, actual_size);
- BUMP_REDS(p, reds);
+ return reds;
}
+void
+erts_garbage_collect_hibernate(Process* p)
+{
+ int reds = garbage_collect_hibernate(p, 1);
+ BUMP_REDS(p, reds);
+}
/*
* HiPE native code stack scanning procedures:
@@ -988,10 +1080,11 @@ static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
#endif /* HIPE */
-void
+int
erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint byte_lit_size,
- struct erl_off_heap_header* oh)
+ struct erl_off_heap_header* oh,
+ int fcalls)
{
Uint lit_size = byte_lit_size / sizeof(Eterm);
Uint old_heap_size;
@@ -1003,20 +1096,53 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint area_size;
Eterm* old_htop;
Uint n;
+ Uint ygen_usage = 0;
struct erl_off_heap_header** prev = NULL;
+ Sint64 reds;
+ int hibernated = !!(p->flags & F_HIBERNATED);
+
+ if (p->flags & (F_DISABLE_GC|F_DELAY_GC))
+ ERTS_INTERNAL_ERROR("GC disabled");
+
+ /*
+ * First an ordinary major collection...
+ */
+
+ p->flags |= F_NEED_FULLSWEEP;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~F_DIRTY_CLA;
+ else {
+ Uint size = byte_lit_size/sizeof(Uint);
+ ygen_usage = young_gen_usage(p);
+ if (hibernated)
+ size = size*2 + 3*ygen_usage;
+ else
+ size = size + 2*ygen_usage;
+ check_for_possibly_long_gc(p, size);
+ if (p->flags & F_DIRTY_MAJOR_GC) {
+ p->flags |= F_DIRTY_CLA;
+ return 10;
+ }
+ }
+#endif
+
+ reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
+ p->arg_reg, p->arity, fcalls,
+ ygen_usage);
+
+ ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
- if (p->flags & F_DISABLE_GC)
- return;
/*
* Set GC state.
*/
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
/*
- * We assume that the caller has already done a major collection
- * (which has discarded the old heap), so that we don't have to cope
- * with pointer to literals on the old heap. We will now allocate
- * an old heap to contain the literals.
+ * Just did a major collection (which has discarded the old heap),
+ * so that we don't have to cope with pointers to literals on the
+ * old heap. We will now allocate an old heap to contain the literals.
*/
ASSERT(p->old_heap == 0); /* Must NOT have an old heap yet. */
@@ -1070,7 +1196,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1081,7 +1207,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ move_cons(&ptr,val,&old_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1139,7 +1265,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* link it into the MSO list for the process.
*/
- erts_refc_inc(&bptr->refc, 1);
+ erts_refc_inc(&bptr->intern.refc, 1);
*prev = ptr;
prev = &ptr->next;
}
@@ -1159,15 +1285,27 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* Restore status.
*/
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0);
+
+ if (hibernated) {
+ /* Restore the process into hibernated state... */
+ reds += garbage_collect_hibernate(p, 0);
+ }
+
+ if (reds > INT_MAX)
+ return INT_MAX;
+ return (int) reds;
}
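
erts_garbage_collect_literals now returns its cost in reductions, and before doing any work it estimates whether the copy-literals pass should itself run on a dirty scheduler. The estimate weights a hibernated process more heavily, since it is re-compacted afterwards. A sketch mirroring the arithmetic above, assuming all sizes are in words:

    /* Estimated work for the copy-literals GC, cf. the
     * ERTS_DIRTY_SCHEDULERS block above. */
    static unsigned long
    cla_estimated_words(unsigned long lit_words,
                        unsigned long ygen_usage,
                        int hibernated)
    {
        return hibernated ? lit_words*2 + 3*ygen_usage
                          : lit_words   + 2*ygen_usage;
    }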
static int
minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
Uint mature_size = p->high_water - mature;
- Uint size_before = young_gen_usage(p);
+ Uint size_before = ygen_usage;
/*
* Check if we have gone past the max heap size limit
@@ -1370,9 +1508,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ move_boxed(&ptr,val,&old_htop,g_ptr++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1385,9 +1523,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ move_cons(&ptr,val,&old_htop,g_ptr++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1429,9 +1567,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,n_hp++);
+ move_boxed(&ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1443,9 +1581,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,n_hp++);
+ move_cons(&ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1465,10 +1603,10 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
*origptr = val;
mb->base = binary_bytes(val);
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,origptr);
+ move_boxed(&ptr,val,&old_htop,origptr);
mb->base = binary_bytes(mb->orig);
} else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(mb->orig);
}
}
@@ -1538,7 +1676,8 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
static int
major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Uint size_before, size_after, stack_size;
Eterm* n_heap;
@@ -1556,7 +1695,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
* to receive all live data.
*/
- size_before = young_gen_usage(p);
+ size_before = ygen_usage;
size_before += p->old_htop - p->old_heap;
stack_size = p->hend - p->stop;
@@ -1682,7 +1821,7 @@ full_sweep_heaps(Process *p,
Eterm* ptr;
Eterm val;
Eterm gval = *g_ptr;
-
+
switch (primary_tag(gval)) {
case TAG_PRIMARY_BOXED: {
@@ -1692,7 +1831,7 @@ full_sweep_heaps(Process *p,
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ move_boxed(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1705,7 +1844,7 @@ full_sweep_heaps(Process *p,
if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ move_cons(&ptr,val,&n_htop,g_ptr++);
} else {
g_ptr++;
}
@@ -1994,7 +2133,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -2006,7 +2145,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(&ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -2019,7 +2158,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (header_is_bin_matchstate(gval)) {
ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
ErlBinMatchBuffer *mb = &(ms->mb);
- Eterm* origptr;
+ Eterm* origptr;
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
@@ -2027,7 +2166,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ERTS_IS_IN_SWEEP_AREA(*origptr, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(&ptr,val,&n_htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2090,7 +2229,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
ASSERT(is_boxed(val));
*heap_ptr++ = val;
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,heap_ptr++);
+ move_boxed(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2102,7 +2241,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,htop,heap_ptr++);
+ move_cons(&ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2123,7 +2262,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,origptr);
+ move_boxed(&ptr,val,&htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2156,11 +2295,11 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, n_htop, &dummy_ref);
+ move_boxed(&ptr, val, &n_htop, &dummy_ref);
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, n_htop, &dummy_ref);
+ move_cons(&ptr, val, &n_htop, &dummy_ref);
ptr += 2;
}
}
@@ -2226,32 +2365,27 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hp++ = val;
break;
case TAG_PRIMARY_LIST:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,list_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_BOXED:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,boxed_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_HEADER:
*hp++ = val;
switch (val & _HEADER_SUBTAG_MASK) {
case ARITYVAL_SUBTAG:
break;
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(fhp - 1))
+ goto the_default;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2261,6 +2395,7 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
+ the_default:
cpy_sz = header_arity(val);
cpy_words:
@@ -2436,17 +2571,10 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
}
/*
- * If a NIF has saved arguments, they need to be added
+ * If a NIF or BIF has saved arguments, they need to be added
*/
- if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
- Eterm* argv;
- int argc;
- if (erts_setup_nif_gc(p, &argv, &argc)) {
- roots[n].v = argv;
- roots[n].sz = argc;
- n++;
- }
- }
+ if (erts_setup_nif_export_rootset(p, &roots[n].v, &roots[n].sz))
+ n++;
ASSERT(n <= rootset->size);
@@ -2701,6 +2829,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
*prevppp = &pbp->next;
}
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+/*
+ * ERTS_MAGIC_REF_THING_HEADER is only defined when there
+ * is a size difference between magic and ordinary references...
+ */
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_MAGIC_REF_THING_HEADER
+#else
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_REF_THING_HEADER
+#endif
+
static void
sweep_off_heap(Process *p, int fullsweep)
@@ -2725,7 +2863,7 @@ sweep_off_heap(Process *p, int fullsweep)
prev = &MSO(p).first;
ptr = MSO(p).first;
- /* Firts part of the list will reside on the (old) new-heap.
+ /* First part of the list will reside on the (old) new-heap.
* Keep if moved, otherwise deref.
*/
while (ptr) {
@@ -2733,7 +2871,8 @@ sweep_off_heap(Process *p, int fullsweep)
ASSERT(!ErtsInArea(ptr, oheap, oheap_sz));
*prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN: {
int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
if (to_new_heap) {
@@ -2742,21 +2881,34 @@ sweep_off_heap(Process *p, int fullsweep)
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
}
link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
- }
- else {
+ break;
+ }
+ case ERTS_USED_MAGIC_REF_THING_HEADER__: {
+ Uint size;
+ int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
+ ASSERT(is_magic_ref_thing(ptr));
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ size = (Uint) ((ErtsMRefThing *) ptr)->mb->orig_size;
+ if (to_new_heap)
+ bin_vheap += size / sizeof(Eterm);
+ else
+ BIN_OLD_VHEAP(p) += size / sizeof(Eterm); /* for binary gc (words)*/
+ /* fall through... */
+ }
+ default:
prev = &ptr->next;
ptr = ptr->next;
}
}
- else if (!ErtsInArea(ptr, oheap, oheap_sz)) {
+ else if (ErtsInArea(ptr, oheap, oheap_sz))
+ break; /* and let old-heap loop continue */
+ else {
/* garbage */
switch (thing_subtag(ptr->thing_word)) {
case REFC_BINARY_SUBTAG:
{
Binary* bptr = ((ProcBin*)ptr)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
break;
}
case FUN_SUBTAG:
@@ -2767,13 +2919,20 @@ sweep_off_heap(Process *p, int fullsweep)
}
break;
}
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(ptr));
+ bptr = ((ErtsMRefThing *) ptr)->mb;
+ erts_bin_release((Binary *) bptr);
+ break;
+ }
default:
ASSERT(is_external_header(ptr->thing_word));
erts_deref_node_entry(((ExternalThing*)ptr)->node);
}
*prev = ptr = ptr->next;
}
- else break; /* and let old-heap loop continue */
}
/* The rest of the list resides on old-heap, and we just did a
@@ -2782,15 +2941,24 @@ sweep_off_heap(Process *p, int fullsweep)
while (ptr) {
ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN:
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
link_live_proc_bin(&shrink, &prev, &ptr, 0);
- }
- else {
- ASSERT(is_fun_header(ptr->thing_word) ||
- is_external_header(ptr->thing_word));
- prev = &ptr->next;
- ptr = ptr->next;
+ break;
+ case ERTS_USED_MAGIC_REF_THING_HEADER__:
+ ASSERT(is_magic_ref_thing(ptr));
+ BIN_OLD_VHEAP(p) +=
+ (((Uint) ((ErtsMRefThing *) ptr)->mb->orig_size)
+ / sizeof(Eterm)); /* for binary gc (words)*/
+ /* fall through... */
+ default:
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word)
+ || is_magic_ref_thing(ptr));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ break;
}
}
@@ -2883,6 +3051,9 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
}
tari = thing_arityval(val);
switch (thing_subtag(val)) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hp))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2998,6 +3169,8 @@ static void ERTS_INLINE
offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj)
{
+ Eterm *v;
+ Uint sz;
if (p->dictionary) {
offset_heap(ERTS_PD_START(p->dictionary),
ERTS_PD_SIZE(p->dictionary),
@@ -3018,12 +3191,8 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_heap_ptr(objv, nobj, offs, area, area_size);
}
offset_off_heap(p, offs, area, area_size);
- if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
- Eterm* argv;
- int argc;
- if (erts_setup_nif_gc(p, &argv, &argc))
- offset_heap_ptr(argv, argc, offs, area, area_size);
- }
+ if (erts_setup_nif_export_rootset(p, &v, &sz))
+ offset_heap_ptr(v, sz, offs, area, area_size);
}
static void
@@ -3082,7 +3251,7 @@ reply_gc_info(void *vgcirp)
if (hpp)
ref_copy = STORE_NC(hpp, ohp, gcirp->ref);
else
- *szp += REF_THING_SIZE;
+ *szp += ERTS_REF_THING_SIZE;
msg = erts_bld_tuple(hpp, szp, 3,
make_small(esdp->no),
@@ -3113,6 +3282,39 @@ reply_gc_info(void *vgcirp)
gcireq_free(vgcirp);
}
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig) {
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ ErlHeapBin *hb = (ErlHeapBin *)htop;
+ Eterm *real_bin;
+ byte *bs;
+
+ real_bin = binary_val(follow_moved(sb->orig, (Eterm)0));
+
+ if (*real_bin == HEADER_PROC_BIN) {
+ bs = ((ProcBin *) real_bin)->bytes + sb->offs;
+ } else {
+ bs = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + sb->offs;
+ }
+
+ hb->thing_word = header_heap_bin(sb->size);
+ hb->size = sb->size;
+ sys_memcpy((byte *)hb->data, bs, sb->size);
+
+ gval = make_boxed(htop);
+ *orig = gval;
+ *ptr = gval;
+
+ ptr += ERL_SUB_BIN_SIZE;
+ htop += heap_bin_size(sb->size);
+
+ *hpp = htop;
+ *pp = ptr;
+}
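
erts_sub_binary_to_heap_binary is only reached from the new move_boxed inline (see the erl_gc.h hunk below) when the sub-binary can be represented losslessly as a small heap binary. A sketch of that guard, with the parameter names here as assumptions:

    /* A sub-binary may be flattened into a heap binary only when it is
     * byte-aligned, read-only, and its payload fits in three words. */
    static int
    can_flatten_sub_binary(unsigned bitsize, unsigned bitoffs,
                           int is_writable, unsigned long byte_size,
                           unsigned long word_size /* sizeof(Eterm) */)
    {
        return bitsize == 0 && bitoffs == 0 && !is_writable
            && byte_size <= 3 * word_size;
    }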
+
+
Eterm
erts_gc_info_request(Process *c_p)
{
@@ -3346,8 +3548,8 @@ erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags)
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-static int
-within2(Eterm *ptr, Process *p, Eterm *real_htop)
+int
+erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm *real_htop)
{
ErlHeapFragment* bp;
ErtsMessage* mp;
@@ -3393,12 +3595,6 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
return 0;
}
-int
-within(Eterm *ptr, Process *p)
-{
- return within2(ptr, p, NULL);
-}
-
#endif
#ifdef ERTS_OFFHEAP_DEBUG
@@ -3429,7 +3625,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- refc = erts_refc_read(&u.pb->val->refc, 1);
+ refc = erts_refc_read(&u.pb->val->intern.refc, 1);
break;
case FUN_SUBTAG:
refc = erts_smp_refc_read(&u.fun->fe->refc, 1);
@@ -3439,6 +3635,10 @@ erts_check_off_heap2(Process *p, Eterm *htop)
case EXTERNAL_REF_SUBTAG:
refc = erts_smp_refc_read(&u.ext->node->refc, 1);
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ refc = erts_refc_read(&u.mref->mb->intern.refc, 1);
+ break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
}
@@ -3453,7 +3653,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
}
}
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 76dd74c866..6a529b8443 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -25,48 +25,73 @@
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
+#define ERTS_POTENTIALLY_LONG_GC_HSIZE (128*1024) /* Words */
+
#include "erl_map.h"
+#include "erl_fun.h"
+#include "erl_bits.h"
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
+void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig);
-#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- \
- HTOP[0] = CAR; /* copy car */ \
- HTOP[1] = PTR[1]; /* copy cdr */ \
- gval = make_list(HTOP); /* new location */ \
- *ORIG = gval; /* redirect original reference */ \
- PTR[0] = THE_NON_VALUE; /* store forwarding indicator */ \
- PTR[1] = gval; /* store forwarding address */ \
- HTOP += 2; /* update tospace htop */ \
-} while(0)
-
-#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- Sint nelts; \
- \
- ASSERT(is_header(HDR)); \
- nelts = header_arity(HDR); \
- switch ((HDR) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case MAP_SUBTAG: \
- if (is_flatmap_header(HDR)) nelts+=flatmap_get_size(PTR) + 1; \
- else nelts += hashmap_bitcount(MAP_HEADER_VAL(HDR)); \
- break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
- } \
- gval = make_boxed(HTOP); \
- *ORIG = gval; \
- *HTOP++ = HDR; \
- *PTR++ = gval; \
- while (nelts--) *HTOP++ = *PTR++; \
-} while(0)
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig)
+{
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+ Eterm gval;
+
+ htop[0] = car; /* copy car */
+ htop[1] = ptr[1]; /* copy cdr */
+ gval = make_list(htop); /* new location */
+ *orig = gval; /* redirect original reference */
+ ptr[0] = THE_NON_VALUE; /* store forwarding indicator */
+ ptr[1] = gval; /* store forwarding address */
+ *hpp += 2; /* update tospace htop */
+}
+#endif
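
The macro-to-inline rewrite keeps the classic forwarding protocol for cons cells: the car slot is overwritten with the non-value marker and the cdr slot with the new address, so a later visitor sees IS_MOVED_CONS and follows ptr[1]. A plain-C restatement under hypothetical type names:

    #include <stdint.h>

    typedef uintptr_t Term;
    #define NON_VALUE ((Term) 0) /* stand-in for THE_NON_VALUE */

    /* Copy a two-word cons cell to tospace, leaving a forwarding
     * address behind in the fromspace cell. */
    static void
    forward_cons(Term *ptr, Term **htopp, Term *orig)
    {
        Term *htop = *htopp;
        htop[0] = ptr[0];      /* copy car */
        htop[1] = ptr[1];      /* copy cdr */
        *orig   = (Term) htop; /* redirect the visiting reference */
        ptr[0]  = NON_VALUE;   /* forwarding indicator */
        ptr[1]  = (Term) htop; /* forwarding address */
        *htopp  = htop + 2;
    }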
-#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-int within(Eterm *ptr, Process *p);
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig)
+{
+ Eterm gval;
+ Sint nelts;
+ Eterm *ptr = *pp;
+ Eterm *htop = *hpp;
+
+ ASSERT(is_header(hdr));
+ nelts = header_arity(hdr);
+ switch ((hdr) & _HEADER_SUBTAG_MASK) {
+ case SUB_BINARY_SUBTAG:
+ {
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ /* convert sub-binary to heap-binary if applicable */
+ if (sb->bitsize == 0 && sb->bitoffs == 0 &&
+ sb->is_writable == 0 && sb->size <= sizeof(Eterm) * 3) {
+ erts_sub_binary_to_heap_binary(pp, hpp, orig);
+ return;
+ }
+ }
+ nelts++;
+ break;
+ case MAP_SUBTAG:
+ if (is_flatmap_header(hdr)) nelts+=flatmap_get_size(ptr) + 1;
+ else nelts += hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(ptr))->num_free+1; break;
+ }
+ gval = make_boxed(htop);
+ *orig = gval;
+ *htop++ = hdr;
+ *ptr++ = gval;
+ while (nelts--) *htop++ = *ptr++;
+
+ *hpp = htop;
+ *pp = ptr;
+}
#endif
#define ErtsInYoungGen(TPtr, Ptr, OldHeap, OldHeapSz) \
@@ -141,9 +166,10 @@ void erts_garbage_collect_hibernate(struct process* p);
Eterm erts_gc_after_bif_call_lhf(struct process* p, ErlHeapFragment *live_hf_end,
Eterm result, Eterm* regs, Uint arity);
Eterm erts_gc_after_bif_call(struct process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(struct process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh);
+int erts_garbage_collect_literals(struct process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh,
+ int fcalls);
Uint erts_next_heap_size(Uint, Uint);
Eterm erts_heap_sizes(struct process* p);
@@ -154,5 +180,8 @@ void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
void erts_deallocate_young_generation(Process *c_p);
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+int erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm* real_htop);
+#endif
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index a73f4ecfc7..6e5cc7b801 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -29,6 +29,8 @@
# include "config.h"
#endif
+/* #define ERTS_MAGIC_REF_BIF_TIMERS */
+
#include "sys.h"
#include "global.h"
#include "bif.h"
@@ -36,6 +38,9 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#include "erl_hl_timer.h"
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#include "erl_binary.h"
+#endif
#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
@@ -98,17 +103,14 @@ typedef enum {
# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore
#endif
-/* Bit 0 to 9 contains scheduler id (see mask below) */
-#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
-#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11)
-#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 12)
-#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 13)
-#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14)
-#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15)
-#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 17)
-#endif
+/* Bits 0 to 10 contain the scheduler id (see mask below) */
+#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11)
+#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 17)
#define ERTS_TMR_ROFLG_SID_MASK \
(ERTS_TMR_ROFLG_HLT - (Uint32) 1)
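
Moving ERTS_TMR_ROFLG_HLT from bit 10 to bit 11 widens the scheduler-id field: since the mask is defined as the flag minus one, it now covers bits 0..10 (0x7ff) rather than bits 0..9 (0x3ff). A quick self-contained check:

    #include <assert.h>

    int main(void)
    {
        unsigned hlt_old = 1u << 10, hlt_new = 1u << 11;
        assert(hlt_old - 1 == 0x3ffu); /* old mask: bits 0..9  */
        assert(hlt_new - 1 == 0x7ffu); /* new mask: bits 0..10 */
        return 0;
    }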
@@ -127,6 +129,13 @@ typedef struct ErtsHLTimer_ ErtsHLTimer;
#define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
+typedef struct ErtsBifTimer_ ErtsBifTimer;
+
+typedef struct {
+ ErtsBifTimer *next;
+ ErtsBifTimer *prev;
+} ErtsBifTimerList;
+
typedef struct {
UWord parent; /* parent pointer and flags... */
union {
@@ -144,9 +153,9 @@ typedef struct {
typedef struct {
UWord parent; /* parent pointer and flags... */
- ErtsHLTimer *right;
- ErtsHLTimer *left;
-} ErtsHLTimerTree;
+ ErtsBifTimer *right;
+ ErtsBifTimer *left;
+} ErtsBifTimerTree;
typedef struct {
Uint32 roflgs;
@@ -155,67 +164,75 @@ typedef struct {
void *arg;
erts_atomic_t next;
} u;
+ union {
+ Process *proc;
+ Port *port;
+ Eterm name;
+ void (*callback)(void *);
+ } receiver;
} ErtsTmrHead;
struct ErtsHLTimer_ {
ErtsTmrHead head; /* NEED to be first! */
+ ErtsMonotonicTime timeout;
union {
ErtsThrPrgrLaterOp cleanup;
ErtsHLTimerTimeTree tree;
} time;
- ErtsMonotonicTime timeout;
- union {
- Process *proc;
- Port *port;
- Eterm name;
- void (*callback)(void *);
- } receiver;
#ifdef ERTS_HLT_HARD_DEBUG
int pending_timeout;
#endif
-
- erts_smp_atomic32_t state;
-
- /* BIF timer only fields follow... */
- struct {
- Uint32 refn[ERTS_REF_NUMBERS];
- ErtsHLTimerTree proc_tree;
- ErtsHLTimerTree tree;
- Eterm message;
- ErlHeapFragment *bp;
- } btm;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- struct {
- Eterm accessor;
- ErtsHLTimerTree tree;
- } abtm;
-#endif
};
-#define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm)
-#define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#else
-#define ERTS_BIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#endif
-
typedef struct {
ErtsTmrHead head; /* NEED to be first! */
union {
- void *p;
- void (*callback)(void *);
+ ErtsTWheelTimer tw_tmr;
+ ErtsThrPrgrLaterOp cleanup;
} u;
- ErtsTWheelTimer tw_tmr;
} ErtsTWTimer;
+struct ErtsBifTimer_ {
+ union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+ } type;
+ struct {
+ erts_smp_atomic32_t state;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin;
+ ErtsHLTimerList proc_list;
+#else
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsBifTimerTree proc_tree;
+ ErtsBifTimerTree tree;
+#endif
+ Eterm message;
+ ErlHeapFragment *bp;
+ } btm;
+};
+
typedef union {
ErtsTmrHead head;
ErtsHLTimer hlt;
ErtsTWTimer twt;
+ ErtsBifTimer btm;
} ErtsTimer;
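
All three timer layouts begin with ErtsTmrHead (hence the "NEED to be first!" comments), so a pointer to any of them can be inspected through the ErtsTimer union before its exact kind is known. A self-contained sketch of the idiom with hypothetical field names:

    #include <stdio.h>

    typedef struct { unsigned roflgs; } Head;
    typedef struct { Head head; long timeout; } HLTimer; /* high level  */
    typedef struct { Head head; int slot; } TWTimer;     /* timer wheel */
    typedef union { Head head; HLTimer hlt; TWTimer twt; } Timer;

    int main(void)
    {
        HLTimer h = { { 0x800u }, 42 };
        Timer *t = (Timer *) &h;          /* Head is first in all three */
        printf("0x%x\n", t->head.roflgs); /* common prefix is readable  */
        return 0;
    }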
+typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg);
+
#ifdef SMALL_MEMORY
#define BIF_TIMER_PREALC_SZ 10
#define PTIMER_PREALC_SZ 10
@@ -225,7 +242,7 @@ typedef union {
#endif
ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
- ErtsHLTimer,
+ ErtsBifTimer,
BIF_TIMER_PREALC_SZ)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
@@ -296,12 +313,16 @@ struct ErtsHLTimerService_ {
ErtsHLTCncldTmrQ canceled_queue;
#endif
ErtsHLTimer *time_tree;
- ErtsHLTimer *btm_tree;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsBifTimer *btm_tree;
+#endif
ErtsHLTimer *next_timeout;
ErtsYieldingTimeoutState yield;
ErtsTWheelTimer service_timer;
};
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
static ERTS_INLINE int
refn_is_lt(Uint32 *x, Uint32 *y)
{
@@ -317,6 +338,14 @@ refn_is_lt(Uint32 *x, Uint32 *y)
return x[0] < y[0];
}
+static ERTS_INLINE int
+refn_is_eq(Uint32 *x, Uint32 *y)
+{
+ return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
+}
+
+#endif
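
refn_is_eq combines the three word comparisons with bitwise & rather than &&, trading short-circuit evaluation for straight-line, branch-free code on a hot path. An equivalent standalone form:

    #include <stdint.h>

    /* Branch-free equality over a 96-bit reference number. */
    static int ref_eq(const uint32_t *x, const uint32_t *y)
    {
        return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
    }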
+
#define ERTS_RBT_PREFIX time
#define ERTS_RBT_T ErtsHLTimer
#define ERTS_RBT_KEY_T ErtsMonotonicTime
@@ -506,8 +535,16 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#endif /* ERTS_HLT_HARD_DEBUG */
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.mbin->refn)
+#else
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
+#endif
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
#define ERTS_RBT_PREFIX btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -533,7 +570,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -544,20 +581,94 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static ERTS_INLINE void
+proc_btm_list_insert(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (!y) {
+ x->btm.proc_list.next = x;
+ x->btm.proc_list.prev = x;
+ *list = x;
+ }
+ else {
+ ERTS_HLT_ASSERT(y->btm.proc_list.prev->btm.proc_list.next == y);
+ x->btm.proc_list.next = y;
+ x->btm.proc_list.prev = y->btm.proc_list.prev;
+ y->btm.proc_list.prev->btm.proc_list.next = x;
+ y->btm.proc_list.prev = x;
+ }
+}
+
+static ERTS_INLINE void
+proc_btm_list_delete(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (y == x && x->btm.proc_list.next == x) {
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev == x);
+ *list = NULL;
+ }
+ else {
+ if (y == x)
+ *list = x->btm.proc_list.next;
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev->btm.proc_list.next == x);
+ ERTS_HLT_ASSERT(x->btm.proc_list.next->btm.proc_list.prev == x);
+ x->btm.proc_list.prev->btm.proc_list.next = x->btm.proc_list.next;
+ x->btm.proc_list.next->btm.proc_list.prev = x->btm.proc_list.prev;
+ }
+ x->btm.proc_list.next = NULL;
+}
+
+static ERTS_INLINE int
+proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
+ void (*destroy)(ErtsBifTimer *, void *),
+ void *arg,
+ int limit)
+{
+ int i;
+ ErtsBifTimer *first, *last;
+
+ first = *list;
+ if (!first)
+ return 0;
+
+ last = first->btm.proc_list.prev;
+ for (i = 0; i < limit; i++) {
+ ErtsBifTimer *x = last;
+ last = last->btm.proc_list.prev;
+ (*destroy)(x, arg);
+ x->btm.proc_list.next = NULL;
+ if (x == first) {
+ *list = NULL;
+ return 0;
+ }
+ }
+
+ last->btm.proc_list.next = first;
+ first->btm.proc_list.prev = last;
+ return 1;
+}
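
The new proc_list replaces a per-process red-black tree with a circular doubly-linked list: the head pointer names the first element and first->btm.proc_list.prev is the last, so tail insertion and the backwards-walking yielding destroy above are O(1) per element. A minimal sketch of the same shape:

    #include <stddef.h>

    struct node { struct node *next, *prev; };

    /* Insert x at the tail of a circular list addressed by its head. */
    static void list_insert(struct node **list, struct node *x)
    {
        struct node *y = *list;
        if (!y) {
            x->next = x->prev = x;  /* singleton ring */
            *list = x;
        } else {
            x->next = y;            /* splice in before the head,   */
            x->prev = y->prev;      /* i.e. at the tail of the ring */
            y->prev->next = x;
            y->prev = x;
        }
    }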
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
#define ERTS_RBT_PREFIX proc_btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -583,7 +694,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.proc_tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -594,71 +705,20 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
-#define ERTS_RBT_WANT_DELETE
-#define ERTS_RBT_WANT_INSERT
-#define ERTS_RBT_WANT_LOOKUP
-#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
-#define ERTS_RBT_UNDEF
-
-#include "erl_rbtree.h"
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-#define ERTS_RBT_PREFIX abtm
-#define ERTS_RBT_T ErtsHLTimer
-#define ERTS_RBT_KEY_T Uint32 *
-#define ERTS_RBT_FLAGS_T UWord
-#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
- do { \
- (T)->abtm.tree.parent = (UWord) NULL; \
- (T)->abtm.tree.right = NULL; \
- (T)->abtm.tree.left = NULL; \
- } while (0)
-#define ERTS_RBT_IS_RED(T) \
- ((int) ((T)->abtm.tree.parent & ERTS_HLT_PFLG_RED))
-#define ERTS_RBT_SET_RED(T) \
- ((T)->abtm.tree.parent |= ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_IS_BLACK(T) \
- (!ERTS_RBT_IS_RED((T)))
-#define ERTS_RBT_SET_BLACK(T) \
- ((T)->abtm.tree.parent &= ~ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_GET_FLAGS(T) \
- ((T)->abtm.tree.parent & ERTS_HLT_PFLGS_MASK)
-#define ERTS_RBT_SET_FLAGS(T, F) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (F); \
- } while (0)
-#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->abtm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
-#define ERTS_RBT_SET_PARENT(T, P) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (UWord) (P); \
- } while (0)
-#define ERTS_RBT_GET_RIGHT(T) ((T)->abtm.tree.right)
-#define ERTS_RBT_SET_RIGHT(T, R) ((T)->abtm.tree.right = (R))
-#define ERTS_RBT_GET_LEFT(T) ((T)->abtm.tree.left)
-#define ERTS_RBT_SET_LEFT(T, L) ((T)->abtm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
-#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
#ifdef ERTS_SMP
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
@@ -680,7 +740,9 @@ erts_create_timer_service(void)
srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
sizeof(ErtsHLTimerService));
srv->time_tree = NULL;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
srv->btm_tree = NULL;
+#endif
srv->next_timeout = NULL;
srv->yield = init_yield;
erts_twheel_init_timer(&srv->service_timer);
@@ -697,11 +759,8 @@ erts_timer_type_size(ErtsAlcType_t type)
{
switch (type) {
case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
- case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE;
- case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE;
-#endif
+ case ERTS_ALC_T_HL_PTIMER: return sizeof(ErtsHLTimer);
+ case ERTS_ALC_T_BIF_TIMER: return sizeof(ErtsBifTimer);
default: ERTS_INTERNAL_ERROR("Unknown type");
}
return 0;
@@ -760,6 +819,111 @@ port_timeout_common(Port *port, void *tmr)
return 0;
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static erts_smp_atomic_t *
+mbin_to_btmref__(ErtsMagicBinary *mbin)
+{
+ return erts_smp_binary_to_magic_indirection((Binary *) mbin);
+}
+
+static ERTS_INLINE void
+magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
+ erts_smp_atomic_init_nob(aptr, (erts_aint_t) tmr);
+}
+
+static ERTS_INLINE ErtsBifTimer *
+magic_binary_to_btm(ErtsMagicBinary *mbin)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
+ ErtsBifTimer *tmr = (ErtsBifTimer *) erts_smp_atomic_read_nob(aptr);
+ ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
+ return tmr;
+}
+
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
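
Under ERTS_MAGIC_REF_BIF_TIMERS a timer reference is a magic binary carrying a single atomic word that points back at its timer, so resolving or detaching the pair is one atomic load or store. A plain C11 sketch of that indirection; all names here are hypothetical:

    #include <stdatomic.h>
    #include <stddef.h>

    struct timer;
    struct magic_bin { _Atomic(struct timer *) owner; };

    static struct timer *btm_resolve(struct magic_bin *mb)
    {
        return atomic_load(&mb->owner);  /* NULL once detached */
    }

    static void btm_detach(struct magic_bin *mb)
    {
        atomic_store(&mb->owner, NULL);  /* cf. btm_clear_magic_binary */
    }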
+
+static ERTS_INLINE erts_aint_t
+init_btm_specifics(ErtsSchedulerData *esdp,
+ ErtsBifTimer *tmr, Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin
+#else
+ Uint32 *refn
+#endif
+ )
+{
+ Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
+ int refc;
+ if (!hsz) {
+ tmr->btm.message = msg;
+ tmr->btm.bp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(hsz);
+ Eterm *hp = bp->mem;
+ tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
+ tmr->btm.bp = bp;
+ }
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ refc = 1;
+ tmr->btm.mbin = mbin;
+ erts_refc_inc(&mbin->refc, 1);
+ magic_binary_init(mbin, tmr);
+ tmr->btm.proc_list.next = NULL;
+#else
+ refc = 0;
+ tmr->btm.refn[0] = refn[0];
+ tmr->btm.refn[1] = refn[1];
+ tmr->btm.refn[2] = refn[2];
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
+#endif
+
+ erts_smp_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
+ return refc; /* refc from magic binary... */
+}
+
+static void tw_bif_timer_timeout(void *vbtmp);
+
+static ERTS_INLINE void
+timer_destroy(ErtsTimer *tmr, int twt, int btm)
+{
+ if (!btm) {
+ if (twt)
+ tw_timer_free(&tmr->twt);
+ else
+ erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
+ }
+ else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *bp = (Binary *) tmr->btm.btm.mbin;
+ if (erts_refc_dectest(&bp->refc, 0) == 0)
+ erts_bin_free(bp);
+#endif
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
+ bif_timer_pre_free(&tmr->btm);
+ else
+ erts_free(ERTS_ALC_T_BIF_TIMER, &tmr->btm);
+ }
+}
+
+static ERTS_INLINE void
+timer_pre_dec_refc(ErtsTimer *tmr)
+{
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t refc;
+ refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
+ ERTS_HLT_ASSERT(refc > 0);
+#else
+ erts_smp_atomic32_dec_nob(&tmr->head.refc);
+#endif
+}
+
/*
* Basic timer wheel timer stuff
*/
@@ -767,26 +931,39 @@ port_timeout_common(Port *port, void *tmr)
static void
scheduled_tw_timer_destroy(void *vtmr)
{
- tw_timer_free((ErtsTWTimer *) vtmr);
+    ErtsTimer *tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 1, btm);
}
static void
schedule_tw_timer_destroy(ErtsTWTimer *tmr)
{
+ Uint size;
/*
* Reference to process/port can be
* dropped at once...
*/
if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
- erts_proc_dec_refc((Process *) tmr->u.p);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
- erts_port_dec_refc((Port *) tmr->u.p);
+ erts_port_dec_refc(tmr->head.receiver.port);
+
+ if (!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
+ }
erts_schedule_thr_prgr_later_cleanup_op(
scheduled_tw_timer_destroy,
(void *) tmr,
- &tmr->tw_tmr.u.cleanup,
- sizeof(ErtsTWTimer));
+ &tmr->u.cleanup,
+ size);
}
static ERTS_INLINE void
@@ -802,7 +979,7 @@ static void
tw_proc_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Process *proc = (Process *) twtp->u.p;
+ Process *proc = twtp->head.receiver.proc;
if (proc_timeout_common(proc, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
@@ -812,84 +989,126 @@ static void
tw_port_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Port *port = (Port *) twtp->u.p;
+ Port *port = twtp->head.receiver.port;
if (port_timeout_common(port, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
}
static void
-tw_ptimer_cancel(void *vtwtp)
-{
- tw_timer_dec_refc((ErtsTWTimer *) vtwtp);
-}
-
-static void
cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
{
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
- erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr);
+ erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->u.tw_tmr);
+ tw_timer_dec_refc(tmr);
}
static void
tw_callback_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- void (*callback)(void *) = twtp->u.callback;
+ void (*callback)(void *) = twtp->head.receiver.callback;
void *arg = twtp->head.u.arg;
tw_timer_dec_refc(twtp);
(*callback)(arg);
}
-static ErtsTWTimer *
-create_tw_timer(ErtsSchedulerData *esdp,
- ErtsTmrType type, void *p,
- void (*callback)(void *), void *arg,
- ErtsMonotonicTime timeout_pos)
+static ErtsTimer *
+create_tw_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg)
{
ErtsTWTimer *tmr;
void (*timeout_func)(void *);
- void (*cancel_func)(void *);
erts_aint32_t refc;
- tmr = tw_timer_alloc();
- erts_twheel_init_timer(&tmr->tw_tmr);
-
- tmr->head.roflgs = (Uint32) esdp->no;
- ERTS_HLT_ASSERT((tmr->head.roflgs
- & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+ if (type != ERTS_TMR_BIF) {
+ tmr = tw_timer_alloc();
+ tmr->head.roflgs = 0;
+ }
+ else {
+ if (short_time) {
+ tmr = (ErtsTWTimer *) bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ tmr->head.roflgs = (ERTS_TMR_ROFLG_BIF_TMR
+ | ERTS_TMR_ROFLG_PRE_ALC);
+ }
+ else {
+ alloc_bif_timer:
+ tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
+ }
+ }
+
+ erts_twheel_init_timer(&tmr->u.tw_tmr);
+ tmr->head.roflgs |= (Uint32) esdp->no;
+ ERTS_HLT_ASSERT((((Uint32) esdp->no)
+ & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
switch (type) {
case ERTS_TMR_PROC:
- tmr->u.p = p;
+ tmr->head.receiver.proc = (Process *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
timeout_func = tw_proc_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_proc_inc_refc((Process *) p);
+ erts_proc_inc_refc((Process *) rcvrp);
refc = 2;
break;
case ERTS_TMR_PORT:
- tmr->u.p = p;
+ tmr->head.receiver.port = (Port *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
timeout_func = tw_port_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_port_inc_refc((Port *) p);
+ erts_port_inc_refc((Port *) rcvrp);
refc = 2;
break;
case ERTS_TMR_CALLBACK:
tmr->head.u.arg = arg;
- tmr->u.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
timeout_func = tw_callback_timeout;
- cancel_func = NULL;
refc = 1;
break;
+ case ERTS_TMR_BIF:
+
+ timeout_func = tw_bif_timer_timeout;
+ if (is_internal_pid(rcvr)) {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->head.receiver.name = (Eterm) rcvr;
+ refc = 1;
+ }
+
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
+#endif
+ );
+ break;
+
default:
ERTS_INTERNAL_ERROR("Unsupported timer type");
return NULL;
@@ -898,41 +1117,24 @@ create_tw_timer(ErtsSchedulerData *esdp,
erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
erts_twheel_set_timer(esdp->timer_wheel,
- &tmr->tw_tmr,
+ &tmr->u.tw_tmr,
timeout_func,
- cancel_func,
tmr,
timeout_pos);
- return tmr;
+ return (ErtsTimer *) tmr;
}
/*
* Basic high level timer stuff
*/
-static ERTS_INLINE void
-hl_timer_destroy(ErtsHLTimer *tmr)
-{
- Uint32 roflgs = tmr->head.roflgs;
- if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
- else {
- if (roflgs & ERTS_TMR_ROFLG_PRE_ALC)
- bif_timer_pre_free(tmr);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- erts_free(ERTS_ALC_T_ABIF_TIMER, tmr);
-#endif
- else
- erts_free(ERTS_ALC_T_BIF_TIMER, tmr);
- }
-}
-
static void
scheduled_hl_timer_destroy(void *vtmr)
{
- hl_timer_destroy((ErtsHLTimer *) vtmr);
+    ErtsTimer *tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 0, btm);
}
static void
@@ -948,25 +1150,25 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0);
if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
+ ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
}
else if (roflgs & ERTS_TMR_ROFLG_PROC) {
- ERTS_HLT_ASSERT(tmr->receiver.proc);
- erts_proc_dec_refc(tmr->receiver.proc);
+ ERTS_HLT_ASSERT(tmr->head.receiver.proc);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
}
else if (roflgs & ERTS_TMR_ROFLG_PORT) {
- ERTS_HLT_ASSERT(tmr->receiver.port);
- erts_port_dec_refc(tmr->receiver.port);
+ ERTS_HLT_ASSERT(tmr->head.receiver.port);
+ erts_port_dec_refc(tmr->head.receiver.port);
}
if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- size = ERTS_HL_PTIMER_SIZE;
- else {
- /*
- * Message buffer can be dropped at
- * once...
- */
size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
}
erts_schedule_thr_prgr_later_cleanup_op(
@@ -975,18 +1177,6 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
}
static ERTS_INLINE void
-hl_timer_pre_dec_refc(ErtsHLTimer *tmr)
-{
-#ifdef ERTS_HLT_DEBUG
- erts_aint_t refc;
- refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
- ERTS_HLT_ASSERT(refc > 0);
-#else
- erts_smp_atomic32_dec_nob(&tmr->head.refc);
-#endif
-}
-
-static ERTS_INLINE void
hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
{
if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
@@ -1018,39 +1208,136 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
#endif
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-static void
-hlt_delete_abtm(ErtsHLTimer *tmr)
+static int
+bif_timer_ref_destructor(Binary *unused)
{
- Process *proc;
+ return 1;
+}
- ERTS_HLT_ASSERT(tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR);
+static ERTS_INLINE void
+btm_clear_magic_binary(ErtsBifTimer *tmr)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
+ Uint32 roflgs = tmr->type.head.roflgs;
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t tval = erts_smp_atomic_xchg_nob(aptr,
+ (erts_aint_t) NULL);
+ ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
+#else
+ erts_smp_atomic_set_nob(aptr, (erts_aint_t) NULL);
+#endif
+ if (roflgs & ERTS_TMR_ROFLG_HLT)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
+}
- proc = erts_proc_lookup(tmr->abtm.accessor);
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
- if (proc) {
- int deref = 0;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
- if (tmr->abtm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- abtm_rbt_delete(&proc->accessor_bif_timers, tmr);
- deref = 1;
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- if (deref)
- hl_timer_pre_dec_refc(tmr);
+static ERTS_INLINE void
+bif_timer_timeout(ErtsHLTimerService *srv,
+ ErtsBifTimer *tmr,
+ Uint32 roflgs)
+{
+ erts_aint32_t state;
+
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_TIMED_OUT,
+ ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
+ || state == ERTS_TMR_STATE_ACTIVE);
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ Process *proc;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ Eterm term;
+ term = tmr->type.head.receiver.name;
+ ERTS_HLT_ASSERT(is_atom(term));
+ term = erts_whereis_name_to_id(NULL, term);
+ proc = erts_proc_lookup(term);
+ }
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(proc);
+ }
+ if (proc) {
+ int dec_refc = 0;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = tmr->btm.bp;
+ tmr->btm.bp = NULL;
+ erts_queue_message(proc, 0, mp, tmr->btm.message,
+ am_clock_service);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /* If the process is exiting do not disturb the cleanup... */
+ if (!ERTS_PROC_IS_EXITING(proc)) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ dec_refc = 1;
+ }
+#else
+ if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ dec_refc = 1;
+ }
+#endif
+ }
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (dec_refc)
+ timer_pre_dec_refc((ErtsTimer *) tmr);
+ }
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
}
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+
+
}
+static void
+tw_bif_timer_timeout(void *vbtmp)
+{
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsHLTimerService *srv = NULL;
+#else
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsHLTimerService *srv = esdp->timer_service;
#endif
+ ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
+ bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
+ tw_timer_dec_refc(&btmp->type.twt);
+}
-static ErtsHLTimer *
+static ErtsTimer *
create_hl_timer(ErtsSchedulerData *esdp,
ErtsMonotonicTime timeout_pos,
int short_time, ErtsTmrType type,
- void *rcvrp, Eterm rcvr, Eterm acsr,
- Eterm msg, Uint32 *refn,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
void (*callback)(void *), void *arg)
{
ErtsHLTimerService *srv = esdp->timer_service;
@@ -1069,7 +1356,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
if (type != ERTS_TMR_BIF) {
tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
- ERTS_HL_PTIMER_SIZE);
+ sizeof(ErtsHLTimer));
tmr->timeout = timeout_pos;
switch (type) {
@@ -1078,7 +1365,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(is_internal_pid(rcvr));
erts_proc_inc_refc((Process *) rcvrp);
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PROC;
refc = 2;
break;
@@ -1086,14 +1373,14 @@ create_hl_timer(ErtsSchedulerData *esdp,
case ERTS_TMR_PORT:
ERTS_HLT_ASSERT(is_internal_port(rcvr));
erts_port_inc_refc((Port *) rcvrp);
- tmr->receiver.port = (Port *) rcvrp;
+ tmr->head.receiver.port = (Port *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PORT;
refc = 2;
break;
case ERTS_TMR_CALLBACK:
roflgs |= ERTS_TMR_ROFLG_CALLBACK;
- tmr->receiver.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.u.arg = arg;
refc = 1;
break;
@@ -1105,84 +1392,47 @@ create_hl_timer(ErtsSchedulerData *esdp,
}
else { /* ERTS_TMR_BIF */
- Uint hsz;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- int is_abif_tmr = is_value(acsr) && acsr != rcvr;
-#endif
if (short_time) {
- tmr = bif_timer_pre_alloc();
+ tmr = (ErtsHLTimer *) bif_timer_pre_alloc();
if (!tmr)
goto alloc_bif_timer;
roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
}
else {
alloc_bif_timer:
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr)
- tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
- ERTS_ABIF_TIMER_SIZE);
- else
-#endif
- tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
- ERTS_BIF_TIMER_SIZE);
- }
+ tmr = (ErtsHLTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ }
tmr->timeout = timeout_pos;
roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
if (is_internal_pid(rcvr)) {
roflgs |= ERTS_TMR_ROFLG_PROC;
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
refc = 2;
}
else {
ERTS_HLT_ASSERT(is_atom(rcvr));
roflgs |= ERTS_TMR_ROFLG_REG_NAME;
- tmr->receiver.name = rcvr;
+ tmr->head.receiver.name = rcvr;
refc = 1;
}
- hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
- if (!hsz) {
- tmr->btm.message = msg;
- tmr->btm.bp = NULL;
- }
- else {
- ErlHeapFragment *bp = new_message_buffer(hsz);
- Eterm *hp = bp->mem;
- tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
- tmr->btm.bp = bp;
- }
- tmr->btm.refn[0] = refn[0];
- tmr->btm.refn[1] = refn[1];
- tmr->btm.refn[2] = refn[2];
-
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr) {
- Process *aproc;
- roflgs |= ERTS_TMR_ROFLG_ABIF_TMR;
- tmr->abtm.accessor = acsr;
- aproc = erts_proc_lookup(acsr);
- if (!aproc)
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- else {
- refc++;
- erts_smp_proc_lock(aproc, ERTS_PROC_LOCK_BTM);
- abtm_rbt_insert(&aproc->accessor_bif_timers, tmr);
- erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM);
- }
- }
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
#endif
-
- btm_rbt_insert(&srv->btm_tree, tmr);
+ );
}
tmr->head.roflgs = roflgs;
erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
- erts_smp_atomic32_init_nob(&tmr->state, ERTS_TMR_STATE_ACTIVE);
if (!srv->next_timeout
|| tmr->timeout < srv->next_timeout->timeout) {
@@ -1192,7 +1442,6 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
tmr->timeout);
srv->next_timeout = tmr;
@@ -1209,79 +1458,20 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_HDBG_CHK_SRV(srv);
- return tmr;
-}
-
-static ERTS_INLINE void
-hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
-{
- ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
- Process *proc;
- int queued_message = 0;
- int dec_refc = 0;
- Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME);
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
-
- if (is_reg_name) {
- Eterm pid;
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
- pid = erts_whereis_name_to_id(NULL, tmr->receiver.name);
- proc = erts_proc_lookup(pid);
- }
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
- ERTS_HLT_ASSERT(tmr->receiver.proc);
-
- proc = tmr->receiver.proc;
- proc_locks |= ERTS_PROC_LOCK_BTM;
- }
- if (proc) {
- erts_smp_proc_lock(proc, proc_locks);
- /*
- * If process is exiting, let it clean up
- * the btm tree by itself (it may be in
- * the middle of tree destruction).
- */
- if (!ERTS_PROC_IS_EXITING(proc)) {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = tmr->btm.bp;
- erts_queue_message(proc, proc_locks, mp,
- tmr->btm.message, am_clock_service);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
- queued_message = 1;
- proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
- tmr->btm.bp = NULL;
- if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- proc_btm_rbt_delete(&proc->bif_timers, tmr);
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- dec_refc = 1;
- }
- }
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- if (dec_refc)
- hl_timer_pre_dec_refc(tmr);
- }
- if (!queued_message && tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
+ return (ErtsTimer *) tmr;
}
static ERTS_INLINE void
hlt_proc_timeout(ErtsHLTimer *tmr)
{
- if (proc_timeout_common(tmr->receiver.proc, (void *) tmr))
+ if (proc_timeout_common(tmr->head.receiver.proc, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
static ERTS_INLINE void
hlt_port_timeout(ErtsHLTimer *tmr)
{
- if (port_timeout_common(tmr->receiver.port, (void *) tmr))
+ if (port_timeout_common(tmr->head.receiver.port, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
@@ -1289,41 +1479,24 @@ static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
{
ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
Uint32 roflgs;
- erts_aint32_t state;
ERTS_HLT_HDBG_CHK_SRV(srv);
roflgs = tmr->head.roflgs;
ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
- ERTS_TMR_STATE_TIMED_OUT,
- ERTS_TMR_STATE_ACTIVE);
-
- ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
- || state == ERTS_TMR_STATE_ACTIVE);
-
- if (state == ERTS_TMR_STATE_ACTIVE) {
-
- if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- hlt_bif_timer_timeout(tmr, roflgs);
- else if (roflgs & ERTS_TMR_ROFLG_PROC)
- hlt_proc_timeout(tmr);
- else if (roflgs & ERTS_TMR_ROFLG_PORT)
- hlt_port_timeout(tmr);
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
- (*tmr->receiver.callback)(tmr->head.u.arg);
- }
-
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ bif_timer_timeout(srv, (ErtsBifTimer *) tmr, roflgs);
+ else if (roflgs & ERTS_TMR_ROFLG_PROC)
+ hlt_proc_timeout(tmr);
+ else if (roflgs & ERTS_TMR_ROFLG_PORT)
+ hlt_port_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
+ (*tmr->head.receiver.callback)(tmr->head.u.arg);
}
tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- if ((roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- && tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1390,7 +1563,6 @@ hlt_service_timeout(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
vesdp,
tmr->timeout);
}
@@ -1402,19 +1574,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
ERTS_HLT_HDBG_CHK_SRV(srv);
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
-
- if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
- }
-
if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
/* Already removed... */
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1460,7 +1619,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
smlst->timeout);
}
@@ -1485,6 +1643,17 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
+ ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
+ if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, btm);
+ btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+ }
+#endif
+
if (roflgs & ERTS_TMR_ROFLG_HLT) {
hlt_delete_timer(esdp, &tmr->hlt);
hl_timer_dec_refc(&tmr->hlt, roflgs);
@@ -1750,57 +1919,86 @@ continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
* BIF timer specific
*/
+
Uint erts_bif_timer_memory_size(void)
{
return (Uint) 0;
}
static BIF_RETTYPE
-setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
- int short_time, Eterm rcvr, Eterm acsr,
- Eterm msg, int wrap)
+setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
+ int short_time, Eterm rcvr, Eterm msg, int wrap)
{
BIF_RETTYPE ret;
Eterm ref, tmo_msg, *hp;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
ErtsSchedulerData *esdp;
- DeclareTmpHeap(tmp_hp, 4, c_p);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *mbin;
+#endif
+ Eterm tmp_hp[4];
+ ErtsCreateTimerFunc create_timer;
if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
goto badarg;
esdp = erts_proc_sched_data(c_p);
- hp = HAlloc(c_p, REF_THING_SIZE);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin = erts_create_magic_indirection(bif_timer_ref_destructor);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ref = erts_mk_magic_ref(&hp, &c_p->off_heap, mbin);
+ ASSERT(erts_get_ref_numbers_thr_id(((ErtsMagicBinary *)mbin)->refn)
+ == (Uint32) esdp->no);
+#else
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
-
- ASSERT(erts_get_ref_numbers_thr_id(
- internal_ref_numbers(ref)) == (Uint32) esdp->no);
-
- UseTmpHeap(4, c_p);
+ ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
+ == (Uint32) esdp->no);
+#endif
tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
- tmr = create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_BIF, NULL, rcvr, acsr, tmo_msg,
- internal_ref_numbers(ref), NULL, NULL);
-
- UnUseTmpHeap(4, c_p);
+ create_timer = twheel ? create_tw_timer : create_hl_timer;
+ tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
+ short_time, ERTS_TMR_BIF,
+ NULL, rcvr, tmo_msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ (ErtsMagicBinary *) mbin,
+#else
+ internal_ordinary_ref_numbers(ref),
+#endif
+ NULL, NULL);
if (is_internal_pid(rcvr)) {
Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
rcvr, ERTS_PROC_LOCK_BTM,
ERTS_P2P_FLG_INC_REFC);
if (!proc) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#else
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- hl_timer_destroy(tmr);
+ if (twheel)
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ else
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ timer_destroy((ErtsTimer *) tmr, twheel, 1);
}
else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ proc_btm_list_insert(&proc->bif_timers, tmr);
+#else
proc_btm_rbt_insert(&proc->bif_timers, tmr);
+#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- tmr->receiver.proc = proc;
+ tmr->type.head.receiver.proc = proc;
}
}
@@ -1814,27 +2012,33 @@ badarg:
}
static int
-cancel_bif_timer(ErtsHLTimer *tmr)
+cancel_bif_timer(ErtsBifTimer *tmr)
{
erts_aint_t state;
Uint32 roflgs;
int res;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
if (state != ERTS_TMR_STATE_ACTIVE)
return 0;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
res = -1;
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
if (roflgs & ERTS_TMR_ROFLG_PROC) {
- Process *proc = tmr->receiver.proc;
- ERTS_HLT_ASSERT(!(tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+ Process *proc;
+
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
/*
@@ -1842,29 +2046,238 @@ cancel_bif_timer(ErtsHLTimer *tmr)
* the btm tree by itself (it may be in
* the middle of tree destruction).
*/
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!ERTS_PROC_IS_EXITING(proc) && tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ res = 1;
+ }
+#else
if (!ERTS_PROC_IS_EXITING(proc)
&& tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
proc_btm_rbt_delete(&proc->bif_timers, tmr);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
res = 1;
}
+#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
}
return res;
}
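+/* Read or cancel the BIF timer 'tmr'. Returns the time left
+ * in milliseconds, or -1 if the timer is not (or no longer)
+ * active. On cancel, the timer is removed locally or queued
+ * to the scheduler that owns it. */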
+static ERTS_INLINE Sint64
+access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
+{
+ int cncl_res;
+ Sint64 time_left;
+ ErtsMonotonicTime timeout;
+ int is_hlt;
+
+ if (!tmr)
+ return -1;
+
+ is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ timeout = (is_hlt
+ ? tmr->type.hlt.timeout
+ : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));
+
+ if (!cancel) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->btm.state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ return get_time_left(esdp, timeout);
+ return -1;
+ }
+
+ cncl_res = cancel_bif_timer(tmr);
+ if (!cncl_res)
+ return -1;
+
+ time_left = get_time_left(esdp, timeout);
+
+ if (sid != (Uint32) esdp->no) {
+ if (cncl_res > 0)
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ else {
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (is_hlt) {
+ if (cncl_res > 0)
+ hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ }
+ else {
+ if (cncl_res > 0)
+ tw_timer_dec_refc(&tmr->type.twt);
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ }
+ }
+
+ return time_left;
+}
+
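+/* Build the reply term for a synchronous request on the
+ * caller's heap: 'false', or the remaining time as a small
+ * or a bignum. */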
+static ERTS_INLINE Eterm
+return_info(Process *c_p, Sint64 time_left)
+{
+ Uint hsz;
+ Eterm *hp;
+
+ if (time_left < 0)
+ return am_false;
+
+ if (time_left <= (Sint64) MAX_SMALL)
+ return make_small((Sint) time_left);
+
+ hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ hp = HAlloc(c_p, hsz);
+ return erts_sint64_to_big(time_left, &hp);
+}
+
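+/* Send an asynchronous {cancel_timer | read_timer, TimerRef,
+ * Result} notification to 'proc'. */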
+static ERTS_INLINE Eterm
+send_async_info(Process *proc, ErtsProcLocks initial_locks,
+ Eterm tref, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm tag, res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ if (cancel)
+ tag = am_cancel_timer;
+ else
+ tag = am_read_timer;
+
+ ref = STORE_NC(&hp, ohp, tref);
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE3(hp, tag, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_smp_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ Eterm res;
+ Sint64 time_left;
+
+ if (!is_internal_magic_ref(tref)) {
+ if (is_not_ref(tref)) {
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+ }
+ time_left = -1;
+ }
+ else {
+ ErtsMagicBinary *mbin;
+ mbin = (ErtsMagicBinary *) erts_magic_ref2bin(tref);
+ if (mbin->destructor != bif_timer_ref_destructor)
+ time_left = -1;
+ else {
+ ErtsBifTimer *tmr;
+ Uint32 sid;
+ tmr = magic_binary_to_btm(mbin);
+ sid = erts_get_ref_numbers_thr_id(internal_magic_ref_numbers(tref));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ time_left = access_btm(tmr, sid, erts_proc_sched_data(c_p), cancel);
+ }
+ }
+
+ if (!info)
+ res = am_ok;
+ else if (!async)
+ res = return_info(c_p, time_left);
+ else
+ res = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ return ret;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
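+/* Send a synchronous {RequestRef, Result} reply; the request
+ * reference is rebuilt from the 'refn' ref numbers. */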
+static ERTS_INLINE Eterm
+send_sync_info(Process *proc, ErtsProcLocks initial_locks,
+ Uint32 *refn, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 3 + ERTS_REF_THING_SIZE;
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ ref = make_internal_ref(hp);
+ hp += ERTS_REF_THING_SIZE;
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE2(hp, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_smp_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
static ERTS_INLINE Eterm
access_sched_local_btm(Process *c_p, Eterm pid,
- Eterm tref, Uint32 *trefn,
- Uint32 *rrefn,
- int async, int cancel,
- int return_res,
- int info)
+ Eterm tref, Uint32 *trefn,
+ Uint32 *rrefn,
+ int async, int cancel,
+ int return_res,
+ int info)
{
ErtsSchedulerData *esdp;
ErtsHLTimerService *srv;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
Process *proc;
ErtsProcLocks proc_locks;
@@ -1884,111 +2297,40 @@ access_sched_local_btm(Process *c_p, Eterm pid,
srv = esdp->timer_service;
tmr = btm_rbt_lookup(srv->btm_tree, trefn);
- if (tmr) {
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (cncl_res) {
-
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-
- hlt_delete_timer(esdp, tmr);
- }
- }
- }
+ time_left = access_btm(tmr, (Uint32) esdp->no, esdp, cancel);
if (!info)
- return am_ok;
-
- if (return_res) {
- ERTS_HLT_ASSERT(c_p);
- if (time_left < 0)
- return am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- return make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- return erts_sint64_to_big(time_left, &hp);
- }
- }
+ return am_ok;
if (c_p) {
- proc = c_p;
- proc_locks = ERTS_PROC_LOCK_MAIN;
+ proc = c_p;
+ proc_locks = ERTS_PROC_LOCK_MAIN;
}
else {
- proc = erts_proc_lookup(pid);
- proc_locks = 0;
+ proc = erts_proc_lookup(pid);
+ proc_locks = 0;
}
- if (proc) {
- Uint hsz;
- ErtsMessage *mp;
- Eterm *hp, msg, ref, result;
- ErlOffHeap *ohp;
- Uint32 *refn;
-#ifdef ERTS_HLT_DEBUG
- Eterm *hp_end;
-#endif
-
- hsz = REF_THING_SIZE;
- if (async) {
- refn = trefn; /* timer ref */
- hsz += 4; /* 3-tuple */
- }
- else {
- refn = rrefn; /* request ref */
- hsz += 3; /* 2-tuple */
- }
-
- ERTS_HLT_ASSERT(refn);
-
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(proc, &proc_locks,
- hsz, &hp, &ohp);
-
-#ifdef ERTS_HLT_DEBUG
- hp_end = hp + hsz;
-#endif
-
- if (time_left < 0)
- result = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- result = make_small((Sint) time_left);
- else
- result = erts_sint64_to_big(time_left, &hp);
-
- write_ref_thing(hp,
- refn[0],
- refn[1],
- refn[2]);
- ref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
-
- msg = (async
- ? TUPLE3(hp, (cancel
- ? am_cancel_timer
- : am_read_timer), ref, result)
- : TUPLE2(hp, ref, result));
-
- ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
-
- erts_queue_message(proc, proc_locks, mp, msg, am_clock_service);
-
- if (c_p)
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
+ if (!async) {
+ if (c_p)
+ return return_info(c_p, time_left);
+
+ if (proc)
+ return send_sync_info(proc, proc_locks,
+ rrefn, cancel, time_left);
+ }
+ else if (proc) {
+ Eterm ref;
+ Eterm heap[ERTS_REF_THING_SIZE];
+ if (is_value(tref))
+ ref = tref;
+ else {
+ write_ref_thing(&heap[0], trefn[0], trefn[1], trefn[2]);
+ ref = make_internal_ref(&heap[0]);
+ }
+ return send_async_info(proc, proc_locks,
+ ref, cancel, time_left);
}
return am_ok;
@@ -2021,108 +2363,64 @@ bif_timer_access_request(void *vreq)
static int
try_access_sched_remote_btm(ErtsSchedulerData *esdp,
Process *c_p, Uint32 sid,
- Uint32 *trefn,
+ Eterm tref, Uint32 *trefn,
int async, int cancel,
int info, Eterm *resp)
{
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
ERTS_HLT_ASSERT(c_p);
/*
* Check if the timer is aimed at current
- * process of if this process is an accessor
- * of the timer...
+ * process...
*/
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (!tmr)
- tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn);
-#endif
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
if (!tmr)
return 0;
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- else
- time_left = -1;
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (!cncl_res)
- time_left = -1;
- else {
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
- }
- }
+ time_left = access_btm(tmr, sid, esdp, cancel);
- if (!info) {
+ if (!info)
*resp = am_ok;
- return 1;
- }
-
- if (!async) {
- if (time_left < 0)
- *resp = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- *resp = make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- *resp = erts_sint64_to_big(time_left, &hp);
- }
- }
- else {
- ErtsMessage *mp;
- Eterm tag, res, msg, tref;
- Uint hsz;
- Eterm *hp;
- ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
- ErlOffHeap *ohp;
-
- hsz = 4 + REF_THING_SIZE;
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(c_p, &proc_locks,
- hsz, &hp, &ohp);
- if (cancel)
- tag = am_cancel_timer;
- else
- tag = am_read_timer;
-
- write_ref_thing(hp,
- trefn[0],
- trefn[1],
- trefn[2]);
- tref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
-
- if (time_left < 0)
- res = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- res = make_small((Sint) time_left);
- else
- res = erts_sint64_to_big(time_left, &hp);
-
- msg = TUPLE3(hp, tag, tref, res);
+ else if (!async)
+ *resp = return_info(c_p, time_left);
+ else
+ *resp = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
- erts_queue_message(c_p, proc_locks, mp, msg, am_clock_service);
+ return 1;
+}
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(c_p, proc_locks);
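+/* Reply used when the reference does not name an active
+ * timer: 'false' for synchronous requests, an asynchronous
+ * notification otherwise. */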
+static Eterm
+no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ ErtsMessage *mp;
+ Uint hsz;
+ Eterm *hp, msg, ref, tag;
+ ErlOffHeap *ohp;
+ ErtsProcLocks locks;
- *resp = am_ok;
- }
- return 1;
+ if (!async)
+ return am_false;
+ if (!info)
+ return am_ok;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+ locks = ERTS_PROC_LOCK_MAIN;
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+ ref = STORE_NC(&hp, ohp, tref);
+ tag = cancel ? am_cancel_timer : am_read_timer;
+ msg = TUPLE3(hp, tag, ref, am_false);
+ erts_queue_message(c_p, locks, mp, msg, am_clock_service);
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks)
+ erts_smp_proc_unlock(c_p, locks);
+ return am_ok;
}
static BIF_RETTYPE
@@ -2156,7 +2454,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
ERTS_BIF_PREP_RET(ret, res);
}
else if (try_access_sched_remote_btm(esdp, c_p,
- sid, trefn,
+ sid, tref, trefn,
async, cancel,
info, &res)) {
ERTS_BIF_PREP_RET(ret, res);
@@ -2189,7 +2487,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
Eterm *hp, rref;
Uint32 *rrefn;
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
rref = erts_sched_make_ref_in_buffer(esdp, hp);
rrefn = internal_ref_numbers(rref);
@@ -2235,11 +2533,11 @@ badarg:
return ret;
no_timer:
- ERTS_BIF_PREP_RET(ret, am_false);
- return ret;
-
+ return no_timer_result(c_p, tref, cancel, async, info);
}
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
static ERTS_INLINE int
bool_arg(Eterm val, int *argp)
{
@@ -2251,8 +2549,8 @@ bool_arg(Eterm val, int *argp)
}
static ERTS_INLINE int
-parse_bif_timer_options(Eterm option_list, int *async, int *info,
- int *abs, Eterm *accessor)
+parse_bif_timer_options(Eterm option_list, int *async,
+ int *info, int *abs)
{
Eterm list = option_list;
@@ -2262,8 +2560,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
*info = 1;
if (abs)
*abs = 0;
- if (accessor)
- *accessor = THE_NON_VALUE;
while (is_list(list)) {
Eterm *consp, *tp, opt;
@@ -2290,13 +2586,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
if (!abs || !bool_arg(tp[2], abs))
return 0;
break;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case am_accessor:
- if (!accessor || is_not_internal_pid(tp[2]))
- return 0;
- *accessor = tp[2];
- break;
-#endif
default:
return 0;
}
@@ -2310,42 +2599,57 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
}
static void
-exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
+exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
{
ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
Uint32 sid, roflgs;
erts_aint_t state;
+ int is_hlt;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
sid = roflgs & ERTS_TMR_ROFLG_SID_MASK;
+ is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);
- ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(tmr->btm.refn));
+ ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(tmr->btm.proc_list.next);
+#else
ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
!= ERTS_HLT_PFIELD_NOT_IN_TABLE);
-
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#endif
- if (sid == (Uint32) esdp->no) {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- }
- hl_timer_dec_refc(tmr, roflgs);
- }
- else {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ if (sid != (Uint32) esdp->no) {
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ return;
+ }
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
- else
- hl_timer_dec_refc(tmr, roflgs);
+#endif
+ if (is_hlt)
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ else
+ cancel_tw_timer(esdp, &tmr->type.twt);
}
+ if (is_hlt)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
}
#ifdef ERTS_HLT_DEBUG
@@ -2354,20 +2658,29 @@ exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
# define ERTS_BTM_MAX_DESTROY_LIMIT 50
#endif
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
typedef struct {
ErtsBifTimers *bif_timers;
union {
proc_btm_rbt_yield_state_t proc_btm_yield_state;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- abtm_rbt_yield_state_t abtm_yield_state;
-#endif
} u;
} ErtsBifTimerYieldState;
+#endif
-int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+ return proc_btm_list_foreach_destroy_yielding(btm,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+ ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2399,63 +2712,18 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
}
return res;
-}
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-static void
-detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
-{
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-}
-
-int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
-{
- ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
- ErtsBifTimerYieldState *ysp;
- int res;
-
- ysp = (ErtsBifTimerYieldState *) *vyspp;
- if (!ysp)
- ysp = &ys;
-
- res = abtm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
- detach_bif_timer,
- (void *) esdp,
- &ysp->u.abtm_yield_state,
- ERTS_BTM_MAX_DESTROY_LIMIT);
-
- if (res == 0) {
- if (ysp != &ys)
- erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
- *vyspp = NULL;
- }
- else {
-
- if (ysp == &ys) {
- ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
- sizeof(ErtsBifTimerYieldState));
- sys_memcpy((void *) ysp, (void *) &ys,
- sizeof(ErtsBifTimerYieldState));
- }
-
- *vyspp = (void *) ysp;
- }
- return res;
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
}
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
-
static ERTS_INLINE int
parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ErtsMonotonicTime *conv_arg, int abs,
- ErtsMonotonicTime *tposp, int *stimep)
+ ErtsMonotonicTime *tposp, int *stimep,
+ ErtsMonotonicTime *msp)
{
- ErtsMonotonicTime t;
-
+ ErtsMonotonicTime t, now;
+
if (!term_to_Sint64(arg, &t)) {
ERTS_HLT_ASSERT(!is_small(arg));
if (!is_big(arg))
@@ -2470,22 +2738,30 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
if (conv_arg)
*conv_arg = t;
+ now = erts_get_monotonic_time(esdp);
+
if (abs) {
t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
return 1;
if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
return 1;
+ if (msp)
+ *msp = t - ERTS_MONOTONIC_TO_MSEC(now);
+
*stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
< ERTS_BIF_TIMER_SHORT_TIME);
*tposp = ERTS_MSEC_TO_CLKTCKS(t);
}
else {
- ErtsMonotonicTime now, ticks;
+ ErtsMonotonicTime ticks;
if (t < 0)
return -1;
+ if (msp)
+ *msp = t;
+
ticks = ERTS_MSEC_TO_CLKTCKS(t);
if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
@@ -2493,7 +2769,6 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ERTS_HLT_ASSERT(ticks >= 0);
- now = erts_get_monotonic_time(esdp);
ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
ticks += 1;
@@ -2516,66 +2791,68 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
BIF_RETTYPE send_after_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
- tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1,
+ NULL, 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
BIF_RETTYPE send_after_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
BIF_RETTYPE start_timer_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
BIF_RETTYPE start_timer_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
@@ -2588,7 +2865,7 @@ BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async, info;
- if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2605,7 +2882,7 @@ BIF_RETTYPE read_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async;
- if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2620,14 +2897,13 @@ start_callback_timer(ErtsSchedulerData *esdp,
void *arg)
{
- if (twt)
- create_tw_timer(esdp, ERTS_TMR_CALLBACK, NULL,
- callback, arg, timeout_pos);
- else
- create_hl_timer(esdp, timeout_pos, 0,
- ERTS_TMR_CALLBACK, NULL,
- NIL, THE_NON_VALUE, NIL,
- NULL, callback, arg);
+ ErtsCreateTimerFunc create_timer = (twt
+ ? create_tw_timer
+ : create_hl_timer);
+ (void) create_timer(esdp, timeout_pos, 0,
+ ERTS_TMR_CALLBACK, NULL,
+ NIL, THE_NON_VALUE, NULL,
+ callback, arg);
}
typedef struct {
@@ -2704,18 +2980,18 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
if (tmo == 0)
c_p->flags |= F_TIMO;
else {
+ ErtsCreateTimerFunc create_timer;
c_p->flags |= F_INSLPQUEUE;
c_p->flags &= ~F_TIMO;
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PROC, (void *) c_p,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_PROC, (void *) c_p,
- c_p->common.id, THE_NON_VALUE,
- NIL, NULL, NULL, NULL);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_PROC, (void *) c_p,
+ c_p->common.id, THE_NON_VALUE,
+ NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
}
}
@@ -2731,7 +3007,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
== ERTS_PTMR_NONE);
tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
- &timeout_pos, &short_time);
+ &timeout_pos, &short_time, NULL);
if (tres != 0)
return tres;
@@ -2789,6 +3065,7 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
void *tmr;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsMonotonicTime timeout_pos;
+ ErtsCreateTimerFunc create_timer;
if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
erts_cancel_port_timer(c_prt);
@@ -2797,13 +3074,12 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PORT, (void *) c_prt,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
- (void *) c_prt, c_prt->common.id,
- THE_NON_VALUE, NIL, NULL, NULL, NULL);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
}
@@ -2842,7 +3118,7 @@ erts_read_port_timer(Port *c_prt)
if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
timeout_pos = tmr->hlt.timeout;
else
- timeout_pos = tmr->twt.tw_tmr.timeout_pos;
+ timeout_pos = erts_tweel_read_timeout(&tmr->twt.u.tw_tmr);
return get_time_left(NULL, timeout_pos);
}
@@ -2857,20 +3133,35 @@ typedef struct {
} ErtsBTMPrint;
static void
-btm_print(ErtsHLTimer *tmr, void *vbtmp)
+btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
{
ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
ErtsMonotonicTime left;
Eterm receiver;
- if (tmr->timeout <= btmp->now)
- left = 0;
- else
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
- receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id);
+ if (is_hlt) {
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ if (tmr->type.hlt.timeout <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->type.hlt.timeout - btmp->now);
+ }
+ else {
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT));
+ if (tpos <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tpos - btmp->now);
+ }
+
+ receiver = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
erts_print(btmp->to, btmp->to_arg,
"=timer:%T\n"
@@ -2881,6 +3172,36 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
(Sint64) left);
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_btm_print(ErtsHLTimer *tmr, void *vbtmp)
+{
+ btm_print((ErtsBifTimer *) tmr, vbtmp, 0, 1);
+}
+
+static void
+twt_btm_print(void *vbtmp, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ btm_print((ErtsBifTimer *) vtwtp, vbtmp, tpos, 0);
+}
+
+#else
+
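+/* Print one timer from the btm tree; timer-wheel timers need
+ * their timeout position read before printing. */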
+static void
+btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
+{
+ int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ ErtsMonotonicTime tpos;
+ if (is_hlt)
+ tpos = 0;
+ else
+ tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
+ btm_print(tmr, vbtmp, tpos, is_hlt);
+}
+
+#endif
+
void
erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
{
@@ -2898,7 +3219,15 @@ erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
- btm_rbt_foreach(srv->btm_tree, btm_print, (void *) &btmp);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_btm_print, (void *) &btmp);
+ time_rbt_foreach(srv->time_tree, hlt_btm_print, (void *) &btmp);
+#else
+ btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
+#endif
}
}
@@ -2911,19 +3240,37 @@ typedef struct {
} ErtsBTMForeachDebug;
static void
-debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
{
- if (erts_smp_atomic32_read_nob(&tmr->state) == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
+ if (erts_smp_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
- (*btmfd->func)(((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id),
- tmr->btm.message,
- tmr->btm.bp,
- btmfd->arg);
+ Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
+ (*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
}
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+{
+ debug_btm_foreach((ErtsBifTimer *) tmr, vbtmfd);
+}
+
+static void
+twt_debug_btm_foreach(void *vbtmfd, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ debug_btm_foreach((ErtsBifTimer *) vtwtp, vbtmfd);
+}
+
+#endif
+
void
erts_debug_bif_timer_foreach(void (*func)(Eterm,
Eterm,
@@ -2943,9 +3290,20 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_debug_btm_foreach,
+ (void *) &btmfd);
+ time_rbt_foreach(srv->time_tree,
+ hlt_debug_btm_foreach,
+ (void *) &btmfd);
+#else
btm_rbt_foreach(srv->btm_tree,
debug_btm_foreach,
(void *) &btmfd);
+#endif
}
}
@@ -2964,7 +3322,7 @@ debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
= (ErtsDebugForeachCallbackTimer *) vdfct;
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2982,7 +3340,7 @@ debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
vdfct);
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2997,7 +3355,7 @@ debug_tw_callback_timer(void *vdfct,
ErtsDebugForeachCallbackTimer *dfct
= (ErtsDebugForeachCallbackTimer *) vdfct;
- if (twtp->u.callback == dfct->tclbk)
+ if (twtp->head.receiver.callback == dfct->tclbk)
(*dfct->func)(dfct->arg,
timeout_pos,
twtp->head.u.arg);
@@ -3068,7 +3426,9 @@ st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
}
ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
}
static void
@@ -3097,8 +3457,10 @@ tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
& ~ERTS_HLT_PFLGS_MASK);
ERTS_HLT_ASSERT(tmr == prnt);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
if (tmr->time.tree.same_time) {
ErtsHdbgHLT st_hdbg;
st_hdbg.srv = hdbg->srv;
@@ -3164,6 +3526,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (srv->btm_tree) {
ErtsHdbgHLT hdbg;
hdbg.srv = srv;
@@ -3172,6 +3535,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#endif
}
#endif /* ERTS_HLT_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index 9cdcd581a0..ff31f04cb9 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
#ifndef ERL_HL_TIMER_H__
#define ERL_HL_TIMER_H__
-typedef struct ErtsHLTimer_ ErtsBifTimers;
+typedef struct ErtsBifTimer_ ErtsBifTimers;
typedef struct ErtsHLTimerService_ ErtsHLTimerService;
#include "sys.h"
@@ -56,7 +56,7 @@ void erts_cancel_proc_timer(Process *);
void erts_set_port_timer(Port *, Sint64);
void erts_cancel_port_timer(Port *);
Sint64 erts_read_port_timer(Port *);
-int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **);
int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
ErtsHLTimerService *erts_create_timer_service(void);
void erts_hl_timer_init(void);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index e8b8739852..5206d7564f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -61,6 +61,10 @@
#define ERTS_DEFAULT_NO_ASYNC_THREADS 10
+#define ERTS_DEFAULT_SCHED_STACK_SIZE 128
+#define ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE 40
+#define ERTS_DEFAULT_DIO_SCHED_STACK_SIZE 40
+
/*
* The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
* only. Do not remove even though they aren't used elsewhere in the emulator!
@@ -111,10 +115,12 @@ const int etp_lock_check = 1;
#else
const int etp_lock_check = 0;
#endif
-#ifdef WORDS_BIGENDIAN
-const int etp_big_endian = 1;
+const int etp_endianness = ERTS_ENDIANNESS;
+const Eterm etp_ref_header = ERTS_REF_THING_HEADER;
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+const Eterm etp_magic_ref_header = ERTS_MAGIC_REF_THING_HEADER;
#else
-const int etp_big_endian = 0;
+const Eterm etp_magic_ref_header = ERTS_REF_THING_HEADER;
#endif
const Eterm etp_the_non_value = THE_NON_VALUE;
#ifdef ERTS_HOLE_MARKER
@@ -123,6 +129,8 @@ const Eterm etp_hole_marker = ERTS_HOLE_MARKER;
const Eterm etp_hole_marker = 0;
#endif
+static int modified_sched_thread_suggested_stack_size = 0;
+
/*
* Note about VxWorks: All variables must be initialized by executable code,
* not by an initializer. Otherwise a new instance of the emulator will
@@ -301,30 +309,6 @@ void erl_error(char *fmt, va_list args)
static int early_init(int *argc, char **argv);
-void
-erts_short_init(void)
-{
-
- int ncpu;
- int time_correction;
- ErtsTimeWarpMode time_warp_mode;
-
- set_default_time_adj(&time_correction,
- &time_warp_mode);
- ncpu = early_init(NULL, NULL);
- erl_init(ncpu,
- ERTS_DEFAULT_MAX_PROCESSES,
- 0,
- ERTS_DEFAULT_MAX_PORTS,
- 0,
- 0,
- time_correction,
- time_warp_mode,
- ERTS_NODE_TAB_DELAY_GC_DEFAULT,
- ERTS_DB_SPNCNT_NORMAL);
- erts_initialized = 1;
-}
-
static void
erl_init(int ncpu,
int proc_tab_sz,
@@ -447,7 +431,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
}
static Eterm
-erl_system_process_otp(Eterm parent_pid, char* modname)
+erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq)
{
Eterm start_mod;
Process* parent;
@@ -463,6 +447,8 @@ erl_system_process_otp(Eterm parent_pid, char* modname)
parent = erts_pid2proc(NULL, 0, parent_pid, ERTS_PROC_LOCK_MAIN);
so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
+ if (off_heap_msgq)
+ so.flags |= SPO_OFF_HEAP_MSGQ;
res = erl_create_process(parent, start_mod, am_start, NIL, &so);
erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
return res;
@@ -634,9 +620,22 @@ void erts_usage(void)
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, " valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_SCHED_STACK_SIZE);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
- ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdio size suggested stack size in kilo words for dirty IO scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DIO_SCHED_STACK_SIZE);
+#endif
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
@@ -772,6 +771,8 @@ early_init(int *argc, char **argv) /*
H_MAX_SIZE = H_DEFAULT_MAX_SIZE;
H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG;
+ erts_term_init();
+
erts_initialized = 0;
erts_use_sender_punish = 1;
@@ -1129,8 +1130,12 @@ early_init(int *argc, char **argv) /*
}
if (dirty_cpu_scheds > schdlrs)
dirty_cpu_scheds = schdlrs;
+ if (dirty_cpu_scheds < 1)
+ dirty_cpu_scheds = 1;
if (dirty_cpu_scheds_online > schdlrs_onln)
dirty_cpu_scheds_online = schdlrs_onln;
+ if (dirty_cpu_scheds_online < 1)
+ dirty_cpu_scheds_online = 1;
#endif
}
@@ -1143,6 +1148,8 @@ early_init(int *argc, char **argv) /*
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
+#else
+ erts_no_schedulers = 1;
#endif
#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
@@ -1231,24 +1238,38 @@ early_init(int *argc, char **argv) /*
}
#ifndef ERTS_SMP
+
+void *erts_scheduler_stack_limit;
+
+
static void set_main_stack_size(void)
{
- if (erts_sched_thread_suggested_stack_size > 0) {
+ char c;
+ UWord stacksize;
# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
- struct rlimit rl;
- int bytes = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
- if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
- (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
- erts_fprintf(stderr, "failed to set stack size for scheduler "
- "thread to %d bytes\n", bytes);
- erts_usage();
- }
+ struct rlimit rl;
+ int bytes;
+ stacksize = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
+ /* Add some extra pages... needed by some systems... */
+ bytes = (int) stacksize + 3*erts_sys_get_page_size();
+ if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
+ (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
+ erts_fprintf(stderr, "failed to set stack size for scheduler "
+ "thread to %d bytes\n", bytes);
+ erts_usage();
+ }
# else
+ if (modified_sched_thread_suggested_stack_size) {
erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
- erts_usage();
-# endif
+ erts_usage();
}
+ /* Be conservative and hope it is not more than 64 kWords... */
+ stacksize = 64*1024*sizeof(void *);
+# endif
+
+ erts_scheduler_stack_limit = erts_calc_stacklimit(&c, stacksize);
}
+
#endif
void
@@ -1293,11 +1314,14 @@ erl_start(int argc, char **argv)
port_tab_sz_ignore_files = 1;
}
-#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
/*
- * The default stack size on MacOS X is too small for pcre.
+ * A default stack size suitable for pcre, which might use quite
+ * a lot of stack.
*/
- erts_sched_thread_suggested_stack_size = 256;
+ erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
+ erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
#endif
#ifdef DEBUG
@@ -1917,10 +1941,47 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (has_prefix("ssdcpu", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
+ arg = get_arg(sub_param+6, argv[i+1], &i);
+ erts_dcpu_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dcpu_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dcpu_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty CPU scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty CPU scheduler thread stack size %d kilo words\n",
+ erts_dcpu_sched_thread_suggested_stack_size));
+ }
+ else if (has_prefix("ssdio", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty IO scheduler threads */
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ erts_dio_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dio_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dio_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty IO scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty IO scheduler thread stack size %d kilo words\n",
+ erts_dio_sched_thread_suggested_stack_size));
+ }
+#endif
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
arg = get_arg(sub_param+2, argv[i+1], &i);
erts_sched_thread_suggested_stack_size = atoi(arg);
+ modified_sched_thread_suggested_stack_size = 1;
if ((erts_sched_thread_suggested_stack_size
< ERTS_SCHED_THREAD_MIN_STACK_SIZE)
@@ -2230,6 +2291,15 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
+ if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+#endif
+
erl_init(ncpu,
proc_tab_sz,
legacy_proc_tab,
@@ -2258,25 +2328,23 @@ erl_start(int argc, char **argv)
*/
Eterm pid;
- pid = erl_system_process_otp(otp_ring0_pid, "erts_code_purger");
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_code_purger", !0);
erts_code_purger
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
ASSERT(erts_code_purger && erts_code_purger->common.id == pid);
erts_proc_inc_refc(erts_code_purger);
-#ifdef ERTS_NEW_PURGE_STRATEGY
- pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector");
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector", !0);
erts_literal_area_collector
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
ASSERT(erts_literal_area_collector
&& erts_literal_area_collector->common.id == pid);
erts_proc_inc_refc(erts_literal_area_collector);
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
- pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker");
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker", !0);
erts_dirty_process_code_checker
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
@@ -2289,8 +2357,12 @@ erl_start(int argc, char **argv)
#ifdef ERTS_SMP
erts_start_schedulers();
- /* Let system specific code decide what to do with the main thread... */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
+ /* Let system-specific code decide what to do with the main thread... */
erts_sys_main_thread(); /* May or may not return! */
#else
{
@@ -2303,7 +2375,14 @@ erl_start(int argc, char **argv)
#endif
set_main_stack_size();
erts_sched_init_time_sup(esdp);
- process_main();
+ erts_ets_sched_spec_data_init(esdp);
+ erts_aux_work_timeout_late_init(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
}
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index 4d4defd8b5..634509f880 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1200,7 +1200,8 @@ erts_instr_init(int stat, int map_stat)
stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
- erts_mtx_init(&instr_mutex, "instr");
+ erts_mtx_init(&instr_mutex, "instr", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
mem_anchor = NULL;
@@ -1223,7 +1224,8 @@ erts_instr_init(int stat, int map_stat)
if (map_stat) {
- erts_mtx_init(&instr_x_mutex, "instr_x");
+ erts_mtx_init(&instr_x_mutex, "instr_x", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_instr_memory_map = 1;
erts_instr_stat = 1;
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index c53bdc685b..cf091ee43f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -89,6 +89,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "hipe_mfait_lock", NULL },
#endif
{ "nodes_monitors", NULL },
+#ifdef ERTS_SMP
+ { "resource_monitors", "address" },
+#endif
{ "driver_list", NULL },
{ "proc_link", "pid" },
{ "proc_msgq", "pid" },
@@ -97,14 +100,11 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
- { "proc_status", "pid" },
- { "proc_trace", "pid" },
- { "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
- { "meta_main_tab_slot", "address" },
{ "db_tab", "address" },
+ { "proc_status", "pid" },
+ { "proc_trace", "pid" },
{ "db_tab_fix", "address" },
- { "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
{ "dist_table", NULL },
@@ -113,10 +113,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "export_tab", NULL },
{ "fun_tab", NULL },
{ "environ", NULL },
-#ifdef ERTS_NEW_PURGE_STRATEGY
{ "release_literal_areas", NULL },
#endif
-#endif
{ "efile_drv", "address" },
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
@@ -158,9 +156,13 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "tracer_mtx", NULL },
{ "port_table", NULL },
#endif
+ { "magic_ref_table", "address" },
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
+#ifdef ERTS_SMP
+ { "pollsets_lock", NULL },
+#endif
{ "alcu_allocator", "index" },
{ "mseg", NULL },
#ifdef ERTS_SMP
@@ -173,7 +175,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "get_time", NULL },
{ "get_corrected_time", NULL },
{ "breakpoints", NULL },
- { "pollsets_lock", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
@@ -199,41 +200,20 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#define ERTS_LOCK_ORDER_SIZE \
(sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
-#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
- (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
- && ((LCK_FLG) \
- & ERTS_LC_FLG_LT_ALL \
- & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_SPINLOCK \
+ && \
+ ((LCK_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) != ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
static __decl_noreturn void __noreturn lc_abort(void);
-static char *
-lock_type(Uint16 flags)
+static const char *rw_op_str(erts_lock_options_t options)
{
- switch (flags & ERTS_LC_FLG_LT_ALL) {
- case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
- case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
- case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
- case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
- case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
- default: return "";
+ if(options == ERTS_LOCK_OPTIONS_WRITE) {
+ ERTS_INTERNAL_ERROR("Only write flag present");
}
-}
-static char *
-rw_op_str(Uint16 flags)
-{
- switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
- case ERTS_LC_FLG_LO_READ_WRITE:
- return " (rw)";
- case ERTS_LC_FLG_LO_READ:
- return " (r)";
- case ERTS_LC_FLG_LO_WRITE:
- ERTS_INTERNAL_ERROR("Only write flag present");
- default:
- break;
- }
- return "";
+ return erts_lock_options_get_short_desc(options);
}
typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
@@ -244,7 +224,8 @@ struct erts_lc_locked_lock_t_ {
Sint16 id;
char *file;
unsigned int line;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
};
typedef struct {
@@ -431,7 +412,7 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+new_locked_lock(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
@@ -441,12 +422,13 @@ new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
l_lck->extra = lck->extra;
l_lck->file = file;
l_lck->line = line;
- l_lck->flags = lck->flags | op_flags;
+ l_lck->flags = lck->flags;
+ l_lck->taken_options = options;
return l_lck;
}
static void
-raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
@@ -458,16 +440,16 @@ raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
erts_fprintf(stderr,"%T",extra);
- erts_fprintf(stderr,"%s",lock_type(flags));
+ erts_fprintf(stderr,"[%s]",erts_lock_flags_get_type_name(flags));
if (file)
erts_fprintf(stderr,"(%s:%d)",file,line);
- erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+ erts_fprintf(stderr,"'(%s)%s",rw_op_str(flags),suffix);
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+print_lock2(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags, char *suffix)
{
raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
@@ -522,9 +504,9 @@ uninitialized_lock(void)
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -532,9 +514,9 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -745,84 +727,128 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+{
+ if(locked_lock->id < comparand->id) {
+ return -1;
+ } else if(locked_lock->id > comparand->id) {
+ return 1;
+ }
+
+ return 0;
+}
-static int
-find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
+ int order = compare_locked_by_id(locked_lock, comparand);
+
+ if(order) {
+ return order;
+ } else if(locked_lock->extra < comparand->extra) {
+ return -1;
+ } else if(locked_lock->extra > comparand->extra) {
+ return 1;
+ }
- if (l_lck) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & lck->flags) == lck->flags)
- return 1;
- return 0;
- }
- else if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra < lck->extra)) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id > lck->id
- || (l_lck->id == lck->id
- && l_lck->extra >= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra <= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
+ return 0;
+}
+
+typedef int (*locked_compare_func)(erts_lc_locked_lock_t *, erts_lc_lock_t *);
+
+/* Searches through a list of taken locks, walking in the direction given by
+ * the first comparison and bailing out as soon as the comparison changes
+ * sign. (*closest_neighbor) is set to the exact match when there is one, and
+ * otherwise to the entry closest to it in the sort order. */
+static int search_locked_list(locked_compare_func compare,
+ erts_lc_locked_lock_t *locked_locks,
+ erts_lc_lock_t *search_template,
+ erts_lc_locked_lock_t **closest_neighbor)
+{
+ erts_lc_locked_lock_t *iterator = locked_locks;
+
+ (*closest_neighbor) = iterator;
+
+ if(iterator) {
+ int relative_order = compare(iterator, search_template);
+
+ if(relative_order < 0) {
+ while((iterator = iterator->next) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order >= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ } else if(relative_order > 0) {
+ while((iterator = iterator->prev) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order <= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ }
+
+ return relative_order == 0;
}
+
return 0;
}
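For reference, the same bail-out search on a toy sorted, doubly linked list of integers; node_t and the plain int id stand in for erts_lc_locked_lock_t and the (id, extra) key of the real code:

    #include <assert.h>
    #include <stddef.h>

    typedef struct node { int id; struct node *prev, *next; } node_t;

    static int search(node_t *start, int wanted, node_t **closest) {
        node_t *it = start;
        *closest = it;
        if (it) {
            int order = (it->id > wanted) - (it->id < wanted);
            if (order < 0) {            /* walk forward until >= wanted */
                while ((it = it->next) != NULL) {
                    order = (it->id > wanted) - (it->id < wanted);
                    if (order >= 0) { *closest = it; break; }
                }
            } else if (order > 0) {     /* walk backward until <= wanted */
                while ((it = it->prev) != NULL) {
                    order = (it->id > wanted) - (it->id < wanted);
                    if (order <= 0) { *closest = it; break; }
                }
            }
            return order == 0;
        }
        return 0;
    }

    int main(void) {
        node_t a = {1, NULL, NULL}, b = {3, NULL, NULL}, c = {7, NULL, NULL};
        node_t *closest;
        a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
        assert(search(&a, 3, &closest) && closest->id == 3);  /* exact hit */
        assert(!search(&c, 4, &closest) && closest->id == 3); /* nearest */
        return 0;
    }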
+/* Searches for a lock in the given list that matches search_template, and sets
+ * (*locked_locks) to the closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
-{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
-
- if (l_lck) {
- if (l_lck->id == id)
- return 1;
- else if (l_lck->id < id) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id >= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id <= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
+find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ int found_lock;
+
+ found_lock = search_locked_list(compare_locked_by_id_extra,
+ (*locked_locks),
+ search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ if(found_lock) {
+ erts_lock_options_t relevant_options;
+ erts_lock_flags_t relevant_flags;
+
+ /* We only care about the options and flags that are set in the
+ * template. */
+ relevant_options = (closest_neighbor->taken_options & search_template->taken_options);
+ relevant_flags = (closest_neighbor->flags & search_template->flags);
+
+ return search_template->taken_options == relevant_options &&
+ search_template->flags == relevant_flags;
}
+
return 0;
}
+/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
+ * closest lock in the sort order. */
+static int
+find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ erts_lc_lock_t search_template;
+ int found_lock;
+
+ search_template.id = id;
+
+ found_lock = search_locked_list(compare_locked_by_id,
+ (*locked_locks),
+ &search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ return found_lock;
+}
+
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
@@ -918,17 +944,17 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
void
-erts_lc_check_no_locked_of_type(Uint16 flags)
+erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (l_lcks) {
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if (l_lck->flags & flags) {
+ if ((l_lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- lock_type(l_lck->flags));
+ erts_lock_flags_get_type_name(l_lck->flags));
print_curr_locks(l_lcks);
lc_abort();
}
@@ -937,7 +963,7 @@ erts_lc_check_no_locked_of_type(Uint16 flags)
}
int
-erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
return 0;
@@ -986,7 +1012,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
break;
}
}
@@ -1008,7 +1034,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1021,7 +1047,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
+ l_lck = locked ? new_locked_lock(lck, options, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1039,7 +1065,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
if (locked) {
l_lck->next = tl_lck->next;
l_lck->prev = tl_lck;
@@ -1062,14 +1088,14 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1109,7 +1135,7 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
}
}
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
@@ -1137,7 +1163,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1150,7 +1176,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1166,12 +1192,12 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
l_lcks->locked.last = l_lck;
}
else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, op_flags);
+ lock_twice("Locking", l_lcks, lck, options);
else
lock_order_violation(l_lcks, lck);
}
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1192,8 +1218,8 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
- unlock_op_mismatch(l_lcks, lck, op_flags);
+ if ((l_lck->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(l_lcks, lck, options);
if (l_lck->prev)
l_lck->prev->next = l_lck->next;
else
@@ -1210,7 +1236,7 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
unlock_of_not_locked(l_lcks, lck);
}
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1274,23 +1300,25 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
}
void
-erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
void
-erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
@@ -1304,6 +1332,7 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
lck->id = -1;
lck->extra = THE_NON_VALUE;
lck->flags = 0;
+ lck->taken_options = 0;
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 18296d1fec..8c754a8dfa 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -36,6 +36,8 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#include "erl_lock_flags.h"
+
#ifndef ERTS_ENABLE_LOCK_POSITION
/* Enable in order for _x variants of mtx functions to be used. */
#define ERTS_ENABLE_LOCK_POSITION 1
@@ -44,36 +46,14 @@
typedef struct {
int inited;
Sint16 id;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
UWord extra;
} erts_lc_lock_t;
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-
-#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
-
-#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
-#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
-
-#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
- | ERTS_LC_FLG_LT_RWSPINLOCK \
- | ERTS_LC_FLG_LT_MUTEX \
- | ERTS_LC_FLG_LT_RWMUTEX \
- | ERTS_LC_FLG_LT_PROCLOCK)
-
-#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
@@ -83,31 +63,31 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
-void erts_lc_check_no_locked_of_type(Uint16 flags);
-int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_check_no_locked_of_type(erts_lock_flags_t flags);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
char* file, unsigned int line);
void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
-void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
-void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra);
void erts_lc_destroy_lock(erts_lc_lock_t *lck);
void erts_lc_fail(char *fmt, ...);
int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 481e92b2cd..d2e8f47d59 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,51 +18,37 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- */
-
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
#include "sys.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "global.h"
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
-
-/* globals, dont access these without locks or blocks */
+#include "erl_thr_progress.h"
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+#include "erl_node_tables.h"
+#include "erl_alloc_util.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_db.h"
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
+#define LCNT_MAX_CARRIER_ENTRIES 255
-/* local functions */
+/* - Locals that are shared with the header implementation - */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+#ifdef DEBUG
+int lcnt_initialization_completed__;
+#endif
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+erts_lock_flags_t lcnt_category_mask__;
+ethr_tsd_key lcnt_thr_data_key__;
-const int log2_tab64[64] = {
+const int lcnt_log2_tab64__[64] = {
63, 0, 58, 1, 59, 47, 53, 2,
60, 39, 48, 27, 54, 33, 42, 3,
61, 51, 37, 40, 49, 18, 28, 20,
@@ -72,629 +58,624 @@ const int log2_tab64[64] = {
56, 45, 25, 31, 35, 16, 9, 12,
44, 24, 15, 8, 23, 7, 6, 5};
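The removed lcnt_log2() just below shows how this table is consumed. As a standalone illustration (a sketch, not part of the patch), the de Bruijn trick can even regenerate the table from its multiplier, since every power of two maps to a distinct 6-bit slot:

    #include <assert.h>
    #include <stdint.h>

    #define DEBRUIJN UINT64_C(0x07EDD5E59A4E28C2)

    static int log2_tab64[64];

    static int log2_u64(uint64_t v) {
        /* Smear the most significant bit into every lower position... */
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16; v |= v >> 32;
        /* ...then (v - (v >> 1)) isolates it, and the multiply-shift maps
         * each power of two to a unique table index. */
        return log2_tab64[((v - (v >> 1)) * DEBRUIJN) >> 58];
    }

    int main(void) {
        int i;
        for (i = 0; i < 64; i++)
            log2_tab64[(DEBRUIJN << i) >> 58] = i;
        for (i = 0; i < 64; i++) {
            assert(log2_u64(UINT64_C(1) << i) == i);       /* exact powers */
            assert(log2_u64((UINT64_C(1) << i) | 1) == i); /* low bits set */
        }
        return 0;
    }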
-static ERTS_INLINE int lcnt_log2(Uint64 v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
-}
-
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+/* - Local variables - */
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
- sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
-}
+typedef struct lcnt_static_lock_ref_ {
+ erts_lcnt_ref_t *reference;
-static void lcnt_time(erts_lcnt_time_t *time) {
- /*
- * erts_sys_hrtime() is the highest resolution
- * we could find, it may or may not be monotonic...
- */
- ErtsMonotonicTime mtime = erts_sys_hrtime();
- time->s = (unsigned long) (mtime / 1000000000LL);
- time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
-}
+ erts_lock_flags_t flags;
+ const char *name;
+ Eterm id;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
+ struct lcnt_static_lock_ref_ *next;
+} lcnt_static_lock_ref_t;
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
+static ethr_atomic_t lcnt_static_lock_registry;
- /* the difference should not be able to get bigger than 1 sec in ns*/
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
- }
+static erts_lcnt_time_t lcnt_timer_start;
- ASSERT(ds >= 0);
+static int lcnt_preserve_info;
- d->s = ds;
- d->ns = dns;
-}
+/* local functions */
+
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
+
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
-/* difference d must be non-negative */
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- t->s += d->s;
- t->ns += d->ns;
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
- t->s += t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
+ stats->times_waited = 0;
+
+ stats->file = NULL;
+ stats->line = 0;
+
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
+ }
+
+ info->location_count = 1;
}
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
- if (!eltd) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
}
+
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
-}
-
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
}
-/* debug */
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * with its neighbors rather than a global lock. Deletion is rather quick, but
+ * insertion is still serial since the head becomes a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
-#if 0
-static char* lock_opt(Uint16 flag) {
- if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
- if (flag & ERTS_LCNT_LO_READ ) return "r ";
- if (flag & ERTS_LCNT_LO_WRITE) return " w";
- return "--";
-}
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
- erts_aint_t w_state, r_state;
- char *type;
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
- if (strcmp(lock->name, "run_queue") != 0) return;
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
- if (lock->flag & flag) {
- erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- type,
- lock->id);
- }
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
}
-#endif
-
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
- }
- }
- return &lock->stats[0];
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}
-static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
- int idx;
- unsigned long r;
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
- idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
- } else {
- r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
- if (r) idx = lcnt_log2(r);
- else idx = 0;
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
- erts_lcnt_time_t *time_wait) {
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- ethr_atomic_inc(&stats->tries);
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ return;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- lcnt_update_stats_hist(&stats->hist,time_wait);
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
}
-/* interface */
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
+}
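A minimal model of this three-way locking discipline, using C11 atomics and POSIX sched_yield() as stand-ins for the ethr primitives; because every failed attempt releases everything taken so far and restarts, two threads locking adjacent entries cannot deadlock:

    #include <sched.h>
    #include <stdatomic.h>

    typedef struct node {
        atomic_int lock; /* 0 = free, 1 = held */
        struct node *prev, *next;
    } node_t;

    static int try_lock_node(node_t *n) {
        int expected = 0;
        return atomic_compare_exchange_strong_explicit(
            &n->lock, &expected, 1, memory_order_acquire, memory_order_relaxed);
    }

    static void unlock_node(node_t *n) {
        atomic_store_explicit(&n->lock, 0, memory_order_release);
    }

    /* Lock a node and both neighbors; on any failure, drop everything taken
     * so far and restart from scratch. */
    static void lock_with_neighbors(node_t *n) {
        for (;;) {
            if (try_lock_node(n)) {
                if (try_lock_node(n->next)) {
                    if (try_lock_node(n->prev))
                        return;
                    unlock_node(n->next);
                }
                unlock_node(n);
            }
            sched_yield(); /* the real code spins a while before yielding */
        }
    }

    static void unlock_with_neighbors(node_t *n) {
        unlock_node(n->prev);
        unlock_node(n->next);
        unlock_node(n);
    }

    int main(void) {
        node_t a = {0}, b = {0}, c = {0};
        a.prev = &c; a.next = &b;
        b.prev = &a; b.next = &c;
        c.prev = &b; c.next = &a;
        lock_with_neighbors(&b);
        unlock_with_neighbors(&b);
        return 0;
    }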
+
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
+
+ prev = &list->head;
+
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
+ lcnt_lock_list_entry(next);
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ info->next = next;
+ info->prev = prev;
- /* init tsd */
- lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
+ prev->next = info;
+ next->prev = info;
- lcnt_lock();
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
+}
+
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
- erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK;
- eltd = lcnt_thread_data_alloc();
- ethr_tsd_set(lcnt_thr_data_key, eltd);
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- if (!erts_lcnt_data) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
}
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
- lcnt_unlock();
+ prev = &list->head;
+
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
+
+ lcnt_lock_list_entry(next);
+
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-void erts_lcnt_late_init() {
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
+
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
+
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-/* list operations */
+/* - Carrier operations - */
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
+}
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+ return erts_thr_progress_unmanaged_continue(handle);
+}
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- if (!list) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-/* only do this on the list with the deleted locks! */
-void erts_lcnt_list_clear(erts_lcnt_lock_list_t *list) {
- erts_lcnt_lock_t *lock = NULL,
- *next = NULL;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
- lock = list->head;
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
- }
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
+ }
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
+
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
} else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
+
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
+
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
}
- lock->next = NULL;
- list->tail = lock;
+}
- list->n++;
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
+
+ if(lcnt_preserve_info) {
+ ethr_atomic_set(&info->ref_count, 1);
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
+
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
+ } else {
+ lcnt_info_deallocate(info);
+ }
}
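The disposal dance above is easiest to see in isolation: when info preservation is on, the first release-to-zero revives the entry with a single reference and swaps the hook, so the next release frees it for real. A simplified, single-threaded sketch (no atomics, plain printf in place of the deleted-locks list):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct obj {
        int ref_count;
        void (*dispose)(struct obj *);
    } obj_t;

    static int preserve_info = 1;

    static void deallocate(obj_t *o) {
        printf("freed\n");
        free(o);
    }

    static void dispose_first(obj_t *o) {
        if (preserve_info) {
            o->ref_count = 1;         /* revive with a single reference */
            o->dispose = &deallocate; /* next release frees for real */
            printf("moved to deleted list\n");
        } else {
            deallocate(o);
        }
    }

    static void release(obj_t *o) {
        if (--o->ref_count == 0)
            o->dispose(o);
    }

    int main(void) {
        obj_t *o = malloc(sizeof(obj_t));
        o->ref_count = 1;
        o->dispose = &dispose_first;
        release(o); /* prints "moved to deleted list" */
        release(o); /* prints "freed" */
        return 0;
    }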
-/* END ASSUMPTION: lcnt_data_lock taken */
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
-/* lock operations */
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, NIL);
+ info->dispose = &lcnt_info_dispose;
+
+ lcnt_clear_stats(info);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
- int i;
- if (name == NULL) { ERTS_LCNT_CLEAR_FLAG(lock); return; }
- lcnt_lock();
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
+ ASSERT(lcnt_initialization_completed__);
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
- lock->n_stats = 1;
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->entry_count = entry_count;
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
- }
+ ethr_atomic_init(&result->ref_count, entry_count);
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
- lcnt_unlock();
-}
-/* init empty, instead of zero struct */
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = 0;
- lock->name = NULL;
- lock->id = NIL;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
- lock->n_stats = 0;
- sys_memzero(lock->stats, sizeof(lock->stats));
-}
-/* destroy lock */
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- lcnt_lock();
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- erts_lcnt_lock_t *deleted_lock;
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- if (!deleted_lock) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
+
+ lcnt_lock_info_init_helper(info);
+
+ info->carrier = result;
}
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- ERTS_LCNT_CLEAR_FLAG(lock);
- lcnt_unlock();
+ return result;
}
-/* lock */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
+#ifdef DEBUG
+ int i;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ /* Verify that all locks share the same categories/static property; all
+ * other flags are fair game. */
+ for(i = 1; i < carrier->entry_count; i++) {
+ const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
+ ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;
- eltd = lcnt_get_thread_data();
- ASSERT(eltd);
+ erts_lcnt_lock_info_t *previous, *current;
- w_state = ethr_atomic_read(&lock->w_state);
+ previous = &carrier->entries[i - 1];
+ current = &carrier->entries[i];
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
+ ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
}
+#endif
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
+ if(swapped_carrier != (ethr_sint_t)NULL) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
+#endif
+
+ lcnt_deallocate_carrier__(carrier);
} else {
- eltd->lock_in_conflict = 0;
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
}
}
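The install path is a publish-once pattern: whichever thread swaps its carrier in from NULL wins, and a loser deallocates its own copy. Modeled with C11 atomics, under the assumption that the reference is only ever set from NULL:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct { int dummy; } carrier_t;

    static int install(_Atomic(carrier_t *) *ref, carrier_t *carrier) {
        carrier_t *expected = NULL;
        if (!atomic_compare_exchange_strong(ref, &expected, carrier)) {
            free(carrier); /* lost the race; the winner's carrier stays */
            return 0;
        }
        return 1;          /* won; carrier is now published */
    }

    int main(void) {
        _Atomic(carrier_t *) ref = NULL;
        install(&ref, malloc(sizeof(carrier_t))); /* succeeds */
        install(&ref, malloc(sizeof(carrier_t))); /* frees its argument */
        free(atomic_load(&ref));
        return 0;
    }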
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc(&lock->w_state);
- eltd = lcnt_get_thread_data();
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
+ }
+}
- ASSERT(eltd);
+/* - Static lock registry -
+ *
+ * Since static locks can be trusted to never disappear, we can track them
+ * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
+ * variant. */
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
+static void lcnt_init_static_lock_registry(void) {
+ ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
+}
+
+static void lcnt_update_static_locks(void) {
+ lcnt_static_lock_ref_t *iterator =
+ (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
+
+ while(iterator != NULL) {
+ if(!erts_lcnt_check_enabled(iterator->flags)) {
+ erts_lcnt_uninstall(iterator->reference);
+ } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
+ erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);
+
+ erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);
+
+ erts_lcnt_install(iterator->reference, carrier);
}
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
+
+ iterator = iterator->next;
}
}
-/* if a lock wasn't really a lock operation, bad bad process locks */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags) {
+ lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
+ int retry_insertion;
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);
+
+ lock->reference = reference;
+ lock->flags = flags;
+ lock->name = name;
+ lock->id = id;
+
+ do {
+ ethr_sint_t swapped_head;
+
+ lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ swapped_head = ethr_atomic_cmpxchg_acqb(
+ &lcnt_static_lock_registry,
+ (ethr_sint_t)lock,
+ (ethr_sint_t)lock->next);
- ethr_atomic_dec(&lock->w_state);
+ retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
+ } while(retry_insertion);
}
-/*
- * erts_lcnt_lock_post
- *
- * Used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
+/* - Initialization - */
+
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
+
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
+ lcnt_init_static_lock_registry();
}
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
-#ifdef DEBUG
- erts_aint_t flowstate;
-#endif
+void erts_lcnt_post_thr_init() {
+ /* ASSUMPTION: this is safe since it runs prior to the creation of other
+ * threads (directly after ethread init). */
+
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ erts_lcnt_thread_setup();
+}
+
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc(&lock->flowstate);
- }
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
#endif
+}
- eltd = lcnt_get_thread_data();
-
- ASSERT(eltd);
+void erts_lcnt_post_startup(void) {
+ /* Default to capturing everything to match the behavior of the old lock
+ * counter build. */
+ erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
+}
- /* if lock was in conflict, time it */
- stats = lcnt_get_lock_stats(lock, file, line);
- if (eltd->timer_set) {
- lcnt_time(&timer);
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
}
-/* unlock */
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ if (eltd) {
+ free(eltd);
+ }
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+/* - BIF interface - */
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- {
- erts_aint_t w_state;
- erts_aint_t flowstate;
-
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec(&lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0);
- }
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_dec(&lock->w_state);
}
-/* trylock */
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
+
+ count = ethr_atomic_dec_read(&info->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
} else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
+
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
+erts_lock_flags_t erts_lcnt_get_category_mask() {
+ return lcnt_category_mask__;
+}
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (res != EBUSY) {
-#ifdef DEBUG
- {
- erts_aint_t flowstate;
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
+#ifdef ERTS_ENABLE_KERNEL_POLL
+/* erl_poll/erl_check_io only exports one of these variants at a time, and we
+ * may need to use either one depending on emulator startup flags. */
+void erts_lcnt_update_pollset_locks_nkp(int);
+void erts_lcnt_update_pollset_locks_kp(int);
+
+void erts_lcnt_update_cio_locks_nkp(int);
+void erts_lcnt_update_cio_locks_kp(int);
#endif
- ethr_atomic_inc(&lock->w_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
+ erts_lock_flags_t changed_categories;
+
+ ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
+ ASSERT(lcnt_initialization_completed__);
+
+ changed_categories = (lcnt_category_mask__ ^ mask);
+ lcnt_category_mask__ = mask;
+
+ if(changed_categories) {
+ lcnt_update_static_locks();
}
-}
-/* thread operations */
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
+ erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ }
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ }
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
+ erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ }
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ if(erts_use_kernel_poll) {
+ erts_lcnt_update_pollset_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_update_pollset_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+#else
+ erts_lcnt_update_pollset_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
- if (eltd) {
- free(eltd);
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
+ erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
}
}
-/* bindings for bifs */
-
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options |= opt;
- return prev;
+void erts_lcnt_set_preserve_info(int enable) {
+ lcnt_preserve_info = enable;
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options &= ~opt;
- return prev;
+int erts_lcnt_get_preserve_info() {
+ return lcnt_preserve_info;
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
+ }
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
}
+}
+
+erts_lcnt_data_t erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+ erts_lcnt_data_t result;
- /* empty deleted locks in lock list */
- erts_lcnt_list_clear(erts_lcnt_data->deleted_locks);
+ lcnt_time__(&timer_stop);
- lcnt_time(&timer_start);
+ result.timer_start = lcnt_timer_start;
- lcnt_unlock();
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
+
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
- erts_lcnt_time_t timer_stop;
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
- lcnt_lock();
+ current = *iterator ? *iterator : &list->head;
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+ ASSERT(current != &list->tail);
- lcnt_unlock();
+ lcnt_lock_list_entry(current);
- return erts_lcnt_data;
-}
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+ return next != &list->tail;
}
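A hypothetical caller drives this protocol the same way erts_lcnt_clear_counters() does above; each entry comes back retained, so it stays valid even if it is deleted from the list while being inspected (not a standalone program; inspect() is a placeholder for caller code):

    erts_lcnt_lock_info_t *iterator = NULL;

    while (erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
        inspect(iterator); /* retained entry; released on the next call */
    }

    /* Had we broken out of the loop early, the current entry would still
     * be retained and would need an explicit erts_lcnt_release_lock_info(). */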
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 3e8dcefe69..89d95a73cf 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,64 +18,51 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- * Abstract:
- * Locks statistics internal representation.
- *
- * Conceptual representation,
- * - set name
- * | - id (the unique lock)
- * | | - lock type
- * | | - statistics
- * | | | - location (file and line number)
- * | | | - tries
- * | | | - collisions (including trylock busy)
- * | | | - timer (time spent in waiting for lock)
- * | | | - n_timer (collisions excluding trylock busy)
- * | | | - histogram
- * | | | | - # 0 = log2(lock wait_time ns)
- * | | | | - ...
- * | | | | - # n = log2(lock wait_time ns)
- *
- * Each instance of a lock is the unique lock, i.e. set and id in that set.
- * For each lock there is a set of statistics with where and what impact
- * the lock aqusition had.
- *
- * Runtime options
- * - suspend, used when internal lock-counting can't be applied. For instance
- * when allocating a term for the outside and halloc needs to be used.
- * Default: off.
- * - location, reserved and not used.
- * - proclock, disable proclock counting. Used when performance might be an
- * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
- * Default: off.
- * - copysave, enable saving of destroyed locks (and thereby its statistics).
- * If memory constraints is an issue this need to be disabled.
- * Accessible from erts_debug:lock_counters({copy_save, bool()}).
- * Default: off.
+/**
+ * @description Statistics for locks.
+ * @file erl_lock_count.h
+ *
+ * @author Björn-Egil Dahlberg
+ * @author John Högberg
+ *
+ * Conceptual representation:
*
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - attempts
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent in waiting for lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
+ *
+ * Each instance of a lock is the unique lock, i.e. set and id in that set.
+ * For each lock there is a set of statistics describing where the lock was
+ * acquired and what impact the acquisition had.
*/
-#include "sys.h"
-
#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
#ifndef ERTS_ENABLE_LOCK_POSITION
-/* Enable in order for _x variants of mtx functions to be used. */
+/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif
+#include "sys.h"
#include "ethread.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+#include "erl_term.h"
+#include "erl_lock_flags.h"
+
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (5)
-/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
@@ -85,153 +72,857 @@
#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
#endif
-#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
-#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
-
-#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
-#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
-
-#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
- | ERTS_LCNT_LO_WRITE )
-
-#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
- | ERTS_LCNT_LT_RWSPINLOCK \
- | ERTS_LCNT_LT_MUTEX \
- | ERTS_LCNT_LT_RWMUTEX \
- | ERTS_LCNT_LT_PROCLOCK )
-
-#define ERTS_LCNT_LOCK_TYPE(lock) ((lock)->flag & ERTS_LCNT_LT_ALL)
-#define ERTS_LCNT_IS_LOCK_INVALID(lock) (!((lock)->flag & ERTS_LCNT_LT_ALL))
-#define ERTS_LCNT_CLEAR_FLAG(lock) ((lock)->flag = 0)
-
-/* runtime options */
-
-#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
-#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
-#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
-#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3)
-#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4)
-
typedef struct {
unsigned long s;
unsigned long ns;
} erts_lcnt_time_t;
-extern erts_lcnt_time_t timer_start;
-
typedef struct {
- Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */
+ /** @brief log2 array of nanosecond occurrences */
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
} erts_lcnt_hist_t;
-typedef struct erts_lcnt_lock_stats_s {
- /* "tries" and "colls" needs to be atomic since
- * trylock busy does not aquire a lock and there
- * is no post action to rectify the situation
- */
+typedef struct {
+ /** @brief In which file the lock was taken. May be NULL. */
+ const char *file;
+ /** @brief Line number in \c file */
+ unsigned int line;
- char *file; /* which file the lock was taken */
- unsigned int line; /* line number in file */
+ /* "attempts" and "collisions" need to be atomic since try_lock busy does
+ * not acquire a lock and there is no post action to rectify the
+ * situation. */
- ethr_atomic_t tries; /* n tries to get lock */
- ethr_atomic_t colls; /* n collisions of tries to get lock */
+ ethr_atomic_t attempts;
+ ethr_atomic_t collisions;
- unsigned long timer_n; /* #times waited for lock */
- erts_lcnt_time_t timer; /* total wait time for lock */
- erts_lcnt_hist_t hist;
+ erts_lcnt_time_t total_time_waited;
+ Uint64 times_waited;
+
+ erts_lcnt_hist_t wait_time_histogram;
} erts_lcnt_lock_stats_t;
-/* rw locks uses both states, other locks only uses w_state */
-typedef struct erts_lcnt_lock_s {
- char *name; /* lock name */
- Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+typedef struct lcnt_lock_info_t_ {
+ erts_lock_flags_t flags;
+ const char *name;
+ /** @brief Id if possible, must be an immediate */
+ Eterm id;
-#ifdef DEBUG
- ethr_atomic_t flowstate;
-#endif
+ /* The first entry is reserved as a fallback for when location information
+ * is missing, and when the lock is used in more than (MAX_LOCK_LOCATIONS
+ * - 1) different places. */
+ erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
+ unsigned int location_count;
- /* lock states */
- ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
- ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+ /* -- Everything below is internal to this module ---------------------- */
- /* statistics */
- unsigned int n_stats;
- erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/
+ /* Lock states; rw locks use both states, other locks use only w_state */
- /* chains for list handling */
- /* data is hold by lcnt_lock */
- struct erts_lcnt_lock_s *prev;
- struct erts_lcnt_lock_s *next;
-} erts_lcnt_lock_t;
+ /** @brief Write state. 0 = not taken, otherwise n threads waiting */
+ ethr_atomic_t w_state;
+ /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
+ ethr_atomic_t r_state;
-typedef struct {
- erts_lcnt_lock_t *head;
- erts_lcnt_lock_t *tail;
- unsigned long n;
-} erts_lcnt_lock_list_t;
+ struct lcnt_lock_info_t_ *prev;
+ struct lcnt_lock_info_t_ *next;
-typedef struct {
- erts_lcnt_time_t duration; /* time since last clear */
- erts_lcnt_lock_list_t *current_locks;
- erts_lcnt_lock_list_t *deleted_locks;
-} erts_lcnt_data_t;
+ /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
+ ethr_atomic_t ref_count;
+ ethr_atomic32_t lock;
+
+ /** @brief Deletion hook called once \c ref_count reaches 0; may defer
+ * deletion by modifying \c ref_count. */
+ void (*dispose)(struct lcnt_lock_info_t_ *);
+
+ struct lcnt_lock_info_carrier_ *carrier;
+} erts_lcnt_lock_info_t;
+
+typedef struct lcnt_lock_info_list_ {
+ erts_lcnt_lock_info_t head;
+ erts_lcnt_lock_info_t tail;
+} erts_lcnt_lock_info_list_t;
typedef struct {
- int id;
+ erts_lcnt_time_t timer_start; /**< Time of last clear */
+ erts_lcnt_time_t duration; /**< Time since last clear */
+
+ erts_lcnt_lock_info_list_t *current_locks;
+ erts_lcnt_lock_info_list_t *deleted_locks;
+} erts_lcnt_data_t;
- erts_lcnt_time_t timer; /* timer */
- int timer_set; /* bool */
- int lock_in_conflict; /* bool */
-} erts_lcnt_thread_data_t;
+typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;
-/* globals */
+typedef ethr_atomic_t erts_lcnt_ref_t;
-extern Uint16 erts_lcnt_rt_options;
+/* -- Globals -------------------------------------------------------------- */
-/* function declerations */
+/** @brief Checks whether counting is enabled for any of the given
+ * categories. */
+#define erts_lcnt_check_enabled(flags) \
+    (lcnt_category_mask__ & (flags))
-void erts_lcnt_init(void);
+/* -- Lock operations ------------------------------------------------------
+ *
+ * All of these will nop if there's nothing "installed" on the given reference,
+ * in order to transparently support enable/disable at runtime. */
+
+/** @brief Records that a lock is being acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock
+ * @param option Notes whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Records that a lock has been acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock_post
+ * @param file The name of the file where the lock was acquired.
+ * @param line The line at which the lock was acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);
+
+/** @brief Records that a lock has been released. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_unlock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Rectifies the case where a recorded lock operation did not
+ * actually acquire the lock.
+ *
+ * Only used for process locks at the moment. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);
+
+/** @brief Records the result of a trylock, where \c result is the value
+ * returned by the trylock operation (EBUSY on failure). */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);
+
+/** @copydoc erts_lcnt_trylock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option);
+
+/* Indexed variants of the standard lock operations, for use when a single
+ * reference contains many counters (e.g. process locks).
+ *
+ * erts_lcnt_open_ref must be used to safely extract the installed carrier,
+ * which must be released with erts_lcnt_close_ref on success.
+ *
+ * Refer to \c erts_lcnt_lock for example usage. */
+
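+/* A minimal sketch of the open/close protocol (illustrative; `my_ref` is a
+ * hypothetical erts_lcnt_ref_t, not part of this header):
+ *
+ *     erts_lcnt_lock_info_carrier_t *carrier;
+ *     int handle;
+ *
+ *     if(erts_lcnt_open_ref(&my_ref, &handle, &carrier)) {
+ *         erts_lcnt_lock_idx(carrier, 0);
+ *         erts_lcnt_close_ref(handle, carrier);
+ *     }
+ */
+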
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option);
+
+/* -- Reference operations ------------------------------------------------- */
+
+/** @brief Registers a lock counter reference; this must be called prior to
+ * using any other functions in this module. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref);
+
+/** @brief As \c erts_lcnt_init_ref, but also enables lock counting right
+ * away if appropriate to reduce noise.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags);
+
+/** @brief Checks whether counting is enabled on the given reference. */
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref);
+
+/** @brief Convenience macro to re/enable counting on an already initialized
+ * reference. Don't forget to specify the lock type in \c flags! */
+#define erts_lcnt_install_new_lock_info(ref, name, id, flags) \
+    do { \
+        if(!erts_lcnt_check_ref_installed(ref)) { \
+            erts_lcnt_lock_info_carrier_t *__carrier; \
+            __carrier = erts_lcnt_create_lock_info_carrier(1); \
+            erts_lcnt_init_lock_info_idx(__carrier, 0, name, id, flags); \
+            erts_lcnt_install(ref, __carrier); \
+        } \
+    } while(0)
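+
+/* Example use (illustrative; `mtx` is a hypothetical structure holding an
+ * initialized erts_lcnt_ref_t):
+ *
+ *     erts_lcnt_install_new_lock_info(&mtx->lcnt, "my_mutex", NIL,
+ *         ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ */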
+
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);
+
+/** @brief Initializes the lock info at the given index.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with.
+ * @param flags The flags the lock itself was initialized with. Keep in mind
+ * that all locks in a carrier must share the same category/static property. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags);
+
+/** @brief Atomically installs the given lock counters. Nops (and releases the
+ * provided carrier) if something was already installed. */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);
+
+/** @brief Atomically removes the currently installed lock counters. Nops if
+ * nothing was installed. */
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);
+
+/* -- Module initialization ------------------------------------------------ */
+
+void erts_lcnt_pre_thr_init(void);
+void erts_lcnt_post_thr_init(void);
void erts_lcnt_late_init(void);
-/* thread operations */
+/** @brief Called after everything in the system has been initialized, including
+ * the schedulers. This is mainly a backwards compatibility shim for matching
+ * the old lcnt behavior where all lock counting was enabled by default. */
+void erts_lcnt_post_startup(void);
+
void erts_lcnt_thread_setup(void);
void erts_lcnt_thread_exit_handler(void);
-/* list operations (local) */
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
-
-void erts_lcnt_list_clear( erts_lcnt_lock_list_t *list);
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-
-/* lock operations (global) */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock);
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
-
-void erts_lcnt_lock(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
-
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
-
-/* bif interface */
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
-void erts_lcnt_clear_counters(void);
-char *erts_lcnt_lock_type(Uint16 type);
-erts_lcnt_data_t *erts_lcnt_get_data(void);
+/* -- BIF interface -------------------------------------------------------- */
+
+/** @brief Safely iterates through all entries in the given list.
+ *
+ * The referenced item will be valid until the next call to
+ * \c erts_lcnt_iterate_list after which point it may be destroyed; call
+ * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
+ *
+ * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
+ * iterator and breaking out of the loop.
+ *
+ * @param iterator The iteration variable; set the pointee to NULL to start
+ * iteration.
+ * @return 1 while the iterator is valid, 0 at the end of the list. */
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);
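+
+/* Iteration sketch (illustrative; `use` stands in for whatever is done with
+ * each entry):
+ *
+ *     erts_lcnt_lock_info_t *iterator = NULL;
+ *
+ *     while(erts_lcnt_iterate_list(list, &iterator)) {
+ *         use(iterator);
+ *     }
+ */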
+
+/** @brief Clears the counter state of all locks, and releases all locks
+ * preserved through erts_lcnt_set_preserve_info (if any). */
+void erts_lcnt_clear_counters(void);
+
+/** @brief Retrieves the global lock counter state.
+ *
+ * Note that the lists may be modified while you're mucking around with them.
+ * Always use \c erts_lcnt_iterate_list to enumerate them. */
+erts_lcnt_data_t erts_lcnt_get_data(void);
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);
+
+/** @brief Sets whether to preserve the info of destroyed/uninstalled locks.
+ *
+ * This option makes no distinction whether the lock was destroyed or if lock
+ * counting was simply disabled, so erts_lcnt_set_category_mask must not be
+ * used while this option is active. */
+void erts_lcnt_set_preserve_info(int enable);
+
+int erts_lcnt_get_preserve_info(void);
+
+/** @brief Updates the category mask, enabling or disabling counting on the
+ * affected locks as necessary.
+ *
+ * This is not guaranteed to find all existing locks; only those that are
+ * flagged as static locks and those reachable through other means can be
+ * altered. */
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask);
+
+erts_lock_flags_t erts_lcnt_get_category_mask(void);
+
+/* -- Inline implementation ------------------------------------------------ */
+
+/* The following is a hack to get the things we need from erl_thr_progress.h,
+ * which we can't #include without dependency hell breaking loose.
+ *
+ * The size of LcntThrPrgrLaterOp and value of the constant are verified at
+ * compile-time in erts_lcnt_pre_thr_init. */
+
+int lcnt_thr_progress_unmanaged_delay__(void);
+void lcnt_thr_progress_unmanaged_continue__(int handle);
+typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
+#define LCNT_THR_PRGR_DHANDLE_MANAGED -1
+
+struct lcnt_lock_info_carrier_ {
+ ethr_atomic_t ref_count;
+
+ LcntThrPrgrLaterOp release_entries;
+
+ unsigned char entry_count;
+ erts_lcnt_lock_info_t entries[];
+};
+
+typedef struct {
+ erts_lcnt_time_t timer; /* start of the current conflict wait */
+ int timer_set; /* conflict nesting depth; the timer is set at depth 0 */
+ int lock_in_conflict; /* bool */
+} lcnt_thread_data_t__;
+
+extern const int lcnt_log2_tab64__[];
+
+extern ethr_tsd_key lcnt_thr_data_key__;
+extern erts_lock_flags_t lcnt_category_mask__;
+
+#ifdef DEBUG
+extern int lcnt_initialization_completed__;
+#endif
+
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags);
+
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v);
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state);
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time);
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time) {
+ /*
+ * erts_sys_hrtime() is the highest resolution timer we could find;
+ * it may or may not be monotonic.
+ */
+ ErtsMonotonicTime mtime = erts_sys_hrtime();
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
+}
+
+/* difference d must be non-negative */
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ t->s += d->s;
+ t->ns += d->ns;
+
+ t->s += t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+}
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+ /* The difference in ns can never exceed one second. */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ ASSERT(ds >= 0);
+
+ d->s = ds;
+ d->ns = dns;
+}
+
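+/* Computes floor(log2(v)) for v > 0: the shifts smear the highest set bit
+ * into all lower bits, v - (v >> 1) then isolates that bit, and the
+ * de Bruijn-style multiply-and-shift looks up its position in
+ * lcnt_log2_tab64__. */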
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
+
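+/* Buckets a wait time into the histogram by log2 of its (right-shifted)
+ * nanosecond part; waits of a second or more, or beyond
+ * ERTS_LCNT_HISTOGRAM_MAX_NS, land in the last slot. */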
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
+ int idx;
+
+ if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+
+ idx = r ? lcnt_log2__(r) : 0;
+ }
+
+ hist->ns[idx]++;
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
+ ethr_atomic_inc(&stats->attempts);
+
+ if(lock_in_conflict) {
+ ethr_atomic_inc(&stats->collisions);
+ }
+
+ if(time_waited) {
+ stats->times_waited++;
+
+ lcnt_time_add__(&stats->total_time_waited, time_waited);
+ lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
+ }
+}
+
+/* If we were installed while the lock was held, r/w_state will be 0 and we
+ * can't tell which unlock or unacquire operation was the last. To get around
+ * this we assume that all excess operations go *towards* zero rather than down
+ * to zero, eventually becoming consistent with the actual state once the lock
+ * is fully released.
+ *
+ * Conflicts might not be counted until the recorded state is fully consistent
+ * with the actual state, but there should be no other ill effects. */
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
+ ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
+
+ /* We cannot assume that state is >= -1 here; unlock and unacquire might
+ * bring it below -1 and race to increment it back. */
+
+ if(state < 0) {
+ ethr_atomic_inc_acqb(l_state);
+ }
+}
+
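+/* Finds or creates the statistics entry for the given (file, line) location,
+ * falling back to the reserved first slot when location information is
+ * missing or all location slots are in use. */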
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
+ unsigned int i;
+
+ ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);
+
+ for(i = 0; i < info->location_count; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
+
+ if(stats->file == file && stats->line == line) {
+ return stats;
+ }
+ }
+
+ if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];
+
+ stats->file = file;
+ stats->line = line;
+
+ info->location_count++;
+
+ return stats;
+ }
+
+ return &info->location_stats[0];
+}
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
+ lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);
+
+ ASSERT(eltd);
+
+ return eltd;
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
+ if(ERTS_LIKELY(!erts_lcnt_check_ref_installed(ref))) {
+ return 0;
+ }
+
+ ASSERT(lcnt_initialization_completed__);
+
+ (*handle) = lcnt_thr_progress_unmanaged_delay__();
+ (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);
+
+ if(*result) {
+ if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_retain_carrier__(*result);
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 1;
+ } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 0;
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
+ if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_release_carrier__(carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref) {
+ ethr_atomic_init(ref, (ethr_sint_t)NULL);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_init_ref(ref);
+
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC) {
+ lcnt_register_static_lock__(ref, name, id, flags);
+ }
+
+ if(erts_lcnt_check_enabled(flags)) {
+ erts_lcnt_install_new_lock_info(ref, name, id, flags);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref) {
+ return (!!*ethr_atomic_addr(ref));
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_x_idx(carrier, 0, file, line);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_unacquire_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_idx(carrier, 0, result);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_opt_idx(carrier, 0, result, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_sint_t w_state, r_state;
+
+ w_state = ethr_atomic_inc_read(&info->w_state) - 1;
+ r_state = ethr_atomic_read(&info->r_state);
+
+ /* We cannot acquire w_lock if either w or r are taken */
+ eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
+ } else {
+ ethr_sint_t w_state = ethr_atomic_read(&info->w_state);
+
+ /* We cannot acquire r_lock if w_lock is taken */
+ eltd->lock_in_conflict = (w_state > 0);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ if(eltd->lock_in_conflict) {
+ /* Only set the timer if nobody else has it. This should only happen
+ * when proc_locks acquires several locks "atomically." All other locks
+ * will block the thread when locked (w_state > 0). */
+ if(eltd->timer_set == 0) {
+ lcnt_time__(&eltd->timer);
+ }
+
+ eltd->timer_set++;
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+ erts_lcnt_lock_stats_t *stats;
+
+ ASSERT(index < carrier->entry_count);
+
+ /* If the lock was in conflict, update the time spent waiting. */
+ stats = lcnt_get_lock_stats__(info, file, line);
+ if(eltd->timer_set) {
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_time_t timer;
+
+ lcnt_time__(&timer);
+
+ lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);
+
+ eltd->timer_set--;
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ lcnt_dec_lock_state__(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ lcnt_dec_lock_state__(&info->r_state);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ lcnt_dec_lock_state__(&info->w_state);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(result != EBUSY) {
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_atomic_inc(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ lcnt_update_stats__(&info->location_stats[0], 0, NULL);
+ } else {
+ ethr_atomic_inc(&info->location_stats[0].attempts);
+ ethr_atomic_inc(&info->location_stats[0].collisions);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(is_immed(id));
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_TYPE);
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_CATEGORY);
+
+ info->flags = flags;
+ info->name = name;
+ info->id = id;
+}
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&carrier->ref_count);
+#endif
+}
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count == 0) {
+ lcnt_deallocate_carrier__(carrier);
+ }
+}
+
+#endif
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
diff --git a/erts/emulator/beam/erl_lock_flags.c b/erts/emulator/beam/erl_lock_flags.c
new file mode 100644
index 0000000000..e0a0e95c09
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.c
@@ -0,0 +1,59 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_lock_flags.h"
+
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags) {
+ switch(flags & ERTS_LOCK_FLAGS_MASK_TYPE) {
+ case ERTS_LOCK_FLAGS_TYPE_PROCLOCK:
+ return "proclock";
+ case ERTS_LOCK_FLAGS_TYPE_MUTEX:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_mutex";
+ }
+
+ return "mutex";
+ case ERTS_LOCK_FLAGS_TYPE_SPINLOCK:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_spinlock";
+ }
+
+ return "spinlock";
+ default:
+ return "garbage";
+ }
+}
+
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options) {
+ switch(options) {
+ case ERTS_LOCK_OPTIONS_RDWR:
+ return "rw";
+ case ERTS_LOCK_OPTIONS_READ:
+ return "r";
+ case ERTS_LOCK_OPTIONS_WRITE:
+ return "w";
+ default:
+ return "none";
+ }
+}
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
new file mode 100644
index 0000000000..d711f69456
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -0,0 +1,78 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_LOCK_FLAGS_H__
+#define ERTS_LOCK_FLAGS_H__
+
+#define ERTS_LOCK_OPTIONS_READ (1 << 1)
+#define ERTS_LOCK_OPTIONS_WRITE (1 << 2)
+
+#define ERTS_LOCK_OPTIONS_RDWR (ERTS_LOCK_OPTIONS_READ | ERTS_LOCK_OPTIONS_WRITE)
+
+/* Property/category are bitfields to simplify their use in masks. */
+#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+
+/* Type is a plain number. */
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+
+#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
+#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
+#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+
+/* "Static" guarantees that the lock will never be destroyed once created. */
+#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
+#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
+
+#define ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR (1 << 6)
+#define ERTS_LOCK_FLAGS_CATEGORY_PROCESS (1 << 7)
+#define ERTS_LOCK_FLAGS_CATEGORY_IO (1 << 8)
+#define ERTS_LOCK_FLAGS_CATEGORY_DB (1 << 9)
+#define ERTS_LOCK_FLAGS_CATEGORY_DEBUG (1 << 10)
+#define ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER (1 << 11)
+#define ERTS_LOCK_FLAGS_CATEGORY_GENERIC (1 << 12)
+#define ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION (1 << 13)
+
+#define ERTS_LOCK_TYPE_SPINLOCK \
+ (ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
+#define ERTS_LOCK_TYPE_RWSPINLOCK \
+ (ERTS_LOCK_TYPE_SPINLOCK | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_MUTEX \
+ (ERTS_LOCK_FLAGS_TYPE_MUTEX)
+#define ERTS_LOCK_TYPE_RWMUTEX \
+ (ERTS_LOCK_TYPE_MUTEX | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_PROCLOCK \
+ (ERTS_LOCK_FLAGS_CATEGORY_PROCESS | \
+ ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
+
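+/* Flags are combined with bitwise OR; a plausible value for a static
+ * allocator mutex (illustrative):
+ *
+ *     ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ *         ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR
+ */
+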
+/* -- -- */
+
+typedef unsigned short erts_lock_flags_t;
+typedef unsigned short erts_lock_options_t;
+
+/** @brief Gets the type name of the lock, honoring the RW flag if supplied. */
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags);
+
+/** @brief Gets a short-form description of the given lock options (rw/r/w). */
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options);
+
+#endif /* ERTS_LOCK_FLAGS_H__ */
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 979a0040b0..f0c54e05f7 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -91,7 +91,7 @@ static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_
static Export hashmap_merge_trap_export;
static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1);
static Uint hashmap_subtree_size(Eterm node);
-static Eterm hashmap_to_list(Process *p, Eterm map);
+static Eterm hashmap_to_list(Process *p, Eterm map, Sint n);
static Eterm hashmap_keys(Process *p, Eterm map);
static Eterm hashmap_values(Process *p, Eterm map);
static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value);
@@ -161,13 +161,58 @@ BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
BIF_RET(res);
} else if (is_hashmap(BIF_ARG_1)) {
- return hashmap_to_list(BIF_P, BIF_ARG_1);
+ return hashmap_to_list(BIF_P, BIF_ARG_1, -1);
}
BIF_P->fvalue = BIF_ARG_1;
BIF_ERROR(BIF_P, BADMAP);
}
+/* erts_internal:maps_to_list/2
+ *
+ * This function should be removed once iterators are in place.
+ * Never document it.
+ * Never encourage its usage.
+ *
+ * A negative value in ARG 2 means the entire map.
+ */
+
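+/* E.g. erts_internal:maps_to_list(Map, 2) returns at most two of Map's
+ * key/value pairs, while a negative count returns them all. */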
+BIF_RETTYPE erts_internal_maps_to_list_2(BIF_ALIST_2) {
+ Sint m;
+ if (term_to_Sint(BIF_ARG_2, &m)) {
+ if (is_flatmap(BIF_ARG_1)) {
+ Uint n;
+ Eterm* hp;
+ Eterm *ks,*vs, res, tup;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(BIF_P, (2 + 3) * n);
+ res = NIL;
+
+ while(n--) {
+ tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+
+ BIF_RET(res);
+ } else if (is_hashmap(BIF_ARG_1)) {
+ return hashmap_to_list(BIF_P, BIF_ARG_1, m);
+ }
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
/* maps:find/2
* return value if key *matches* a key in the map
*/
@@ -1188,16 +1233,17 @@ typedef struct HashmapMergeContext_ {
#endif
} HashmapMergeContext;
-static void hashmap_merge_ctx_destructor(Binary* ctx_bin)
+static int hashmap_merge_ctx_destructor(Binary* ctx_bin)
{
HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
PSTACK_DESTROY_SAVED(&ctx->pstack);
+ return 1;
}
BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) {
- Binary* ctx_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary* ctx_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
@@ -1453,9 +1499,9 @@ trap: /* Yield */
hashmap_merge_ctx_destructor);
ctx = ERTS_MAGIC_BIN_DATA(ctx_b);
sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
ASSERT(ctx->trap_bin == THE_NON_VALUE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), ctx_b);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), ctx_b);
erts_set_gc_state(p, 0);
}
@@ -1916,15 +1962,22 @@ BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
BIF_ERROR(BIF_P, BADMAP);
}
-static Eterm hashmap_to_list(Process *p, Eterm node) {
+static Eterm hashmap_to_list(Process *p, Eterm node, Sint m) {
DECLARE_WSTACK(stack);
Eterm *hp, *kv;
- Eterm res = NIL;
+ Eterm tup, res = NIL;
+ Uint n = hashmap_size(node);
- hp = HAlloc(p, hashmap_size(node) * (2 + 3));
+ if (m >= 0) {
+ n = m < n ? m : n;
+ }
+
+ hp = HAlloc(p, n * (2 + 3));
hashmap_iterator_init(&stack, node, 0);
- while ((kv=hashmap_iterator_next(&stack)) != NULL) {
- Eterm tup = TUPLE2(hp, CAR(kv), CDR(kv));
+ while (n--) {
+ kv = hashmap_iterator_next(&stack);
+ ASSERT(kv != NULL);
+ tup = TUPLE2(hp, CAR(kv), CDR(kv));
hp += 3;
res = CONS(hp, tup, res);
hp += 2;
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
index 61a841f7f0..c3ccf80b85 100644
--- a/erts/emulator/beam/erl_map.h
+++ b/erts/emulator/beam/erl_map.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,7 +57,7 @@ typedef struct flatmap_s {
#define hashmap_size(x) (((hashmap_head_t*) hashmap_val(x))->size)
-#define hashmap_make_hash(Key) make_internal_hash(Key)
+#define hashmap_make_hash(Key) make_internal_hash(Key, 0)
#define hashmap_restore_hash(Heap,Lvl,Key) \
(((Lvl) < 8) ? hashmap_make_hash(Key) >> (4*(Lvl)) : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), (Key))) >> (4*((Lvl) & 7)))
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index fc0aaed18a..1f270eb55f 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -247,6 +247,17 @@ BIF_RETTYPE math_pow_2(BIF_ALIST_2)
return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2);
}
+BIF_RETTYPE math_ceil_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, ceil, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_floor_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, floor, BIF_ARG_1);
+}
-
+
+BIF_RETTYPE math_fmod_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, fmod, BIF_ARG_1, BIF_ARG_2);
+}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index cdd771ef7d..c1af70592a 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -167,15 +167,17 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
erts_deref_node_entry(u.ext->node);
@@ -285,9 +287,11 @@ erts_queue_dist_message(Process *rcvr,
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (rcvr_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ ErtsProcLocks unlocks =
+ rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
+ if (unlocks) {
+ erts_smp_proc_unlock(rcvr, unlocks);
+ need_locks |= unlocks;
}
erts_smp_proc_lock(rcvr, need_locks);
}
@@ -406,7 +410,7 @@ queue_messages(Process* receiver,
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
goto exiting;
- need_locks = receiver_locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
if (need_locks) {
erts_smp_proc_unlock(receiver, need_locks);
}
@@ -696,6 +700,9 @@ erts_send_message(Process* sender,
erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
erts_shcopy_t info;
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
#ifdef USE_VM_PROBES
@@ -725,7 +732,7 @@ erts_send_message(Process* sender,
*/
if (have_seqtrace(stoken)) {
seq_trace_update_send(sender);
- seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
}
@@ -741,7 +748,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -760,7 +767,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
if (is_immed(stoken))
token = stoken;
@@ -796,7 +803,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -810,7 +817,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
}
#ifdef USE_VM_PROBES
@@ -998,8 +1005,8 @@ erts_move_messages_off_heap(Process *c_p)
hp = hfrag->mem;
if (is_not_immed(ERL_MESSAGE_TERM(mp)))
ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
- msg_sz, &hp,
- &hfrag->off_heap);
+ msg_sz, &hp,
+ &hfrag->off_heap);
if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
token_sz, &hp,
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 7244ae144f..3994800ba7 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -24,7 +24,7 @@
* key in the monitor case and the pid of the linked process as key in the
* link case. Lookups the order of the references is somewhat special. Local
* references are strictly smaller than remote references and are sorted
- * by inlined comparision functionality. Remote references are handled by the
+ * by inlined comparison functionality. Remote references are handled by the
* usual cmp function.
* Each Monitor is tagged with different tags depending on which end of the
* monitor it is.
@@ -45,6 +45,7 @@
#include "bif.h"
#include "big.h"
#include "erl_monitors.h"
+#include "erl_bif_unique.h"
#define STACK_NEED 50
#define MAX_MONITORS 0xFFFFFFFFUL
@@ -79,7 +80,24 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
b2 = boxed_val(ref2);
if (is_ref_thing_header(*b1)) {
if (is_ref_thing_header(*b2)) {
- return memcmp(b1+1,b2+1,ERTS_REF_WORDS*sizeof(Uint));
+ Uint32 *num1, *num2;
+ if (is_ordinary_ref_thing(b1)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b1;
+ num1 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b1;
+ num1 = mrtp->mb->refn;
+ }
+ if (is_ordinary_ref_thing(b2)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) b2;
+ num2 = rtp->num;
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) b2;
+ num2 = mrtp->mb->refn;
+ }
+ return erts_internal_ref_number_cmp(num1, num2);
}
return -1;
}
@@ -91,13 +109,14 @@ static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
#define CP_LINK_VAL(To, Hp, From) \
do { \
- if (IS_CONST(From)) \
+ if (is_immed(From)) \
(To) = (From); \
else { \
Uint i__; \
Uint len__; \
ASSERT((Hp)); \
- ASSERT(is_internal_ref((From)) || is_external((From))); \
+ ASSERT(is_internal_ordinary_ref((From)) \
+ || is_external((From))); \
(To) = make_boxed((Hp)); \
len__ = thing_arityval(*boxed_val((From))) + 1; \
for(i__ = 0; i__ < len__; i__++) \
@@ -109,15 +128,15 @@ do { \
} \
} while (0)
-static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
+static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm name)
{
Uint mon_size = ERTS_MONITOR_SIZE;
ErtsMonitor *n;
Eterm *hp;
mon_size += NC_HEAP_SIZE(ref);
- if (!IS_CONST(pid)) {
- mon_size += NC_HEAP_SIZE(pid);
+ if (type != MON_NIF_TARGET && is_not_immed(entity)) {
+ mon_size += NC_HEAP_SIZE(entity);
}
if (mon_size <= ERTS_MONITOR_SH_SIZE) {
@@ -135,8 +154,11 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
n->type = (Uint16) type;
n->balance = 0; /* Always the same initial value */
n->name = name; /* atom() or [] */
- CP_LINK_VAL(n->ref, hp, ref); /*XXX Unneccesary check, never immediate*/
- CP_LINK_VAL(n->pid, hp, pid);
+ CP_LINK_VAL(n->ref, hp, ref); /*XXX Unnecessary check, never immediate*/
+ if (type == MON_NIF_TARGET)
+ n->u.resource = (ErtsResource*)entity;
+ else
+ CP_LINK_VAL(n->u.pid, hp, (Eterm)entity);
return n;
}
@@ -147,7 +169,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
ErtsLink *n;
Eterm *hp;
- if (!IS_CONST(pid)) {
+ if (is_not_immed(pid)) {
lnk_size += NC_HEAP_SIZE(pid);
}
@@ -206,16 +228,16 @@ void erts_destroy_monitor(ErtsMonitor *mon)
Uint mon_size = ERTS_MONITOR_SIZE;
ErlNode *node;
- ASSERT(!IS_CONST(mon->ref));
+ ASSERT(is_not_immed(mon->ref));
mon_size += NC_HEAP_SIZE(mon->ref);
if (is_external(mon->ref)) {
node = external_thing_ptr(mon->ref)->node;
erts_deref_node_entry(node);
}
- if (!IS_CONST(mon->pid)) {
- mon_size += NC_HEAP_SIZE(mon->pid);
- if (is_external(mon->pid)) {
- node = external_thing_ptr(mon->pid)->node;
+ if (mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid)) {
+ mon_size += NC_HEAP_SIZE(mon->u.pid);
+ if (is_external(mon->u.pid)) {
+ node = external_thing_ptr(mon->u.pid)->node;
erts_deref_node_entry(node);
}
}
@@ -234,7 +256,7 @@ void erts_destroy_link(ErtsLink *lnk)
ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);
- if (!IS_CONST(lnk->pid)) {
+ if (is_not_immed(lnk->pid)) {
lnk_size += NC_HEAP_SIZE(lnk->pid);
if (is_external(lnk->pid)) {
node = external_thing_ptr(lnk->pid)->node;
@@ -329,7 +351,7 @@ static void insertion_rotation(int dstack[], int dpos,
}
}
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name)
{
void *tstack[STACK_NEED];
@@ -339,12 +361,14 @@ void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
int state = 0;
ErtsMonitor **this = root;
Sint c;
+
+ ASSERT(is_internal_ordinary_ref(ref) || is_external_ref(ref));
dstack[0] = DIR_END;
for (;;) {
if (!*this) { /* Found our place */
state = 1;
- *this = create_monitor(type,ref,pid,name);
+ *this = create_monitor(type,ref,entity,name);
break;
} else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
/* go left */
@@ -914,8 +938,12 @@ static void erts_dump_monitors(ErtsMonitor *root, int indent)
if (root == NULL)
return;
erts_dump_monitors(root->right,indent+2);
- erts_printf("%*s[%b16d:%b16u:%T:%T:%T]\n", indent, "", root->balance,
- root->type, root->ref, root->pid, root->name);
+ erts_printf("%*s[%b16d:%b16u:%T:%T", indent, "", root->balance,
+ root->type, root->ref, root->name);
+ if (root->type == MON_NIF_TARGET)
+ erts_printf(":%p]\n", root->u.resource);
+ else
+ erts_printf(":%T]\n", root->u.pid);
erts_dump_monitors(root->left,indent+2);
}
@@ -1030,7 +1058,7 @@ void erts_one_link_size(ErtsLink *lnk, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_LINK_SIZE*sizeof(Uint);
- if(!IS_CONST(lnk->pid))
+ if(is_not_immed(lnk->pid))
*pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
erts_doforall_links(ERTS_LINK_ROOT(lnk),&erts_one_link_size,vpu);
@@ -1040,8 +1068,8 @@ void erts_one_mon_size(ErtsMonitor *mon, void *vpu)
{
Uint *pu = vpu;
*pu += ERTS_MONITOR_SIZE*sizeof(Uint);
- if(!IS_CONST(mon->pid))
- *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
- if(!IS_CONST(mon->ref))
+ if(mon->type != MON_NIF_TARGET && is_not_immed(mon->u.pid))
+ *pu += NC_HEAP_SIZE(mon->u.pid)*sizeof(Uint);
+ if(is_not_immed(mon->ref))
*pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
}
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index 9e2beedea3..1cacecd7e9 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -82,8 +82,9 @@
/* Type tags for monitors */
#define MON_ORIGIN 1
-#define MON_TARGET 3
-#define MON_TIME_OFFSET 7
+#define MON_TARGET 2
+#define MON_NIF_TARGET 3
+#define MON_TIME_OFFSET 4
/* Type tags for links */
#define LINK_PID 1 /* ...Or port */
@@ -91,7 +92,7 @@
/* Size of a monitor without heap, in words (fixalloc) */
#define ERTS_MONITOR_SIZE ((sizeof(ErtsMonitor) - sizeof(Uint))/sizeof(Uint))
-#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + REF_THING_SIZE)
+#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + ERTS_REF_THING_SIZE)
#define ERTS_LINK_SIZE ((sizeof(ErtsLink) - sizeof(Uint))/sizeof(Uint))
#define ERTS_LINK_SH_SIZE ERTS_LINK_SIZE /* Size of fix-alloced links */
@@ -105,11 +106,15 @@ typedef struct erts_monitor_or_link {
typedef struct erts_monitor {
struct erts_monitor *left, *right;
Sint16 balance;
- Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_TIME_OFFSET */
+ Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_NIF_TARGET | MON_TIME_OFFSET */
Eterm ref;
- Eterm pid; /* In case of distributed named monitor, this is the
- nodename atom in MON_ORIGIN process, otherwise a pid or
- , in case of a MON_TARGET, a port */
+ union {
+ Eterm pid; /* In case of distributed named monitor, this is the
+ * nodename atom in MON_ORIGIN process, otherwise a pid or,
+ * in case of a MON_TARGET, a port
+ */
+ struct ErtsResource_* resource; /* MON_NIF_TARGET */
+ }u;
Eterm name; /* When monitoring a named process: atom() else [] */
Uint heap[1]; /* Larger in reality */
} ErtsMonitor;
@@ -144,7 +149,7 @@ Uint erts_tot_link_lh_size(void);
/* Prototypes */
void erts_destroy_monitor(ErtsMonitor *mon);
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, UWord entity,
Eterm name);
ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref);
ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref);
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 7ddf49937f..6c477be615 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -66,9 +66,9 @@ static Uint msacc_unmanaged_count = 0;
#endif
#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
-#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
#else
-#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + ERTS_REF_THING_SIZE)
#endif
/* we have to split initiation as atoms are not inited in early init */
@@ -76,7 +76,8 @@ void erts_msacc_early_init(void) {
#ifndef ERTS_MSACC_ALWAYS_ON
erts_msacc_enabled = 0;
#endif
- erts_rwmtx_init(&msacc_mutex,"msacc_list_mutex");
+ erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef USE_THREADS
erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
#else
@@ -109,7 +110,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
#ifdef USE_THREADS
erts_rwmtx_rwlock(&msacc_mutex);
if (!managed) {
- erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex");
+ erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
msacc->next = msacc_unmanaged;
msacc_unmanaged = msacc;
msacc_unmanaged_count++;
@@ -137,8 +139,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
void erts_msacc_set_bif_state(ErtsMsAcc *__erts_msacc_cache, Eterm mod, void *fn) {
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
- if (fn == &FuncAddr) { \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
+ if (fn == &BifFuncAddr) { \
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATIC_STATE_COUNT + Num); \
} else
#include "erl_bif_list.h"
@@ -212,7 +214,7 @@ typedef struct {
int action;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsMSAccReq;
@@ -245,7 +247,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
- hp = erts_produce_heap(&factory, REF_THING_SIZE + 3 /* tuple */, 0);
+ hp = erts_produce_heap(&factory, ERTS_REF_THING_SIZE + 3 /* tuple */, 0);
ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
msg = erts_msacc_gather_stats(msacc, &factory);
msg = TUPLE2(hp, ref_copy, msg);
@@ -255,7 +257,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
erts_factory_close(&factory);
} else {
ErlOffHeap *ohp = NULL;
- msgp = erts_alloc_message_heap(rp, &rp_locks, REF_THING_SIZE, &hp, &ohp);
+ msgp = erts_alloc_message_heap(rp, &rp_locks, ERTS_REF_THING_SIZE, &hp, &ohp);
msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
}
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index 4c8e1c8e22..d64ef8c8b9 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -22,7 +22,7 @@
#define ERL_MSACC_H__
/* Can be enabled/disabled via configure */
-#if ERTS_ENABLE_MSACC == 2
+#if defined(ERTS_ENABLE_MSACC) && ERTS_ENABLE_MSACC == 2
#define ERTS_MSACC_EXTENDED_STATES 1
#endif
@@ -66,7 +66,7 @@
#define ERTS_MSACC_STATE_COUNT 7
-#if ERTS_MSACC_STATE_STRINGS && ERTS_ENABLE_MSACC
+#if defined(ERTS_MSACC_STATE_STRINGS) && defined(ERTS_ENABLE_MSACC)
static char *erts_msacc_states[] = {
"aux",
"check_io",
@@ -104,7 +104,7 @@ static char *erts_msacc_states[] = {
#define ERTS_MSACC_STATE_COUNT ERTS_MSACC_STATIC_STATE_COUNT
#endif
-#if ERTS_MSACC_STATE_STRINGS
+#ifdef ERTS_MSACC_STATE_STRINGS
static char *erts_msacc_states[] = {
"alloc",
"aux",
@@ -122,7 +122,7 @@ static char *erts_msacc_states[] = {
"sleep",
"timers"
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
,"bif_" #Mod "_" #Func "_" #Arity
#include "erl_bif_list.h"
#undef BIF_LIST
@@ -157,7 +157,7 @@ struct erl_msacc_t_ {
};
-#if ERTS_ENABLE_MSACC
+#ifdef ERTS_ENABLE_MSACC
#ifdef USE_THREADS
extern erts_tsd_key_t erts_msacc_key;
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index e275867928..f2a660f085 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -572,7 +572,7 @@ void erts_mtrace_pre_init(void)
void erts_mtrace_init(char *receiver, char *nodename)
{
- char hostname[MAXHOSTNAMELEN];
+ char hostname[MAXHOSTNAMELEN + 1];
char pid[21]; /* enough for a 64 bit number */
socket_desc = ERTS_SOCK_INVALID_SOCKET;
@@ -583,8 +583,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
byte ip_addr[4];
Uint16 port;
- erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
@@ -613,9 +615,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
}
tracep = trace_buffer;
endp = trace_buffer + TRACE_BUF_SZ;
- if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
+ /* gethostname requires a buffer of max(hostname) + 1 bytes, NUL included */
+ if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN + 1) != 0)
hostname[0] = '\0';
- hostname[MAXHOSTNAMELEN-1] = '\0';
+ hostname[MAXHOSTNAMELEN] = '\0';
sys_get_pid(pid, sizeof(pid));
write_trace_header(nodename ? nodename : "", pid, hostname);
erts_mtrace_update_heap_size();
diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
new file mode 100644
index 0000000000..1bebc1eda4
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -0,0 +1,180 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+
+#include "global.h"
+#include "erl_process.h"
+#include "bif.h"
+#include "erl_nfunc_sched.h"
+#include "erl_trace.h"
+
+NifExport *
+erts_new_proc_nif_export(Process *c_p, int argc)
+{
+ size_t size;
+ int i;
+ NifExport *nep, *old_nep;
+
+ size = sizeof(NifExport) + (argc-1)*sizeof(Eterm);
+ nep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, size);
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++)
+ nep->exp.addressv[i] = &nep->exp.beam[0];
+
+ nep->argc = -1; /* unused marker */
+ nep->argv_size = argc;
+ nep->trace = NULL;
+ old_nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(c_p, nep);
+ if (old_nep) {
+ ASSERT(!nep->trace);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, old_nep);
+ }
+ return nep;
+}
+
+void
+erts_destroy_nif_export(Process *p)
+{
+ NifExport *nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nep) {
+ if (nep->m)
+ erts_nif_export_cleanup_nif_mod(nep);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, nep);
+ }
+}
+
+void
+erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ NifExportTrace *netp;
+ ASSERT(nep && nep->argc >= 0);
+ ASSERT(!nep->trace);
+ netp = erts_alloc(ERTS_ALC_T_NIF_EXP_TRACE,
+ sizeof(NifExportTrace));
+ netp->applying = applying;
+ netp->ep = ep;
+ netp->cp = cp;
+ netp->flags = flags;
+ netp->flags_meta = flags_meta;
+ netp->I = I;
+ netp->meta_tracer = NIL;
+ erts_tracer_update(&netp->meta_tracer, meta_tracer);
+ nep->trace = netp;
+}
+
+void
+erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep)
+{
+ NifExportTrace *netp = nep->trace;
+ nep->trace = NULL;
+ erts_bif_trace_epilogue(c_p, result, netp->applying, netp->ep,
+ netp->cp, netp->flags, netp->flags_meta,
+ netp->I, netp->meta_tracer);
+ erts_tracer_update(&netp->meta_tracer, NIL);
+ erts_free(ERTS_ALC_T_NIF_EXP_TRACE, netp);
+}
+
+NifExport *
+erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv)
+{
+ Process *used_proc;
+ ErtsSchedulerData *esdp;
+ Eterm* reg;
+ NifExport* nep;
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ if (dirty_shadow_proc) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = dirty_shadow_proc;
+ }
+ else {
+ esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = c_p;
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+
+ reg = esdp->x_reg_array;
+
+ if (mfa)
+ nep = erts_get_proc_nif_export(c_p, (int) mfa->arity);
+ else {
+ /* If no mfa, this is not the first schedule... */
+ nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(nep && nep->argc >= 0);
+ }
+
+ if (nep->argc < 0) {
+ /*
+ * First schedule; save things that might
+ * need to be restored...
+ */
+ for (i = 0; i < (int) mfa->arity; i++)
+ nep->argv[i] = reg[i];
+ nep->pc = pc;
+ nep->cp = c_p->cp;
+ nep->mfa = mfa;
+ nep->current = c_p->current;
+ ASSERT(argc >= 0);
+ nep->argc = (int) mfa->arity;
+ nep->m = NULL;
+
+ ASSERT(!erts_check_nif_export_in_area(c_p,
+ (char *) nep,
+ (sizeof(NifExport)
+ + (sizeof(Eterm)
+ *(nep->argc-1)))));
+ }
+ /* Copy new arguments into register array if necessary... */
+ if (reg != argv) {
+ for (i = 0; i < argc; i++)
+ reg[i] = argv[i];
+ }
+ ASSERT(is_atom(mod) && is_atom(func));
+ nep->exp.info.mfa.module = mod;
+ nep->exp.info.mfa.function = func;
+ nep->exp.info.mfa.arity = (Uint) argc;
+ nep->exp.beam[0] = (BeamInstr) instr; /* call_nif || apply_bif */
+ nep->exp.beam[1] = (BeamInstr) dfunc;
+ nep->func = ifunc;
+ used_proc->arity = argc;
+ used_proc->freason = TRAP;
+ used_proc->i = (BeamInstr*) nep->exp.addressv[0];
+ return nep;
+}
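An editorial aside, not part of the patch: the trap protocol implemented by erts_nif_export_schedule() above is what the documented enif_schedule_nif() API rides on. A minimal sketch of a NIF that reschedules itself; count_down is a hypothetical function, only the enif_* calls are real API:

#include "erl_nif.h"

static ERL_NIF_TERM count_down(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    int n;
    ERL_NIF_TERM next;
    if (argc != 1 || !enif_get_int(env, argv[0], &n))
        return enif_make_badarg(env);
    if (n <= 0)
        return enif_make_atom(env, "done");
    /* Do one slice of work, then trap back into the emulator; each
     * reschedule re-enters the saved NifExport (argc already >= 0). */
    next = enif_make_int(env, n - 1);
    return enif_schedule_nif(env, "count_down", 0, count_down, 1, &next);
}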
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
new file mode 100644
index 0000000000..55a3a6dbf6
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -0,0 +1,332 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NFUNC_SCHED_H__
+#define ERL_NFUNC_SCHED_H__
+
+#include "erl_process.h"
+#include "bif.h"
+#include "error.h"
+
+typedef struct {
+ int applying;
+ Export* ep;
+ BeamInstr *cp;
+ Uint32 flags;
+ Uint32 flags_meta;
+ BeamInstr* I;
+ ErtsTracer meta_tracer;
+} NifExportTrace;
+
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. A number of values are stored for error handling purposes
+ * only.
+ *
+ * 'argc' is >= 0 when NifExport is in use, and < 0 when not.
+ */
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m; /* NIF module, or NULL if BIF */
+ void *func; /* Indirect NIF or BIF to execute (may be unused) */
+ ErtsCodeMFA *current; /* Current as set when originally called */
+ NifExportTrace *trace;
+ /* --- The following is only used on error --- */
+ BeamInstr *pc; /* Program counter */
+ BeamInstr *cp; /* Continuation pointer */
+ ErtsCodeMFA *mfa; /* MFA of original call */
+ int argc; /* Number of arguments in original call */
+ int argv_size; /* Allocated size of argv */
+ Eterm argv[1]; /* Saved arguments from the original call */
+} NifExport;
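Editorial note, not part of the patch: argv[1] at the end of NifExport is the pre-C99 "struct hack" for a trailing variable-length array; erts_new_proc_nif_export() above sizes it with sizeof(NifExport) + (argc-1)*sizeof(Eterm). The same arithmetic in isolation, with hypothetical types:

#include <stdlib.h>

struct varargs { int argc; long argv[1]; /* really argc elements */ };

static struct varargs *alloc_varargs(int argc)
{
    /* argv[0] is already counted inside sizeof(struct varargs),
     * hence the (argc - 1). */
    return malloc(sizeof(struct varargs) + (argc - 1) * sizeof(long));
}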
+
+NifExport *erts_new_proc_nif_export(Process *c_p, int argc);
+void erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+void erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep);
+void erts_destroy_nif_export(Process *p);
+NifExport *erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv);
+void erts_nif_export_cleanup_nif_mod(NifExport *ep); /* erl_nif.c */
+ERTS_GLB_INLINE NifExport *erts_get_proc_nif_export(Process *c_p, int extra);
+ERTS_GLB_INLINE int erts_setup_nif_export_rootset(Process* proc, Eterm** objv,
+ Uint* nobj);
+ERTS_GLB_INLINE int erts_check_nif_export_in_area(Process *p,
+ char *start, Uint size);
+ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep,
+ Eterm result);
+ERTS_GLB_INLINE void erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa);
+ERTS_GLB_INLINE int erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+ERTS_GLB_INLINE Process *erts_proc_shadow2real(Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE NifExport *
+erts_get_proc_nif_export(Process *c_p, int argc)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (!nep || (nep->argc < 0 && nep->argv_size < argc))
+ return erts_new_proc_nif_export(c_p, argc);
+ return nep;
+}
+
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. Any exception term saved in the NifExport is also made
+ * part of the GC rootset here; it always resides in rootset[0].
+ */
+ERTS_GLB_INLINE int
+erts_setup_nif_export_rootset(Process* proc, Eterm** objv, Uint* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+
+ if (!ep || ep->argc <= 0)
+ return 0;
+
+ *objv = ep->argv;
+ *nobj = ep->argc;
+ return 1;
+}
+
+/*
+ * Check if nif export points into code area...
+ */
+ERTS_GLB_INLINE int
+erts_check_nif_export_in_area(Process *p, char *start, Uint size)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
+ if (!nep || nep->argc < 0)
+ return 0;
+ if (ErtsInArea(nep->pc, start, size))
+ return 1;
+ if (ErtsInArea(nep->cp, start, size))
+ return 1;
+ if (ErtsInArea(nep->mfa, start, size))
+ return 1;
+ if (ErtsInArea(nep->current, start, size))
+ return 1;
+ return 0;
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
+{
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ERTS_SMP_LC_ASSERT(!(c_p->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ c_p->current = ep->current;
+ ep->argc = -1; /* Unused nif-export marker... */
+ if (ep->trace)
+ erts_nif_export_restore_trace(c_p, result, ep);
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ int ix;
+
+ ASSERT(nep);
+ *pc = nep->pc;
+ c_p->cp = nep->cp;
+ *nif_mfa = nep->mfa;
+ for (ix = 0; ix < nep->argc; ix++)
+ reg[ix] = nep->argv[ix];
+ erts_nif_export_restore(c_p, nep, THE_NON_VALUE);
+}
+
+ERTS_GLB_INLINE int
+erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ if (is_non_value(result) && c_p->freason == TRAP) {
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (nep && nep->argc >= 0) {
+ erts_nif_export_save_trace(c_p, nep, applying, ep,
+ cp, flags, flags_meta,
+ I, meta_tracer);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ERTS_GLB_INLINE Process *
+erts_proc_shadow2real(Process *c_p)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ Process *real_c_p = c_p->next;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(real_c_p->common.id == c_p->common.id);
+ return real_c_p;
+ }
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+#endif
+ return c_p;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_NFUNC_SCHED_H__ */
+
+#if defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__)
+#define ERTS_NFUNC_SCHED_INTERNALS__
+
+#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I) \
+ (ASSERT(BeamOp(op_apply_bif) == (BeamInstr *) (*(I)) \
+ || BeamOp(op_call_nif) == (BeamInstr *) (*(I))), \
+ ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))
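Editorial note, not part of the patch: ERTS_I_BEAM_OP_TO_NIF_EXPORT is the usual "container_of" idiom -- recovering the enclosing struct from a pointer to one of its members by subtracting the member's offset. The generic form:

#include <stddef.h>

#define CONTAINER_OF(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))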
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+#include "erl_message.h"
+#include <stddef.h>
+
+ERTS_GLB_INLINE void erts_flush_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE void erts_cache_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE Process *erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp,
+ Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_flush_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p->stop == sproc->stop);
+ ASSERT(c_p->hend == sproc->hend);
+ ASSERT(c_p->heap == sproc->heap);
+ ASSERT(c_p->abandoned_heap == sproc->abandoned_heap);
+ ASSERT(c_p->heap_sz == sproc->heap_sz);
+ ASSERT(c_p->high_water == sproc->high_water);
+ ASSERT(c_p->old_heap == sproc->old_heap);
+ ASSERT(c_p->old_htop == sproc->old_htop);
+ ASSERT(c_p->old_hend == sproc->old_hend);
+
+ ASSERT(c_p->htop <= sproc->htop && sproc->htop <= c_p->stop);
+
+ c_p->htop = sproc->htop;
+
+ if (!c_p->mbuf)
+ c_p->mbuf = sproc->mbuf;
+ else if (sproc->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = sproc->mbuf; bp->next; bp = bp->next)
+ ASSERT(!bp->off_heap.first);
+ bp->next = c_p->mbuf;
+ c_p->mbuf = sproc->mbuf;
+ }
+
+ c_p->mbuf_sz += sproc->mbuf_sz;
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = sproc->off_heap.first;
+ else if (sproc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = sproc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = sproc->off_heap.first;
+ }
+
+ c_p->off_heap.overhead += sproc->off_heap.overhead;
+}
+
+ERTS_GLB_INLINE void
+erts_cache_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+ ASSERT(c_p);
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ sproc->htop = c_p->htop;
+ sproc->stop = c_p->stop;
+ sproc->hend = c_p->hend;
+ sproc->heap = c_p->heap;
+ sproc->abandoned_heap = c_p->abandoned_heap;
+ sproc->heap_sz = c_p->heap_sz;
+ sproc->high_water = c_p->high_water;
+ sproc->old_hend = c_p->old_hend;
+ sproc->old_htop = c_p->old_htop;
+ sproc->old_heap = c_p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+}
+
+ERTS_GLB_INLINE Process *
+erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
+{
+ Process *sproc;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = c_p;
+ sproc->common.id = c_p->common.id;
+
+ erts_cache_dirty_shadow_proc(sproc);
+
+ return sproc;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */
+
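Editorial model, not part of the patch and with made-up types: the cache/flush pair above is a copy-out/merge-back protocol -- the dirty thread builds terms on a private shadow of the process heap state, and the results are merged back once the main lock is held again. Stripped to its skeleton:

#include <stddef.h>

struct frag { struct frag *next; };
struct proc { char *htop; struct frag *mbuf; size_t mbuf_sz; };

static void cache(struct proc *shadow, const struct proc *real)
{
    shadow->htop = real->htop;  /* keep building on the current heap top */
    shadow->mbuf = NULL;        /* new fragments accumulate privately */
    shadow->mbuf_sz = 0;
}

static void flush(struct proc *real, struct proc *shadow)
{
    real->htop = shadow->htop;  /* publish the new heap top */
    if (shadow->mbuf) {         /* prepend private fragments, as above */
        struct frag *f = shadow->mbuf;
        while (f->next)
            f = f->next;
        f->next = real->mbuf;
        real->mbuf = shadow->mbuf;
    }
    real->mbuf_sz += shadow->mbuf_sz;
}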
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 479a2f4809..9caeed3273 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -55,6 +55,10 @@
#include "dtrace-wrapper.h"
#include "erl_process.h"
#include "erl_bif_unique.h"
+#include "erl_utils.h"
+#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -71,14 +75,19 @@
struct erl_module_nif {
void* priv_data;
void* handle; /* "dlopen" */
- struct enif_entry_t* entry;
+ struct enif_entry_t entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
Module* mod; /* Can be NULL if orphan with dtor-resources left */
+
+ ErlNifFunc _funcs_copy_[1]; /* only used for old libs */
};
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
#ifdef DEBUG
# define READONLY_CHECK
+# define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
#endif
#ifdef READONLY_CHECK
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
@@ -87,6 +96,14 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
+static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
+# include "erl_gc.h"
+#else
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
+#endif
+
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* off_heap);
#endif
@@ -194,6 +211,9 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(p->common.id != ERTS_INVALID_PID);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env->dbg_disable_assert_in_env = 0;
+#endif
#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
@@ -217,38 +237,6 @@ static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
- ErlNifEnv* env, Process* p,
- struct erl_module_nif* mod_nif)
-{
- Process *sproc;
-#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
-
- ASSERT(!p->scheduler_data);
- ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
- && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
- ASSERT(esdp);
-#endif
-
- erts_pre_nif(env, p, mod_nif, NULL);
-
- sproc = esdp->dirty_shadow_process;
- ASSERT(sproc);
- ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
- == (ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_PROXY));
-
- sproc->next = p;
- sproc->common.id = p->common.id;
- env->proc = sproc;
- full_cache_env(env);
-}
-#endif
-
/* Temporary object header, auto-deallocated when NIF returns
* or when independent environment is cleared.
*/
@@ -276,115 +264,154 @@ void erts_post_nif(ErlNifEnv* env)
env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_post_dirty_nif(ErlNifEnv* env)
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save 'current' and registers if this is the
+ * first time the call is scheduled.
+ */
+
+static ERTS_INLINE ERL_NIF_TERM
+schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
- Process *c_p;
- ASSERT(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(env->proc->next);
- erts_unblock_fpe(env->fpe_was_unmasked);
- full_flush_env(env);
- free_tmp_objs(env);
- c_p = env->proc->next;
- env->exiting = ERTS_PROC_IS_EXITING(c_p);
- ERTS_VBUMP_ALL_REDS(c_p);
+ NifExport *ep;
+ Process *c_p, *dirty_shadow_proc;
+
+ execution_state(env, &c_p, NULL);
+ if (c_p == env->proc)
+ dirty_shadow_proc = NULL;
+ else
+ dirty_shadow_proc = env->proc;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ c_p->current,
+ c_p->cp,
+ (BeamInstr) em_call_nif,
+ direct_fp, indirect_fp,
+ mod, func_name,
+ argc, (const Eterm *) argv);
+ if (!ep->m) {
+ /* First time this call is scheduled... */
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep->m = env->mod_nif;
+ }
+ return (ERL_NIF_TERM) THE_NON_VALUE;
}
-#endif
-static void full_flush_env(ErlNifEnv* env)
-{
#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *c_p = env->proc->next;
-
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- if (!env->heap_frag) {
- ASSERT(env->hp_end == HEAP_LIMIT(c_p));
- ASSERT(env->hp >= HEAP_TOP(c_p));
- ASSERT(env->hp <= HEAP_LIMIT(c_p));
- HEAP_TOP(c_p) = env->hp;
+
+static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+int
+erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ int exiting;
+ ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
+ NativeFunPtr dirty_nif = (NativeFunPtr) I[1];
+ ErlNifEnv env;
+ ERL_NIF_TERM result;
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state);
+
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ ASSERT(!c_p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+ nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
+ erts_pre_nif(&env, c_p, nep->m, NULL);
+
+ env.proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ env.proc->freason = EXC_NULL;
+ env.proc->fvalue = NIL;
+ env.proc->ftrace = NIL;
+ env.proc->i = c_p->i;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+
+ erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(env.proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (env.exception_thrown) {
+ schedule_exception:
+ schedule(&env, dirty_nif_exception, NULL,
+ am_erts_internal, am_dirty_nif_exception,
+ 1, &env.proc->fvalue);
+ }
+ else if (is_value(result)) {
+ schedule(&env, dirty_nif_finalizer, NULL,
+ am_erts_internal, am_dirty_nif_finalizer,
+ 1, &result);
+ }
+ else if (env.proc->freason != TRAP) { /* user returned garbage... */
+ ERTS_DECL_AM(badreturn);
+ (void) enif_raise_exception(&env, AM_badreturn);
+ goto schedule_exception;
}
else {
- Uint usz;
- ASSERT(env->hp_end != HEAP_LIMIT(c_p));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
-
- HEAP_TOP(c_p) = HEAP_TOP(env->proc);
- usz = env->hp - env->heap_frag->mem;
- env->proc->mbuf_sz += usz - env->heap_frag->used_size;
- env->heap_frag->used_size = usz;
-
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
-
- if (c_p->mbuf) {
- ErlHeapFragment *bp;
- for (bp = env->proc->mbuf; bp->next; bp = bp->next)
- ;
- bp->next = c_p->mbuf;
- }
+ /* Rescheduled by dirty NIF call... */
+ ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ }
+ c_p->i = env.proc->i;
+ c_p->arity = env.proc->arity;
+ }
- c_p->mbuf = env->proc->mbuf;
- c_p->mbuf_sz += env->proc->mbuf_sz;
+#ifdef DEBUG
+ if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ nep->func = NULL;
+#endif
- }
+ erts_unblock_fpe(env.fpe_was_unmasked);
+ full_flush_env(&env);
+ free_tmp_objs(&env);
- if (!c_p->off_heap.first)
- c_p->off_heap.first = env->proc->off_heap.first;
- else if (env->proc->off_heap.first) {
- struct erl_off_heap_header *ohhp;
- for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
- ;
- ohhp->next = c_p->off_heap.first;
- c_p->off_heap.first = env->proc->off_heap.first;
- }
- c_p->off_heap.overhead += env->proc->off_heap.overhead;
+ return exiting;
+}
- return;
- }
#endif
+static void full_flush_env(ErlNifEnv* env)
+{
flush_env(env);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ /* Dirty nif call using shadow process struct */
+ erts_flush_dirty_shadow_proc(env->proc);
+#endif
}
static void full_cache_env(ErlNifEnv* env)
{
#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *sproc = env->proc;
- Process *c_p = sproc->next;
- ASSERT(c_p);
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- sproc->htop = c_p->htop;
- sproc->stop = c_p->stop;
- sproc->hend = c_p->hend;
- sproc->heap = c_p->heap;
- sproc->abandoned_heap = c_p->abandoned_heap;
- sproc->heap_sz = c_p->heap_sz;
- sproc->high_water = c_p->high_water;
- sproc->old_hend = c_p->old_hend;
- sproc->old_htop = c_p->old_htop;
- sproc->old_heap = c_p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
-
- env->hp_end = HEAP_LIMIT(c_p);
- env->hp = HEAP_TOP(c_p);
- env->heap_frag = NULL;
- return;
- }
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ erts_cache_dirty_shadow_proc(env->proc);
#endif
-
cache_env(env);
}
@@ -472,11 +499,15 @@ setup_nif_env(struct enif_msg_environment_t* msg_env,
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
msg_env->phony_proc.common.id = ERTS_INVALID_PID;
+ msg_env->env.tracee = tracee;
+
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
- msg_env->env.tracee = tracee;
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env->env.dbg_disable_assert_in_env = 0;
+#endif
}
ErlNifEnv* enif_alloc_env(void)
@@ -557,6 +588,10 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
ErlTraceMessageQueue *msgq, **last_msgq;
int reds = 0;
+ /* Only one thread at a time is allowed to flush trace messages,
+ so we require the main lock to be held when doing the flush */
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = c_p->trace_msg_q;
@@ -687,9 +722,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
MBUF(&menv->phony_proc) = NULL;
}
} else {
- Uint sz = size_object(msg);
+ erts_literal_area_t litarea;
ErlOffHeap *ohp;
Eterm *hp;
+ Uint sz;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
+ sz = size_object_litopt(msg, &litarea);
if (c_p && !env->tracee) {
full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -709,7 +747,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ohp = &bp->off_heap;
}
}
- msg = copy_struct(msg, sz, &hp, ohp);
+ msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
ERL_MESSAGE_TERM(mp) = msg;
@@ -845,6 +883,65 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
return res;
}
+/*
+ * env must be the caller's environment in a scheduler or NULL in a
+ * non-scheduler thread.
+ * name must be an atom - anything else will just waste time.
+ */
+static Eterm call_whereis(ErlNifEnv *env, Eterm name)
+{
+ Process *c_p;
+ Eterm res;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+ ASSERT((c_p && scheduler) || (!c_p && !scheduler));
+
+ if (scheduler < 0) {
+ /* dirty scheduler */
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ c_p = NULL; /* as we don't have main lock */
+ }
+
+
+ if (c_p) {
+ /* main lock may be released below and c_p->htop updated by others */
+ flush_env(env);
+ }
+ res = erts_whereis_name_to_id(c_p, name);
+ if (c_p)
+ cache_env(env);
+
+ return res;
+}
+
+int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_pid(env, res, pid);
+}
+
+int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_port(env, res, port);
+}
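Editorial usage sketch for the new API above; "my_server" is a hypothetical registered name, and passing NULL as msg_env to enif_send makes it take the message term from the caller's env:

static int send_to_registered(ErlNifEnv *env, ERL_NIF_TERM msg)
{
    ErlNifPid pid;
    if (!enif_whereis_pid(env, enif_make_atom(env, "my_server"), &pid))
        return 0;  /* name is not an atom, or nothing is registered */
    return enif_send(env, &pid, NULL, msg);
}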
+
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
Uint sz;
@@ -965,16 +1062,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
byte* raw_ptr;
}u;
- if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
- ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
- if (sb->is_writable) {
- ProcBin* pb = (ProcBin*) binary_val(sb->orig);
- ASSERT(pb->thing_word == HEADER_PROC_BIN);
- if (pb->flags) {
- erts_emasculate_writable_binary(pb);
- sb->is_writable = 0;
- }
- }
+ if (is_binary(bin_term)) {
+ ProcBin *pb = (ProcBin*) binary_val(bin_term);
+ if (pb->thing_word == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) pb;
+ pb = (ProcBin*) binary_val(sb->orig);
+ }
+ if (pb->thing_word == HEADER_PROC_BIN && pb->flags)
+ erts_emasculate_writable_binary(pb);
}
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
@@ -991,7 +1086,7 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
- ADD_READONLY_CHECK(env, bin->data, bin->size);
+ ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
@@ -1043,7 +1138,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
if (refbin == NULL) {
return 0; /* The NIF must take action */
}
- erts_refc_init(&refbin->refc, 1);
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
@@ -1082,9 +1176,7 @@ void enif_release_binary(ErlNifBinary* bin)
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
ASSERT(bin->bin_term == THE_NON_VALUE);
- if (erts_refc_dectest(&refbin->refc, 0) == 0) {
- erts_bin_free(refbin);
- }
+ erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
@@ -1182,6 +1274,22 @@ int enif_compare(Eterm lhs, Eterm rhs)
return result;
}
+ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt)
+{
+ switch (type) {
+ case ERL_NIF_INTERNAL_HASH:
+ return make_internal_hash(term, (Uint32) salt);
+ case ERL_NIF_PHASH2:
+ /* It appears that make_hash2 doesn't always react to seasoning
+ * as well as it should. Therefore, let's make it ignore the salt
+ * value and declare salted uses of phash2 as unsupported.
+ */
+ return make_hash2(term) & ((1 << 27) - 1);
+ default:
+ return 0;
+ }
+}
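Editorial usage sketch for enif_hash above: ERL_NIF_INTERNAL_HASH is salted but not stable across VM instances, while ERL_NIF_PHASH2 matches erlang:phash2/1 (range 0..2^27-1) and, per the comment in the code, ignores the salt. A hypothetical bucket function:

static ErlNifUInt64 bucket_of(ERL_NIF_TERM key, ErlNifUInt64 salt,
                              ErlNifUInt64 nbuckets)
{
    return enif_hash(ERL_NIF_INTERNAL_HASH, key, salt) % nbuckets;
}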
+
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
{
Eterm* ptr;
@@ -1248,7 +1356,7 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
bin_term = make_binary(pb);
- if (erts_refc_read(&bptr->refc, 1) == 1) {
+ if (erts_refc_read(&bptr->intern.refc, 1) == 1) {
/* Total ownership transfer */
bin->ref_bin = NULL;
bin->bin_term = bin_term;
@@ -1298,22 +1406,15 @@ Eterm enif_make_badarg(ErlNifEnv* env)
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
- Process *c_p;
-
- execution_state(env, &c_p, NULL);
-
env->exception_thrown = 1;
- c_p->fvalue = reason;
- BIF_ERROR(c_p, EXC_ERROR);
+ env->proc->fvalue = reason;
+ BIF_ERROR(env->proc, EXC_ERROR);
}
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
- if (env->exception_thrown && reason != NULL) {
- Process *c_p;
- execution_state(env, &c_p, NULL);
- *reason = c_p->fvalue;
- }
+ if (env->exception_thrown && reason != NULL)
+ *reason = env->proc->fvalue;
return env->exception_thrown;
}
@@ -1581,6 +1682,9 @@ int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
va_list ap;
@@ -1588,7 +1692,9 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
*hp++ = make_arityval(cnt);
va_start(ap,cnt);
while (cnt--) {
- *hp++ = va_arg(ap,Eterm);
+ Eterm elem = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, elem, ++nr, "tuple");
+ *hp++ = elem;
}
va_end(ap);
return ret;
@@ -1596,12 +1702,16 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
const Eterm* src = arr;
*hp++ = make_arityval(cnt);
while (cnt--) {
+ ASSERT_IN_ENV(env, *src, ++nr, "tuple");
*hp++ = *src++;
}
return ret;
@@ -1612,6 +1722,8 @@ ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
Eterm* hp = alloc_heap(env,2);
Eterm ret = make_list(hp);
+ ASSERT_IN_ENV(env, car, 0, "head of list cell");
+ ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
CAR(hp) = car;
CDR(hp) = cdr;
return ret;
@@ -1623,6 +1735,9 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
return NIL;
}
else {
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
@@ -1630,8 +1745,10 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
va_start(ap,cnt);
while (cnt--) {
+ Eterm term = va_arg(ap,Eterm);
*last = make_list(hp);
- *hp = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1643,14 +1760,19 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
const Eterm* src = arr;
while (cnt--) {
+ Eterm term = *src++;
*last = make_list(hp);
- *hp = *src++;
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1674,7 +1796,7 @@ ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
{
- Eterm* hp = alloc_heap(env, REF_THING_SIZE);
+ Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
return erts_make_ref_in_buffer(hp);
}
@@ -1683,13 +1805,9 @@ void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
driver_system_info(sip, si_size);
}
-int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
- Eterm *listptr, ret = NIL, *hp;
-
- if (is_nil(term)) {
- *list = term;
- return 1;
- }
+int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
+{
+ Eterm *listptr, ret, *hp;
ret = NIL;
@@ -1899,38 +2017,9 @@ int enif_snprintf(char *buffer, size_t size, const char* format, ...)
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
-
-struct enif_resource_type_t
-{
- struct enif_resource_type_t* next; /* list of all resource types */
- struct enif_resource_type_t* prev;
- struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
- ErlNifResourceDtor* dtor; /* user destructor function */
- erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
- +1 for active erl_module_nif */
- Eterm module;
- Eterm name;
-};
-
/* dummy node in circular list */
struct enif_resource_type_t resource_type_list;
-typedef struct enif_resource_t
-{
- struct enif_resource_type_t* type;
-#ifdef DEBUG
- erts_refc_t nif_refc;
-# ifdef ARCH_32
- byte align__[4];
-# endif
-#endif
-
- char data[1];
-}ErlNifResource;
-
-#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
-#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-
static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
@@ -1955,10 +2044,10 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(lib->handle != NULL);
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
- if (lib->entry != NULL && lib->entry->unload != NULL) {
+ if (lib->entry.unload != NULL) {
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, lib, NULL);
- lib->entry->unload(&msg_env.env, lib->priv_data);
+ lib->entry.unload(&msg_env.env, lib->priv_data);
post_nif_noproc(&msg_env);
}
if (!erts_is_static_nif(lib->handle))
@@ -1992,24 +2081,23 @@ struct opened_resource_type
ErlNifResourceFlags op;
ErlNifResourceType* type;
- ErlNifResourceDtor* new_dtor;
+ ErlNifResourceTypeInit new_callbacks;
};
static struct opened_resource_type* opened_rt_list = NULL;
-ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env,
- const char* module_str,
- const char* name_str,
- ErlNifResourceDtor* dtor,
- ErlNifResourceFlags flags,
- ErlNifResourceFlags* tried)
+static
+ErlNifResourceType* open_resource_type(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried,
+ size_t sizeof_init)
{
ErlNifResourceType* type = NULL;
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(module_str == NULL); /* for now... */
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -2043,7 +2131,9 @@ enif_open_resource_type(ErlNifEnv* env,
sizeof(struct opened_resource_type));
ort->op = op;
ort->type = type;
- ort->new_dtor = dtor;
+ sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
+ ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit));
+ sys_memcpy(&ort->new_callbacks, init, sizeof_init);
ort->next = opened_rt_list;
opened_rt_list = ort;
}
@@ -2053,6 +2143,31 @@ enif_open_resource_type(ErlNifEnv* env,
return type;
}
+ErlNifResourceType*
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ ErlNifResourceTypeInit init = {dtor, NULL};
+ ASSERT(module_str == NULL); /* for now... */
+ return open_resource_type(env, name_str, &init, flags, tried,
+ sizeof(init));
+}
+
+ErlNifResourceType*
+enif_open_resource_type_x(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ return open_resource_type(env, name_str, init, flags, tried,
+ env->mod_nif->entry.sizeof_ErlNifResourceTypeInit);
+}
+
static void commit_opened_resource_types(struct erl_module_nif* lib)
{
while (opened_rt_list) {
@@ -2071,7 +2186,9 @@ static void commit_opened_resource_types(struct erl_module_nif* lib)
}
type->owner = lib;
- type->dtor = ort->new_dtor;
+ type->dtor = ort->new_callbacks.dtor;
+ type->stop = ort->new_callbacks.stop;
+ type->down = ort->new_callbacks.down;
if (type->dtor != NULL) {
erts_refc_inc(&lib->rt_dtor_cnt, 1);
@@ -2097,12 +2214,145 @@ static void rollback_opened_resource_types(void)
}
}
+struct destroy_monitor_ctx
+{
+ ErtsResource* resource;
+ int exiting_procs;
+ int scheduler;
+};
-static void nif_resource_dtor(Binary* bin)
+static void destroy_one_monitor(ErtsMonitor* mon, void* context)
{
- ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ struct destroy_monitor_ctx* ctx = (struct destroy_monitor_ctx*) context;
+ Process* rp;
+ ErtsMonitor *rmon = NULL;
+ int is_exiting;
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+ ASSERT(is_internal_ref(mon->ref));
+
+ if (ctx->scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ }
+ else {
+#ifdef ERTS_SMP
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+#else
+ ASSERT(!"nif monitor destruction in non-scheduler thread");
+ rp = NULL;
+#endif
+ }
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ if (rp) {
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+#ifdef ERTS_SMP
+ if (ctx->scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ }
+ if (is_exiting) {
+ ctx->resource->monitors->pending_failed_fire++;
+ }
+
+ /* ToDo: Delay destruction after monitor_locks */
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == ctx->resource);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+}
+
+static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
+{
+ struct destroy_monitor_ctx ctx;
+
+ execution_state(NULL, NULL, &ctx.scheduler);
+
+ ctx.resource = resource;
+ erts_sweep_monitors(monitors, &destroy_one_monitor, &ctx);
+}
+
+
+#ifdef ERTS_SMP
+# define NIF_RESOURCE_DTOR &nif_resource_dtor
+#else
+# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
+
+/*
+ * NO-SMP: Always run resource destructor on scheduler thread
+ * as we may have to remove process monitors.
+ */
+static int nif_resource_dtor(Binary*);
+
+static void nosmp_nif_resource_dtor_scheduled(void* vbin)
+{
+ erts_bin_free((Binary*)vbin);
+}
+
+static int nosmp_nif_resource_dtor_prologue(Binary* bin)
+{
+ if (is_scheduler()) {
+ return nif_resource_dtor(bin);
+ }
+ else {
+ erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin);
+ return 0; /* do not free */
+ }
+}
+
+#endif /* !ERTS_SMP */
+
+static int nif_resource_dtor(Binary* bin)
+{
+ ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+
+ if (resource->monitors) {
+ ErtsResourceMonitors* rm = resource->monitors;
+
+ ASSERT(type->down);
+ erts_smp_mtx_lock(&rm->lock);
+ ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
+ if (rm->root) {
+ ASSERT(!rm->is_dying);
+ destroy_all_monitors(rm->root, resource);
+ rm->root = NULL;
+ }
+ if (rm->pending_failed_fire) {
+ /*
+ * Resource death struggle prolonged to serve exiting process(es).
+ * Destructor will be called again when last exiting process
+ * tries to fire its MON_NIF_TARGET monitor (and fails).
+ *
+ * This resource is doomed. It has no "real" references and
+ * should not get called upon to do anything except the
+ * final destructor call.
+ *
+ * We keep refc at 0 and use a separate counter for exiting
+ * processes to avoid resource getting revived by "dec_term".
+ */
+ ASSERT(!rm->is_dying);
+ rm->is_dying = 1;
+ erts_smp_mtx_unlock(&rm->lock);
+ return 0;
+ }
+ erts_smp_mtx_unlock(&rm->lock);
+ erts_smp_mtx_destroy(&rm->lock);
+ }
if (type->dtor != NULL) {
struct enif_msg_environment_t msg_env;
@@ -2117,85 +2367,203 @@ static void nif_resource_dtor(Binary* bin)
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
+ return 1;
}
-void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
+void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
+ int is_direct_call)
{
- Binary* bin = erts_create_magic_binary_x(SIZEOF_ErlNifResource(size),
- &nif_resource_dtor,
- 1); /* unaligned */
- ErlNifResource* resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ struct enif_msg_environment_t msg_env;
+ ASSERT(resource->type->stop);
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->stop(&msg_env.env, resource->data, e, is_direct_call);
+ post_nif_noproc(&msg_env);
+}
+
+void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
+{
+ ErtsMonitor* rmon;
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ struct enif_msg_environment_t msg_env;
+ ErlNifPid nif_pid;
+ ErlNifMonitor nif_monitor;
+ ErtsResourceMonitors* rmp = resource->monitors;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_smp_mtx_lock(&rmp->lock);
+ rmon = erts_remove_monitor(&rmp->root, ref);
+ if (!rmon) {
+ int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying;
+ ASSERT(rmp->pending_failed_fire >= 0);
+ erts_smp_mtx_unlock(&rmp->lock);
+
+ if (free_me) {
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
+ erts_bin_free(&bin->binary);
+ }
+ return;
+ }
+ ASSERT(!rmp->is_dying);
+ if (erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0) == 0) {
+ /*
+ * Racing resource destruction.
+ * To avoid a more complex refc-dance with destructing thread
+ * we avoid calling 'down' and just silently remove the monitor.
+ * This can happen even for non-SMP as destructor calls may be scheduled.
+ */
+ erts_smp_mtx_unlock(&rmp->lock);
+ }
+ else {
+ erts_smp_mtx_unlock(&rmp->lock);
+
+ ASSERT(rmon->u.pid == pid);
+ erts_ref_to_driver_monitor(ref, &nif_monitor);
+ nif_pid.pid = pid;
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
+ post_nif_noproc(&msg_env);
+
+ erts_bin_release(&bin->binary);
+ }
+ erts_destroy_monitor(rmon);
+}
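Editorial note, not part of the patch: erts_refc_inc_unless(&refc, 0, 0) above is the classic "take a reference unless the object is already dying" idiom. In portable C11 atomics it looks roughly like this:

#include <stdatomic.h>

static int refc_inc_unless_zero(atomic_long *refc)
{
    long old = atomic_load(refc);
    while (old != 0) {
        /* on failure, old is reloaded with the current value */
        if (atomic_compare_exchange_weak(refc, &old, old + 1))
            return 1;  /* reference taken */
    }
    return 0;          /* already at zero: leave it to the destructor */
}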
+
+void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
+{
+ size_t magic_sz = offsetof(ErtsResource,data);
+ Binary* bin;
+ ErtsResource* resource;
+ size_t monitors_offs;
+
+ if (type->down) {
+ /* Put ErtsResourceMonitors after user data and properly aligned */
+ monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
+ & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
+ magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
+ }
+ else {
+ ERTS_UNDEF(monitors_offs, 0);
+ magic_sz += data_sz;
+ }
+ bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
+ ERTS_ALC_T_BINARY,
+ 1); /* unaligned */
+ resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
- erts_refc_inc(&bin->refc, 1);
+ erts_refc_inc(&bin->intern.refc, 1);
#ifdef DEBUG
erts_refc_init(&resource->nif_refc, 1);
#endif
erts_refc_inc(&resource->type->refc, 2);
+ if (type->down) {
+ resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
+ erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ resource->monitors->root = NULL;
+ resource->monitors->pending_failed_fire = 0;
+ resource->monitors->is_dying = 0;
+ resource->monitors->user_data_sz = data_sz;
+ }
+ else {
+ resource->monitors = NULL;
+ }
return resource->data;
}
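Editorial note, not part of the patch: the monitors_offs computation above is the standard round-up-to-alignment trick, valid when the alignment is a power of two. In isolation:

#include <stddef.h>

static size_t align_up(size_t n, size_t align /* power of two */)
{
    return (n + align - 1) & ~(align - 1);
}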
void enif_release_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
- if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
- erts_bin_free(&bin->binary);
- }
+ erts_bin_release(&bin->binary);
}
void enif_keep_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
- erts_refc_inc(&bin->binary.refc, 2);
+ erts_refc_inc(&bin->binary.intern.refc, 2);
+}
+
+Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
+{
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(hpp, oh, &bin->binary);
}
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
+ Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
+ ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
}
ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
const void* data, size_t size)
{
- Eterm bin = enif_make_resource(env, obj);
- ProcBin* pb = (ProcBin*) binary_val(bin);
- pb->bytes = (byte*) data;
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ErlOffHeap *ohp = &MSO(env->proc);
+ Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
+ ProcBin* pb = (ProcBin *) hp;
+
+ pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- return bin;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
+ pb->val = &bin->binary;
+ pb->bytes = (byte*) data;
+ pb->flags = 0;
+
+ OH_OVERHEAD(ohp, size / sizeof(Eterm));
+ erts_refc_inc(&bin->binary.intern.refc, 1);
+
+ return make_binary(hp);
}
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
- ProcBin* pb;
Binary* mbin;
- ErlNifResource* resource;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- return 0;
+ ErtsResource* resource;
+ if (is_internal_magic_ref(term))
+ mbin = erts_magic_ref2bin(term);
+ else {
+ Eterm *hp;
+ if (!is_binary(term))
+ return 0;
+ hp = binary_val(term);
+ if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
+ return 0;
+ /*
+ if (((ProcBin *) hp)->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }
+ */
+ mbin = ((ProcBin *) hp)->val;
+ if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
+ return 0;
}
- pb = (ProcBin*) binary_val(term);
- /*if (pb->size != 0) {
- return 0; / * Or should we allow "resource binaries" as handles? * /
- }*/
- mbin = pb->val;
- resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
- if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
+ resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
|| resource->type != type) {
return 0;
}
@@ -2205,9 +2573,14 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
size_t enif_sizeof_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
- return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ if (resource->monitors) {
+ return resource->monitors->user_data_sz;
+ }
+ else {
+ Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
+ return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
+ }
}
@@ -2264,188 +2637,28 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(proc) == 0;
}
-/*
- * NIF exports need a few more items than the Export struct provides,
- * including the erl_module_nif* and a NIF function pointer, so the
- * NifExport below adds those. The Export member must be first in the
- * struct. The saved_current, exception_thrown, saved_argc, rootset_extra, and
- * rootset members are used to track the MFA, any pending exception, and
- * arguments of the top NIF in case a chain of one or more
- * enif_schedule_nif() calls results in an exception, since in that case
- * the original MFA and registers have to be restored before returning to
- * Erlang to ensure stacktrace information associated with the exception is
- * correct.
- */
-typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-
-typedef struct {
- Export exp;
- struct erl_module_nif* m;
- NativeFunPtr fp;
- BeamInstr *saved_current;
- int exception_thrown;
- int saved_argc;
- int rootset_extra;
- Eterm rootset[1];
-} NifExport;
-
-/*
- * If a process has saved arguments, they need to be part of the GC
- * rootset. The function below is called from setup_rootset() in
- * erl_gc.c. This function is declared in erl_process.h. Any exception term
- * saved in the NifExport is also made part of the GC rootset here; it
- * always resides in rootset[0].
- */
-int
-erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
-{
- NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- int gc = ep && (ep->saved_argc > 0 || ep->rootset[0] != NIL);
-
- if (gc) {
- *objv = ep->rootset;
- *nobj = 1 + ep->saved_argc;
- }
- return gc;
-}
-
-int
-erts_check_nif_export_in_area(Process *p, char *start, Uint size)
-{
- NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
- if (!nep || !nep->saved_current)
- return 0;
- if (ErtsInArea(nep->saved_current, start, size))
- return 1;
- return 0;
-}
-
-/*
- * Allocate a NifExport and set it in proc specific data
- */
-static NifExport*
-allocate_nif_sched_data(Process* proc, int argc)
-{
- NifExport* ep;
- size_t total;
- int i;
-
- total = sizeof(NifExport) + argc*sizeof(Eterm);
- ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
- sys_memset((void*) ep, 0, total);
- ep->rootset_extra = argc;
- ep->rootset[0] = NIL;
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->exp.addressv[i] = &ep->exp.code[3];
- }
- ep->exp.code[3] = (BeamInstr) em_call_nif;
- (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ep);
- return ep;
-}
-
static ERTS_INLINE void
-destroy_nif_export(NifExport *nif_export)
+nif_export_cleanup_nif_mod(NifExport *ep)
{
- erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
+ if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL)
+ close_lib(ep->m);
+ ep->m = NULL;
}
void
-erts_destroy_nif_export(void *nif_export)
+erts_nif_export_cleanup_nif_mod(NifExport *ep)
{
- destroy_nif_export((NifExport *) nif_export);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Initialize a NifExport struct. Create it if needed and store it in the
- * proc. The direct_fp function is what will be invoked by op_call_nif, and
- * the indirect_fp function, if not NULL, is what the direct_fp function
- * will call. If the allocated NifExport isn't enough to hold all of argv,
- * allocate a larger one. Save MFA and registers only if the need_save
- * parameter is true.
- */
-static ERL_NIF_TERM
-init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
- int need_save, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE void
+nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
{
- Process* proc;
- Eterm* reg;
- NifExport* ep;
- int i, scheduler;
- int orig_argc;
-
- execution_state(env, &proc, &scheduler);
-
- ASSERT(scheduler);
-
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- reg = erts_proc_sched_data(proc)->x_reg_array;
-
- ASSERT(!need_save || proc->current);
- orig_argc = need_save ? (int) proc->current[2] : 0;
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- if (!ep)
- ep = allocate_nif_sched_data(proc, orig_argc);
- else if (need_save && ep->rootset_extra < orig_argc) {
- NifExport* new_ep = allocate_nif_sched_data(proc, orig_argc);
- destroy_nif_export(ep);
- ep = new_ep;
- }
- if (env->exception_thrown) {
- ep->exception_thrown = 1;
- ep->rootset[0] = proc->fvalue;
- } else {
- ep->exception_thrown = 0;
- ep->rootset[0] = NIL;
- }
- if (scheduler > 0)
- ERTS_VBUMP_ALL_REDS(proc);
- if (need_save) {
- ep->saved_current = proc->current;
- ep->saved_argc = orig_argc;
- for (i = 0; i < orig_argc; i++)
- ep->rootset[i+1] = reg[i];
- }
- for (i = 0; i < argc; i++)
- reg[i] = (Eterm) argv[i];
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[0] = (BeamInstr) proc->current[0];
- ep->exp.code[1] = (BeamInstr) proc->current[1];
- ep->exp.code[2] = argc;
- ep->exp.code[4] = (BeamInstr) direct_fp;
- ep->m = env->mod_nif;
- ep->fp = indirect_fp;
- proc->freason = TRAP;
- proc->arity = argc;
- return THE_NON_VALUE;
+ erts_nif_export_restore(c_p, ep, res);
+ ASSERT(ep->m);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Restore saved MFA and registers. Registers are restored only when the
- * exception flag is true.
- */
-static void
-restore_nif_mfa(Process* proc, NifExport* ep, int exception)
-{
- int i;
-
- ERTS_SMP_LC_ASSERT(!(proc->static_flags
- & ERTS_STC_FLG_SHADOW_PROC));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- ASSERT(ep->saved_current != &ep->exp.code[0]);
- proc->current = ep->saved_current;
- ep->saved_current = NULL;
- if (exception) {
- Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
- for (i = 0; i < ep->saved_argc; i++)
- reg[i] = ep->rootset[i+1];
- }
- ep->saved_argc = 0;
-}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -2454,7 +2667,7 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception)
* switch the process off a dirty scheduler thread and back onto a regular
* scheduler thread, and then return the result from the dirty NIF. It also
- * restores the original NIF MFA when necessary based on the value of
- * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
- * means restore, NULL means do not restore.
+ * restores the original NIF MFA and cleans up the NifExport via
+ * nif_export_restore().
*/
static ERL_NIF_TERM
@@ -2469,9 +2682,7 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(!ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 0);
+ nif_export_restore(proc, ep, argv[0]);
return argv[0];
}
@@ -2481,146 +2692,100 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
+ ERL_NIF_TERM ret;
Process* proc;
NifExport* ep;
+ Eterm exception;
execution_state(env, &proc, NULL);
+ ASSERT(argc == 1);
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 1);
- return enif_raise_exception(env, ep->rootset[0]);
+ exception = argv[0]; /* argv overwritten by restore below... */
+ nif_export_cleanup_nif_mod(ep);
+ ret = enif_raise_exception(env, exception);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ proc->freason |= EXF_RESTORE_NIF;
+ return ret;
}
/*
- * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
- * then check the result and schedule the appropriate finalizer function
- * where needed. Also restore the original NIF MFA when appropriate.
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute.
+ * The dirty scheduler thread type (CPU or I/O) is indicated by the
+ * flags parameter.
*/
-static ERL_NIF_TERM
-execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
+ Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NativeFunPtr fp;
- NifExport* ep;
- ERL_NIF_TERM result;
-
- execution_state(env, &proc, NULL);
-
- fp = (NativeFunPtr) proc->current[6];
-
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
- /*
- * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep && fp);
-
- ep->fp = NULL;
- erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC));
+ ASSERT(is_atom(func_name));
+ ASSERT(fp);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
- result = (*fp)(env, argc, argv);
+ execution_state(env, &proc, NULL);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
+ ? ERTS_PSFLG_DIRTY_CPU_PROC
+ : ERTS_PSFLG_DIRTY_IO_PROC));
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- /*
- * If no more NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF calling
- * context. Reuse fp essentially as a boolean for this, passing it to
- * init_nif_sched_data below. Both dirty_nif_exception and
- * dirty_nif_finalizer then check ep->fp to decide whether or not to
- * restore the original calling context.
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- if (ep->fp)
- fp = NULL;
- if (is_non_value(result) || env->exception_thrown) {
- if (proc->freason != TRAP) {
- return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
- } else {
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, 1);
- return THE_NON_VALUE;
- }
- }
- else
- return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+ return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv);
}
-/*
- * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
- * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
- * type (CPU or I/O) is indicated in flags parameter.
- */
static ERTS_INLINE ERL_NIF_TERM
-schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
+ int argc, const ERL_NIF_TERM argv[])
{
- ERL_NIF_TERM result;
- erts_aint32_t act, dirty_flag;
- Process* proc;
+ Process *proc;
+ NifExport *ep;
+ Eterm mod, func;
NativeFunPtr fp;
- NifExport* ep;
- int need_save, scheduler;
- execution_state(env, &proc, &scheduler);
- if (scheduler <= 0) {
- ASSERT(scheduler < 0);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
- }
+ execution_state(env, &proc, NULL);
- fp = (NativeFunPtr) proc->current[6];
+ /*
+ * Called in order to schedule statically determined
+ * dirty NIF calls...
+ *
+ * Note that 'current' does not point into a NifExport
+ * structure here; it points into a structure with a
+ * similar layout that is located in the module code.
+ */
- ASSERT(fp);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ mod = proc->current->module;
+ func = proc->current->function;
+ fp = (NativeFunPtr) ep->func;
- ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
+ ASSERT(is_atom(mod) && is_atom(func));
+ ASSERT(fp);
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+ (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ dirty_psflg);
- act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
- if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
- else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
- /* clear other flag... */
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
- }
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
- result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
- if (scheduler <= 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- return result;
+ return schedule(env, fp, NULL, mod, func, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
#endif /* ERTS_DIRTY_SCHEDULERS */
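For reference, these static dirty scheduling stubs are what a NIF library opts into by setting the flags field in its ErlNifFunc table (available since API 2.7). A minimal sketch, with the module and function names hypothetical:

    #include <erl_nif.h>

    static ERL_NIF_TERM crunch(ErlNifEnv* env, int argc,
                               const ERL_NIF_TERM argv[])
    {
        /* long-running work that must not block a normal scheduler */
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc nif_funcs[] = {
        /* A non-zero flags field makes load_nif_2 patch the call site
         * with static_schedule_dirty_cpu_nif instead of calling crunch
         * directly from op_call_nif. */
        {"crunch", 1, crunch, ERL_NIF_DIRTY_JOB_CPU_BOUND}
    };

    ERL_NIF_INIT(my_module, nif_funcs, NULL, NULL, NULL, NULL)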
@@ -2639,22 +2804,42 @@ execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ERL_NIF_TERM result;
execution_state(env, &proc, NULL);
- fp = (NativeFunPtr) proc->current[6];
- ASSERT(!env->exception_thrown);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
ASSERT(ep);
- ep->fp = NULL;
+ ASSERT(!env->exception_thrown);
+
+ fp = (NativeFunPtr) ep->func;
+
+#ifdef DEBUG
+ ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
result = (*fp)(env, argc, argv);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- /*
- * If no NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF MFA.
- */
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, env->exception_thrown);
+
+ ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc));
+
+ if (is_value(result) || proc->freason != TRAP) {
+ /* Done (not rescheduled)... */
+ ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ if (!env->exception_thrown)
+ nif_export_restore(proc, ep, result);
+ else {
+ nif_export_cleanup_nif_mod(ep);
+ /*
+ * Restore orig info for error and clear nif
+ * export in handle_error()
+ */
+ proc->freason |= EXF_RESTORE_NIF;
+ }
+ }
+
+#ifdef DEBUG
+ if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ ep->func = NULL;
+#endif
+
return result;
}
@@ -2664,9 +2849,8 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NifExport* ep;
ERL_NIF_TERM fun_name_atom, result;
- int need_save, scheduler;
+ int scheduler;
if (argc > MAX_ARG)
return enif_make_badarg(env);
@@ -2681,35 +2865,19 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
}
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
-
- if (flags) {
+ if (flags == 0)
+ result = schedule(env, execute_nif, fp, proc->current->module,
+ fun_name_atom, argc, argv);
+ else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
#ifdef ERTS_DIRTY_SCHEDULERS
- NativeFunPtr sched_fun;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
- sched_fun = schedule_dirty_io_nif;
- else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- sched_fun = schedule_dirty_cpu_nif;
- else {
- result = enif_make_badarg(env);
- goto done;
- }
- result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
+ result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
#else
- result = enif_make_badarg(env);
+ result = enif_raise_exception(env, am_notsup);
#endif
- goto done;
}
else
- result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- ep->exp.code[1] = (BeamInstr) fun_name_atom;
+ result = enif_make_badarg(env);
-done:
if (scheduler < 0)
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
@@ -2724,14 +2892,19 @@ enif_thread_type(void)
if (!esdp)
return ERL_NIF_THR_UNDEFINED;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
return ERL_NIF_THR_NORMAL_SCHEDULER;
-
- if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp))
+#ifdef ERTS_DIRTY_SCHEDULERS
+ case ERTS_SCHED_DIRTY_CPU:
return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
-
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(esdp));
- return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
+ case ERTS_SCHED_DIRTY_IO:
+ return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
+#endif
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return -1;
+ }
}
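A sketch of how library code might branch on the rewritten enif_thread_type(), e.g. in a helper that can be entered from both normal and dirty schedulers (the helper name is hypothetical):

    static int may_block(void)
    {
        switch (enif_thread_type()) {
        case ERL_NIF_THR_NORMAL_SCHEDULER:
            return 0;   /* must return quickly; reschedule long work */
        case ERL_NIF_THR_DIRTY_CPU_SCHEDULER:
        case ERL_NIF_THR_DIRTY_IO_SCHEDULER:
            return 1;   /* blocking for a while is acceptable here */
        default:
            return 1;   /* negative values: non-scheduler threads */
        }
    }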
/* Maps */
@@ -2781,6 +2954,10 @@ int enif_make_map_put(ErlNifEnv* env,
if (!is_map(map_in)) {
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
*map_out = erts_maps_put(env->proc, key, value, map_in);
cache_env(env);
@@ -2815,6 +2992,10 @@ int enif_make_map_update(ErlNifEnv* env,
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
res = erts_maps_update(env->proc, key, value, map_in, map_out);
cache_env(env);
@@ -3016,22 +3197,172 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
return 0;
}
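The new ASSERT_IN_ENV checks require that the map, key, and value terms were all built in the same ErlNifEnv, which the usual calling pattern already guarantees; a minimal sketch (function name hypothetical):

    /* Build #{count => 1}; every term comes from the same env, so the
     * assertions added to enif_make_map_put above are satisfied. */
    static ERL_NIF_TERM bump(ErlNifEnv* env, int argc,
                             const ERL_NIF_TERM argv[])
    {
        ERL_NIF_TERM map = enif_make_new_map(env);
        ERL_NIF_TERM key = enif_make_atom(env, "count");
        ERL_NIF_TERM val = enif_make_int(env, 1);

        if (!enif_make_map_put(env, map, key, val, &map))
            return enif_make_badarg(env); /* map_in was not a map */
        return map;
    }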
+int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
+ ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+ Process *rp;
+ Eterm tmp[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int retval;
+
+ ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
+ == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+ ASSERT(!rsrc->monitors == !rsrc->type->down);
+
+ if (!rsrc->monitors) {
+ ASSERT(!rsrc->type->down);
+ return -1;
+ }
+ ASSERT(rsrc->type->down);
+
+ execution_state(env, NULL, &scheduler);
+
+#ifdef ERTS_SMP
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup_raw(target_pid->pid);
+ else
+ rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
+#else
+ if (scheduler <= 0) {
+ erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from "
+ "non-scheduler thread on non-SMP VM");
+ }
+ rp = erts_proc_lookup(target_pid->pid);
+#endif
+
+ if (!rp)
+ return 1;
+
+ ref = erts_make_ref_in_buffer(tmp);
+
+ erts_smp_mtx_lock(&rsrc->monitors->lock);
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) {
+ retval = 1;
+ }
+ else {
+ erts_add_monitor(&rsrc->monitors->root, MON_ORIGIN, ref, rp->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL);
+ retval = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+
+#ifdef ERTS_SMP
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ if (monitor)
+ erts_ref_to_driver_monitor(ref,monitor);
+
+ return retval;
+}
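A sketch of calling the new enif_monitor_process() from a NIF, assuming a hypothetical resource struct MyRes and a resource type my_res_type that was opened with a down callback:

    typedef struct { ErlNifMonitor mon; } MyRes;  /* hypothetical */
    extern ErlNifResourceType* my_res_type;       /* has a 'down' cb */

    static ERL_NIF_TERM watch_caller(ErlNifEnv* env, int argc,
                                     const ERL_NIF_TERM argv[])
    {
        MyRes* res = enif_alloc_resource(my_res_type, sizeof(MyRes));
        ErlNifPid self;
        ERL_NIF_TERM term;

        enif_self(env, &self);
        /* 0: monitor established; >0: process not alive, so 'down'
         * will never fire; <0: resource type has no 'down' callback. */
        if (enif_monitor_process(env, res, &self, &res->mon) != 0) {
            enif_release_resource(res);
            return enif_make_badarg(env);
        }
        term = enif_make_resource(env, res);
        enif_release_resource(res);
        return term;
    }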
+
+int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
+{
+ int scheduler;
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
+ Process *rp;
+ ErtsMonitor *mon;
+ ErtsMonitor *rmon = NULL;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ int is_exiting;
+
+ ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
+ ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+
+ execution_state(env, NULL, &scheduler);
+
+ ref = erts_driver_monitor_to_ref(ref_heap, monitor);
+
+ erts_smp_mtx_lock(&rsrc->monitors->lock);
+ mon = erts_remove_monitor(&rsrc->monitors->root, ref);
+
+ if (mon == NULL) {
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ return 1;
+ }
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->u.pid));
+
+#ifdef ERTS_SMP
+ if (scheduler > 0) /* Normal scheduler */
+ rp = erts_proc_lookup(mon->u.pid);
+ else
+ rp = erts_proc_lookup_inc_refc(mon->u.pid);
+#else
+ if (scheduler <= 0) {
+ erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from "
+ "non-scheduler thread on non-SMP VM");
+ }
+ rp = erts_proc_lookup(mon->u.pid);
+#endif
+
+ if (!rp) {
+ is_exiting = 1;
+ }
+ else {
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ is_exiting = 1;
+ } else {
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ ASSERT(rmon);
+ is_exiting = 0;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+#ifdef ERTS_SMP
+ if (scheduler <= 0)
+ erts_proc_dec_refc(rp);
+#endif
+ }
+ if (is_exiting) {
+ rsrc->monitors->pending_failed_fire++;
+ }
+ erts_smp_mtx_unlock(&rsrc->monitors->lock);
+
+ if (rmon) {
+ ASSERT(rmon->type == MON_NIF_TARGET);
+ ASSERT(rmon->u.resource == rsrc);
+ erts_destroy_monitor(rmon);
+ }
+ erts_destroy_monitor(mon);
+
+ return 0;
+}
+
+int enif_compare_monitors(const ErlNifMonitor *monitor1,
+ const ErlNifMonitor *monitor2)
+{
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
+}
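Since ErlNifMonitor is opaque to the library, the memcmp above is the only sanctioned way to compare monitors. A down-callback sketch, reusing the hypothetical MyRes from the monitoring sketch above:

    static void my_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
                        ErlNifMonitor* mon)
    {
        MyRes* res = (MyRes*) obj;
        if (enif_compare_monitors(mon, &res->mon) == 0) {
            /* the monitor stored by watch_caller() fired */
        }
    }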
+
/***************************************************************************
** load_nif/2 **
***************************************************************************/
-static BeamInstr** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
+static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
{
int n = (int) mod_code->num_functions;
int j;
for (j = 0; j < n; ++j) {
- BeamInstr* code_ptr = (BeamInstr*) mod_code->functions[j];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (f_atom == ((Eterm) code_ptr[3])
- && arity == ((unsigned) code_ptr[4])) {
-
- return (BeamInstr**) &mod_code->functions[j];
+ ErtsCodeInfo* ci = mod_code->functions[j];
+ ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (f_atom == ci->mfa.function
+ && arity == ci->mfa.arity) {
+ return mod_code->functions+j;
}
}
return NULL;
@@ -3123,39 +3454,62 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+#define AT_LEAST_VERSION(E,MAJ,MIN) \
+ (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
+
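The macro packs major and minor into a single integer, so version pairs compare lexicographically as long as minor stays below 0x100 (which holds for the NIF ABI); worked out for the 2.7 check used below:

    /* AT_LEAST_VERSION(e, 2, 7):
     *   e->major == 2, e->minor == 7  ->  0x207 >= 0x207  ->  true
     *   e->major == 2, e->minor == 6  ->  0x206 >= 0x207  ->  false
     *   e->major == 3, e->minor == 0  ->  0x300 >= 0x207  ->  true */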
/*
- * The function below is for looping through ErlNifFunc arrays, helping
- * provide backwards compatibility across the version 2.7 change that added
- * the "flags" field to ErlNifFunc.
+ * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
*/
-static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+static struct erl_module_nif* create_lib(const ErlNifEntry* src)
{
- ASSERT(incrp);
- if (!*incrp) {
- if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- *incrp = sizeof(ErlNifFunc);
- else {
- /*
- * ErlNifFuncV1 below is what ErlNifFunc was before the
- * addition of the flags field for 2.7, and is needed to handle
- * backward compatibility.
- */
- typedef struct {
- const char* name;
- unsigned arity;
- ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
- }ErlNifFuncV1;
- *incrp = sizeof(ErlNifFuncV1);
- }
+ struct erl_module_nif* lib;
+ ErlNifEntry* dst;
+ Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
+
+ if (!AT_LEAST_VERSION(src, 2, 7))
+ bytes += src->num_of_funcs * sizeof(ErlNifFunc);
+
+ lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
+ dst = &lib->entry;
+
+ sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
+
+ if (AT_LEAST_VERSION(src, 2, 1)) {
+ dst->vm_variant = src->vm_variant;
+ } else {
+ dst->vm_variant = "beam.vanilla";
}
- return (ErlNifFunc*) ((char*)func + *incrp);
-}
+ if (AT_LEAST_VERSION(src, 2, 7)) {
+ dst->options = src->options;
+ } else {
+ /*
+ * Make a modern copy of the ErlNifFunc array
+ */
+ struct ErlNifFunc_V1 {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
+ int i;
+ for (i = 0; i < src->num_of_funcs; ++i) {
+ sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
+ lib->_funcs_copy_[i].flags = 0;
+ }
+ dst->funcs = lib->_funcs_copy_;
+ dst->options = 0;
+ }
+ if (AT_LEAST_VERSION(src, 2, 12)) {
+ dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
+ } else {
+ dst->sizeof_ErlNifResourceTypeInit = 0;
+ }
+ return lib;
+}
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
- static const char reload[] = "reload";
static const char upgrade[] = "upgrade";
char* lib_name = NULL;
void* handle = NULL;
@@ -3167,15 +3521,20 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
Eterm mod_atom;
const Atom* mod_atomp;
Eterm f_atom;
- BeamInstr* caller;
+ ErtsCodeMFA* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
struct erl_module_nif* lib = NULL;
- int reload_warning = 0;
struct erl_module_instance* this_mi;
struct erl_module_instance* prev_mi;
+ if (BIF_P->flags & F_HIPE_MODE) {
+ ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
+ "modules not supported");
+ BIF_RET(ret);
+ }
+
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
/* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
@@ -3201,12 +3560,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/* Find calling module */
ASSERT(BIF_P->current != NULL);
- ASSERT(BIF_P->current[0] == am_erlang
- && BIF_P->current[1] == am_load_nif
- && BIF_P->current[2] == 2);
+ ASSERT(BIF_P->current->module == am_erlang
+ && BIF_P->current->function == am_load_nif
+ && BIF_P->current->arity == 2);
caller = find_function_from_pc(BIF_P->cp);
ASSERT(caller != NULL);
- mod_atom = caller[0];
+ mod_atom = caller->module;
ASSERT(is_atom(mod_atom));
module_p = erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(module_p != NULL);
@@ -3232,8 +3591,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
this_mi = module_p->on_load;
}
- if (init_func == NULL &&
- (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ if (this_mi->nif != NULL) {
+ ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
+ " (reload disallowed since OTP 20).");
+ }
+ else if (init_func == NULL &&
+ (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
@@ -3261,7 +3624,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
- else if (entry->minor >= 1
+ else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
"this vm variant (%s).",
@@ -3272,20 +3635,25 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" match calling module '%T'", entry->name, mod_atom);
}
else {
- /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
- int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
- BeamInstr** code_pp;
+ lib = create_lib(entry);
+ entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
+
+ lib->handle = handle;
+ erts_refc_init(&lib->rt_cnt, 0);
+ erts_refc_init(&lib->rt_dtor_cnt, 0);
+ ASSERT(opened_rt_list == NULL);
+ lib->mod = module_p;
+
+ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
+ ErtsCodeInfo** ci_pp;
+ ErlNifFunc* f = &entry->funcs[i];
+
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
- || (code_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
+ || (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
- else if (maybe_dirty_nifs && f->flags) {
+ else if (f->flags) {
/*
* If the flags field is non-zero and this emulator was
* built with dirty scheduler support, check that the flags
@@ -3302,11 +3670,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
mod_atom, f->name, f->arity);
#endif
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (code_pp[1] - code_pp[0] < (5+4))
-#else
- else if (code_pp[1] - code_pp[0] < (5+3))
-#endif
+ else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
+ < BEAM_NIF_MIN_FUNC_SZ)
{
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
" in module (%T:%s/%u too small)",
@@ -3314,7 +3679,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
mod_atom, f->name, f->arity);*/
- f = next_func(entry, &incr, f);
}
}
@@ -3322,132 +3686,72 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
- /* Call load, reload or upgrade:
+ /* Call load or upgrade:
*/
-
- lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif));
- lib->handle = handle;
- lib->entry = entry;
- erts_refc_init(&lib->rt_cnt, 0);
- erts_refc_init(&lib->rt_dtor_cnt, 0);
- ASSERT(opened_rt_list == NULL);
- lib->mod = module_p;
env.mod_nif = lib;
- if (this_mi->nif != NULL) { /*************** Reload ******************/
- /*
- * Repeated load_nif calls from same Erlang module instance ("reload")
- * is deprecated and was only ment as a development feature not to
- * be used in production systems. (See warning below)
- */
- int k, old_incr = 0;
- ErlNifFunc* old_func;
- lib->priv_data = this_mi->nif->priv_data;
-
- ASSERT(this_mi->nif->entry != NULL);
- if (entry->reload == NULL) {
- ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
- goto error;
- }
- /* Check that no NIF is removed */
- old_func = this_mi->nif->entry->funcs;
- for (k=0; k < this_mi->nif->entry->num_of_funcs; k++) {
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == f->arity
- && sys_strcmp(old_func->name, f->name) == 0) {
- break;
- }
- f = next_func(entry, &incr, f);
- }
- if (i == entry->num_of_funcs) {
- ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
- "function %T:%s/%u\r\n", mod_atom,
- old_func->name, old_func->arity);
- goto error;
- }
- old_func = next_func(this_mi->nif->entry, &old_incr, old_func);
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful (%d).", veto);
- }
- else {
- commit_opened_resource_types(lib);
- this_mi->nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(this_mi->nif);
- reload_warning = 1;
- }
+
+ lib->priv_data = NULL;
+ if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
+ void* prev_old_data = prev_mi->nif->priv_data;
+ if (entry->upgrade == NULL) {
+ ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ goto error;
+ }
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ prev_mi->nif->priv_data = prev_old_data;
+ ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
+ }
}
- else {
- lib->priv_data = NULL;
- if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
- void* prev_old_data = prev_mi->nif->priv_data;
- if (entry->upgrade == NULL) {
- ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
- goto error;
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- prev_mi->nif->priv_data = prev_old_data;
- ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
- }
- else
- commit_opened_resource_types(lib);
- }
- else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
- }
- else
- commit_opened_resource_types(lib);
- }
+ else if (entry->load != NULL) { /********* Initial load ***********/
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
+ }
}
if (ret == am_ok) {
+ commit_opened_resource_types(lib);
+
/*
** Everything ok, patch the beam code with op_call_nif
*/
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
-
this_mi->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
- BeamInstr* code_ptr;
+ ErlNifFunc* f = &entry->funcs[i];
+ ErtsCodeInfo* ci;
+ BeamInstr *code_ptr;
+
erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ code_ptr = erts_codeinfo_to_code(ci);
- if (code_ptr[1] == 0) {
- code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
+ if (ci->u.gen_bp == NULL) {
+ code_ptr[0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- GenericBp* g = (GenericBp *) code_ptr[1];
- ASSERT(code_ptr[5+0] ==
+ GenericBp* g = ci->u.gen_bp;
+ ASSERT(code_ptr[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
- code_ptr[5+3] = (BeamInstr) f->fptr;
- code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
- (BeamInstr) schedule_dirty_io_nif :
- (BeamInstr) schedule_dirty_cpu_nif;
+ if (f->flags) {
+ code_ptr[3] = (BeamInstr) f->fptr;
+ code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) static_schedule_dirty_io_nif :
+ (BeamInstr) static_schedule_dirty_cpu_nif;
}
else
#endif
- code_ptr[5+1] = (BeamInstr) f->fptr;
- code_ptr[5+2] = (BeamInstr) lib;
- f = next_func(entry, &incr, f);
+ code_ptr[1] = (BeamInstr) f->fptr;
+ code_ptr[2] = (BeamInstr) lib;
}
}
else {
@@ -3468,14 +3772,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
- if (reload_warning) {
- erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Repeated calls to erlang:load_nif from module '%T'.\n\n"
- "The NIF reload mechanism is deprecated and must not "
- "be used in production systems.\n", mod_atom);
- erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
- }
BIF_RET(ret);
}
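For reference, the load/upgrade callbacks driven by the code above look as follows from the library side; a hedged sketch with hypothetical names (nif_funcs as in the dirty-NIF sketch earlier). Note that repeated load_nif from the same module instance now fails with 'reload' instead of invoking a reload callback:

    static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
    {
        *priv = NULL;   /* per-library state; non-zero return vetoes */
        return 0;
    }

    static int upgrade(ErlNifEnv* env, void** priv, void** old_priv,
                       ERL_NIF_TERM info)
    {
        *priv = *old_priv;  /* commonly adopt the old private data */
        return 0;
    }

    /* The RELOAD slot is still present in ERL_NIF_INIT but is no
     * longer honored, so pass NULL. */
    ERL_NIF_INIT(my_module, nif_funcs, load, NULL, upgrade, NULL)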
@@ -3525,7 +3821,8 @@ erts_unload_nif(struct erl_module_nif* lib)
void erl_nif_init()
{
- ERTS_CT_ASSERT((offsetof(ErlNifResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
+ ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
+ == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
resource_type_list.next = &resource_type_list;
resource_type_list.prev = &resource_type_list;
@@ -3539,8 +3836,8 @@ void erl_nif_init()
int erts_nif_get_funcs(struct erl_module_nif* mod,
ErlNifFunc **funcs)
{
- *funcs = mod->entry->funcs;
- return mod->entry->num_of_funcs;
+ *funcs = mod->entry.funcs;
+ return mod->entry.num_of_funcs;
}
Eterm erts_nif_call_function(Process *p, Process *tracee,
@@ -3551,10 +3848,10 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#ifdef DEBUG
/* Verify that function is part of this module */
int i;
- for (i = 0; i < mod->entry->num_of_funcs; i++)
- if (fun == &(mod->entry->funcs[i]))
+ for (i = 0; i < mod->entry.num_of_funcs; i++)
+ if (fun == &(mod->entry.funcs[i]))
break;
- ASSERT(i < mod->entry->num_of_funcs);
+ ASSERT(i < mod->entry.num_of_funcs);
if (p)
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
|| erts_smp_thr_progress_is_blocking());
@@ -3576,6 +3873,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
clear_offheap(&MSO(p));
erts_pre_nif(&env, p, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&env, argc, argv);
if (env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3598,6 +3898,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
so we create a phony one. */
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env.env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
if (msg_env.env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3662,6 +3965,55 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size)
#endif /* READONLY_CHECK */
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
+ int nr, const char* type, const char* func)
+{
+ Uint saved_used_size;
+ Eterm* real_htop;
+
+ if (is_immed(term)
+ || (is_non_value(term) && env->exception_thrown)
+ || erts_is_literal(term, ptr_val(term)))
+ return;
+
+ if (env->dbg_disable_assert_in_env) {
+ /*
+ * Trace nifs may cheat as built terms are discarded after return.
+ * ToDo: Check if 'term' is part of argv[].
+ */
+ return;
+ }
+
+ if (env->heap_frag) {
+ ASSERT(env->heap_frag == MBUF(env->proc));
+ ASSERT(env->hp >= env->heap_frag->mem);
+ ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
+ saved_used_size = env->heap_frag->used_size;
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ real_htop = NULL;
+ }
+ else {
+ real_htop = env->hp;
+ }
+ if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
+ fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
+ if (nr) {
+ fprintf(stderr, "Term #%d of the %s is not from the same ErlNifEnv.",
+ nr, type);
+ }
+ else {
+ fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
+ }
+ fprintf(stderr, "\r\nABORTING\r\n");
+ abort();
+ }
+ if (env->heap_frag) {
+ env->heap_frag->used_size = saved_used_size;
+ }
+}
+#endif
+
#ifdef HAVE_USE_DTRACE
#define MESSAGE_BUFSIZ 1024
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 494971e118..b0d5c39798 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -49,10 +49,10 @@
** 2.8: 18.0 add enif_has_pending_exception
** 2.9: 18.2 enif_getenv
** 2.10: Time API
-** 2.11: 19.0 enif_snprintf
+** 2.11: 19.0 enif_snprintf
+** 2.12: 20.0 add enif_select, enif_open_resource_type_x,
+**             enif_monitor_process, enif_demonitor_process,
+**             enif_compare_monitors, enif_hash, enif_whereis_pid,
+**             enif_whereis_port
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 11
+#define ERL_NIF_MINOR_VERSION 12
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -116,12 +116,16 @@ typedef struct enif_entry_t
int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
+
+ /* Added in 2.1 */
const char* vm_variant;
- unsigned options;
-}ErlNifEntry;
-/* Field bits for ErlNifEntry options */
-#define ERL_NIF_DIRTY_NIF_OPTION 1
+ /* Added in 2.7 */
+ unsigned options; /* Unused. Can be set to 0 or 1 (dirty sched config) */
+
+ /* Added in 2.12 */
+ size_t sizeof_ErlNifResourceTypeInit;
+}ErlNifEntry;
typedef struct
@@ -134,8 +138,18 @@ typedef struct
void* ref_bin;
}ErlNifBinary;
-typedef struct enif_resource_type_t ErlNifResourceType;
-typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef void* ErlNifEvent; /* FIXME: Use 'HANDLE' somehow without breaking existing source */
+#else
+typedef int ErlNifEvent;
+#endif
+
+/* Return bits from enif_select: */
+#define ERL_NIF_SELECT_STOP_CALLED (1 << 0)
+#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
+#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
+#define ERL_NIF_SELECT_FAILED (1 << 3)
+
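These bits describe the outcome of an ERL_NIF_SELECT_STOP request or an error. A usage sketch, assuming the mode flags ERL_NIF_SELECT_READ and ERL_NIF_SELECT_STOP from enum ErlNifSelectFlags declared elsewhere in this header:

    static int start_read(ErlNifEnv* env, int fd, void* res,
                          ERL_NIF_TERM ref)
    {
        /* Notify the calling process (pid == NULL) with a message
         * carrying ref when fd becomes readable; res must belong to a
         * resource type with a 'stop' callback. */
        int rv = enif_select(env, fd, ERL_NIF_SELECT_READ, res, NULL, ref);
        if (rv < 0)
            return rv; /* ERL_NIF_SELECT_INVALID_EVENT or _FAILED set */
        return 0;
    }

    static void stop_read(ErlNifEnv* env, int fd, void* res,
                          ERL_NIF_TERM ref)
    {
        /* The resource's 'stop' callback runs directly or scheduled,
         * as reported by STOP_CALLED/STOP_SCHEDULED in the result. */
        (void) enif_select(env, fd, ERL_NIF_SELECT_STOP, res, NULL, ref);
    }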
typedef enum
{
ERL_NIF_RT_CREATE = 1,
@@ -157,6 +171,19 @@ typedef struct
ERL_NIF_TERM port_id; /* internal, may change */
}ErlNifPort;
+typedef ErlDrvMonitor ErlNifMonitor;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef void ErlNifResourceStop(ErlNifEnv*, void*, ErlNifEvent, int is_direct_call);
+typedef void ErlNifResourceDown(ErlNifEnv*, void*, ErlNifPid*, ErlNifMonitor*);
+
+typedef struct {
+ ErlNifResourceDtor* dtor;
+ ErlNifResourceStop* stop; /* at ERL_NIF_SELECT_STOP event */
+ ErlNifResourceDown* down; /* enif_monitor_process */
+} ErlNifResourceTypeInit;
+
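A sketch of opening a resource type with the new init struct via enif_open_resource_type_x() (declared further down in erl_nif_api_funcs.h); my_dtor, my_down, and my_res_type are hypothetical:

    static ErlNifResourceType* my_res_type;

    static void my_dtor(ErlNifEnv* env, void* obj)
    {
        /* release whatever the resource owns */
    }

    /* down callback as in the enif_compare_monitors sketch above */
    static void my_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
                        ErlNifMonitor* mon);

    static ErlNifResourceTypeInit my_init = {
        my_dtor, /* dtor */
        NULL,    /* stop: only needed together with enif_select() */
        my_down  /* down: enables enif_monitor_process() */
    };

    static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
    {
        my_res_type = enif_open_resource_type_x(env, "my_res", &my_init,
                                                ERL_NIF_RT_CREATE, NULL);
        return my_res_type ? 0 : -1;
    }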
typedef ErlDrvSysInfo ErlNifSysInfo;
typedef struct ErlDrvTid_ *ErlNifTid;
@@ -209,6 +236,11 @@ typedef enum {
ERL_NIF_BIN2TERM_SAFE = 0x20000000
} ErlNifBinaryToTerm;
+typedef enum {
+ ERL_NIF_INTERNAL_HASH = 1,
+ ERL_NIF_PHASH2 = 2
+} ErlNifHash;
+
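Hash-type usage sketch, assuming the documented semantics (ERL_NIF_PHASH2 matches erlang:phash2/1 for salt 0, while the internal hash is only stable within one VM instance):

    static ErlNifUInt64 portable_hash(ERL_NIF_TERM term)
    {
        return enif_hash(ERL_NIF_PHASH2, term, 0); /* == erlang:phash2/1 */
    }

    static ErlNifUInt64 table_hash(ERL_NIF_TERM term, ErlNifUInt64 salt)
    {
        return enif_hash(ERL_NIF_INTERNAL_HASH, term, salt); /* VM-local */
    }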
/*
* Return values from enif_thread_type(). Negative values
* reserved for specific types of non-scheduler threads.
@@ -266,8 +298,6 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
#endif
-#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
-
#ifdef __cplusplus
}
# define ERL_NIF_INIT_PROLOGUE extern "C" {
@@ -293,7 +323,8 @@ ERL_NIF_INIT_DECL(NAME) \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
ERL_NIF_VM_VARIANT, \
- ERL_NIF_ENTRY_OPTIONS \
+ 1, \
+ sizeof(ErlNifResourceTypeInit) \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 9a8f216773..94c04cd126 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -175,6 +175,14 @@ ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsign
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
ERL_NIF_API_FUNC_DECL(int,enif_thread_type,(void));
ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_select,(ErlNifEnv* env, ErlNifEvent e, enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, ERL_NIF_TERM ref));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type_x,(ErlNifEnv*, const char* name_str, const ErlNifResourceTypeInit*, ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(int, enif_monitor_process,(ErlNifEnv*,void* obj,const ErlNifPid*,ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_demonitor_process,(ErlNifEnv*,void* obj,const ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_compare_monitors,(const ErlNifMonitor*,const ErlNifMonitor*));
+ERL_NIF_API_FUNC_DECL(ErlNifUInt64,enif_hash,(ErlNifHash type, ERL_NIF_TERM term, ErlNifUInt64 salt));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_pid, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_port, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
@@ -332,6 +340,14 @@ ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
# define enif_thread_type ERL_NIF_API_FUNC_MACRO(enif_thread_type)
# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
+# define enif_select ERL_NIF_API_FUNC_MACRO(enif_select)
+# define enif_open_resource_type_x ERL_NIF_API_FUNC_MACRO(enif_open_resource_type_x)
+# define enif_monitor_process ERL_NIF_API_FUNC_MACRO(enif_monitor_process)
+# define enif_demonitor_process ERL_NIF_API_FUNC_MACRO(enif_demonitor_process)
+# define enif_compare_monitors ERL_NIF_API_FUNC_MACRO(enif_compare_monitors)
+# define enif_hash ERL_NIF_API_FUNC_MACRO(enif_hash)
+# define enif_whereis_pid ERL_NIF_API_FUNC_MACRO(enif_whereis_pid)
+# define enif_whereis_port ERL_NIF_API_FUNC_MACRO(enif_whereis_port)
/*
** ADD NEW ENTRIES HERE (before this comment)
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 0c76c6fe7d..6ec428e282 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -255,19 +255,16 @@ extern ErtsPTab erts_port;
 *                                    Refs                                    *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#define internal_ref_no_numbers(x) ERTS_REF_NUMBERS
+#define internal_ref_numbers(x) (is_internal_ordinary_ref((x)) \
+ ? internal_ordinary_ref_numbers((x)) \
+ : (ASSERT(is_internal_magic_ref((x))), \
+ internal_magic_ref_numbers((x))))
#if defined(ARCH_64)
-#define internal_ref_no_of_numbers(x) \
- (internal_ref_data((x))[0])
-#define internal_thing_ref_no_of_numbers(thing) \
- (internal_thing_ref_data(thing)[0])
-#define internal_ref_numbers(x) \
- (&internal_ref_data((x))[1])
-#define internal_thing_ref_numbers(thing) \
- (&internal_thing_ref_data(thing)[1])
-#define external_ref_no_of_numbers(x) \
+#define external_ref_no_numbers(x) \
(external_ref_data((x))[0])
-#define external_thing_ref_no_of_numbers(thing) \
+#define external_thing_ref_no_numbers(thing) \
(external_thing_ref_data(thing)[0])
#define external_ref_numbers(x) \
(&external_ref_data((x))[1])
@@ -277,12 +274,8 @@ extern ErtsPTab erts_port;
#else
-#define internal_ref_no_of_numbers(x) (internal_ref_data_words((x)))
-#define internal_thing_ref_no_of_numbers(t) (internal_thing_ref_data_words(t))
-#define internal_ref_numbers(x) (internal_ref_data((x)))
-#define internal_thing_ref_numbers(t) (internal_thing_ref_data(t))
-#define external_ref_no_of_numbers(x) (external_ref_data_words((x)))
-#define external_thing_ref_no_of_numbers(t) (external_thing_ref_data_words((t)))
+#define external_ref_no_numbers(x) (external_ref_data_words((x)))
+#define external_thing_ref_no_numbers(t) (external_thing_ref_data_words((t)))
#define external_ref_numbers(x) (external_ref_data((x)))
#define external_thing_ref_numbers(t) (external_thing_ref_data((t)))
@@ -299,15 +292,9 @@ extern ErtsPTab erts_port;
#define internal_ref_channel_no(x) (internal_channel_no((x)))
#define external_ref_channel_no(x) (external_channel_no((x)))
-#define ref_data_words(x) (is_internal_ref((x)) \
- ? internal_ref_data_words((x)) \
- : external_ref_data_words((x)))
-#define ref_data(x) (is_internal_ref((x)) \
- ? internal_ref_data((x)) \
- : external_ref_data((x)))
-#define ref_no_of_numbers(x) (is_internal_ref((x)) \
- ? internal_ref_no_of_numbers((x))\
- : external_ref_no_of_numbers((x)))
+#define ref_no_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_no_numbers((x))\
+ : external_ref_no_numbers((x)))
#define ref_numbers(x) (is_internal_ref((x)) \
? internal_ref_numbers((x)) \
: external_ref_numbers((x)))
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index f37393bce4..deadf435e9 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -85,21 +85,20 @@ dist_table_cmp(void *dep1, void *dep2)
static void*
dist_table_alloc(void *dep_tmpl)
{
- Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
sysname = ((DistEntry *) dep_tmpl)->sysname;
- chnl_nr = make_small((Uint) atom_val(sysname));
dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
dist_entries++;
dep->prev = NULL;
erts_smp_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -107,12 +106,14 @@ dist_table_alloc(void *dep_tmpl)
dep->flags = 0;
dep->version = 0;
- erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
+ erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->node_links = NULL;
dep->nlinks = NULL;
dep->monitors = NULL;
- erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
+ erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->qflgs = 0;
dep->qsize = 0;
dep->out_queue.first = NULL;
@@ -760,8 +761,10 @@ void erts_init_node_tables(int dd_sec)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
- erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
@@ -818,6 +821,33 @@ int erts_lc_is_de_rlocked(DistEntry *dep)
#endif
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
+ DistEntry *dep = (DistEntry*)dep_raw;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&dep->rwmtx.lcnt, "dist_entry", dep->sysname,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->lnk_mtx.lcnt, "dist_entry_links", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->qlock.lcnt, "dist_entry_out_queue", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ } else {
+ erts_lcnt_uninstall(&dep->rwmtx.lcnt);
+ erts_lcnt_uninstall(&dep->lnk_mtx.lcnt);
+ erts_lcnt_uninstall(&dep->qlock.lcnt);
+ }
+}
+
+void erts_lcnt_update_distribution_locks(int enable) {
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
+ (void*)(UWord)enable);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+#endif
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The following is only supposed to be used for testing, and debugging. *
 *                                                                            *
@@ -850,14 +880,15 @@ static Eterm AM_timer;
static Eterm AM_delayed_delete_timer;
static void setup_reference_table(void);
-static Eterm reference_table_term(Uint **hpp, Uint *szp);
+static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
static void delete_reference_table(void);
-#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
-#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
-#else
-#define ID_HEAP_SIZE 3 /* 2-tuple */
-#endif
+#undef ERTS_MAX__
+#define ERTS_MAX__(A, B) ((A) > (B) ? (A) : (B))
+
+#define ID_HEAP_SIZE \
+ ERTS_MAX__(ERTS_MAGIC_REF_THING_SIZE, \
+ ERTS_MAX__(BIG_UINT_HEAP_SIZE, 3))
typedef struct node_referrer_ {
struct node_referrer_ *next;
@@ -870,6 +901,7 @@ typedef struct node_referrer_ {
int system_ref;
Eterm id;
Uint id_heap[ID_HEAP_SIZE];
+ ErlOffHeap off_heap;
} NodeReferrer;
typedef struct {
@@ -942,7 +974,7 @@ erts_get_node_and_dist_references(struct process *proc)
/* Get term size */
size = 0;
- (void) reference_table_term(NULL, &size);
+ (void) reference_table_term(NULL, NULL, &size);
hp = HAlloc(proc, size);
#ifdef DEBUG
@@ -951,7 +983,7 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
/* Write term */
- res = reference_table_term(&hp, NULL);
+ res = reference_table_term(&hp, &proc->off_heap, NULL);
ASSERT(endp == hp);
@@ -1048,13 +1080,14 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
sizeof(NodeReferrer));
nrp->next = referred_node->referrers;
+ ERTS_INIT_OFF_HEAP(&nrp->off_heap);
referred_node->referrers = nrp;
if(IS_CONST(id))
nrp->id = id;
else {
Uint *hp = &nrp->id_heap[0];
- ASSERT(is_big(id) || is_tuple(id));
- nrp->id = copy_struct(id, size_object(id), &hp, NULL);
+ ASSERT(is_big(id) || is_tuple(id) || is_internal_magic_ref(id));
+ nrp->id = copy_struct(id, size_object(id), &hp, &nrp->off_heap);
}
nrp->heap_ref = 0;
nrp->link_ref = 0;
@@ -1126,12 +1159,12 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
- case REFC_BINARY_SUBTAG:
- if(IsMatchProgBinary(u.pb->val)) {
+ case REF_SUBTAG:
+ if(IsMatchProgBinary(u.mref->mb)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == u.pb->val) {
+ if(ib->bin_val == (Binary *) u.mref->mb) {
insert_bin = 0;
break;
}
@@ -1140,18 +1173,19 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
Uint *hp = &id_heap[0];
InsertedBin *nib;
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
- erts_match_prog_foreach_offheap(u.pb->val,
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.mref->mb);
+ erts_match_prog_foreach_offheap((Binary *) u.mref->mb,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = u.pb->val;
+ nib->bin_val = (Binary *) u.mref->mb;
nib->next = inserted_bins;
inserted_bins = nib;
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
}
}
break;
+ case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
break; /* No need to */
default:
@@ -1165,8 +1199,8 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
{
Eterm *idp = p;
- if(is_external(monitor->pid))
- insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
+ if(monitor->type != MON_NIF_TARGET && is_external(monitor->u.pid))
+ insert_node(external_thing_ptr(monitor->u.pid)->node, MONITOR_REF, *idp);
if(is_external(monitor->ref))
insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
}
@@ -1210,10 +1244,20 @@ insert_links2(ErtsLink *lnk, Eterm id)
static void
insert_ets_table(DbTable *tab, void *unused)
{
+ ErlOffHeap off_heap;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
struct insert_offheap2_arg a;
a.type = ETS_REF;
- a.id = tab->common.id;
+ if (tab->common.status & DB_NAMED_TABLE)
+ a.id = tab->common.the_name;
+ else {
+ Eterm *hp = &heap[0];
+ ERTS_INIT_OFF_HEAP(&off_heap);
+ a.id = erts_mk_magic_ref(&hp, &off_heap, tab->common.btid);
+ }
erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
+ if (is_not_atom(a.id))
+ erts_cleanup_offheap(&off_heap);
}
static void
@@ -1517,7 +1561,7 @@ setup_reference_table(void)
*/
static Eterm
-reference_table_term(Uint **hpp, Uint *szp)
+reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
{
#undef MK_2TUP
#undef MK_3TUP
@@ -1572,12 +1616,11 @@ reference_table_term(Uint **hpp, Uint *szp)
nrid = nrp->id;
if (!IS_CONST(nrp->id)) {
-
Uint nrid_sz = size_object(nrp->id);
if (szp)
*szp += nrid_sz;
if (hpp)
- nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
+ nrid = copy_struct(nrp->id, nrid_sz, hpp, ohp);
}
if (is_internal_pid(nrid) || nrid == am_error_logger) {
@@ -1712,6 +1755,7 @@ delete_reference_table(void)
NodeReferrer *tnrp;
nrp = referred_nodes[i].referrers;
while(nrp) {
+ erts_cleanup_offheap(&nrp->off_heap);
tnrp = nrp;
nrp = nrp->next;
erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 489da1ba17..91bcb4fce1 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -195,6 +195,10 @@ int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_distribution_locks(int enable);
+#endif
+
ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index c59b42cdae..6a3213ec52 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -733,7 +733,7 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
if (lock_pdl && prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
-#if ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_CHECK
if (!ERTS_IS_CRASH_DUMPING) {
if (erts_lc_is_emu_thr()) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -988,16 +988,6 @@ typedef enum {
ERTS_PORT_OP_DONE
} ErtsPortOpResult;
-ErtsPortOpResult
-erts_schedule_proc2port_signal(Process *,
- Port *,
- Eterm,
- Eterm *,
- ErtsProc2PortSigData *,
- int,
- ErtsPortTaskHandle *,
- ErtsProc2PortSigCallback);
-
int erts_deliver_port_exit(Port *, Eterm, Eterm, int, int);
/*
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 3102e44c11..1ab1e47254 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -852,10 +852,11 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
}
static ERTS_INLINE void
-abort_nosuspend_task(Port *pp,
- ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp,
- int bpq_data)
+abort_signal_task(Port *pp,
+ int abort_type,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
@@ -863,18 +864,28 @@ abort_nosuspend_task(Port *pp,
if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
else {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
aborted_proc2port_data(pp, size);
}
}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
+{
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, type, tdp, bpq_data);
+}
+
static ErtsPortTaskHandleList *
get_free_nosuspend_handles(Port *pp)
{
@@ -936,7 +947,7 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
#ifdef ERTS_SMP
- if (runq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
erts_non_empty_runq(runq);
#endif
}
@@ -1613,8 +1624,9 @@ abort_nosuspend:
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
+
+ ASSERT(ptp);
+ port_task_free(ptp);
return 0;
@@ -1625,12 +1637,15 @@ fail:
erts_port_dec_refc(pp);
#endif
+ if (ptp) {
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
+ port_task_free(ptp);
+ }
+
if (ns_pthlp)
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
-
return -1;
}
@@ -2161,13 +2176,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
#endif
}
-int
-erts_port_is_scheduled(Port *pp)
-{
- erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags);
- return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0;
-}
-
#ifdef ERTS_SMP
void
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 2a6bd165a3..39f403b443 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -188,13 +188,7 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 1
-#endif
- );
+ erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
@@ -267,7 +261,6 @@ int erts_port_task_schedule(Eterm,
ErtsPortTaskType,
...);
void erts_port_task_free_port(Port *);
-int erts_port_is_scheduled(Port *);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr);
void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp);
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 1a579704a8..e6f8460164 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -382,12 +382,15 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
break;
}
case REF_DEF:
+ if (!ERTS_IS_CRASH_DUMPING)
+ erts_magic_ref_save_bin(obj);
+ /* fall through... */
case EXTERNAL_REF_DEF:
PRINT_STRING(res, fn, arg, "#Ref<");
PRINT_UWORD(res, fn, arg, 'u', 0, 1,
(ErlPfUWord) ref_channel_no(wobj));
ref_num = ref_numbers(wobj);
- for (i = ref_no_of_numbers(wobj)-1; i >= 0; i--) {
+ for (i = ref_no_numbers(wobj)-1; i >= 0; i--) {
PRINT_CHAR(res, fn, arg, '.');
PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) ref_num[i]);
}
@@ -526,8 +529,8 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(wobj) + 1));
- Atom* module = atom_tab(atom_val(ep->code[0]));
- Atom* name = atom_tab(atom_val(ep->code[1]));
+ Atom* module = atom_tab(atom_val(ep->info.mfa.module));
+ Atom* name = atom_tab(atom_val(ep->info.mfa.function));
PRINT_STRING(res, fn, arg, "#Fun<");
PRINT_BUF(res, fn, arg, module->name, module->len);
@@ -535,7 +538,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
PRINT_BUF(res, fn, arg, name->name, name->len);
PRINT_CHAR(res, fn, arg, '.');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
- (ErlPfSWord) ep->code[2]);
+ (ErlPfSWord) ep->info.mfa.arity);
PRINT_CHAR(res, fn, arg, '>');
}
break;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 5c0322a7f6..63c838a91d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -48,6 +48,7 @@
#include "erl_bif_unique.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_nfunc_sched.h"
#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -106,9 +107,33 @@
#define LOW_BIT (1 << PRIORITY_LOW)
#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL)
-#define ERTS_EMPTY_RUNQ(RQ) \
- ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \
- && (RQ)->misc.start == NULL)
+#define ERTS_IS_RUNQ_EMPTY_FLGS(FLGS) \
+ (!((FLGS) & (ERTS_RUNQ_FLGS_QMASK|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(FLGS) \
+ (!((FLGS) & (PORT_BIT|ERTS_RUNQ_FLG_MISC_OP)))
+
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
+ ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+static ERTS_INLINE int
+runq_got_work_to_execute_flags(Uint32 flags)
+{
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ return !ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(flags);
+ return !ERTS_IS_RUNQ_EMPTY_FLGS(flags);
+}
+
+#ifdef ERTS_SMP
+static ERTS_INLINE int
+runq_got_work_to_execute(ErtsRunQueue *rq)
+{
+ return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
+}
+#endif
#undef RUNQ_READ_RQ
#undef RUNQ_SET_RQ
@@ -140,9 +165,6 @@ do { \
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
#endif
-#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
- (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
-
const Process erts_invalid_process = {{ERTS_INVALID_PID}};
extern BeamInstr beam_apply[];
@@ -172,7 +194,10 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
-
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_dcpu_sched_thread_suggested_stack_size = -1;
+int erts_dio_sched_thread_suggested_stack_size = -1;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
@@ -193,170 +218,147 @@ static ErtsAuxWorkData *aux_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3)
-typedef enum {
- ERTS_SCHED_NORMAL,
- ERTS_SCHED_DIRTY_CPU,
- ERTS_SCHED_DIRTY_IO
-} ErtsSchedType;
-
typedef struct {
int ongoing;
ErtsProcList *blckrs;
ErtsProcList *chngq;
} ErtsMultiSchedulingBlock;
+typedef struct {
+ Uint32 normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Uint32 dirty_cpu;
+ Uint32 dirty_io;
+#endif
+} ErtsSchedTypeCounters;
+
static struct {
erts_smp_mtx_t mtx;
- Uint32 online;
- Uint32 curr_online;
- Uint32 active;
+ ErtsSchedTypeCounters online;
+ ErtsSchedTypeCounters curr_online;
+ ErtsSchedTypeCounters active;
erts_smp_atomic32_t changing;
ErtsProcList *chngq;
Eterm changer;
ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */
ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedType last_msb_dirty_type;
+#endif
} schdlr_sspnd;
-#define ERTS_SCHDLR_SSPND_S_BITS 10
-#define ERTS_SCHDLR_SSPND_DCS_BITS 11
-#define ERTS_SCHDLR_SSPND_DIS_BITS 11
-
-#define ERTS_SCHDLR_SSPND_S_MASK ((1 << ERTS_SCHDLR_SSPND_S_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DCS_MASK ((1 << ERTS_SCHDLR_SSPND_DCS_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DIS_MASK ((1 << ERTS_SCHDLR_SSPND_DIS_BITS)-1)
-
-#define ERTS_SCHDLR_SSPND_S_SHIFT 0
-#define ERTS_SCHDLR_SSPND_DCS_SHIFT (ERTS_SCHDLR_SSPND_S_SHIFT \
- + ERTS_SCHDLR_SSPND_S_BITS)
-#define ERTS_SCHDLR_SSPND_DIS_SHIFT (ERTS_SCHDLR_SSPND_DCS_SHIFT \
- + ERTS_SCHDLR_SSPND_DCS_BITS)
-
-#if (ERTS_SCHDLR_SSPND_S_BITS \
- + ERTS_SCHDLR_SSPND_DCS_BITS \
- + ERTS_SCHDLR_SSPND_DIS_BITS) > 32
-# error Wont fit in Uint32
-#endif
+static void init_scheduler_suspend(void);
-#if (ERTS_MAX_NO_OF_SCHEDULERS-1) > ERTS_SCHDLR_SSPND_S_MASK
-# error Max no schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS > ERTS_SCHDLR_SSPND_DCS_MASK
-# error Max no dirty cpu schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS > ERTS_SCHDLR_SSPND_DIS_MASK
-# error Max no dirty io schedulers wont fit in its bit-field
+static ERTS_INLINE Uint32
+schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p)
+{
+ int res = val1p->normal == val2p->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ res &= val1p->dirty_cpu == val2p->dirty_cpu;
+ res &= val1p->dirty_io == val2p->dirty_io;
#endif
-
-#define ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(S, DCS, DIS) \
- ((((Uint32) (((S) & ERTS_SCHDLR_SSPND_S_MASK))-1) \
- << ERTS_SCHDLR_SSPND_S_SHIFT) \
- | ((((Uint32) ((DCS) & ERTS_SCHDLR_SSPND_DCS_MASK)) \
- << ERTS_SCHDLR_SSPND_DCS_SHIFT)) \
- | ((((Uint32) ((DIS) & ERTS_SCHDLR_SSPND_DIS_MASK)) \
- << ERTS_SCHDLR_SSPND_DIS_SHIFT)))
-
-static void init_scheduler_suspend(void);
+ return res;
+}
static ERTS_INLINE Uint32
-schdlr_sspnd_get_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
- Uint32 res = (Uint32) (*valp);
switch (type) {
case ERTS_SCHED_NORMAL:
- res >>= ERTS_SCHDLR_SSPND_S_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_S_MASK;
- res++;
- break;
+ return valp->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- res >>= ERTS_SCHDLR_SSPND_DCS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DCS_MASK;
- break;
+ return valp->dirty_cpu;
case ERTS_SCHED_DIRTY_IO:
- res >>= ERTS_SCHDLR_SSPND_DIS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DIS_MASK;
- break;
+ return valp->dirty_io;
+#else
+ case ERTS_SCHED_DIRTY_CPU:
+ case ERTS_SCHED_DIRTY_IO:
+ return 0;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return 0;
}
+}
+#ifdef DEBUG
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp)
+{
+ Uint32 res = valp->normal;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ res += valp->dirty_cpu;
+ res += valp->dirty_io;
+#endif
return res;
}
+#endif
static ERTS_INLINE void
-schdlr_sspnd_dec_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
ASSERT(schdlr_sspnd_get_nscheds(valp, type) > 0);
switch (type) {
case ERTS_SCHED_NORMAL:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal--;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu--;
break;
case ERTS_SCHED_DIRTY_IO:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io--;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
}
static ERTS_INLINE void
-schdlr_sspnd_inc_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_SCHEDULERS-1);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal++;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu++;
break;
case ERTS_SCHED_DIRTY_IO:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io++;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
}
static ERTS_INLINE void
-schdlr_sspnd_set_nscheds(Uint32 *valp, ErtsSchedType type, Uint32 no)
+schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type, Uint32 no)
{
- Uint32 val = *valp;
-
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(no > 0);
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_S_MASK)
- << ERTS_SCHDLR_SSPND_S_SHIFT);
- val |= (((no-1) & ((Uint32) ERTS_SCHDLR_SSPND_S_MASK))
- << ERTS_SCHDLR_SSPND_S_SHIFT);
+ valp->normal = no;
break;
+#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK)
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK))
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
+ valp->dirty_cpu = no;
break;
case ERTS_SCHED_DIRTY_IO:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK)
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK))
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
+ valp->dirty_io = no;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
-
- *valp = val;
}
static struct {
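
Note: the retired scheme above packed all three scheduler counts into one Uint32 -- 10 bits for normal schedulers (stored minus one, since there is always at least one) and 11 bits each for dirty CPU and dirty I/O -- hence the shift/mask pairs and the compile-time overflow checks. A small self-contained sketch of that encode/decode, with illustrative names:

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative re-creation of the retired packing: 10+11+11 = 32 bits.
     * The normal-scheduler count is stored minus one, which every old
     * accessor had to undo. */
    enum { S_BITS = 10, DCS_BITS = 11, DIS_BITS = 11 };

    static uint32_t pack(uint32_t s, uint32_t dcs, uint32_t dis)
    {
        return ((s - 1) & ((1u << S_BITS) - 1))
            | ((dcs & ((1u << DCS_BITS) - 1)) << S_BITS)
            | ((dis & ((1u << DIS_BITS) - 1)) << (S_BITS + DCS_BITS));
    }

    static uint32_t unpack_normal(uint32_t v)
    {
        return (v & ((1u << S_BITS) - 1)) + 1;
    }

    int main(void)
    {
        uint32_t v = pack(8, 4, 10);
        assert(unpack_normal(v) == 8);
        return 0;
    }

Plain per-type fields make the inc/dec/set accessors above one-liners and drop the bit-budget limits entirely.
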
@@ -403,13 +405,59 @@ static erts_atomic_t runq_supervisor_sleeping;
ErtsSchedulerData *erts_scheduler_data;
#endif
-ErtsAlignedRunQueue *erts_aligned_run_queues;
-Uint erts_no_run_queues;
+ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
+Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
-ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
#ifdef ERTS_DIRTY_SCHEDULERS
-ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
-ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+
+struct {
+ union {
+ erts_smp_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } cpu;
+ union {
+ erts_smp_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } io;
+} dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+#endif
+
+static ERTS_INLINE void
+dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_aint32_t val;
+ erts_smp_atomic32_t *ap;
+ switch (esdp->type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ ap = &dirty_count.cpu.active;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ap = &dirty_count.io.active;
+ break;
+ default:
+ ap = NULL;
+ ERTS_INTERNAL_ERROR("Not a dirty scheduler");
+ break;
+ }
+
+ /*
+ * All updates done under run-queue lock, so
+ * no inc or dec needed...
+ */
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue));
+
+ val = erts_smp_atomic32_read_nob(ap);
+ val += add;
+ erts_smp_atomic32_set_nob(ap, val);
+#endif
+}
+
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data);
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data);
typedef union {
Process dsp;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
@@ -447,13 +495,13 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#endif
typedef enum {
- ERTS_PSTT_GC, /* Garbage Collect */
+ ERTS_PSTT_GC_MAJOR, /* Garbage Collect: Fullsweep */
+ ERTS_PSTT_GC_MINOR, /* Garbage Collect: Generational */
ERTS_PSTT_CPC, /* Check Process Code */
-#ifdef ERTS_NEW_PURGE_STRATEGY
ERTS_PSTT_CLA, /* Copy Literal Area */
-#endif
ERTS_PSTT_COHMQ, /* Change off heap message queue */
- ERTS_PSTT_FTMQ /* Flush trace msg queue */
+ ERTS_PSTT_FTMQ, /* Flush trace msg queue */
+ ERTS_PSTT_ETS_FREE_FIXATION
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -537,22 +585,28 @@ do { \
} \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, DOX) \
+#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, NRQS, DO, DOX) \
do { \
ErtsRunQueue *RQVAR; \
+ int nrqs = (NRQS); \
int ix__; \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
+ for (ix__ = 0; ix__ < nrqs; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
erts_smp_runq_lock(RQVAR); \
{ DO; } \
} \
{ DOX; } \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) \
+ for (ix__ = 0; ix__ < nrqs; ix__++) \
erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
- ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, )
+#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS, DO, )
+
+#define ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues, DO, )
+
+
/*
* Local functions.
*/
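
Note: ERTS_ATOMIC_FOREACH_RUNQ_X now takes the number of run queues to traverse, so a caller can cover only the normal queues or the normal plus dirty ones. All selected queues are locked before anything is released again: DO runs per queue while the locks taken so far are held, DOX runs once under all of them, and only then does the second loop unlock, giving a consistent cross-queue snapshot. Roughly, the erts_active_schedulers() use further down expands to this fragment (an approximate expansion, not new code):

    {
        ErtsRunQueue *rq;
        int nrqs = erts_no_run_queues;      /* normal queues only */
        int ix;
        for (ix = 0; ix < nrqs; ix++) {
            rq = ERTS_RUNQ_IX(ix);
            erts_smp_runq_lock(rq);         /* accumulate locks */
            { as -= abs(rq->waiting); }     /* DO, under locks 0..ix */
        }
        { ; }                               /* empty DOX */
        for (ix = 0; ix < nrqs; ix++)
            erts_smp_runq_unlock(ERTS_RUNQ_IX(ix));
    }
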
@@ -563,7 +617,6 @@ static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
-static void aux_work_timeout_late_init(void);
static void setup_aux_work_timer(ErtsSchedulerData *esdp);
static int execute_sys_tasks(Process *c_p,
@@ -606,6 +659,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+ valid |= ERTS_SSI_AUX_WORK_YIELD;
if (~valid & value)
erts_exit(ERTS_ABORT_EXIT,
@@ -694,6 +748,8 @@ erts_pre_init_process(void)
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
= "MSEG_CACHE_CHECK";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_YIELD_IX]
+ = "YIELD";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX]
= "REAP_PORTS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX]
@@ -730,6 +786,16 @@ erts_pre_init_process(void)
= ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
= ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].set_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].get_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS;
#endif
}
@@ -775,7 +841,9 @@ erts_late_init_process(void)
{
int ix;
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
Eterm atom;
char *atom_str;
@@ -810,15 +878,28 @@ erts_late_init_process(void)
}
+#define ERTS_SCHED_WTIME_IDLE ~((Uint64) 0)
+
static void
-init_sched_wall_time(ErtsSchedWallTime *swtp)
+init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
{
- swtp->need = erts_sched_balance_util;
- swtp->enabled = 0;
- swtp->start = 0;
- swtp->working.total = 0;
- swtp->working.start = 0;
- swtp->working.currently = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (esdp->type != ERTS_SCHED_NORMAL) {
+ erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0);
+ esdp->sched_wall_time.enabled = 1;
+ esdp->sched_wall_time.start = time_stamp;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+ }
+ else
+#endif
+ {
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
+ esdp->sched_wall_time.enabled = 0;
+ esdp->sched_wall_time.start = 0;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = 0;
+ }
}
static ERTS_INLINE Uint64
@@ -1009,31 +1090,145 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-static ERTS_INLINE void
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+typedef struct {
+ Uint64 working;
+ Uint64 total;
+} ErtsDirtySchedWallTime;
+
+static void
+read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info)
+{
+ erts_aint32_t mod1;
+ Uint64 working, start, ts;
+
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+
+ while (1) {
+ erts_aint32_t mod2;
+
+ /* Spin while a write is in progress... */
+ while (1) {
+ if ((mod1 & 1) == 0)
+ break;
+ ERTS_SPIN_BODY;
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ }
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ working = esdp->sched_wall_time.working.total;
+ start = esdp->sched_wall_time.working.start;
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ mod2 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ if (mod1 == mod2)
+ break;
+ mod1 = mod2;
+ }
+
+ ts = sched_wall_time_ts();
+ ts -= esdp->sched_wall_time.start;
+
+ info->total = ts;
+
+ if (start == ERTS_SCHED_WTIME_IDLE || ts < start)
+ info->working = working;
+ else
+ info->working = working + (ts - start);
+
+ if (info->working > info->total)
+ info->working = info->total;
+}
+
+#endif
+
+#ifdef ERTS_SMP
+
+static void
+dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
+{
+ erts_aint32_t mod;
+ Uint64 ts = sched_wall_time_ts();
+
+ ts -= esdp->sched_wall_time.start;
+
+ /*
+ * This thread is the only thread writing to
+ * this sched_wall_time struct. We set 'mod' to
+ * an odd value while writing...
+ */
+ mod = erts_atomic32_read_dirty(&esdp->sched_wall_time.u.mod);
+ ASSERT((mod & 1) == 0);
+ mod++;
+
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+
+ if (working) {
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
+
+ esdp->sched_wall_time.working.start = ts;
+
+ }
+ else {
+ Uint64 total;
+
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
+
+ total = esdp->sched_wall_time.working.total;
+ total += ts - esdp->sched_wall_time.working.start;
+
+ esdp->sched_wall_time.working.total = total;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+
+
+ }
+
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ mod++;
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+
+#if 0
+ if (!working) {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ }
+#endif
+}
+
+#endif /* ERTS_SMP */
+
+static void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.need) {
+ if (esdp->sched_wall_time.u.need) {
Uint64 ts = sched_wall_time_ts();
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
- update_avg_sched_util(esdp, ts, working);
+ update_avg_sched_util(esdp, ts, working);
#endif
if (esdp->sched_wall_time.enabled) {
if (working) {
-#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
esdp->sched_wall_time.working.start = ts;
}
else {
-#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
ts -= esdp->sched_wall_time.working.start;
esdp->sched_wall_time.working.total += ts;
+#ifdef DEBUG
+ esdp->sched_wall_time.working.start
+ = ERTS_SCHED_WTIME_IDLE;
+#endif
}
}
}
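
Note: read_dirty_sched_wall_time() and dirty_sched_wall_time_change() above form a sequence lock: the single writer makes 'mod' odd before touching the plain fields and even again afterwards, and readers retry until the same even value brackets both of their reads. A self-contained sketch of that protocol in C11 atomics, mirroring the barrier placement above (names illustrative, not ERTS API; a fully rigorous C11 seqlock needs extra care around the formally racy plain fields):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        _Atomic uint32_t mod;  /* odd while the single writer is mid-update */
        uint64_t working;
        uint64_t start;
    } seq_wtime;

    /* Writer side: only one thread ever calls this, so plain
     * increments of mod are safe. */
    static void writer_update(seq_wtime *w, uint64_t working, uint64_t start)
    {
        uint32_t m = atomic_load_explicit(&w->mod, memory_order_relaxed);
        atomic_store_explicit(&w->mod, m + 1, memory_order_relaxed); /* odd */
        atomic_thread_fence(memory_order_release);
        w->working = working;
        w->start = start;
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&w->mod, m + 2, memory_order_relaxed); /* even */
    }

    /* Reader side: spin until a stable, even sequence number
     * brackets the two plain reads. */
    static void reader_snapshot(seq_wtime *w, uint64_t *working, uint64_t *start)
    {
        uint32_t m1, m2;
        do {
            do {
                m1 = atomic_load_explicit(&w->mod, memory_order_relaxed);
            } while (m1 & 1);           /* writer is mid-update */
            atomic_thread_fence(memory_order_acquire);
            *working = w->working;
            *start = w->start;
            atomic_thread_fence(memory_order_acquire);
            m2 = atomic_load_explicit(&w->mod, memory_order_relaxed);
        } while (m1 != m2);             /* a write slipped in; retry */
    }
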
@@ -1050,15 +1245,19 @@ typedef struct {
int enable;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int want_dirty_cpu;
+ int want_dirty_io;
+#endif
} ErtsSchedWallTimeReq;
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_smp_atomic32_t refc;
} ErtsSystemCheckReq;
@@ -1074,6 +1273,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(screq,
5,
ERTS_ALC_T_SYS_CHECK_REQ)
+
static void
reply_sched_wall_time(void *vswtrp)
{
@@ -1097,23 +1297,23 @@ reply_sched_wall_time(void *vswtrp)
#endif
if (swtrp->set) {
if (!swtrp->enable && esdp->sched_wall_time.enabled) {
- esdp->sched_wall_time.need = erts_sched_balance_util;
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
}
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- esdp->sched_wall_time.need = 1;
+ esdp->sched_wall_time.u.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
esdp->sched_wall_time.working.start = 0;
- esdp->sched_wall_time.working.currently = 1;
}
}
if (esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- ASSERT(esdp->sched_wall_time.working.currently);
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
total = ts;
ts -= esdp->sched_wall_time.working.start;
@@ -1124,30 +1324,117 @@ reply_sched_wall_time(void *vswtrp)
hpp = NULL;
szp = &sz;
- while (1) {
- if (hpp)
- ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
- else
- *szp += REF_THING_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (esdp->sched_wall_time.enabled
+ && swtrp->req_sched == esdp->no
+ && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
+ /* Reply with info about this scheduler and all dirty schedulers... */
+ ErtsDirtySchedWallTime *dswt;
+ int ix, no_dirty_scheds, want_dcpu, want_dio, soffset;
+
+ want_dcpu = swtrp->want_dirty_cpu;
+ want_dio = swtrp->want_dirty_io;
+
+ no_dirty_scheds = 0;
+ if (want_dcpu)
+ no_dirty_scheds += erts_no_dirty_cpu_schedulers;
+ if (want_dio)
+ no_dirty_scheds += erts_no_dirty_io_schedulers;
+
+ ASSERT(no_dirty_scheds);
+
+ dswt = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDirtySchedWallTime)
+ * no_dirty_scheds);
+
+ for (ix = 0; ix < no_dirty_scheds; ix++) {
+ ErtsSchedulerData *esdp;
+ if (want_dcpu && ix < erts_no_dirty_cpu_schedulers)
+ esdp = &erts_aligned_dirty_cpu_scheduler_data[ix].esd;
+ else {
+ int dio_ix = ix - erts_no_dirty_cpu_schedulers;
+ esdp = &erts_aligned_dirty_io_scheduler_data[dio_ix].esd;
+ }
+ read_dirty_sched_wall_time(esdp, &dswt[ix]);
+ }
- if (swtrp->set)
- msg = ref_copy;
- else {
- msg = (!esdp->sched_wall_time.enabled
- ? am_notsup
- : erts_bld_tuple(hpp, szp, 3,
- make_small(esdp->no),
- erts_bld_uint64(hpp, szp, working),
- erts_bld_uint64(hpp, szp, total)));
+ soffset = erts_no_schedulers + 1;
- msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
- }
- if (hpp)
- break;
+ if (!want_dcpu) {
+ ASSERT(want_dio);
+ soffset += erts_no_dirty_cpu_schedulers;
+ }
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ ASSERT(!swtrp->set);
+
+ /* info about dirty schedulers... */
+ msg = NIL;
+ for (ix = no_dirty_scheds-1; ix >= 0; ix--) {
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(ix+soffset),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].working),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].total)),
+ msg);
+ }
+ /* info about this scheduler... */
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)),
+ msg);
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
- mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
- szp = NULL;
- hpp = &hp;
+ erts_free(ERTS_ALC_T_TMP, dswt);
+ }
+ else
+#endif
+ {
+ /* Reply with info about this scheduler only... */
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ if (swtrp->set)
+ msg = ref_copy;
+ else {
+ msg = (!esdp->sched_wall_time.enabled
+ ? am_undefined
+ : erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
}
erts_queue_message(rp, rp_locks, mp, msg, am_system);
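
Note: both reply branches above follow the usual ERTS two-pass term build: with hpp == NULL every erts_bld_* call merely accumulates the required size into *szp; once the size is known, a message heap of exactly that size is allocated and the identical construction code runs again with hpp set. Condensed to its shape (build_reply() is a hypothetical stand-in for the erts_bld_* sequence above; rp, rp_locks and ohp come from the surrounding function):

    Eterm build_reply(Eterm **hpp, Uint *szp);  /* hypothetical */

    Eterm msg, *hp = NULL, **hpp = NULL;
    Uint sz = 0, *szp = &sz;
    ErtsMessage *mp;

    while (1) {
        msg = build_reply(hpp, szp);   /* pass 1 sizes, pass 2 builds */
        if (hpp)
            break;                     /* second pass: term is built */
        mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
        szp = NULL;                    /* switch from sizing to building */
        hpp = &hp;
    }

Running the same builder for both passes keeps the size computation from drifting out of sync with the actual construction.
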
@@ -1165,7 +1452,8 @@ reply_sched_wall_time(void *vswtrp)
}
Eterm
-erts_sched_wall_time_request(Process *c_p, int set, int enable)
+erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int want_dirty_cpu, int want_dirty_io)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
@@ -1187,6 +1475,10 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
swtrp->proc = c_p;
swtrp->ref = STORE_NC(&hp, NULL, ref);
swtrp->req_sched = esdp->no;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ swtrp->want_dirty_cpu = want_dirty_cpu;
+ swtrp->want_dirty_io = want_dirty_io;
+#endif
erts_smp_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
@@ -1224,7 +1516,7 @@ reply_system_check(void *vscrp)
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
#endif
- sz = REF_THING_SIZE;
+ sz = ERTS_REF_THING_SIZE;
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
@@ -1300,12 +1592,6 @@ proclist_destroy(ErtsProcList *plp)
}
ErtsProcList *
-erts_proclist_copy(ErtsProcList *plp)
-{
- return proclist_copy(plp);
-}
-
-ErtsProcList *
erts_proclist_create(Process *p)
{
return proclist_create(p);
@@ -1726,12 +2012,13 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
int id, self = 0;
if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- if (esdp)
- self = (int) esdp->no;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* ignore_self is meaningless on dirty schedulers since aux work can
+ * only run on normal schedulers, and their ids do not translate. */
+ if(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ self = (int)esdp->no;
+ }
}
ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
@@ -2299,7 +2586,8 @@ static ERTS_INLINE erts_aint32_t
handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
- awdp->esdp->run_queue->halt_in_progress = 1;
+ ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING);
+
if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
int i, max = erts_ptab_max(&erts_port);
erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
@@ -2336,6 +2624,48 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS;
}
+void
+erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp)
+{
+ ASSERT(esdp == erts_get_scheduler_data());
+ /* Always called by the scheduler itself... */
+ set_aux_work_flags_wakeup_nob(esdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ int yield = 0;
+ /*
+ * Yield operations are always requested by the scheduler itself.
+ *
+ * The following handlers should *not* set the ERTS_SSI_AUX_WORK_YIELD
+ * flag in order to indicate more work. They should instead return
+ * information so this "main handler" can manipulate the flag...
+ *
+ * The following handlers should be able to handle being called
+ * even though no work is to be done...
+ */
+
+ /* Various yielding operations... */
+
+ yield |= erts_handle_yielded_ets_all_request(awdp->esdp,
+ &awdp->yield.ets_all);
+
+ /*
+ * Other yielding operations...
+ *
+ */
+
+ if (!yield) {
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+ return aux_work & ~ERTS_SSI_AUX_WORK_YIELD;
+ }
+
+ return aux_work;
+}
+
+
#if HAVE_ERTS_MSEG
static ERTS_INLINE erts_aint32_t
@@ -2476,6 +2806,9 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
handle_mseg_cache_check);
#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_YIELD,
+ handle_yield);
+
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
@@ -2510,6 +2843,9 @@ typedef struct {
int initialized;
erts_atomic32_t refc;
+#ifdef DEBUG
+ erts_atomic32_t used;
+#endif
erts_atomic32_t type[1];
} ErtsAuxWorkTmo;
@@ -2519,6 +2855,13 @@ static ERTS_INLINE void
start_aux_work_timer(ErtsSchedulerData *esdp)
{
ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+#ifdef DEBUG
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used,
+ (erts_aint32_t) esdp->no);
+ ASSERT(esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(!no);
+#endif
+
tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
erts_twheel_init_timer(&aux_work_tmo->timer.data);
@@ -2526,7 +2869,6 @@ start_aux_work_timer(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&aux_work_tmo->timer.data,
aux_work_timeout,
- NULL,
(void *) esdp,
tmo);
}
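
Note: the new DEBUG counter is an exchange-based single-owner assertion: arming the timer atomically swaps in the scheduler's id and asserts the previous value was zero, and the aux_work_timeout() callback below swaps zero back in and asserts it got the id it expected. A self-contained sketch of that assertion pattern (illustrative names):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <assert.h>

    static _Atomic uint32_t used;   /* 0 = timer not armed */

    static void arm_timer(uint32_t sched_no)
    {
        /* Swap our id in; a non-zero previous value means two
         * schedulers tried to own the one-shot timer at once. */
        uint32_t prev = atomic_exchange(&used, sched_no);
        assert(prev == 0);
    }

    static void timer_fired(uint32_t expected_sched_no)
    {
        /* Swap ownership back out and check it was still ours. */
        uint32_t prev = atomic_exchange(&used, 0);
        assert(prev == expected_sched_no);
    }
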
@@ -2555,16 +2897,19 @@ aux_work_timeout_early_init(int no_schedulers)
aux_work_tmo = (ErtsAuxWorkTmo *) p;
aux_work_tmo->initialized = 0;
erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+#ifdef DEBUG
+ erts_atomic32_init_nob(&aux_work_tmo->used, 0);
+#endif
for (i = 0; i <= no_schedulers; i++)
erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
}
void
-aux_work_timeout_late_init(void)
+erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp)
{
aux_work_tmo->initialized = 1;
- if (erts_atomic32_read_nob(&aux_work_tmo->refc))
- start_aux_work_timer(erts_get_scheduler_data());
+ if (erts_atomic32_read_acqb(&aux_work_tmo->refc))
+ start_aux_work_timer(esdp);
}
static void
@@ -2572,6 +2917,13 @@ aux_work_timeout(void *vesdp)
{
erts_aint32_t refc;
int i;
+#ifdef DEBUG
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used, 0);
+ ASSERT(no == esdp->no);
+ ASSERT(esdp == (ErtsSchedulerData *) vesdp);
+#endif
+
#ifdef ERTS_SMP
i = 0;
#else
@@ -2674,7 +3026,7 @@ erts_active_schedulers(void)
{
Uint as = erts_no_schedulers;
- ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
+ ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(rq, as -= abs(rq->waiting));
return as;
}
@@ -2839,11 +3191,12 @@ static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t nflgs;
erts_aint32_t xflgs = 0;
do {
+ nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC);
+ nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
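
Note: sched_prep_spin_wait() is a compare-and-swap retry loop: build the desired flag word from the last observed value, attempt the exchange, and on failure carry the bits that must survive (ERTS_SSI_FLG_MSB_EXEC here) into the next attempt. A self-contained sketch of that shape in C11 atomics, with illustrative flag values:

    #include <stdatomic.h>
    #include <stdint.h>

    enum {
        FLG_SLEEPING = 1u << 0,
        FLG_WAITING  = 1u << 1,
        FLG_MSB_EXEC = 1u << 2,  /* must be preserved across the swap */
    };

    static uint32_t prep_spin_wait(_Atomic uint32_t *flags)
    {
        uint32_t expected = 0, desired;

        for (;;) {
            /* Preserve MSB_EXEC from the last observed value and
             * request the sleeping/waiting state on top of it. */
            desired = (expected & FLG_MSB_EXEC)
                    | FLG_SLEEPING | FLG_WAITING;
            if (atomic_compare_exchange_strong_explicit(
                    flags, &expected, desired,
                    memory_order_acquire, memory_order_relaxed))
                return desired;
            /* On failure, expected now holds the current value; the
             * real code also bails on certain old flags (elided). */
        }
    }
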
@@ -2865,7 +3218,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
} while (oflgs & ERTS_SSI_FLG_WAITING);
return oflgs;
}
@@ -2915,7 +3268,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
return oflgs;
}
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
}
}
@@ -2971,13 +3324,6 @@ thr_prgr_fin_wait(void *vssi)
static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
-void
-erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time)
-{
- /* TODO only poke when needed (based on timeout_time) */
- erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
-}
-
static void *
aux_thread(void *unused)
{
@@ -3050,6 +3396,8 @@ aux_thread(void *unused)
return NULL;
}
+static void suspend_scheduler(ErtsSchedulerData *esdp);
+
#endif /* ERTS_SMP */
static void
@@ -3090,6 +3438,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
rq->sleepers.list->prev = ssi;
rq->sleepers.list = ssi;
erts_smp_spin_unlock(&rq->sleepers.lock);
+ dirty_active(esdp, -1);
}
#endif
@@ -3108,8 +3457,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working)
- sched_wall_time_change(esdp, thr_prgr_active);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 0);
+ else if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, working = thr_prgr_active);
while (1) {
ErtsMonotonicTime current_time = 0;
@@ -3215,13 +3566,17 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 1);
+ else if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
- }
+ }
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
@@ -3415,14 +3770,19 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_runq_lock(rq);
}
clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
#endif
if (!working)
sched_wall_time_change(esdp, working = 1);
sched_active_sys(esdp->no, rq);
}
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_active(esdp, 1);
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
@@ -3439,17 +3799,54 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
- nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
xflgs = oflgs;
}
}
+static ERTS_INLINE void
+ssi_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi));
+}
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
static void
-wake_scheduler(ErtsRunQueue *rq)
+dcpu_sched_ix_suspend_wake(Uint ix)
{
- ErtsSchedulerSleepInfo *ssi;
- erts_aint32_t flgs;
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+static void
+dio_sched_ix_suspend_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+
+static void
+dcpu_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#if 0
+static void
+dio_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+#endif
+
+#endif
+
+static void
+wake_scheduler(ErtsRunQueue *rq)
+{
/*
* The unlocked run queue is not strictly necessary
* from a thread safety or deadlock prevention
@@ -3461,10 +3858,7 @@ wake_scheduler(ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)
|| ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- ssi = rq->scheduler->ssi;
-
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
+ ssi_wake(rq->scheduler->ssi);
}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -3511,6 +3905,13 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
} while (ssi);
}
}
+
+static void
+wake_dirty_scheduler(ErtsRunQueue *rq)
+{
+ wake_dirty_schedulers(rq, 1);
+}
+
#endif
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -3651,7 +4052,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
if (runq) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_schedulers(runq, 1);
+ wake_dirty_scheduler(runq);
else
#endif
wake_scheduler(runq);
@@ -3955,8 +4356,7 @@ suspend_run_queue(ErtsRunQueue *rq)
wake_scheduler(rq);
}
-static void scheduler_ix_resume_wake(Uint ix);
-static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
+static void nrml_sched_ix_resume_wake(Uint ix);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -3964,16 +4364,19 @@ resume_run_queue(ErtsRunQueue *rq)
int pix;
Uint32 oflgs;
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
erts_smp_runq_lock(rq);
oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq,
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
- | ERTS_RUNQ_FLG_SUSPENDED),
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC),
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- if (oflgs & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (oflgs & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
erts_aint32_t len;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
@@ -3992,10 +4395,7 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- scheduler_ix_resume_wake(rq->ix);
+ nrml_sched_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -4053,28 +4453,22 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp = NULL;
+ ErtsMigrationPath *mp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
- }
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -4083,6 +4477,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
end = rq->misc.end;
rq->misc.start = NULL;
rq->misc.end = NULL;
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
erts_smp_runq_unlock(rq);
erts_smp_runq_lock(to_rq);
@@ -4103,9 +4498,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (rq->ports.start) {
Port *prt;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -4143,15 +4535,10 @@ evacuate_run_queue(ErtsRunQueue *rq,
int notify = 0;
to_rq = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
- }
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
@@ -4207,11 +4594,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
goto handle_next_proc;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- clear_proc_dirty_queue_bit(real_proc, rq, qbit);
-#endif
-
if (ERTS_PSFLG_BOUND & real_state) {
/* Bound processes get stuck here... */
proc->next = NULL;
@@ -4225,16 +4607,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- /*
- * dirty run queues evacuate only to run
- * queue 0 during multi-scheduling blocking
- */
- to_rq = ERTS_RUNQ_IX(0);
- else
-#endif
- to_rq = mp->prio[prio].runq;
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_smp_runq_lock(to_rq);
@@ -4269,7 +4642,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
erts_smp_runq_lock(vrq);
- if (rq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING)
goto no_procs;
/*
@@ -4368,8 +4741,7 @@ check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq);
- if ((flags & (ERTS_RUNQ_FLG_NONEMPTY
- | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY)
+ if (runq_got_work_to_execute_flags(flags) & (!(flags & ERTS_RUNQ_FLG_PROTECTED)))
return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags);
else
return 0;
@@ -4437,11 +4809,9 @@ try_steal_task(ErtsRunQueue *rq)
if (!rq_locked)
erts_smp_runq_lock(rq);
- if (!res)
- res = rq->halt_in_progress ?
- !ERTS_EMPTY_RUNQ_PORTS(rq) : !ERTS_EMPTY_RUNQ(rq);
-
- return res;
+ if (res)
+ return res;
+ return runq_got_work_to_execute(rq);
}
/* Run queue balancing */
@@ -4808,7 +5178,7 @@ check_balance(ErtsRunQueue *c_rq)
sched_util_balancing = 1;
/*
* In order to avoid renaming a large amount of fields
- * we write utilization values instead of lenght values
+ * we write utilization values instead of length values
* in the 'max_len' and 'migration_limit' fields...
*/
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -5353,7 +5723,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
if (rq->waiting) {
- wake_dirty_schedulers(rq, 1);
+ wake_dirty_scheduler(rq);
}
} else
#endif
@@ -5646,14 +6016,30 @@ erts_sched_set_wake_cleanup_threshold(char *str)
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- if (!esdp)
- awdp->sched_id = 0;
+ int id = 0;
+ if (esdp) {
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ id = (int) esdp->no;
+ break;
#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
+ case ERTS_SCHED_DIRTY_CPU:
+ id = (int) erts_no_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ id = (int) erts_no_schedulers;
+ id += (int) erts_no_dirty_cpu_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
#endif
- else
- awdp->sched_id = (int) esdp->no;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ break;
+ }
+ }
+
+ awdp->sched_id = id;
awdp->esdp = esdp;
awdp->ssi = esdp ? esdp->ssi : NULL;
#ifdef ERTS_SMP
@@ -5700,7 +6086,8 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
char** daww_ptr, size_t daww_sz,
- Process *shadow_proc)
+ Process *shadow_proc,
+ Uint64 time_stamp)
{
esdp->timer_wheel = NULL;
#ifdef ERTS_SMP
@@ -5716,13 +6103,29 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp->run_queue = runq;
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
+ if (runq == ERTS_DIRTY_CPU_RUNQ)
+ esdp->type = ERTS_SCHED_DIRTY_CPU;
+ else {
+ ASSERT(runq == ERTS_DIRTY_IO_RUNQ);
+ esdp->type = ERTS_SCHED_DIRTY_IO;
+ }
+ esdp->dirty_no = (Uint) num;
+ if (num == 1) {
+ /*
+ * Multi-scheduling block functionality depends
+ * on finding dirty scheduler number 1 here...
+ */
+ runq->scheduler = esdp;
+ }
}
else {
+ esdp->type = ERTS_SCHED_NORMAL;
esdp->no = (Uint) num;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
+ esdp->dirty_no = 0;
+ runq->scheduler = esdp;
}
esdp->dirty_shadow_process = shadow_proc;
if (shadow_proc) {
@@ -5734,7 +6137,10 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
}
#else
+ runq->scheduler = esdp;
+ esdp->run_queue = runq;
esdp->no = (Uint) num;
+ esdp->type = ERTS_SCHED_NORMAL;
#endif
esdp->ssi = ssi;
@@ -5746,9 +6152,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
erts_init_atom_cache_map(&esdp->atom_cache_map);
- esdp->run_queue = runq;
- esdp->run_queue->scheduler = esdp;
-
esdp->last_monotonic_time = 0;
esdp->check_time_reds = 0;
@@ -5767,7 +6170,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->reductions = 0;
- init_sched_wall_time(&esdp->sched_wall_time);
+ init_sched_wall_time(esdp, time_stamp);
erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
}
@@ -5779,7 +6182,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
#endif
)
{
- int ix, n, no_ssi;
+ int ix, n, no_ssi, tot_rqs;
char *daww_ptr;
size_t daww_sz;
size_t size_runqs;
@@ -5812,26 +6215,19 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
/* Create and initialize run queues */
n = no_schedulers;
- size_runqs = sizeof(ErtsAlignedRunQueue) * (n + ERTS_NUM_DIRTY_RUNQS);
+ tot_rqs = (n + ERTS_NUM_DIRTY_RUNQS);
+ size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
#ifdef ERTS_SMP
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_aligned_run_queues += ERTS_NUM_DIRTY_RUNQS;
-#endif
erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
- for (ix = -(ERTS_NUM_DIRTY_RUNQS); ix < n; ix++) {
+ for (ix = 0; ix < tot_rqs; ix++) {
int pix, rix;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsRunQueue *rq = ERTS_RUNQ_IX_IS_DIRTY(ix) ?
- ERTS_DIRTY_RUNQ_IX(ix) : ERTS_RUNQ_IX(ix);
-#else
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
-#endif
rq->ix = ix;
@@ -5839,13 +6235,17 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* id if the esdp->no <-> ix+1 mapping change.
*/
- erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
+ erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_smp_cnd_init(&rq->cnd);
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix))
- erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
+ make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ }
rq->sleepers.list = NULL;
#endif
#endif
@@ -5864,7 +6264,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_smp_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- rq->halt_in_progress = 0;
rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
@@ -5982,17 +6381,18 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
- n*sizeof(ErtsAlignedSchedulerData));
+ n*sizeof(ErtsAlignedSchedulerData));
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
- NULL);
+ NULL, 0);
}
#ifdef ERTS_DIRTY_SCHEDULERS
{
+ Uint64 ts = sched_wall_time_ts();
int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
int adspix = 0;
ErtsAlignedDirtyShadowProcess *adsp =
@@ -6012,13 +6412,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_CPU_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_IO_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
}
#endif
@@ -6038,7 +6438,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
- erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
@@ -6104,6 +6505,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ERTS_SCHED_DIRTY_IO,
no_dirty_io_schedulers);
+ erts_smp_atomic32_init_nob(&dirty_count.cpu.active,
+ (erts_aint32_t) no_dirty_cpu_schedulers);
+ erts_smp_atomic32_init_nob(&dirty_count.io.active,
+ (erts_aint32_t) no_dirty_io_schedulers);
+
#endif
if (set_schdlr_sspnd_change_flags)
@@ -6123,7 +6529,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_tsd_set(sched_data_key, (void *) esdp);
#endif
}
- erts_no_schedulers = 1;
erts_no_dirty_cpu_schedulers = 0;
erts_no_dirty_io_schedulers = 0;
#endif
@@ -6133,8 +6538,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
/* init port tasks */
erts_port_task_init();
- aux_work_timeout_late_init();
-
#ifndef ERTS_SMP
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_scheduler_data->verify_unused_temp_alloc
@@ -6445,10 +6848,30 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
- if (is_normal_sched)
- running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
- else
+ if (!is_normal_sched)
running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ else {
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ /*
+ * Delay dirty GC; will be enabled automatically
+ * again by next GC...
+ */
+
+ /*
+ * No normal execution until dirty CLA or hibernation has
+ * been handled...
+ */
+ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE)));
+
+ state = erts_smp_atomic32_read_band_nob(&p->state,
+ ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+ state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ }
+#endif
+ }
a = state;
@@ -6960,15 +7383,8 @@ resume_process(Process *p, ErtsProcLocks locks)
#ifdef ERTS_SMP
-static void
-scheduler_ix_resume_wake(Uint ix)
-{
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
-}
-
-static void
-scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+static ERTS_INLINE void
+sched_resume_wake__(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
@@ -6982,9 +7398,31 @@ scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
break;
}
xflgs = oflgs;
- } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+ } while (oflgs & (ERTS_SSI_FLG_MSB_EXEC|ERTS_SSI_FLG_SUSPENDED));
+}
+
+static void
+nrml_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static void
+dcpu_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+static void
+dio_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#endif
+
static erts_aint32_t
sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
@@ -7063,10 +7501,20 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
init_scheduler_suspend(void)
{
- erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
- schdlr_sspnd.online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.curr_online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.active = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
+ erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ schdlr_sspnd.online.normal = 1;
+ schdlr_sspnd.curr_online.normal = 1;
+ schdlr_sspnd.active.normal = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.online.dirty_cpu = 0;
+ schdlr_sspnd.curr_online.dirty_cpu = 0;
+ schdlr_sspnd.active.dirty_cpu = 0;
+ schdlr_sspnd.online.dirty_io = 0;
+ schdlr_sspnd.curr_online.dirty_io = 0;
+ schdlr_sspnd.active.dirty_io = 0;
+ schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO;
+#endif
erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.chngq = NULL;
schdlr_sspnd.changer = am_false;
@@ -7127,6 +7575,252 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE int
+have_dirty_work(void)
+{
+ return !(ERTS_EMPTY_RUNQ(ERTS_DIRTY_CPU_RUNQ)
+ | ERTS_EMPTY_RUNQ(ERTS_DIRTY_IO_RUNQ));
+}
+
+#define ERTS_MSB_NONE_PRIO_BIT PORT_BIT
+
+static ERTS_INLINE Uint32
+msb_runq_prio_bit(Uint32 flgs)
+{
+ int pbit;
+
+ pbit = (int) (flgs & ERTS_RUNQ_FLGS_PROCS_QMASK);
+ if (flgs & PORT_BIT) {
+ /* rate ports as proc prio high */
+ pbit |= HIGH_BIT;
+ }
+ if (flgs & ERTS_RUNQ_FLG_MISC_OP) {
+ /* rate misc ops as proc prio normal */
+ pbit |= NORMAL_BIT;
+ }
+ if (flgs & LOW_BIT) {
+ /* rate low prio as normal (avoid starvation) */
+ pbit |= NORMAL_BIT;
+ }
+ if (!pbit)
+ pbit = (int) ERTS_MSB_NONE_PRIO_BIT;
+ else
+ pbit &= -pbit; /* least significant bit set... */
+ ASSERT(pbit);
+
+ /* High prio low value; low prio high value... */
+ return (Uint32) pbit;
+}
+
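
Note: the pbit &= -pbit step above is the standard two's-complement trick for isolating the least significant set bit; with the priority bits laid out so that lower bit positions mean higher priority, the isolated bit compares directly as "smaller value = more urgent". A tiny self-contained example:

    #include <stdio.h>

    int main(void)
    {
        /* Bits, low to high: MAX, HIGH, NORMAL, LOW (illustrative). */
        unsigned flgs = 0x6;          /* HIGH and NORMAL queued: 0b0110 */
        unsigned pbit = flgs & -flgs; /* isolate lowest set bit: 0b0010 */

        /* Comparing the isolated bits of two run queues picks the
         * more urgent one directly. */
        printf("highest-prio bit: 0x%x\n", pbit); /* prints 0x2 (HIGH) */
        return 0;
    }
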
+static ERTS_INLINE void
+msb_runq_prio_bits(Uint32 *nrmlp, Uint32 *dcpup, Uint32 *diop)
+{
+ Uint32 flgs = ERTS_RUNQ_FLGS_GET(ERTS_RUNQ_IX(0));
+ if (flgs & ERTS_RUNQ_FLG_HALTING) {
+ /*
+ * Emulator is halting; only execute port jobs
+ * on normal scheduler. Ensure that we switch
+ * to the normal scheduler.
+ */
+ *nrmlp = HIGH_BIT;
+ *dcpup = ERTS_MSB_NONE_PRIO_BIT;
+ *diop = ERTS_MSB_NONE_PRIO_BIT;
+ }
+ else {
+ *nrmlp = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_CPU_RUNQ);
+ *dcpup = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_IO_RUNQ);
+ *diop = msb_runq_prio_bit(flgs);
+ }
+}
+
+static int
+msb_scheduler_type_switch(ErtsSchedType sched_type,
+ ErtsSchedulerData *esdp,
+ long no)
+{
+ Uint32 nrml_prio, dcpu_prio, dio_prio;
+ ErtsSchedType exec_type;
+ ErtsRunQueue *exec_rq;
+#ifdef DEBUG
+ erts_aint32_t dbg_val;
+#endif
+
+ ASSERT(schdlr_sspnd.msb.ongoing);
+
+ /*
+ * This function determines how to switch
+ * between scheduler types when multi-scheduling
+ * is blocked.
+ *
+ * If no dirty work exists, we always select
+ * execution on the normal scheduler. If nothing
+ * executes, normal scheduler 1 should be waiting
+ * in sys_schedule(), otherwise we cannot react
+ * to I/O events.
+ *
+ * We unconditionally switch back to the normal
+ * scheduler after executing dirty work in order
+ * to make sure we check for I/O...
+ */
+
+ msb_runq_prio_bits(&nrml_prio, &dcpu_prio, &dio_prio);
+
+ exec_type = ERTS_SCHED_NORMAL;
+ if (sched_type == ERTS_SCHED_NORMAL) {
+
+ /*
+ * Check priorities of work in the
+ * different run-queues and determine
+ * run-queue with highest prio job...
+ */
+
+ if ((dcpu_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & (dio_prio == ERTS_MSB_NONE_PRIO_BIT)) {
+ /*
+ * No dirty work exists; continue on the normal
+ * scheduler...
+ */
+ return 0;
+ }
+
+ if (dcpu_prio < nrml_prio) {
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ if (dio_prio < dcpu_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ else {
+ if (dio_prio < nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+
+ /*
+ * Make sure to alternate between dirty types
+ * in between normal execution when the highest
+ * priorities are equal.
+ */
+
+ if (exec_type == ERTS_SCHED_NORMAL) {
+ if (dcpu_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else if (dio_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ else {
+ /*
+ * Normal work has higher prio than
+ * dirty work; continue on normal
+ * scheduler...
+ */
+ return 0;
+ }
+ }
+
+ ASSERT(exec_type != ERTS_SCHED_NORMAL);
+ if (dio_prio == dcpu_prio) {
+	    /* Alternate between dirty types... */
+ if (schdlr_sspnd.last_msb_dirty_type == ERTS_SCHED_DIRTY_IO)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ }
+
+ ASSERT(sched_type != exec_type);
+
+ if (exec_type != ERTS_SCHED_NORMAL)
+ schdlr_sspnd.last_msb_dirty_type = exec_type;
+ else {
+ erts_aint32_t calls;
+ /*
+ * Going back to normal scheduler after
+ * dirty execution; make sure it will check
+ * for I/O...
+ */
+ if (ERTS_USE_MODIFIED_TIMING())
+ calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1;
+ else
+ calls = INPUT_REDUCTIONS + 1;
+ erts_smp_atomic32_set_nob(&function_calls, calls);
+
+ if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT)
+ | (dio_prio != ERTS_MSB_NONE_PRIO_BIT))) {
+ /*
+ * We have dirty work, but an empty
+ * normal run-queue.
+ *
+ * Since the normal run-queue is
+ * empty, the normal scheduler will
+ * go to sleep when selected for
+ * execution. We have dirty work to
+ * do, so we only want it to check
+ * I/O, and then come back here and
+ * switch to dirty execution.
+ *
+ * To prevent the scheduler from going
+ * to sleep we trick it into believing
+ * it has work to do...
+ */
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0),
+ ERTS_RUNQ_FLG_MISC_OP);
+ }
+ }
+
+ /*
+ * Suspend this scheduler and wake up scheduler
+ * number one of another type...
+ */
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_bset_mb(&esdp->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+
+ switch (exec_type) {
+ case ERTS_SCHED_NORMAL:
+ exec_rq = ERTS_RUNQ_IX(0);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ exec_rq = ERTS_DIRTY_CPU_RUNQ;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ exec_rq = ERTS_DIRTY_IO_RUNQ;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ exec_rq = NULL;
+ break;
+ }
+
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_SUSPENDED);
+
+ wake_scheduler(exec_rq);
+
+ return 1; /* suspend this scheduler... */
+
+}
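
As a reading aid, the selection logic above can be modeled as a pure function
over the three priority values, where a lower value means a higher priority
and a large sentinel means no work. This is a hedged editorial sketch under
those assumptions, not the ERTS implementation, and it ignores the halting
special case and all the suspend/wake bookkeeping:

#include <stdio.h>

enum sched_type { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO };
#define PRIO_NONE 100u /* sentinel: no work queued */

static enum sched_type
pick_exec_type(unsigned nrml, unsigned dcpu, unsigned dio,
               enum sched_type *last_dirty)
{
    enum sched_type exec = SCHED_NORMAL;

    if (dcpu == PRIO_NONE && dio == PRIO_NONE)
        return SCHED_NORMAL;             /* no dirty work at all */

    if (dcpu < nrml)
        exec = (dio < dcpu) ? SCHED_DIRTY_IO : SCHED_DIRTY_CPU;
    else if (dio < nrml)
        exec = SCHED_DIRTY_IO;
    else if (dcpu == nrml)
        exec = SCHED_DIRTY_CPU;          /* tie with normal: run dirty */
    else if (dio == nrml)
        exec = SCHED_DIRTY_IO;
    else
        return SCHED_NORMAL;             /* normal work strictly first */

    if (dio == dcpu)                     /* tie between dirty types: */
        exec = (*last_dirty == SCHED_DIRTY_IO)
            ? SCHED_DIRTY_CPU : SCHED_DIRTY_IO;  /* alternate */
    *last_dirty = exec;
    return exec;
}

int main(void)
{
    enum sched_type last = SCHED_DIRTY_IO;
    /* equal dirty priorities beat normal work and alternate per call: */
    printf("%d\n", pick_exec_type(2, 1, 1, &last)); /* SCHED_DIRTY_CPU */
    printf("%d\n", pick_exec_type(2, 1, 1, &last)); /* SCHED_DIRTY_IO  */
    return 0;
}
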
+
+#endif
+
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
@@ -7152,32 +7846,52 @@ suspend_scheduler(ErtsSchedulerData *esdp)
* Regardless of why a scheduler is suspended, it ends up here.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
- sched_type = ERTS_SCHED_DIRTY_CPU;
- }
- else {
- online_flag = 0;
- sched_type = ERTS_SCHED_DIRTY_IO;
- }
+
+#if !defined(ERTS_DIRTY_SCHEDULERS)
+
+ sched_type = ERTS_SCHED_NORMAL;
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ ASSERT(no != 1);
+
+#else
+
+ sched_type = esdp->type;
+ switch (sched_type) {
+ case ERTS_SCHED_NORMAL:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ no = esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ online_flag = 0;
+ no = esdp->dirty_no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return;
}
- else
-#endif
- {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
- no = esdp->no;
- sched_type = ERTS_SCHED_NORMAL;
+
+ if (erts_smp_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+ ASSERT(no == 1);
+ if (!msb_scheduler_type_switch(sched_type, esdp, no))
+ return;
+ /* Suspend and let scheduler 1 of another type execute... */
}
- ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
+#endif
- if (sched_type != ERTS_SCHED_NORMAL)
+ if (sched_type != ERTS_SCHED_NORMAL) {
+ dirty_active(esdp, -1);
erts_smp_runq_unlock(esdp->run_queue);
+ dirty_sched_wall_time_change(esdp, 0);
+ }
else {
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (no != 1)
+ evacuate_run_queue(esdp->run_queue, &sbp);
erts_smp_runq_unlock(esdp->run_queue);
@@ -7185,10 +7899,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
-
- sched_wall_time_change(esdp, 0);
-
}
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
@@ -7196,9 +7908,6 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type);
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) >= 1);
-
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
while (1) {
@@ -7220,9 +7929,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ERTS_SCHED_NORMAL) == 1) {
clr_flg = ERTS_SCHDLR_SSPND_CHNG_NMSB;
}
- else if (schdlr_sspnd.active
- == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)) {
- clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
+ else if (schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL) == 1
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU) == 0
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO) == 0) {
+ clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
}
if (clr_flg) {
@@ -7294,10 +8007,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
- if (curr_online
- && (sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing)) {
+ if (curr_online) {
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
@@ -7313,13 +8023,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
else {
- erts_aint32_t qmask;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
+ int evacuate = no == 1 ? 0 : !ERTS_EMPTY_RUNQ(esdp->run_queue);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work|qmask) {
+ if (aux_work|evacuate) {
if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
@@ -7331,7 +8039,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
- if (qmask) {
+ if (evacuate) {
erts_smp_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
erts_smp_runq_unlock(esdp->run_queue);
@@ -7446,7 +8154,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (changing) {
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& !schdlr_sspnd.msb.ongoing
- && schdlr_sspnd.online == schdlr_sspnd.active) {
+ && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online,
+ &schdlr_sspnd.active)) {
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
@@ -7461,34 +8170,33 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
- ASSERT((sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing));
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- ASSERT(!resume.msb.chngrs);
schdlr_sspnd_resume_procs(sched_type, &resume);
ASSERT(curr_online);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_active);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_sched_wall_time_change(esdp, 1);
+ else {
+ (void) erts_get_monotonic_time(esdp);
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
}
- if (sched_type == ERTS_SCHED_NORMAL)
- (void) erts_get_monotonic_time(esdp);
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
- if (sched_type == ERTS_SCHED_NORMAL) {
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_active(esdp, 1);
+ else {
schedule_bound_processes(esdp->run_queue, &sbp);
erts_sched_check_cpu_bind_post_suspend(esdp);
@@ -7754,10 +8462,8 @@ erts_set_schedulers_online(Process *p,
erts_sched_poke(ssi);
}
} else {
- for (ix = dirty_online; ix < dirty_no; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = dirty_online; ix < dirty_no; ix++)
+ dcpu_sched_ix_resume_wake(ix);
}
}
if (!dirty_only)
@@ -7785,19 +8491,19 @@ erts_set_schedulers_online(Process *p,
else /* if decrease */ {
#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
- ErtsSchedulerSleepInfo* ssi;
if (schdlr_sspnd.msb.ongoing) {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_sched_poke(ssi);
- }
- } else {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ erts_sched_poke(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ /*
+		 * A newly suspended scheduler may have been just
+		 * about to handle a task. Make sure someone takes
+		 * care of such a task...
+ */
+ dcpu_sched_ix_wake(0);
}
}
if (!dirty_only)
@@ -7860,9 +8566,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
{
int resume_proc, ix, res, have_unlocked_plocks = 0;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerSleepInfo* ssi;
-#endif
ErtsMultiSchedulingBlock *msbp;
erts_aint32_t chng_flg;
int have_blckd_flg;
@@ -7903,21 +8606,23 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
p->flags |= have_blckd_flg;
goto wait_until_msb;
}
- else if (msbp->blckrs) {
- ASSERT(msbp->ongoing);
+ else if (msbp->blckrs || (normal && erts_no_schedulers == 1)) {
+ ASSERT(!msbp->blckrs || msbp->ongoing);
+ msbp->ongoing = 1;
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
p->flags |= have_blckd_flg;
ASSERT(normal
- ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL)
- : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL)
+ : schdlr_sspnd_get_nscheds_tot(&schdlr_sspnd.active) == 1);
ASSERT(erts_proc_sched_data(p)->no == 1);
if (schdlr_sspnd.msb.ongoing)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else
res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
}
- else {
+ else {
int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_NORMAL);
ASSERT(!msbp->ongoing);
@@ -7928,59 +8633,41 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
ASSERT(!msbp->ongoing);
msbp->ongoing = 1;
- if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)
- || (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1)) {
- ASSERT(erts_proc_sched_data(p)->no == 1);
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->blckrs, plp);
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
- }
- else {
- erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
- chng_flg);
- change_no_used_runqs(1);
- for (ix = 1; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
- for (ix = 1; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
+ erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ chng_flg);
+ change_no_used_runqs(1);
+ for (ix = 1; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = 1; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
- }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal) {
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC);
+ erts_smp_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags,
+ ERTS_SSI_FLG_MSB_EXEC);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_suspend_wake(ix);
+ }
#endif
- wait_until_msb:
+ wait_until_msb:
- ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing));
+ ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing));
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->chngq, plp);
- resume_proc = 0;
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
- }
+ plp = proclist_create(p);
+ erts_proclist_store_last(&msbp->chngq, plp);
+ resume_proc = 0;
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
ASSERT(erts_proc_sched_data(p));
}
}
@@ -8012,27 +8699,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
}
if (!msbp->blckrs && !msbp->chngq) {
- int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
- ERTS_SCHED_NORMAL);
+ int online;
erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
chng_flg);
p->flags &= ~have_blckd_flg;
msbp->ongoing = 0;
- if (online == 1) {
- /* No normal schedulers to resume */
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1);
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~chng_flg);
-#endif
- }
- else if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
- if (plocks) {
+ if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
+ if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
change_no_used_runqs(online);
/* Resume all online run queues */
@@ -8043,19 +8722,15 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- ASSERT(!schdlr_sspnd.msb.ongoing);
+ if (!schdlr_sspnd.msb.ongoing) {
+ /* Get rid of msb-exec flag in run-queue of scheduler 1 */
+ resume_run_queue(ERTS_RUNQ_IX(0));
online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU);
- for (ix = 0; ix < online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
-
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = 0; ix < online; ix++)
+ dcpu_sched_ix_resume_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_resume_wake(ix);
}
#endif
}
@@ -8152,6 +8827,9 @@ sched_thread_func(void *vesdp)
erts_sched_init_time_sup(esdp);
+ if (no == 1)
+ erts_aux_work_timeout_late_init(esdp);
+
(void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
ERTS_RUNQ_FLG_EXEC);
@@ -8203,7 +8881,10 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
- process_main();
+ erts_ets_sched_spec_data_init(esdp);
+
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
+
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Scheduler thread number %beu terminated\n",
@@ -8218,8 +8899,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -8228,6 +8908,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers;
erts_msacc_init_thread("dirty_cpu_scheduler", no, 0);
@@ -8265,8 +8947,7 @@ sched_dirty_io_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -8275,6 +8956,8 @@ sched_dirty_io_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
erts_msacc_init_thread("dirty_io_scheduler", no, 0);
@@ -8372,6 +9055,7 @@ erts_start_schedulers(void)
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d\n", ix);
@@ -8379,6 +9063,7 @@ erts_start_schedulers(void)
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -8647,8 +9332,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
erts_aint32_t state;
state = erts_smp_atomic32_read_nob(&rp->state);
ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
- || !(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ || !(state & ERTS_PSFLG_RUNNING));
}
#endif
@@ -8685,17 +9369,6 @@ erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
}
/*
- * Like erts_pid2proc_not_running(), but hands over the process
- * in a suspended state unless (c_p is looked up).
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 1);
-}
-
-/*
* erts_pid2proc_nropt() is normally the same as
* erts_pid2proc_not_running(). However it is only
* to be used when 'not running' is a pure optimization,
@@ -8813,21 +9486,6 @@ handle_pend_bif_async_suspend(Process *suspendee,
}
}
-#else
-
-/*
- * Non-smp version of erts_pid2proc_suspend().
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- Process *rp = erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- if (rp)
- erts_suspend(rp, pid_locks, NULL);
- return rp;
-}
-
#endif /* ERTS_SMP */
/*
@@ -9172,38 +9830,69 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
BIF_RET(am_false);
}
+static ERTS_INLINE void
+run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int incl_active_sched, int locked)
+{
+ Sint rq_len;
+
+ if (locked)
+ rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len);
+ else
+ rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len);
+ ASSERT(rq_len >= 0);
+
+ if (incl_active_sched) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ erts_aint32_t dcnt;
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) {
+ dcnt = erts_smp_atomic32_read_nob(&dirty_count.cpu.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers);
+ }
+ else {
+ ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq));
+ dcnt = erts_smp_atomic32_read_nob(&dirty_count.io.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers);
+ }
+ rq_len += (Sint) dcnt;
+ }
+ else
+#endif
+ {
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)
+ rq_len++;
+ }
+ }
+ if (qlen)
+ qlen[(*ip)++] = rq_len;
+ *tot_len += (Uint) rq_len;
+}
Uint
-erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
+erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched,
+ int incl_dirty_io)
{
- int i = 0;
+ int i = 0, j = 0;
Uint len = 0;
- if (atomic_queues_read)
- ERTS_ATOMIC_FOREACH_RUNQ(rq,
- {
- Sint rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len);
- ASSERT(rq_len >= 0);
- if (incl_active_sched
- && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
- rq_len++;
- }
- if (qlen)
- qlen[i++] = rq_len;
- len += (Uint) rq_len;
- }
- );
+ int no_rqs = erts_no_run_queues;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (incl_dirty_io)
+ no_rqs += ERTS_NUM_DIRTY_RUNQS;
+ else
+ no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS;
+#endif
+
+ if (atomic_queues_read) {
+ ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs,
+ run_queues_len_aux(rq, &len, qlen, &j,
+ incl_active_sched, 1),
+ /* Nothing... */);
+ }
else {
- for (i = 0; i < erts_no_run_queues; i++) {
+ for (i = 0; i < no_rqs; i++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
- Sint rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len);
- ASSERT(rq_len >= 0);
- if (incl_active_sched
- && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
- rq_len++;
- }
- if (qlen)
- qlen[i] = rq_len;
- len += (Uint) rq_len;
+ run_queues_len_aux(rq, &len, qlen, &j, incl_active_sched, 0);
}
}
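
The run_queues_len_aux() helper above folds one run queue at a time into a
running total and an optional per-queue array. A self-contained model of
that aggregation, ignoring the atomic reads and the dirty-scheduler active
counts (all names below are illustrative):

#include <stdio.h>

struct rq { int len; int exec; };

static unsigned
total_len(const struct rq *q, int n, unsigned *per_q, int incl_exec)
{
    unsigned tot = 0;
    int i;
    for (i = 0; i < n; i++) {
        unsigned l = (unsigned) q[i].len;
        if (incl_exec && q[i].exec)
            l++;                    /* count the currently running job */
        if (per_q)
            per_q[i] = l;
        tot += l;
    }
    return tot;
}

int main(void)
{
    struct rq qs[2] = { { 3, 1 }, { 0, 0 } };
    unsigned per[2];
    printf("total=%u per0=%u per1=%u\n",
           total_len(qs, 2, per, 1), per[0], per[1]); /* 4, 4, 0 */
    return 0;
}
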
@@ -9721,7 +10410,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!is_normal_sched) {
if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED) {
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
suspend_scheduler(esdp);
}
}
@@ -9731,8 +10420,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ASSERT(is_normal_sched);
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC)) {
+ if (flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
(void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
@@ -9771,13 +10462,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (!is_normal_sched && rq->halt_in_progress) {
+ if (!is_normal_sched & !!(flags & ERTS_RUNQ_FLG_HALTING)) {
/* Wait for emulator to terminate... */
while (1)
erts_milli_sleep(1000*1000);
}
- else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if (!runq_got_work_to_execute_flags(flags)) {
/* Prepare for scheduler wait */
#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -9791,21 +10481,44 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (is_normal_sched && try_steal_task(rq))
- goto continue_check_activities_to_run;
-
- empty_runq(rq);
-
- /*
- * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
- * after trying to steal a task.
- */
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
- flags |= ERTS_RUNQ_FLG_NONEMPTY;
- goto continue_check_activities_to_run_known_flags;
+ ASSERT(!runq_got_work_to_execute(rq));
+ if (!is_normal_sched) {
+ /* Dirty scheduler */
+ if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
+ /* Go suspend... */
+ goto continue_check_activities_to_run_known_flags;
+ }
+ }
+ else {
+ /* Normal scheduler */
+ if (try_steal_task(rq))
+ goto continue_check_activities_to_run;
+ /*
+		    * The check for suspend has to be done after
+		    * trying to steal a task...
+ */
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if ((flags & ERTS_RUNQ_FLG_SUSPENDED)
+#ifdef ERTS_DIRTY_SCHEDULERS
+                    /* If multi-scheduling is blocked and we have
+                     * dirty work, suspend and let a dirty
+                     * scheduler handle the work... */
+ || ((((flags & (ERTS_RUNQ_FLG_HALTING
+ | ERTS_RUNQ_FLG_MSB_EXEC))
+ == ERTS_RUNQ_FLG_MSB_EXEC))
+ && have_dirty_work())
+#endif
+ ) {
+ non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
+ /*
+ * Go suspend...
+ */
+ goto continue_check_activities_to_run_known_flags;
+ }
}
+ empty_runq(rq);
}
#endif
@@ -9857,7 +10570,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#endif
}
- if (rq->misc.start)
+ if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
#ifdef ERTS_SMP
@@ -9868,13 +10581,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
* Find a new port to run.
*/
- if (RUNQ_READ_LEN(&rq->ports.info.len)) {
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+
+ if (flags & PORT_BIT) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
if ((!erts_eager_check_io
&& have_outstanding_io
&& fcalls > 2*input_reductions)
- || rq->halt_in_progress) {
+ || (flags & ERTS_RUNQ_FLG_HALTING)) {
/*
* If we have performed more than 2*INPUT_REDUCTIONS since
* last call to erl_sys_schedule() and we still haven't
@@ -9987,6 +10702,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_EXITING))
== ERTS_PSFLG_EXITING)
& (!!is_normal_sched))
@@ -9998,7 +10714,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- != ERTS_PSFLG_SUSPENDED));
+ != ERTS_PSFLG_SUSPENDED)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ & (!(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))
+ | (!!is_normal_sched))
+#endif
+ );
+
if (run_process) {
if (state & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
@@ -10104,9 +10827,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto sunlock_sched_out_proc;
}
- ASSERT((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
- || *p->i == (BeamInstr) em_call_nif);
-
ASSERT(rq == ERTS_DIRTY_CPU_RUNQ
? (state & (ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
@@ -10143,65 +10863,70 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- if (state & (ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- /*
- * GC is normally never delayed when a process
- * is scheduled out, but might be when executing
- * hand written beam assembly in
- * prim_eval:'receive'. If GC is delayed we are
- * not allowed to execute system tasks.
- */
- if (!(p->flags & F_DELAY_GC)) {
- int cost = execute_sys_tasks(p, &state, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0
+ if (is_normal_sched) {
+
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+ /*
+ * GC is normally never delayed when a process
+ * is scheduled out, but might be when executing
+ * hand written beam assembly in
+ * prim_eval:'receive'. If GC is delayed we are
+ * not allowed to execute system tasks.
+ */
+ if (!(p->flags & F_DELAY_GC)) {
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
#ifdef ERTS_DIRTY_SCHEDULERS
- || !is_normal_sched
- || (state & ERTS_PSFLGS_DIRTY_WORK)
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ goto sched_out_proc;
#endif
- ) {
- goto sched_out_proc;
- }
- }
+ }
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
- while (1) {
- erts_aint32_t n, e;
+ while (1) {
+ erts_aint32_t n, e;
- if (((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING)) {
- goto sched_out_proc;
- }
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ && !(state & ERTS_PSFLG_EXITING)) {
+ goto sched_out_proc;
+ }
- n = e = state;
- n &= ~psflg_running_sys;
- n |= psflg_running;
+ n = e = state;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
- state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
- if (state == e) {
- state = n;
- break;
- }
+ state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
- }
- }
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
+ }
+ }
- if (ERTS_IS_GC_DESIRED(p) && !ERTS_SCHEDULER_IS_DIRTY_IO(esdp)) {
- if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
- int cost = scheduler_gc_proc(p, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0)
- goto sched_out_proc;
- }
- }
+ if (ERTS_IS_GC_DESIRED(p)) {
+ if (!(state & ERTS_PSFLG_EXITING)
+ && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ int cost = scheduler_gc_proc(p, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto sched_out_proc;
+#endif
+ }
+ }
+ }
if (proxy_p) {
free_proxy_proc(proxy_p);
@@ -10213,7 +10938,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t dstate = erts_smp_atomic32_read_nob(&p->state);
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
+ || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
+ }
+#endif
ASSERT(erts_proc_read_refc(p) > 0);
@@ -10234,9 +10965,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
static int
-notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
+ Eterm st_result, int normal_sched)
{
- Process *rp = erts_proc_lookup(st->requester);
+ Process *rp;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal_sched)
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ st->requester, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ else
+#endif
+ rp = erts_proc_lookup(st->requester);
if (rp) {
ErtsProcLocks rp_locks;
ErlOffHeap *ohp;
@@ -10284,6 +11024,11 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!normal_sched)
+ erts_proc_dec_refc(rp);
+#endif
}
erts_cleanup_offheap(&st->off_heap);
@@ -10439,18 +11184,23 @@ done:
}
static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
+#ifdef ERTS_DIRTY_SCHEDULERS
+static void save_dirty_task(Process *c_p, ErtsProcSysTask *st);
+#endif
static int
execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
- int garbage_collected = 0;
+ int minor_gc = 0, major_gc = 0;
erts_aint32_t state = *statep;
int reds = in_reds;
int qmask = 0;
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
+ ErtsProcSysTaskType type;
ErtsProcSysTask *st;
int st_prio;
Eterm st_res;
@@ -10468,18 +11218,35 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
if (!st)
break;
- switch (st->type) {
- case ERTS_PSTT_GC:
+ type = st->type;
+
+ switch (type) {
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
reds--;
}
else {
- if (!garbage_collected) {
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ if ((!minor_gc
+ || (!major_gc && type == ERTS_PSTT_GC_MAJOR))
+ && !(c_p->flags & F_HIBERNATED)) {
+ if (type == ERTS_PSTT_GC_MAJOR) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ }
reds -= scheduler_gc_proc(c_p, reds);
- garbage_collected = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+#endif
+ if (type == ERTS_PSTT_GC_MAJOR)
+ minor_gc = major_gc = 1;
+ else
+ minor_gc = 1;
}
st_res = am_true;
}
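
The minor_gc/major_gc pair above encodes a subsumption rule: a completed
major collection satisfies any later major or minor request in the same
scheduling slice, while a completed minor collection only satisfies minor
requests. A small standalone model of the skip test (names illustrative):

#include <stdio.h>

enum gc_kind { GC_MINOR, GC_MAJOR };

static int need_gc(enum gc_kind type, int minor_done, int major_done)
{
    /* mirrors: (!minor_gc || (!major_gc && type == ERTS_PSTT_GC_MAJOR)) */
    return !minor_done || (!major_done && type == GC_MAJOR);
}

int main(void)
{
    /* after only a minor GC, a major request must still run: */
    printf("%d\n", need_gc(GC_MAJOR, 1, 0)); /* -> 1 */
    /* after a major GC, a minor request is already satisfied: */
    printf("%d\n", need_gc(GC_MINOR, 1, 1)); /* -> 0 */
    return 0;
}
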
@@ -10493,7 +11260,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
fcalls = reds - CONTEXT_REDS;
st_res = erts_check_process_code(c_p,
st->arg[0],
- unsigned_val(st->arg[1]),
&cpc_reds,
fcalls);
reds -= cpc_reds;
@@ -10504,27 +11270,36 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
break;
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA: {
int fcalls;
int cla_reds = 0;
+ int do_gc;
+
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
fcalls = reds;
else
fcalls = reds - CONTEXT_REDS;
- st_res = erts_proc_copy_literal_area(c_p,
- &cla_reds,
- fcalls,
- st->arg[0] == am_true);
+ do_gc = st->arg[0] == am_true;
+ st_res = erts_proc_copy_literal_area(c_p, &cla_reds,
+ fcalls, do_gc);
reds -= cla_reds;
if (is_non_value(st_res)) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->flags & F_DIRTY_CLA) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+#endif
/* Needed gc, but gc was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
+ break;
}
+ if (do_gc) /* We did a major gc */
+ minor_gc = major_gc = 1;
break;
}
-#endif
case ERTS_PSTT_COHMQ:
reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
@@ -10535,13 +11310,19 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
st_res = am_true;
break;
#endif
+#ifdef ERTS_SMP
+ case ERTS_PSTT_ETS_FREE_FIXATION:
+ reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
+ st_res = am_true;
+ break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
}
if (st)
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
state = erts_smp_atomic32_read_acqb(&c_p->state);
} while (qmask && reds > 0);
@@ -10560,7 +11341,9 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
erts_aint32_t state = in_state;
int max_reds = in_reds;
int reds = 0;
- int qmask = 0;
+ int qmask = 1; /* Set to 1 to force looping as long as there
+ * are dirty tasks.
+ */
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
@@ -10569,21 +11352,30 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
Eterm st_res;
int st_prio;
- st = fetch_sys_task(c_p, state, &qmask, &st_prio);
- if (!st)
- break;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (c_p->dirty_sys_tasks) {
+ st = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st->next;
+ }
+ else
+#endif
+ {
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+ }
switch (st->type) {
- case ERTS_PSTT_GC:
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
case ERTS_PSTT_CPC:
- case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_ETS_FREE_FIXATION:
st_res = am_false;
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA:
st_res = am_ok;
break;
-#endif
#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
@@ -10596,7 +11388,7 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
break;
}
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
state = erts_smp_atomic32_read_acqb(&c_p->state);
} while (qmask && reds < max_reds);
@@ -10606,6 +11398,92 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
#ifdef ERTS_DIRTY_SCHEDULERS
+void
+erts_execute_dirty_system_task(Process *c_p)
+{
+ Eterm cla_res = THE_NON_VALUE;
+ ErtsProcSysTask *stasks;
+
+ /*
+     * If there are multiple operations, perform them in the
+     * following order (to avoid unnecessary GC):
+ * 1. Copy Literal Area (implies major GC).
+ * 2. GC Hibernate (implies major GC if not woken).
+ * 3. Major GC (implies minor GC).
+ * 4. Minor GC.
+ *
+ * System task requests are handled after the actual
+ * operations have been performed...
+ */
+
+ ASSERT(!(c_p->flags & (F_DELAY_GC|F_DISABLE_GC)));
+
+ if (c_p->flags & F_DIRTY_CLA) {
+ int cla_reds = 0;
+ cla_res = erts_proc_copy_literal_area(c_p, &cla_reds, c_p->fcalls, 1);
+ ASSERT(is_value(cla_res));
+ }
+
+ if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ if (c_p->msg.len)
+ c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
+ else {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->fvalue = NIL;
+ erts_garbage_collect_hibernate(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ if (c_p->flags & F_DIRTY_MAJOR_GC)
+ c_p->flags |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg,
+ c_p->arity, c_p->fcalls);
+ }
+
+ ASSERT(!(c_p->flags & (F_DIRTY_CLA
+ | F_DIRTY_GC_HIBERNATE
+ | F_DIRTY_MAJOR_GC
+ | F_DIRTY_MINOR_GC)));
+
+ stasks = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = NULL;
+
+ while (stasks) {
+ Eterm st_res;
+ ErtsProcSysTask *st = stasks;
+ stasks = st->next;
+
+ switch (st->type) {
+ case ERTS_PSTT_CLA:
+ ASSERT(is_value(cla_res));
+ st_res = cla_res;
+ break;
+ case ERTS_PSTT_GC_MAJOR:
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_GC_MINOR:
+ st_res = am_true;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Not supported dirty system task");
+ break;
+ }
+
+ (void) notify_sys_task_executed(c_p, st, st_res, 0);
+
+ }
+
+ erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+}
+
static BIF_RETTYPE
dispatch_system_task(Process *c_p, erts_aint_t fail_state,
ErtsProcSysTask *st, Eterm target,
@@ -10749,17 +11627,19 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
switch (req_type) {
case am_garbage_collect:
- st->type = ERTS_PSTT_GC;
- noproc_res = am_false;
- if (!rp)
+ switch (st->arg[0]) {
+ case am_minor: st->type = ERTS_PSTT_GC_MINOR; break;
+ case am_major: st->type = ERTS_PSTT_GC_MAJOR; break;
+ default: goto badarg;
+ }
+ noproc_res = am_false;
+ if (!rp)
goto noproc;
break;
case am_check_process_code:
if (is_not_atom(st->arg[0]))
goto badarg;
- if (is_not_small(st->arg[1]) || (unsigned_val(st->arg[1]) & ~ERTS_CPC_ALL))
- goto badarg;
noproc_res = am_false;
st->type = ERTS_PSTT_CPC;
if (!rp)
@@ -10775,7 +11655,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
#endif
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case am_copy_literals:
if (st->arg[0] != am_true && st->arg[0] != am_false)
goto badarg;
@@ -10784,7 +11663,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
if (!rp)
goto noproc;
break;
-#endif
default:
goto badarg;
@@ -10808,7 +11686,7 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()");
failure = am_internal_error;
}
- notify_sys_task_executed(c_p, st, failure);
+ notify_sys_task_executed(c_p, st, failure, 1);
}
ERTS_BIF_PREP_RET(ret, am_ok);
@@ -10846,13 +11724,12 @@ erts_internal_request_system_task_4(BIF_ALIST_4)
}
static void
-erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
+erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg)
{
Process *rp = erts_proc_lookup(pid);
if (rp) {
ErtsProcSysTask *st;
erts_aint32_t state, fail_state;
- int i;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
@@ -10861,8 +11738,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
st->reply_tag = NIL;
st->req_id = NIL;
st->req_id_sz = 0;
- for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
- st->arg[i] = NIL;
+ st->arg[0] = (Eterm)arg;
ERTS_INIT_OFF_HEAP(&st->off_heap);
state = erts_smp_atomic32_read_nob(&rp->state);
@@ -10878,7 +11754,13 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
void
erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
{
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ, NULL);
+}
+
+void
+erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix);
}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -10895,9 +11777,11 @@ flush_dirty_trace_messages(void *vpid)
erts_free(ERTS_ALC_T_DIRTY_SL, vpid);
#endif
- proc = erts_proc_lookup(pid);
- if (proc)
- (void) erts_flush_trace_messages(proc, 0);
+ proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0);
+ if (proc) {
+ (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ }
}
#endif /* ERTS_DIRTY_SCHEDULERS */
@@ -10926,7 +11810,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
dhndl = erts_thr_progress_unmanaged_delay();
#endif
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL);
#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
@@ -11023,6 +11907,15 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+save_dirty_task(Process *c_p, ErtsProcSysTask *st)
+{
+ st->next = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st;
+}
+#endif
+
int
erts_set_gc_state(Process *c_p, int enable)
{
@@ -11245,6 +12138,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
non_empty_runq(rq);
#endif
+ ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
erts_smp_runq_unlock(rq);
smp_notify_inc_runq(rq);
@@ -11275,6 +12170,9 @@ exec_misc_ops(ErtsRunQueue *rq)
rq->misc.end = NULL;
}
+ if (!rq->misc.start)
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
erts_smp_runq_unlock(rq);
while (molp) {
@@ -11301,6 +12199,8 @@ erts_get_total_reductions(Uint *redsp, Uint *diffp)
Uint reds = 0;
ERTS_ATOMIC_FOREACH_RUNQ_X(rq,
+ erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS,
+
reds += rq->procs.reductions,
if (redsp) *redsp = reds;
@@ -11359,6 +12259,7 @@ static void early_init_process_struct(void *varg, Eterm data)
proc->common.id = make_internal_pid(data);
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_atomic32_init_nob(&proc->dirty_state, 0);
+ proc->dirty_sys_tasks = NULL;
#endif
erts_smp_atomic32_init_relb(&proc->state, arg->state);
@@ -11434,6 +12335,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -11492,7 +12396,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
- arg_size = size_object(args);
+ arg_size = size_object_litopt(args, &litarea);
#endif
heap_need = arg_size;
@@ -11516,10 +12420,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
}
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
-
- p->u.initial[INITIAL_MOD] = mod;
- p->u.initial[INITIAL_FUN] = func;
- p->u.initial[INITIAL_ARI] = (Uint) arity;
+
+ p->u.initial.module = mod;
+ p->u.initial.function = func;
+ p->u.initial.arity = (Uint) arity;
/*
* Must initialize binary lists here before copying binaries to process.
@@ -11561,7 +12465,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/* No need to initialize p->fcalls. */
- p->current = p->u.initial+INITIAL_MOD;
+ p->current = &p->u.initial;
p->i = (BeamInstr *) beam_apply;
p->cp = (BeamInstr *) beam_apply+1;
@@ -11574,7 +12478,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
DESTROY_SHCOPY(info);
#else
- p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+ p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea);
#endif
p->arity = 3;
@@ -11615,9 +12519,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg_inq.len = 0;
#endif
p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
-#endif
p->mbuf = NULL;
p->msg_frag = NULL;
p->mbuf_sz = 0;
@@ -11743,11 +12644,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
+ ErtsCodeMFA cmfa = {mod, func, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(p, mod, func, arity, process_name, mfa);
- DTRACE2(process_spawn, process_name, mfa);
+ dtrace_fun_decode(p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_spawn, process_name, mfa_buf);
}
#endif
return res;
@@ -11815,16 +12717,13 @@ void erts_init_empty_process(Process *p)
p->msg.save = &p->msg.first;
p->msg.len = 0;
p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
-#endif
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
p->seq_trace_token = NIL;
- p->u.initial[0] = 0;
- p->u.initial[1] = 0;
- p->u.initial[2] = 0;
+ p->u.initial.module = 0;
+ p->u.initial.function = 0;
+ p->u.initial.arity = 0;
p->catches = 0;
p->cp = NULL;
p->i = NULL;
@@ -11863,6 +12762,7 @@ void erts_init_empty_process(Process *p)
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_atomic32_init_nob(&p->dirty_state, 0);
+ p->dirty_sys_tasks = NULL;
#endif
erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
@@ -11917,9 +12817,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
ASSERT(p->bif_timers == NULL);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ASSERT(p->accessor_bif_timers == NULL);
-#endif
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -11974,7 +12871,6 @@ delete_process(Process* p)
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
- void *nif_export;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
@@ -11991,9 +12887,7 @@ delete_process(Process* p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
- nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
- if (nif_export)
- erts_destroy_nif_export(nif_export);
+ erts_destroy_nif_export(p);
/* Cleanup psd */
@@ -12437,7 +13331,9 @@ send_exit_signal(Process *c_p, /* current process if and only
}
set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
+ else if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
ErlHeapFragment *bp = NULL;
@@ -12464,8 +13360,7 @@ send_exit_signal(Process *c_p, /* current process if and only
ErlOffHeap *ohp;
Uint rsn_sz = size_object(rsn);
#ifdef ERTS_DIRTY_SCHEDULERS
- if (state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (state & ERTS_PSFLG_DIRTY_RUNNING) {
bp = new_message_buffer(rsn_sz);
ohp = &bp->off_heap;
hp = &bp->mem[0];
@@ -12482,7 +13377,7 @@ send_exit_signal(Process *c_p, /* current process if and only
set_proc_exiting(rp, state, rsn_cpy, bp);
}
else { /* Process running... */
-
+
/*
* The pending exit will be discovered when the process
* is scheduled out if not discovered earlier.
@@ -12504,8 +13399,35 @@ send_exit_signal(Process *c_p, /* current process if and only
&bp->off_heap);
rp->pending_exit.bp = bp;
}
- erts_smp_atomic32_read_bor_relb(&rp->state,
- ERTS_PSFLG_PENDING_EXIT);
+
+ /*
+	     * If no dirty work has been scheduled, the pending exit
+	     * will be discovered when the process is scheduled. If
+	     * dirty work has been scheduled, we may need to add the
+	     * process to a normal run queue...
+ */
+#ifndef ERTS_DIRTY_SCHEDULERS
+ (void) erts_smp_atomic32_read_bor_relb(&rp->state,
+ ERTS_PSFLG_PENDING_EXIT);
+#else
+ {
+ erts_aint32_t a = erts_smp_atomic32_read_nob(&rp->state);
+ while (1) {
+ erts_aint32_t n, e;
+ int dwork;
+ n = e = a;
+ n |= ERTS_PSFLG_PENDING_EXIT;
+ dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK);
+ n &= ~ERTS_PSFLGS_DIRTY_WORK;
+ a = erts_smp_atomic32_cmpxchg_mb(&rp->state, n, e);
+ if (a == e) {
+ if (dwork)
+ erts_schedule_process(rp, n, *rp_locks);
+ break;
+ }
+ }
+ }
+#endif
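
The loop above is the standard compare-and-swap retry pattern: compute the
desired flag word from the observed one, attempt the exchange, and retry
from the freshly observed value on failure. A minimal C11-atomics sketch of
the same shape, with placeholder flag names standing in for the ERTS ones:

#include <stdatomic.h>
#include <stdio.h>

#define F_PENDING (1u << 0) /* stands in for ERTS_PSFLG_PENDING_EXIT  */
#define F_WORK    (1u << 1) /* stands in for ERTS_PSFLGS_DIRTY_WORK   */

/* Atomically set PENDING while clearing WORK; report whether WORK was
 * set so the caller knows it must reschedule the process. */
static int set_pending_clear_work(atomic_uint *state)
{
    unsigned e = atomic_load_explicit(state, memory_order_relaxed);
    for (;;) {
        unsigned n = (e | F_PENDING) & ~F_WORK;
        int had_work = (e & F_WORK) != 0;
        if (atomic_compare_exchange_weak_explicit(state, &e, n,
                                                  memory_order_acq_rel,
                                                  memory_order_relaxed))
            return had_work;
        /* CAS failure reloaded e with the current value; retry */
    }
}

int main(void)
{
    atomic_uint st;
    atomic_init(&st, F_WORK);
    printf("had work: %d\n", set_pending_clear_work(&st)); /* 1 */
    return 0;
}
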
}
}
/* else:
@@ -12571,9 +13493,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
switch (mon->type) {
case MON_ORIGIN:
/* We are monitoring someone else, we need to demonitor that one.. */
- if (is_atom(mon->pid)) { /* remote by name */
- ASSERT(is_node_name_atom(mon->pid));
- dep = erts_sysname_to_connected_dist_entry(mon->pid);
+ if (is_atom(mon->u.pid)) { /* remote by name */
+ ASSERT(is_node_name_atom(mon->u.pid));
+ dep = erts_sysname_to_connected_dist_entry(mon->u.pid);
if (dep) {
erts_smp_de_links_lock(dep);
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
@@ -12584,7 +13506,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
+ rmon->u.pid,
mon->name,
mon->ref,
1);
@@ -12595,10 +13517,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_deref_dist_entry(dep);
}
} else {
- ASSERT(is_pid(mon->pid) || is_port(mon->pid));
+ ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid));
/* if is local by pid or name */
- if (is_internal_pid(mon->pid)) {
- Process *rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ if (is_internal_pid(mon->u.pid)) {
+ Process *rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
@@ -12608,9 +13530,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
goto done;
}
erts_destroy_monitor(rmon);
- } else if (is_internal_port(mon->pid)) {
+ } else if (is_internal_port(mon->u.pid)) {
/* Is a local port */
- Port *prt = erts_port_lookup_raw(mon->pid);
+ Port *prt = erts_port_lookup_raw(mon->u.pid);
if (!prt) {
goto done;
}
@@ -12618,8 +13540,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
prt, mon->ref, NULL);
} else { /* remote by pid */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
erts_smp_de_links_lock(dep);
@@ -12631,8 +13553,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
- mon->pid,
+ rmon->u.pid,
+ mon->u.pid,
mon->ref,
1);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -12644,22 +13566,21 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
break;
case MON_TARGET:
- ASSERT(mon->type == MON_TARGET);
- ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
- if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid);
+ ASSERT(is_pid(mon->u.pid) || is_internal_port(mon->u.pid));
+ if (is_internal_port(mon->u.pid)) {
+ Port *prt = erts_id2port(mon->u.pid);
if (prt == NULL) {
goto done;
}
erts_fire_port_monitor(prt, mon->ref);
erts_port_release(prt);
- } else if (is_internal_pid(mon->pid)) {/* local by name or pid */
+ } else if (is_internal_pid(mon->u.pid)) {/* local by name or pid */
Eterm watched;
Process *rp;
DeclareTmpHeapNoproc(lhp,3);
ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCKS_MSG_SEND);
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
if (rp == NULL) {
goto done;
}
@@ -12678,8 +13599,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
/* else: demonitor while we exited, i.e. do nothing... */
erts_smp_proc_unlock(rp, rp_locks);
} else { /* external by pid or name */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
+ ASSERT(is_external_pid(mon->u.pid));
+ dep = external_pid_dist_entry(mon->u.pid);
ASSERT(dep != NULL);
if (dep) {
erts_smp_de_links_lock(dep);
@@ -12691,10 +13612,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
code = erts_dsig_send_m_exit(&dsd,
- mon->pid,
+ mon->u.pid,
(rmon->name != NIL
? rmon->name
- : rmon->pid),
+ : rmon->u.pid),
mon->ref,
pcontext->reason);
ASSERT(code == ERTS_DSIG_SEND_OK);
@@ -12704,6 +13625,11 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
}
}
break;
+ case MON_NIF_TARGET:
+ erts_fire_nif_monitor(mon->u.resource,
+ pcontext->p->common.id,
+ mon->ref);
+ break;
case MON_TIME_OFFSET:
erts_demonitor_time_offset(mon->ref);
break;
@@ -12956,7 +13882,7 @@ erts_continue_exit_process(Process *p)
ASSERT(erts_proc_read_refc(p) > 0);
if (p->bif_timers) {
- if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) {
+ if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
ASSERT(erts_proc_read_refc(p) > 0);
goto yield;
}
@@ -12964,19 +13890,6 @@ erts_continue_exit_process(Process *p)
p->bif_timers = NULL;
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (p->accessor_bif_timers) {
- if (erts_detach_accessor_bif_timers(p,
- p->accessor_bif_timers,
- &p->u.terminate)) {
- ASSERT(erts_proc_read_refc(p) > 0);
- goto yield;
- }
- ASSERT(erts_proc_read_refc(p) > 0);
- p->accessor_bif_timers = NULL;
- }
-#endif
-
#ifdef ERTS_SMP
if (p->flags & F_SCHDLR_ONLN_WAITQ)
abort_sched_onln_chng_waitq(p);
@@ -13031,11 +13944,25 @@ erts_continue_exit_process(Process *p)
erts_set_gc_state(p, 1);
state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_ACTIVE_SYS) {
+ if (state & ERTS_PSFLG_ACTIVE_SYS
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || p->dirty_sys_tasks
+#endif
+ ) {
if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
goto yield;
}
+#ifdef DEBUG
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->sys_task_qs == NULL);
+ ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(p->dirty_sys_tasks == NULL);
+#endif
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+#endif
+
if (p->flags & F_USING_DDLL) {
erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
p->flags &= ~F_USING_DDLL;
@@ -13186,8 +14113,11 @@ erts_continue_exit_process(Process *p)
have none here */
}
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
#ifdef ERTS_SMP
- erts_flush_trace_messages(p, 0);
+ erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
#endif
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
@@ -13195,11 +14125,6 @@ erts_continue_exit_process(Process *p)
if (!delay_del_proc)
delete_process(p);
-#ifdef ERTS_SMP
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-#endif
-
return;
yield:
@@ -13278,8 +14203,8 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
static void
print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA *cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -13293,7 +14218,8 @@ print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%d + %d",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
@@ -13354,6 +14280,8 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "WAITING"); break;
case ERTS_SSI_FLG_SUSPENDED:
erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_SSI_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13463,6 +14391,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "NONEMPTY"); break;
case ERTS_RUNQ_FLG_PROTECTED:
erts_print(to, to_arg, "PROTECTED"); break;
+ case ERTS_RUNQ_FLG_EXEC:
+ erts_print(to, to_arg, "EXEC"); break;
+ case ERTS_RUNQ_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
+ case ERTS_RUNQ_FLG_MISC_OP:
+ erts_print(to, to_arg, "MISC_OP"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13512,11 +14446,11 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
* A nice system halt closing all open port goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
- * 2) All shedulers detect this and set the flag halt_in_progress
+ * 2) All schedulers detect this and set the flag ERTS_RUNQ_FLG_HALTING
* on their run queue. The last scheduler sets all non-closed ports
* ERTS_PORT_SFLG_HALT. Global atomic erts_halt_progress is used
* as refcount to determine which is last.
- * 3) While the run ques has flag halt_in_progress no processes
+ * 3) While the run queues have the flag ERTS_RUNQ_FLG_HALTING no processes
* will be scheduled, only ports.
* 4) When the last port closes that scheduler calls erlang:halt/1.
* The same global atomic is used as refcount.
@@ -13531,8 +14465,8 @@ void erts_halt(int code)
erts_no_schedulers,
-1)) {
#ifdef ERTS_DIRTY_SCHEDULERS
- ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
- ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING);
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING);
#endif
erts_halt_code = code;
notify_reap_ports_relb();
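[Editor's note] The comment block above describes a refcount-style halt handshake: erts_halt_progress is armed once via compare-and-swap and then counted down, with the last scheduler out performing the actual halt. Below is a minimal sketch of that pattern using plain C11 atomics instead of the erts_smp_* wrappers; all names and the scheduler count are illustrative, not the actual ERTS implementation.

#include <stdatomic.h>

static atomic_int halt_progress = -1;   /* -1 means "no halt in progress" */

void request_halt(int nschedulers)
{
    int expected = -1;
    /* Only the first caller arms the halt; the count starts at the
     * number of schedulers that must acknowledge it. */
    if (atomic_compare_exchange_strong(&halt_progress, &expected, nschedulers)) {
        /* set HALTING flags, notify schedulers to reap ports ... */
    }
}

void scheduler_acknowledge_halt(void)
{
    /* Each scheduler decrements once; whoever brings the count to zero
     * is last and performs the final halt. */
    if (atomic_fetch_sub(&halt_progress, 1) == 1) {
        /* last one out: close remaining ports, then halt */
    }
}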
@@ -13546,6 +14480,9 @@ erts_dbg_check_halloc_lock(Process *p)
ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
+ if ((p->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ && ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()))
+ return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
esdp = erts_proc_sched_data(p);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 68fbb10602..9d7ba27c50 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,8 @@
#ifndef __PROCESS_H__
#define __PROCESS_H__
+#include "sys.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -37,8 +39,6 @@
typedef struct process Process;
-#include "sys.h"
-
#define ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
@@ -63,6 +63,9 @@ typedef struct process Process;
#define ERTS_ONLY_INCLUDE_TRACE_FLAGS
#include "erl_trace.h"
#undef ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#define ERTS_ONLY_SCHED_SPEC_ETS_DATA
+#include "erl_db.h"
+#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA
#ifdef HIPE
#include "hipe_process.h"
@@ -93,10 +96,6 @@ struct ErtsNodesMonitor_;
#define ERTS_HEAP_FREE(Type, Ptr, Size) \
erts_free((Type), (Ptr))
-#define INITIAL_MOD 0
-#define INITIAL_FUN 1
-#define INITIAL_ARI 2
-
#include "export.h"
struct saved_calls {
@@ -118,7 +117,11 @@ extern Uint erts_no_dirty_io_schedulers;
#endif
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
-#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern int erts_dcpu_sched_thread_suggested_stack_size;
+extern int erts_dio_sched_thread_suggested_stack_size;
+#endif
+#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
@@ -174,8 +177,14 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
#define ERTS_RUNQ_FLG_EXEC \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 7))
+#define ERTS_RUNQ_FLG_MSB_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 8))
+#define ERTS_RUNQ_FLG_MISC_OP \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
+#define ERTS_RUNQ_FLG_HALTING \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 8)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 11)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -266,8 +275,9 @@ typedef enum {
#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLG_MSB_EXEC (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_FLGS_MAX 5
+#define ERTS_SSI_FLGS_MAX 6
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -278,7 +288,8 @@ typedef enum {
#define ERTS_SSI_FLGS_ALL \
(ERTS_SSI_FLGS_SLEEP \
| ERTS_SSI_FLG_WAITING \
- | ERTS_SSI_FLG_SUSPENDED)
+ | ERTS_SSI_FLG_SUSPENDED \
+ | ERTS_SSI_FLG_MSB_EXEC)
/*
* Keep ERTS_SSI_AUX_WORK flags ordered in expected frequency
@@ -308,6 +319,7 @@ typedef enum {
ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
ERTS_SSI_AUX_WORK_SET_TMO_IX,
ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
+ ERTS_SSI_AUX_WORK_YIELD_IX,
ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */
@@ -344,6 +356,8 @@ typedef enum {
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX)
+#define ERTS_SSI_AUX_WORK_YIELD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_YIELD_IX)
#define ERTS_SSI_AUX_WORK_REAP_PORTS \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX)
#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \
@@ -397,6 +411,12 @@ typedef struct {
Process* last;
} ErtsRunPrioQueue;
+typedef enum {
+ ERTS_SCHED_NORMAL,
+ ERTS_SCHED_DIRTY_CPU,
+ ERTS_SCHED_DIRTY_IO
+} ErtsSchedType;
+
typedef struct ErtsSchedulerData_ ErtsSchedulerData;
typedef struct ErtsRunQueue_ ErtsRunQueue;
@@ -482,7 +502,6 @@ struct ErtsRunQueue_ {
erts_smp_atomic32_t len;
int wakeup_other;
int wakeup_other_reds;
- int halt_in_progress;
struct {
ErtsProcList *pending_exiters;
@@ -541,13 +560,15 @@ do { \
} while (0)
typedef struct {
- int need; /* "+sbu true" or scheduler_wall_time enabled */
+ union {
+ erts_atomic32_t mod; /* on dirty schedulers */
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
+ } u;
int enabled;
Uint64 start;
struct {
Uint64 total;
Uint64 start;
- int currently;
} working;
} ErtsSchedWallTime;
@@ -602,6 +623,10 @@ typedef struct {
} delayed_wakeup;
#endif
struct {
+ ErtsEtsAllYieldData ets_all;
+ /* Other yielding operations... */
+ } yield;
+ struct {
struct {
erts_aint32_t flags;
void (*callback)(void *);
@@ -610,19 +635,16 @@ typedef struct {
} debug;
} ErtsAuxWorkData;
+#define ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) \
+ (&(ESDP)->aux_work_data.yield.NAME)
+void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp);
+
#ifdef ERTS_DIRTY_SCHEDULERS
typedef enum {
ERTS_DIRTY_CPU_SCHEDULER,
ERTS_DIRTY_IO_SCHEDULER
} ErtsDirtySchedulerType;
-typedef union {
- struct {
- ErtsDirtySchedulerType type: 1;
- Uint num: sizeof(Uint)*8 - 1;
- } s;
- Uint no;
-} ErtsDirtySchedId;
#endif
struct ErtsSchedulerData_ {
@@ -646,9 +668,10 @@ struct ErtsSchedulerData_ {
#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
+ ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
+ Uint dirty_no; /* Scheduler number for dirty schedulers */
Process *dirty_shadow_process;
#endif
Port *current_port;
@@ -676,7 +699,7 @@ struct ErtsSchedulerData_ {
ErtsSchedWallTime sched_wall_time;
ErtsGCInfo gc_info;
ErtsPortTaskHandle nosuspend_port_task_handle;
-
+ ErtsEtsTables ets_tables;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -810,14 +833,16 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_CALL_TIME_BP 3
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
+#define ERTS_PSD_ETS_OWNED_TABLES 6
+#define ERTS_PSD_ETS_FIXED_TABLES 7
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 8
-#define ERTS_PSD_SIZE 7
+#define ERTS_PSD_SIZE 9
#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 6
+# define ERTS_PSD_SIZE 8
#endif
typedef struct {
@@ -842,8 +867,14 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ((ErtsProcLocks) 0)
-#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ((ErtsProcLocks) 0)
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS ERTS_PROC_LOCK_STATUS
+#define ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -946,9 +977,11 @@ struct process {
Eterm* stop; /* Stack top */
Eterm* heap; /* Heap start */
Eterm* hend; /* Heap end */
+ Eterm* abandoned_heap;
Uint heap_sz; /* Size of heap in words */
Uint min_heap_size; /* Minimum size of heap (in words). */
Uint min_vheap_size; /* Minimum size of virtual heap (in words). */
+ Uint max_heap_size; /* Maximum size of heap (in words). */
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
volatile unsigned long fp_exception;
@@ -961,16 +994,6 @@ struct process {
#endif
/*
- * Moved to after "struct hipe_process_state hipe", as a temporary fix for
- * LLVM hard-coding offsetof(struct process, hipe.nstack) (sic!)
- * (see void X86FrameLowering::adjustForHiPEPrologue(...) in
- * lib/Target/X86/X86FrameLowering.cpp).
- *
- * Used to be below "Eterm* hend".
- */
- Eterm* abandoned_heap;
-
- /*
* Saved x registers.
*/
Uint arity; /* Number of live argument registers (only valid
@@ -1007,9 +1030,6 @@ struct process {
ErlMessageQueue msg; /* Message queue */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */
-#endif
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -1023,15 +1043,16 @@ struct process {
#endif
union {
void *terminate;
- BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
- of pointer to funcinfo instruction, hence the BeamInstr datatype */
+ ErtsCodeMFA initial; /* Initial module(0), function(1), arity(2),
+ often used instead of pointer to funcinfo
+ instruction. */
} u;
- BeamInstr* current; /* Current Erlang function, part of the funcinfo:
+ ErtsCodeMFA* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
- * arity an untagged integer). BeamInstr * because it references code
+ * arity an untagged integer).
*/
-
+
/*
* Information mainly for post-mortem use (erl crash dump).
*/
@@ -1048,7 +1069,6 @@ struct process {
Eterm *old_hend; /* Heap pointers for generational GC. */
Eterm *old_htop;
Eterm *old_heap;
- Uint max_heap_size; /* Maximum size of heap (in words). */
Uint16 gen_gcs; /* Number of (minor) generational GCs. */
Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
@@ -1063,6 +1083,9 @@ struct process {
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
ErtsProcSysTaskQs *sys_task_qs;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsProcSysTask *dirty_sys_tasks;
+#endif
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -1387,14 +1410,19 @@ extern int erts_system_profile_ts_type;
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
-#define F_ON_HEAP_MSGQ (1 << 13) /* Off heap msg queue */
+#define F_ON_HEAP_MSGQ (1 << 13) /* On heap msg queue */
#define F_OFF_HEAP_MSGQ_CHNG (1 << 14) /* Off heap msg queue changing */
#define F_ABANDONED_HEAP_USE (1 << 15) /* Have usage of abandoned heap */
#define F_DELAY_GC (1 << 16) /* Similar to disable GC (see below) */
#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
-#define F_HIPE_MODE (1 << 19)
+#define F_HIPE_MODE (1 << 19) /* Process is executing in HiPE mode */
#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
+#define F_DIRTY_CLA (1 << 21) /* Dirty copy literal area scheduled */
+#define F_DIRTY_GC_HIBERNATE (1 << 22) /* Dirty GC hibernate scheduled */
+#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
+#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
+#define F_HIBERNATED (1 << 25) /* Hibernated */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1514,24 +1542,29 @@ extern int erts_system_profile_ts_type;
} while (0)
#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
-#define ERTS_NUM_DIRTY_RUNQS 2
+#define ERTS_NUM_DIRTY_CPU_RUNQS 1
+#define ERTS_NUM_DIRTY_IO_RUNQS 1
#else
-#define ERTS_NUM_DIRTY_RUNQS 0
+#define ERTS_NUM_DIRTY_CPU_RUNQS 0
+#define ERTS_NUM_DIRTY_IO_RUNQS 0
#endif
+#define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS)
+
#define ERTS_RUNQ_IX(IX) \
- (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
&erts_aligned_run_queues[(IX)].runq)
#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_RUNQ_IX_IS_DIRTY(IX) \
- (-(ERTS_NUM_DIRTY_RUNQS) <= (IX) && (IX) < 0)
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
+ (erts_no_run_queues <= (IX)))
#define ERTS_DIRTY_RUNQ_IX(IX) \
(ASSERT(ERTS_RUNQ_IX_IS_DIRTY(IX)), \
&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
-#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
-#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
-#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
+#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[erts_no_run_queues].runq)
+#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ)
#else
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#endif
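[Editor's note] The hunk above relocates the dirty run queues: instead of living at negative indices in front of erts_aligned_run_queues, they now occupy the two slots directly after the normal queues. A sketch of the resulting layout and the simplified index test (illustrative only, not ERTS code):

/* Layout after this change:
 *
 *   index 0 .. erts_no_run_queues-1   normal run queues
 *   index erts_no_run_queues          dirty CPU run queue
 *   index erts_no_run_queues + 1      dirty I/O run queue
 */
static int runq_ix_is_dirty(int ix, int n_normal_queues)
{
    /* Previously: -2 <= ix < 0; now the dirty queues trail the normal ones. */
    return ix >= n_normal_queues;
}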
@@ -1545,23 +1578,13 @@ extern int erts_system_profile_ts_type;
#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
&erts_aligned_dirty_io_scheduler_data[(IX)].esd)
-#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
- ((ESDP)->dirty_no.s.num)
-#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
- ((ESDP)->dirty_no.s.type)
-#ifdef ERTS_SMP
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
- ((ESDP)->dirty_no.s.num != 0)
+ ((ESDP)->type != ERTS_SCHED_NORMAL)
#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
- (ERTS_SCHEDULER_IS_DIRTY((ESDP)) & ((ESDP)->dirty_no.s.type == 0))
+ ((ESDP)->type == ERTS_SCHED_DIRTY_CPU)
#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
- (ERTS_SCHEDULER_IS_DIRTY((ESDP)) & ((ESDP)->dirty_no.s.type == 1))
-#else
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
-#else
+ ((ESDP)->type == ERTS_SCHED_DIRTY_IO)
+#else /* !ERTS_DIRTY_SCHEDULERS */
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
@@ -1576,21 +1599,19 @@ void erts_init_scheduling(int, int
, int, int, int
#endif
);
-
+#ifdef ERTS_DIRTY_SCHEDULERS
+void erts_execute_dirty_system_task(Process *c_p);
+#endif
int erts_set_gc_state(Process *c_p, int enable);
-Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int dirty_cpu, int want_dirty_io);
Eterm erts_system_check_request(Process *c_p);
Eterm erts_gc_info_request(Process *c_p);
Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
-int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
-void erts_destroy_nif_export(void *); /* see erl_nif.c */
-int erts_check_nif_export_in_area(Process *p, char *start, Uint size);
-
ErtsProcList *erts_proclist_create(Process *);
-ErtsProcList *erts_proclist_copy(ErtsProcList *);
void erts_proclist_destroy(ErtsProcList *);
ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
@@ -1772,6 +1793,8 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
ErtsThrPrgrLaterOp *,
UWord);
void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+struct db_fixation;
+void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*);
void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc);
int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
@@ -1813,12 +1836,13 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
void (*func)(void *),
void *arg);
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
+void erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
Eterm erts_process_state2status(erts_aint32_t);
Eterm erts_process_status(Process *, Eterm);
-Uint erts_run_queues_len(Uint *, int, int);
+Uint erts_run_queues_len(Uint *, int, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
@@ -1925,6 +1949,8 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
+ERTS_GLB_INLINE void erts_schedule_dirty_sys_execution(Process *c_p);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
@@ -1934,6 +1960,34 @@ erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
if (!(state & ERTS_PSFLG_ACTIVE))
erts_schedule_process(p, state, locks);
}
+
+ERTS_GLB_INLINE void
+erts_schedule_dirty_sys_execution(Process *c_p)
+{
+ erts_aint32_t a, n, e;
+
+ a = erts_smp_atomic32_read_nob(&c_p->state);
+
+ /*
+ * Only a currently executing process schedules
+ * itself for dirty-sys execution...
+ */
+
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+ /* Don't set dirty-active-sys if we are about to exit... */
+
+ while (!(a & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_PENDING_EXIT))) {
+ e = a;
+ n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ a = erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break; /* dirty-active-sys set */
+ }
+}
+
#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
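[Editor's note] erts_schedule_dirty_sys_execution above is a standard compare-and-swap retry loop: it keeps trying to set the dirty-active-sys bit until it succeeds or observes an exit bit. The same shape restated against C11 atomics for illustration; the FLAG_* values are hypothetical stand-ins for the ERTS_PSFLG_* bits.

#include <stdatomic.h>

enum { FLAG_DIRTY_ACTIVE_SYS = 1 << 0,
       FLAG_EXITING          = 1 << 1,
       FLAG_PENDING_EXIT     = 1 << 2 };

static void set_dirty_active_sys(atomic_uint *state)
{
    unsigned a = atomic_load(state);
    /* Give up silently if the process is (about to start) exiting. */
    while (!(a & (FLAG_DIRTY_ACTIVE_SYS | FLAG_EXITING | FLAG_PENDING_EXIT))) {
        /* compare_exchange reloads 'a' with the current value on failure,
         * so the loop re-checks the exit bits before retrying. */
        if (atomic_compare_exchange_weak(state, &a, a | FLAG_DIRTY_ACTIVE_SYS))
            break;  /* dirty-active-sys set */
    }
}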
@@ -2507,10 +2561,6 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end)
}
#endif
-Process *erts_pid2proc_suspend(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
#ifdef ERTS_SMP
Process *erts_pid2proc_not_running(Process *,
@@ -2552,8 +2602,6 @@ extern int erts_disable_proc_not_running_opt;
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time);
-
#ifdef ERTS_SMP
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 42654604cb..3c80f0e0f6 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -54,10 +54,10 @@
#define HASH_RANGE(PDict) ((PDict)->usedSlots)
#define MAKE_HASH(Term) \
- ((is_small(Term)) ? unsigned_val(Term) : \
+ ((is_small(Term)) ? (Uint32) unsigned_val(Term) : \
((is_atom(Term)) ? \
- (atom_tab(atom_val(Term))->slot.bucket.hvalue) : \
- make_internal_hash(Term)))
+ (Uint32) atom_val(Term) : \
+ make_internal_hash(Term, 0)))
#define PD_SZ2BYTES(Sz) (sizeof(ProcDict) + ((Sz) - 1)*sizeof(Eterm))
@@ -408,6 +408,11 @@ static void pd_hash_erase_all(Process *p)
}
}
+Uint32 erts_pd_make_hx(Eterm key)
+{
+ return MAKE_HASH(key);
+}
+
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id)
{
unsigned int hval;
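[Editor's note] erts_pd_make_hx, added above, exposes the dictionary hash computation so a caller can hash a key once and reuse the value across lookups. A hedged usage sketch; the wrapper function is hypothetical, but both callees appear in this diff:

Eterm pd_lookup_with_cached_hash(Process *p, Eterm key)
{
    Uint32 hx = erts_pd_make_hx(key);            /* hash the key once */
    return erts_pd_hash_get_with_hx(p, hx, key); /* reuse it for the lookup */
}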
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
index b50a2af72c..ab58f3c239 100644
--- a/erts/emulator/beam/erl_process_dict.h
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -43,6 +43,7 @@ void erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
Eterm erts_pd_hash_get(struct process *p, Eterm id);
+Uint32 erts_pd_make_hx(Eterm key);
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id);
#endif
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 8d66391d55..b826e6c5d3 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -337,8 +337,8 @@ stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
static void
print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA* cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -350,7 +350,8 @@ print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%bpu + %bpu",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
@@ -446,8 +447,8 @@ heap_dump(fmtfn_t to, void *to_arg, Eterm x)
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_atomic_xchg_nob(&val->refc, 0) != 0) {
- val->flags = (UWord) all_binaries;
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
all_binaries = val;
}
erts_print(to, to_arg,
@@ -528,7 +529,7 @@ dump_binaries(fmtfn_t to, void *to_arg, Binary* current)
erts_print(to, to_arg, "%02X", bytes[i]);
}
erts_putc(to, to_arg, '\n');
- current = (Binary *) current->flags;
+ current = (Binary *) current->intern.flags;
}
}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index a69185bc5c..ff124d5ba7 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -112,21 +112,13 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-#ifdef ERTS_ENABLE_LOCK_COUNT
-static void lcnt_enable_proc_lock_count(Process *proc, int enable);
-#endif
-
void
erts_init_proc_lock(int cpus)
{
int i;
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i), 1);
-#else
- erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
}
#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
@@ -944,7 +936,7 @@ erts_pid2proc_opt(Process *c_p,
erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+ erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
@@ -1006,6 +998,41 @@ erts_pid2proc_opt(Process *c_p,
return proc;
}
+static ERTS_INLINE
+Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
+{
+ Process *proc;
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+
+ proc = erts_proc_lookup_raw(pid);
+ if (proc) {
+ if (!allow_exit && ERTS_PROC_IS_EXITING(proc))
+ proc = NULL;
+ else
+ erts_proc_inc_refc(proc);
+ }
+
+#ifdef ERTS_SMP
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+
+ return proc;
+}
+
+Process *erts_proc_lookup_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 0);
+}
+
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 1);
+}
+
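[Editor's note] The new lookup helpers above pin a process by bumping its reference count under an unmanaged-thread-progress delay, so the returned pointer stays valid until the caller releases it. A hedged caller-side sketch; erts_proc_dec_refc is assumed to be the release counterpart:

void with_process(Eterm pid)
{
    Process *proc = erts_proc_lookup_inc_refc(pid); /* NULL if gone/exiting */
    if (proc) {
        /* ... inspect proc without holding any process locks ... */
        erts_proc_dec_refc(proc); /* assumed release counterpart */
    }
}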
void
erts_proc_lock_init(Process *p)
{
@@ -1027,39 +1054,38 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int do_lock_count = 1;
-#else
- int do_lock_count = 0;
-#endif
-
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.link, "proc_link", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.btm.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
- do_lock_count);
+ erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
- erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.trace.lc);
@@ -1096,117 +1122,70 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int ix, max = erts_ptab_max(&erts_proc);
- Process *proc = NULL;
- for (ix = 0; ix < max; ++ix) {
- if ((proc = erts_pix2proc(ix)) != NULL)
- lcnt_enable_proc_lock_count(proc, enable);
- } /* for all processes */
-}
-
void erts_lcnt_proc_lock_init(Process *p) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
- } else { /* now the common case */
- Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
- } /* the lock names should really be aligned to four characters */
+ erts_lcnt_init_ref(&p->lock.lcnt_carrier);
+
+ if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
+ erts_lcnt_enable_proc_lock_count(p, 1);
+ }
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
- erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
+ erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
-static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
- if (enable) {
- if (!ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_init(proc);
- }
- }
- else {
- if (ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_destroy(proc);
- }
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
+ if(proc->common.id == ERTS_INVALID_PID) {
+ /* Locks without an id are more trouble than they're worth; there's no
+ * way to look them up and we can't track them with _STATIC since it's
+ * too early to tell whether we're a system process (proc->static_flags
+ * hasn't been set yet). */
+ } else if(!enable) {
+ erts_lcnt_proc_lock_destroy(proc);
+ } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+
+ carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);
+
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
+ "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK,
+ "proc_link", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
+ "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
+ "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
+ "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
+ "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+
+ erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
}
}
-void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
-}
+void erts_lcnt_update_process_locks(int enable) {
+ int i, max;
-void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
- char *file, unsigned int line) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
- }
- if (locks & ERTS_PROC_LOCK_BTM) {
- erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
- }
- if (locks & ERTS_PROC_LOCK_TRACE) {
- erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
- }
-}
+ max = erts_ptab_max(&erts_proc);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
-}
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Process *proc;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ proc = erts_pix2proc(i);
+
+ if(proc != NULL) {
+ erts_lcnt_enable_proc_lock_count(proc, enable);
+ }
-void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
+ }
}
-void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
-} /* reversed logic */
+
#endif /* ERTS_ENABLE_LOCK_COUNT */
@@ -1221,7 +1200,7 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_lock_x(&lck,file,line);
@@ -1254,7 +1233,7 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1286,7 +1265,7 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unlock(&lck);
@@ -1321,7 +1300,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_might_unlock(&lck);
@@ -1369,7 +1348,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_require_lock(&lck, file, line);
@@ -1416,7 +1395,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unrequire_lock(&lck);
@@ -1465,7 +1444,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
lck.id = lc_id.proc_lock_main;
@@ -1496,7 +1475,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
@@ -1711,22 +1690,22 @@ erts_proc_lc_my_proc_locks(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK)};
+ ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 2cccf0697a..023ba4d4ae 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -78,13 +78,19 @@ typedef struct erts_proc_lock_t_ {
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt_main;
- erts_lcnt_lock_t lcnt_link;
- erts_lcnt_lock_t lcnt_msgq;
- erts_lcnt_lock_t lcnt_btm;
- erts_lcnt_lock_t lcnt_status;
- erts_lcnt_lock_t lcnt_trace;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && !ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ /* Each erts_mtx_t has its own lock counter ^ */
+
+ #define ERTS_LCNT_PROCLOCK_IDX_MAIN 0
+ #define ERTS_LCNT_PROCLOCK_IDX_LINK 1
+ #define ERTS_LCNT_PROCLOCK_IDX_MSGQ 2
+ #define ERTS_LCNT_PROCLOCK_IDX_BTM 3
+ #define ERTS_LCNT_PROCLOCK_IDX_STATUS 4
+ #define ERTS_LCNT_PROCLOCK_IDX_TRACE 5
+
+ #define ERTS_LCNT_PROCLOCK_COUNT 6
+
+ erts_lcnt_ref_t lcnt_carrier;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
@@ -219,6 +225,10 @@ typedef struct erts_proc_lock_t_ {
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
& ~ERTS_PROC_LOCK_MAIN)
+/* All locks we must first unlock in order to lock L */
+#define ERTS_PROC_LOCKS_HIGHER_THAN(L) \
+ (ERTS_PROC_LOCKS_ALL & (~(L) & ~((L)-1)))
+
#define ERTS_PIX_LOCKS_BITS 10
#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
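[Editor's note] The new ERTS_PROC_LOCKS_HIGHER_THAN macro relies on the lock bits being assigned in locking order. A worked example of the bit arithmetic, with illustrative values:

/* Suppose six process locks occupy bits 0..5, so ERTS_PROC_LOCKS_ALL is
 * 0x3F, and we want the locks ordered above L = 0x04 (bit 2):
 *
 *   (L)-1              = 0x03         all bits strictly below L
 *   ~(L) & ~((L)-1)    = ...11111000  clears L and everything below it
 *   ... & 0x3F         = 0x38         bits 3..5
 *
 * i.e. exactly the higher-ordered locks that must be released before L
 * can be taken without violating the lock order. */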
@@ -241,14 +251,170 @@ typedef struct erts_proc_lock_t_ {
void erts_lcnt_proc_lock_init(Process *p);
void erts_lcnt_proc_lock_destroy(Process *p);
+
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
-void erts_lcnt_enable_proc_lock_count(int enable);
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable);
+void erts_lcnt_update_process_locks(int enable);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
+ char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, file, line);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, res);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, res);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, res);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, res);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+} /* reversed logic */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_ENABLE_LOCK_COUNT*/
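[Editor's note] Every instrumented helper above brackets its work in the same erts_lcnt_open_ref/erts_lcnt_close_ref guard, which allows the lock-count carrier to be installed and uninstalled while the locks are live. A sketch of that generic shape, not the actual erts_lcnt implementation:

void lcnt_guarded_op(erts_lcnt_ref_t *ref)
{
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    /* open_ref fails cheaply when counting is disabled or the carrier
     * has been removed, so the instrumentation is near-free when off. */
    if (erts_lcnt_open_ref(ref, &handle, &carrier)) {
        /* ... record lock/unlock events against the carrier entries ... */
        erts_lcnt_close_ref(handle, carrier);
    }
}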
@@ -940,6 +1106,8 @@ void erts_proc_safelock(Process *a_proc,
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
+Process *erts_proc_lookup_inc_refc(Eterm pid);
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 22830a19c4..b3bcb3af3f 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -372,7 +372,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
last_data_init_nob(ptab, ~((Uint64) 0));
@@ -474,7 +475,7 @@ erts_ptab_init_table(ErtsPTab *ptab,
* we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
*
* In order to fix this, we insert a pointer from the table
- * to the invalid_element, wich will be interpreted as a
+ * to the invalid_element, which will be interpreted as a
* slot currently being modified. This way we will be able to
* have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
* still having a table size of the power of 2.
@@ -733,7 +734,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
* erts_ptab_list() implements BIFs listing the content of the table,
* e.g. erlang:processes/0.
*/
-static void cleanup_ptab_list_bif_data(Binary *bp);
+static int cleanup_ptab_list_bif_data(Binary *bp);
static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
@@ -771,23 +772,23 @@ erts_ptab_list(Process *c_p, ErtsPTab *ptab)
}
else {
Eterm *hp;
- Eterm magic_bin;
+ Eterm magic_ref;
ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
ERTS_BIF_PREP_YIELD2(ret_val,
&ptab_list_continue_export,
c_p,
res_acc,
- magic_bin);
+ magic_ref);
}
return ret_val;
}
-static void
+static int
cleanup_ptab_list_bif_data(Binary *bp)
{
ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
@@ -875,6 +876,8 @@ cleanup_ptab_list_bif_data(Binary *bp)
ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+
+ return 1;
}
static int
@@ -1287,9 +1290,7 @@ static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
res_acc = BIF_ARG_1;
- ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_2);
ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_ptab_list_bif_data);
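[Editor's note] The erl_ptab.c hunks above replace the magic-binary trap token with a magic reference. A hedged sketch of the round trip: the trap state lives in a Binary, is handed to Erlang as a magic ref, and is recovered on the continuation call. The wrapper functions are hypothetical; the callees appear in this diff.

Eterm make_trap_state(Process *c_p, Binary *mbp)
{
    Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
    return erts_mk_magic_ref(&hp, &MSO(c_p), mbp); /* term carrying mbp */
}

Binary *resolve_trap_state(Eterm magic_ref)
{
    return erts_magic_ref2bin(magic_ref); /* back to the trap state */
}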
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
index 5fefaea978..e59d6900b0 100644
--- a/erts/emulator/beam/erl_rbtree.h
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -105,7 +105,10 @@
* <ERTS_RBT_PREFIX>_rbt_yield_state_t.
*
* The yield state should be statically initialized by
- * ERTS_RBT_YIELD_STAT_INITER.
+ * ERTS_RBT_YIELD_STAT_INITER
+ *
+ * or dynamically initialized with
+ * ERTS_RBT_YIELD_STAT_INIT(<ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate)
*
*
* The following API functions are implemented if corresponding
@@ -178,8 +181,8 @@
* Operate by calling the operator 'op' on each element.
* Order is undefined.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -195,8 +198,8 @@
* Order is undefined. Each element should be destroyed
* by 'op'.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed.
*
* 'arg' is passed as argument to 'op'.
@@ -228,8 +231,8 @@
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -244,8 +247,8 @@
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -296,8 +299,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -318,8 +321,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -422,6 +425,13 @@
#ifndef ERTS_RBT_YIELD_STAT_INITER
# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
#endif
+#ifndef ERTS_RBT_YIELD_STAT_INIT
+# define ERTS_RBT_YIELD_STAT_INIT(YS) \
+ do { \
+ (YS)->x = NULL; \
+ (YS)->up = 0; \
+ } while (0)
+#endif
#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
X ## Y
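[Editor's note] Under the return convention adjusted above, the yielding traversals now return true while yielding and false when done. A hedged usage sketch of the driver loop; the 'mytree' prefix, callback, and YLIMIT are hypothetical, and the exact generated signature is an assumption:

mytree_rbt_yield_state_t ystate = ERTS_RBT_YIELD_STAT_INITER;

/* True means "yielded, call again"; false means the tree is done. */
while (mytree_rbt_foreach_destroy_yielding(&root, destroy_op, arg,
                                           &ystate, YLIMIT)) {
    /* bump reductions / reschedule, then resume from ystate */
}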
@@ -476,12 +486,12 @@ typedef struct {
#if defined(ERTS_RBT_HARD_DEBUG) \
&& (defined(ERTS_RBT_WANT_DELETE) \
|| defined(ERTS_RBT_NEED_INSERT__))
-static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root);
+static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *node);
# define ERTS_RBT_NEED_HDBG_CHECK_TREE__
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) \
- ERTS_RBT_FUNC__(hdbg_check_tree)((R))
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) \
+ ERTS_RBT_FUNC__(hdbg_check_tree)((R),(N))
#else
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) ((void) 1)
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) ((void) 1)
#endif
#ifdef ERTS_RBT_NEED_ROTATE__
@@ -634,7 +644,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
ERTS_RBT_INIT_EMPTY_TNODE(&null_x);
@@ -852,7 +862,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
}
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
}
@@ -982,7 +992,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
{
ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n);
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
ERTS_RBT_INIT_EMPTY_TNODE(n);
@@ -1004,7 +1014,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
if (lookup && ERTS_RBT_IS_EQ(kn, kx)) {
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
return x;
}
@@ -1038,7 +1048,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
ERTS_RBT_FUNC__(insert_fixup__)(root, n);
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
return NULL;
}
@@ -1364,7 +1374,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
ystate->x = NULL;
ystate->up = 0;
}
- return 1; /* Done */
+ return 0; /* Done */
}
x = p;
}
@@ -1579,15 +1589,17 @@ ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent,
#ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__
static void
-ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
+ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n)
{
int black_depth = -1, no_black = 0;
ERTS_RBT_T *c, *p, *x = root;
ERTS_RBT_KEY_T kx;
ERTS_RBT_KEY_T kc;
- if (!x)
+ if (!x) {
+ ERTS_RBT_ASSERT(!n);
return;
+ }
ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x));
@@ -1597,6 +1609,9 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
while (1) {
+ if (x == n)
+ n = NULL;
+
if (ERTS_RBT_IS_BLACK(x))
no_black++;
else {
@@ -1668,6 +1683,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
if (!p) {
ERTS_RBT_ASSERT(root == x);
ERTS_RBT_ASSERT(no_black == 0);
+ ERTS_RBT_ASSERT(!n);
return; /* Done */
}
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index cab4bd73db..96238318c9 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -161,7 +161,7 @@ enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
if ((i & 1) == 0)
itmp = itmp2;
else {
- enq = (erts_sspa_blk_t *) itmp;
+ enq = (erts_sspa_blk_t *) itmp2;
itmp = erts_atomic_read_acqb(&enq->next_atmc);
ASSERT(itmp != ERTS_AINT_NULL);
}
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 713ed50b86..696bdbdaf1 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -128,14 +128,14 @@ ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
#define ERTS_SMP_HAVE_REC_MTX_INIT 1
ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
#endif
-ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
@@ -153,18 +153,15 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name);
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -181,11 +178,10 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -194,11 +190,10 @@ ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file,
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -1062,34 +1057,18 @@ erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
#endif
ERTS_GLB_INLINE void
-erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra, 1);
+ erts_mtx_init(mtx, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra, 1);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked(mtx, name);
+ erts_mtx_init_locked(mtx, name, extra, flags);
#endif
}
@@ -1211,39 +1190,25 @@ erts_smp_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init_x(rwmtx, name, extra);
+ erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt(rwmtx, opt, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init(rwmtx, name);
+ erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags);
#endif
}
@@ -1379,20 +1344,10 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
+erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_spinlock_init(lock, name);
+ erts_spinlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
@@ -1445,20 +1400,10 @@ erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
+erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwlock_init(lock, name);
+ erts_rwlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
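
What this header reshuffle amounts to: the `_x` variants are gone, every `erts_smp_*_init` now carries `name`, `extra`, and an `erts_lock_flags_t`, and each wrapper forwards to the plain `erts_*` implementation only in SMP builds. A hedged sketch of that delegation shape with hypothetical names (pthreads standing in for the ethread layer, MY_SMP for ERTS_SMP):

    #include <pthread.h>

    typedef unsigned short lock_flags_t;   /* stands in for erts_lock_flags_t */

    typedef struct {
        pthread_mutex_t mtx;               /* real lock, SMP builds only */
    } smp_mtx_t;

    /* Single init entry point: instrumentation metadata (name, extra,
     * flags) always travels with the lock, even though a non-SMP build
     * ignores it entirely. */
    static void smp_mtx_init(smp_mtx_t *mtx, const char *name,
                             long extra, lock_flags_t flags)
    {
    #ifdef MY_SMP                              /* hypothetical config macro */
        (void)name; (void)extra; (void)flags;  /* consumed by lock check/count */
        pthread_mutex_init(&mtx->mtx, NULL);
    #else
        (void)mtx; (void)name; (void)extra; (void)flags;
    #endif
    }

    int main(void)
    {
        smp_mtx_t m;
        smp_mtx_init(&m, "my_lock", 0, 0);
        return 0;
    }
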
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 7d857ad326..d904e35e40 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -59,6 +59,42 @@ erts_set_literal_tag(Eterm *term, Eterm *hp_start, Eterm hsz)
#endif
}
+void
+erts_term_init(void)
+{
+#ifdef ERTS_ORDINARY_REF_MARKER
+ /* Ordinary and magic references of same size... */
+
+ ErtsRefThing ref_thing;
+
+ ERTS_CT_ASSERT(ERTS_ORDINARY_REF_MARKER == ~((Uint32)0));
+ ref_thing.m.header = ERTS_REF_THING_HEADER;
+ ref_thing.m.mb = (ErtsMagicBinary *) ~((UWord) 3);
+ ref_thing.m.next = (struct erl_off_heap_header *) ~((UWord) 3);
+ if (ref_thing.o.marker == ERTS_ORDINARY_REF_MARKER)
+ ERTS_INTERNAL_ERROR("Cannot differentiate between magic and ordinary references");
+
+ ERTS_CT_ASSERT(offsetof(ErtsORefThing,marker) != 0);
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) == sizeof(ErtsMRefThing));
+# ifdef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should not have been defined...
+# endif
+
+#else
+ /* Ordinary and magic references of different sizes... */
+
+# ifndef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should have been defined...
+# endif
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) != sizeof(ErtsMRefThing));
+
+#endif
+
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsORefThing));
+ ERTS_CT_ASSERT(ERTS_MAGIC_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsMRefThing));
+
+}
+
/*
* XXX: define NUMBER_CODE() here when new representation is used
*/
@@ -95,8 +131,8 @@ ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint32*,internal_ref_data,Wterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm,is_internal_magic_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm,is_internal_ordinary_ref);
ET_DEFINE_CHECKED(struct erl_node_*,internal_ref_node,Eterm,is_internal_ref);
ET_DEFINE_CHECKED(Eterm*,external_val,Wterm,is_external);
ET_DEFINE_CHECKED(Uint,external_data_words,Wterm,is_external);
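
erts_term_init() leans on compile-time assertions for everything the compiler can see and keeps one run-time probe for the single fact it cannot prove statically: that an all-ones pointer pattern never collides with the ordinary-ref marker. A sketch of the compile-time half, assuming a 64-bit build and using C11 _Static_assert in place of ERTS_CT_ASSERT; the types are simplified stand-ins:

    #include <stdint.h>

    typedef uintptr_t Eterm;

    typedef struct {            /* stand-in for ErtsORefThing (64-bit layout) */
        Eterm    header;
        uint32_t num[3];
        uint32_t marker;        /* 0xffffffff in ordinary refs */
    } ORef;

    typedef struct {            /* stand-in for ErtsMRefThing */
        Eterm header;
        void *mb;               /* magic binary pointer */
        void *next;             /* off-heap chain */
    } MRef;

    /* Same checks erts_term_init() makes: both layouts are whole words,
     * and on 64-bit they coincide so one thing header can serve both. */
    _Static_assert(sizeof(ORef) % sizeof(Eterm) == 0, "ORef must be word sized");
    _Static_assert(sizeof(MRef) % sizeof(Eterm) == 0, "MRef must be word sized");
    _Static_assert(sizeof(ORef) == sizeof(MRef), "ref layouts must coincide");

    int main(void) { return 0; }
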
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index c3234ee349..842802f8d9 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,8 @@
#include "erl_mmap.h"
+void erts_term_init(void);
+
typedef UWord Wterm; /* Full word terms */
struct erl_node_; /* Declared in erl_node_tables.h */
@@ -718,73 +720,235 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define ERTS_MAX_REF_NUMBERS 3
#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
-#if defined(ARCH_64)
-# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
-# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
-#else
-# define ERTS_REF_WORDS ERTS_REF_NUMBERS
-# define ERTS_REF_32BIT_WORDS ERTS_REF_NUMBERS
+#ifndef ERTS_ENDIANNESS
+# error ERTS_ENDIANNESS not defined...
#endif
-typedef struct {
- Eterm header;
- union {
- Uint32 ui32[ERTS_REF_32BIT_WORDS];
- Uint ui[ERTS_REF_WORDS];
- } data;
-} RefThing;
-
-#define REF_THING_SIZE (sizeof(RefThing)/sizeof(Uint))
-#define REF_THING_HEAD_SIZE (sizeof(Eterm)/sizeof(Uint))
+#if ERTS_REF_NUMBERS != 3
+# error "A new reference layout for 64-bit needs to be implemented..."
+#endif
-#define make_ref_thing_header(DW) \
- _make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
+struct magic_binary;
#if defined(ARCH_64)
+# define ERTS_ORDINARY_REF_MARKER (~((Uint32) 0))
+
+typedef struct {
+ Eterm header;
+#if ERTS_ENDIANNESS <= 0
+ Uint32 marker;
+#endif
+ Uint32 num[ERTS_REF_NUMBERS];
+#if ERTS_ENDIANNESS > 0
+ Uint32 marker;
+#endif
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+#if !ERTS_ENDIANNESS
+ Uint32 num[ERTS_REF_NUMBERS];
+ Uint32 marker;
+#endif
+} ErtsMRefThing;
+
/*
- * Ref layout on a 64-bit little endian machine:
+ * Ordinary ref layout on a 64-bit little endian machine:
*
* 63             31             0
* +--------------+--------------+
* |         Thing word          |
* +--------------+--------------+
- * |    Data 0    | 32-bit arity |
+ * |    Data 0    |  0xffffffff  |
* +--------------+--------------+
* |    Data 2    |    Data 1    |
* +--------------+--------------+
*
- * Data is stored as an Uint32 array with 32-bit arity as first number.
+ * Ordinary ref layout on a 64-bit big endian machine:
+ *
+ * 63             31             0
+ * +--------------+--------------+
+ * |         Thing word          |
+ * +--------------+--------------+
+ * |    Data 0    |    Data 1    |
+ * +--------------+--------------+
+ * |    Data 2    |  0xffffffff  |
+ * +--------------+--------------+
+ *
+ * Magic Ref layout on a 64-bit machine:
+ *
+ * 63             31             0
+ * +--------------+--------------+
+ * |         Thing word          |
+ * +--------------+--------------+
+ * |    Magic Binary Pointer     |
+ * +--------------+--------------+
+ * |    Next Off Heap Pointer    |
+ * +--------------+--------------+
+ *
+ * Both pointers in the magic ref are 64-bit aligned. That is,
+ * least significant bits are zero. The marker 32-bit word is
+ * placed over the least significant bits of one of the pointers.
+ * That is, we can distinguish between magic and ordinary ref
+ * by looking at the marker field.
+ *
*/
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = ERTS_REF_NUMBERS; \
- ((RefThing *) (Hp))->data.ui32[1] = (R0); \
- ((RefThing *) (Hp))->data.ui32[2] = (R1); \
- ((RefThing *) (Hp))->data.ui32[3] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->marker = ERTS_ORDINARY_REF_MARKER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
-#else
+#if ERTS_ENDIANNESS
+/* Known big or little endian */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#else /* !ERTS_ENDIANNESS */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ((ErtsMRefThing *) (Hp))->marker = 0; \
+ ((ErtsMRefThing *) (Hp))->num[0] = (Binp)->refn[0]; \
+ ((ErtsMRefThing *) (Hp))->num[1] = (Binp)->refn[1]; \
+ ((ErtsMRefThing *) (Hp))->num[2] = (Binp)->refn[2]; \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#endif /* !ERTS_ENDIANNESS */
+
+#else /* ARCH_32 */
+
+typedef struct {
+ Eterm header;
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+} ErtsMRefThing;
+
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = (R0); \
- ((RefThing *) (Hp))->data.ui32[1] = (R1); \
- ((RefThing *) (Hp))->data.ui32[2] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic(&(Binp)->refn)); \
+} while (0)
+
+#endif /* ARCH_32 */
+
+typedef union {
+ ErtsMRefThing m;
+ ErtsORefThing o;
+} ErtsRefThing;
+
+/* for copy sharing */
+#define BOXED_VISITED_MASK ((Eterm) 3)
+#define BOXED_VISITED ((Eterm) 1)
+#define BOXED_SHARED_UNPROCESSED ((Eterm) 2)
+#define BOXED_SHARED_PROCESSED ((Eterm) 3)
+
+#define ERTS_REF_THING_SIZE (sizeof(ErtsORefThing)/sizeof(Uint))
+#define ERTS_MAGIC_REF_THING_SIZE (sizeof(ErtsMRefThing)/sizeof(Uint))
+#define ERTS_MAX_INTERNAL_REF_SIZE (sizeof(ErtsRefThing)/sizeof(Uint))
+
+#define make_ref_thing_header(Words) \
+ _make_header((Words)-1,_TAG_HEADER_REF)
+
+#define ERTS_REF_THING_HEADER _make_header(ERTS_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+#if defined(ARCH_64) && ERTS_ENDIANNESS /* All internal refs of same size... */
+
+# undef ERTS_MAGIC_REF_THING_HEADER
+
+# define is_ref_thing_header(x) ((x) == ERTS_REF_THING_HEADER)
+
+#ifdef SHCOPY
+#define is_ordinary_ref_thing(x) \
+ (((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#else
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header((*((Eterm *)(x))) & ~BOXED_VISITED_MASK)), \
+ ((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#endif
+
+#define is_magic_ref_thing(x) \
+ (!is_ordinary_ref_thing((x)))
+
+#define is_internal_magic_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_magic_ref_thing(boxed_val((x))))
+
+#define is_internal_ordinary_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_ordinary_ref_thing(boxed_val((x))))
+
+#else /* Ordinary and magic references of different sizes... */
+
+# define ERTS_MAGIC_REF_THING_HEADER \
+ _make_header(ERTS_MAGIC_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+# define is_ref_thing_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
+
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_REF_THING_HEADER)
+
+#define is_magic_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_magic_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_ordinary_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER)
+
#endif
-#define is_ref_thing_header(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
#define make_internal_ref(x) make_boxed((Eterm*)(x))
-#define _unchecked_ref_thing_ptr(x) \
- ((RefThing*) _unchecked_internal_ref_val(x))
-#define ref_thing_ptr(x) \
- ((RefThing*) internal_ref_val(x))
+#define _unchecked_ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) _unchecked_internal_ref_val(x))
+#define ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) internal_ref_val(x))
+
+#define _unchecked_magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) _unchecked_internal_ref_val(x))
+#define magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) internal_ref_val(x))
#define is_internal_ref(x) \
(_unchecked_is_boxed((x)) && is_ref_thing_header(*boxed_val((x))))
@@ -796,16 +960,21 @@ do { \
_ET_DECLARE_CHECKED(Eterm*,internal_ref_val,Wterm)
#define internal_ref_val(x) _ET_APPLY(internal_ref_val,(x))
-#define internal_thing_ref_data_words(t) (thing_arityval(*(Eterm*)(t)))
-#define _unchecked_internal_ref_data_words(x) \
- (_unchecked_thing_arityval(*_unchecked_internal_ref_val(x)))
-_ET_DECLARE_CHECKED(Uint,internal_ref_data_words,Wterm)
-#define internal_ref_data_words(x) _ET_APPLY(internal_ref_data_words,(x))
+#define internal_ordinary_thing_ref_numbers(ort) (((ErtsORefThing *)(ort))->num)
+#define _unchecked_internal_ordinary_ref_numbers(x) (internal_ordinary_thing_ref_numbers(_unchecked_ordinary_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm)
+#define internal_ordinary_ref_numbers(x) _ET_APPLY(internal_ordinary_ref_numbers,(x))
+
+#if defined(ARCH_64) && !ERTS_ENDIANNESS
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->num)
+#else
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->mb->refn)
+#endif
+
+#define _unchecked_internal_magic_ref_numbers(x) (internal_magic_thing_ref_numbers(_unchecked_magic_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm)
+#define internal_magic_ref_numbers(x) _ET_APPLY(internal_magic_ref_numbers,(x))
-#define internal_thing_ref_data(thing) ((thing)->data.ui32)
-#define _unchecked_internal_ref_data(x) (internal_thing_ref_data(_unchecked_ref_thing_ptr(x)))
-_ET_DECLARE_CHECKED(Uint32*,internal_ref_data,Wterm)
-#define internal_ref_data(x) _ET_APPLY(internal_ref_data,(x))
#define _unchecked_internal_ref_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
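
On 64-bit with known endianness the header word is identical for both ref flavors, so the discriminator is purely positional: an ordinary ref stores 0xffffffff exactly where a magic ref keeps the low (or, on big endian, high) half of an 8-byte-aligned pointer, and an aligned pointer can never put all ones there. A self-contained little-endian sketch of that discrimination with simplified stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    #define ORDINARY_REF_MARKER (~(uint32_t)0)   /* 0xffffffff */

    typedef struct {           /* ordinary ref, 64-bit little-endian layout */
        uint64_t header;
        uint32_t marker;       /* always 0xffffffff */
        uint32_t num[3];
    } ORef;

    typedef struct {           /* magic ref: two 8-byte-aligned pointers */
        uint64_t header;
        void *mb;              /* low 32 bits overlap ORef.marker on LE */
        void *next;
    } MRef;

    typedef union { ORef o; MRef m; } Ref;

    /* An 8-byte-aligned pointer has zero low bits, so its low word can
     * never be 0xffffffff; the marker therefore disambiguates, just as
     * in is_ordinary_ref_thing(). */
    static int is_ordinary(const Ref *r) {
        return r->o.marker == ORDINARY_REF_MARKER;
    }

    int main(void) {
        Ref r = { .o = { 0, ORDINARY_REF_MARKER, { 1, 2, 3 } } };
        printf("ordinary? %d\n", is_ordinary(&r));  /* prints 1 */
        return 0;
    }
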
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 700ed90def..2a9f276e02 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -321,13 +321,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+ /*
+ * We only allocate the part up to the wakeup_request field which is
+ * the first field only used by registered threads
+ */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
@@ -337,8 +347,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
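
The shape of this fix is worth noting: a diagnostic path that can run from inside the instrumented allocator must never allocate through it, so the allocator is selected at compile time. A hedged sketch with hypothetical names; `instrumented_alloc` is a stub standing in for erts_alloc:

    #include <stdlib.h>

    static void *instrumented_alloc(size_t sz) { return malloc(sz); } /* stub */
    static void  instrumented_free(void *p)    { free(p); }

    /* In a lock-counting build this code may be reached from within the
     * instrumented allocator itself, so plain malloc/free keeps us from
     * re-entering (and deadlocking on) the allocator's own locks. */
    #ifdef ENABLE_LOCK_COUNT                 /* hypothetical config macro */
    # define diag_alloc(sz) malloc(sz)
    # define diag_free(p)   free(p)
    #else
    # define diag_alloc(sz) instrumented_alloc(sz)
    # define diag_free(p)   instrumented_free(p)
    #endif

    int main(void)
    {
        void *p = diag_alloc(64);
        diag_free(p);
        return 0;
    }
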
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index eccd49f2a9..8b5c17d739 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -259,13 +259,16 @@
#include "sys.h"
+#include "erl_lock_flags.h"
+#include "erl_term.h"
+
#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
+
#include "erl_lock_check.h"
#include "erl_lock_count.h"
-#include "erl_term.h"
#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
/*
@@ -307,9 +310,11 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
-
} erts_mtx_t;
typedef ethr_cond erts_cnd_t;
@@ -320,7 +325,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwmtx_t;
@@ -365,7 +373,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_spinlock_t;
@@ -376,7 +387,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwlock_t;
@@ -479,16 +493,14 @@ ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
- Uint16 opt, int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
- char *name,
- Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
@@ -507,18 +519,15 @@ ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name);
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -608,16 +617,10 @@ ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp
ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
erts_aint64_t mask,
erts_aint64_t set);
-
-ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
- char *name,
- Eterm extra,
- Uint16 opt);
-ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -626,11 +629,10 @@ ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigne
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -2164,107 +2166,41 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
+erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "initialize mutex");
+ }
-ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
- int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
+ flags |= ERTS_LOCK_TYPE_MUTEX;
+#ifdef DEBUG
+ mtx->flags = flags;
#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
- ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
+ erts_lc_init_lock_x(&mtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_mtx_init(erts_mtx_t *mtx, char *name)
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
-#endif
-}
+ erts_mtx_init(mtx, name, extra, flags);
-ERTS_GLB_INLINE void
-erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ #ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+ #endif
+ #ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+ #endif
#endif
}
@@ -2273,11 +2209,14 @@ erts_mtx_destroy(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&mtx->lcnt);
+ erts_lcnt_uninstall(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
if (res != 0) {
@@ -2374,7 +2313,8 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2474,7 +2414,7 @@ erts_rwmtx_set_reader_group(int no)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX);
#endif
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
@@ -2483,57 +2423,32 @@ erts_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
+ char *name, Eterm extra, erts_lock_flags_t flags) {
#ifdef USE_THREADS
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (name && name[0] == '\0')
- erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
- else
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
-#endif
-#endif
-}
+ if (res != 0) {
+ erts_thr_fatal_error(res, "initialize rwmutex");
+ }
-ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra)
-{
- erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
-}
+ flags |= ERTS_LOCK_TYPE_RWMUTEX;
+#ifdef DEBUG
+ rwmtx->flags = flags;
+#endif
-ERTS_GLB_INLINE void
-erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_init_lock_x(&rwmtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
-#endif
+ erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
-{
- erts_rwmtx_init_opt(rwmtx, NULL, name);
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
+ erts_lock_flags_t flags) {
+ erts_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
}
ERTS_GLB_INLINE void
@@ -2541,11 +2456,14 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&rwmtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&rwmtx->lcnt);
+ erts_lcnt_uninstall(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
if (res != 0) {
@@ -2573,7 +2491,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2582,13 +2500,13 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
@@ -2607,13 +2525,13 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2627,10 +2545,10 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
@@ -2648,7 +2566,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2657,13 +2575,13 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
@@ -2682,13 +2600,13 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2702,10 +2620,10 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
@@ -2743,7 +2661,8 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2757,7 +2676,8 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3090,59 +3010,26 @@ erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
/* spinlock */
ERTS_GLB_INLINE void
-erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "init spinlock");
+ }
-ERTS_GLB_INLINE void
-erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
- Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+ flags |= ERTS_LOCK_TYPE_SPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_spinlock_init(erts_spinlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3150,11 +3037,14 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
if (res != 0) {
@@ -3222,7 +3112,8 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_TYPE_SPINLOCK;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3233,39 +3124,26 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
/* rwspinlock */
ERTS_GLB_INLINE void
-erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
-#endif
-#else
- (void)lock;
+ if (res) {
+ erts_thr_fatal_error(res, "init rwlock");
+ }
+
+ flags |= ERTS_LOCK_TYPE_RWSPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-}
-ERTS_GLB_INLINE void
-erts_rwlock_init(erts_rwlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3273,11 +3151,14 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
if (res != 0) {
@@ -3301,10 +3182,10 @@ erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_unlock(&lock->rwlck);
#else
@@ -3322,13 +3203,13 @@ erts_read_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3344,10 +3225,10 @@ erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
#else
@@ -3365,13 +3246,13 @@ erts_write_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3388,7 +3269,8 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3402,7 +3284,8 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
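
The new DEBUG-only `flags` member in each lock struct exists for exactly one consumer: the destroy functions, which can now assert that a lock marked with the static property is never torn down. A minimal sketch of the idiom outside the ERTS build system; the flag values are hypothetical:

    #include <assert.h>
    #include <pthread.h>

    #define LOCK_FLAGS_PROPERTY_STATIC (1u << 0)   /* hypothetical bits */
    #define LOCK_TYPE_MUTEX            (1u << 8)

    typedef struct {
        pthread_mutex_t mtx;
    #ifndef NDEBUG
        unsigned flags;               /* mirrors the new DEBUG-only field */
    #endif
    } dbg_mtx_t;

    static void dbg_mtx_init(dbg_mtx_t *m, unsigned flags)
    {
        flags |= LOCK_TYPE_MUTEX;     /* type is implied by the init call */
    #ifndef NDEBUG
        m->flags = flags;
    #endif
        pthread_mutex_init(&m->mtx, NULL);
    }

    static void dbg_mtx_destroy(dbg_mtx_t *m)
    {
        /* statically allocated locks live for the emulator's lifetime */
        assert(!(m->flags & LOCK_FLAGS_PROPERTY_STATIC));
        pthread_mutex_destroy(&m->mtx);
    }

    int main(void)
    {
        dbg_mtx_t m;
        dbg_mtx_init(&m, 0);          /* not static, so destroy is legal */
        dbg_mtx_destroy(&m);
        return 0;
    }
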
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index a1c4220633..ccc5526664 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,19 +21,52 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-/* timer wheel size NEED to be a power of 2 */
-#ifdef SMALL_MEMORY
-#define ERTS_TIW_SIZE (1 << 13)
-#else
-#define ERTS_TIW_SIZE (1 << 16)
+#if 0
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
#endif
-#if defined(DEBUG) || 0
+#if defined(ERTS_TW_DEBUG)
#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
#else
#define ERTS_TIME_ASSERT(B) ((void) 1)
#endif
+#ifdef ERTS_TW_DEBUG
+/*
+ * Soon wheel will handle about 1 second
+ * Later wheel will handle about 8 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 10
+# define ERTS_TW_LATER_WHEEL_BITS 10
+#else
+# ifdef SMALL_MEMORY
+/*
+ * Soon wheel will handle about 4 seconds
+ * Later wheel will handle about 2 hours and 19 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 12
+# define ERTS_TW_LATER_WHEEL_BITS 12
+# else
+/*
+ * Soon wheel will handle about 16 seconds
+ * Later wheel will handle about 37 hours and 16 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 14
+# define ERTS_TW_LATER_WHEEL_BITS 14
+# endif
+#endif
+
+/*
+ * Number of slots in each timer wheel...
+ *
+ * These *need* to be powers of 2
+ */
+#define ERTS_TW_SOON_WHEEL_SIZE (1 << ERTS_TW_SOON_WHEEL_BITS)
+#define ERTS_TW_LATER_WHEEL_SIZE (1 << ERTS_TW_LATER_WHEEL_BITS)
+
typedef enum {
ERTS_NO_TIME_WARP_MODE,
ERTS_SINGLE_TIME_WARP_MODE,
@@ -103,7 +136,10 @@ Eterm erts_system_time_source(struct process*c_p);
#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
#endif
-#define ERTS_TIMER_WHEEL_MSEC (ERTS_TIW_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_SOON_WHEEL_MSEC (ERTS_TW_SOON_WHEEL_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_LATER_WHEEL_MSEC (ERTS_TW_LATER_WHEEL_SIZE*ERTS_TW_SOON_WHEEL_MSEC/2)
+
+#define ERTS_TIMER_WHEEL_MSEC ERTS_TW_LATER_WHEEL_MSEC
struct erts_time_sup_read_only__ {
ErtsMonotonicTime monotonic_time_unit;
@@ -412,34 +448,25 @@ erts_time_unit_conversion(Uint64 value,
void erts_sched_init_time_sup(ErtsSchedulerData *esdp);
-#define ERTS_TWHEEL_SLOT_AT_ONCE -1
-#define ERTS_TWHEEL_SLOT_INACTIVE -2
+#define ERTS_TW_SLOT_INACTIVE (-2)
/*
** Timer entry:
*/
typedef struct erl_timer {
- struct erl_timer* next; /* next entry tiw slot or chain */
- struct erl_timer* prev; /* prev entry tiw slot or chain */
- union {
- struct {
- void (*timeout)(void*); /* called when timeout */
- void (*cancel)(void*); /* called when cancel (may be NULL) */
- void* arg; /* argument to timeout/cancel procs */
- } func;
- ErtsThrPrgrLaterOp cleanup;
- } u;
ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
+ struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
+ void (*timeout)(void*); /* called when timeout */
+ void* arg; /* argument to timeout proc */
int slot;
} ErtsTWheelTimer;
typedef void (*ErlTimeoutProc)(void*);
-typedef void (*ErlCancelProc)(void*);
void erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos);
+ void *arg, ErtsMonotonicTime timeout_pos);
void erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p);
ErtsTimerWheel *erts_create_timer_wheel(ErtsSchedulerData *esdp);
@@ -447,12 +474,13 @@ ErtsMonotonicTime erts_check_next_timeout_time(ErtsSchedulerData *);
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p);
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_tweel_read_timeout(ErtsTWheelTimer *twt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p)
{
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
@@ -460,6 +488,12 @@ ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_
return *((ErtsMonotonicTime *) nxt_tmo_ref);
}
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_tweel_read_timeout(ErtsTWheelTimer *twt)
+{
+ return twt->timeout_pos;
+}
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
void
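
The second-to-minutes figures in the comments above follow directly from the new macros once a clock resolution is fixed: the soon wheel spans ERTS_TW_SOON_WHEEL_SIZE ticks, and each later-wheel slot covers half the soon wheel's full span. A quick standalone check of all three configurations, assuming ERTS_CLKTCK_RESOLUTION is 1000 (a 1 ms tick):

    #include <stdio.h>

    int main(void)
    {
        const long res = 1000;                 /* assumed clock ticks/sec */
        const int bits[][2] = { {10, 10},      /* ERTS_TW_DEBUG */
                                {12, 12},      /* SMALL_MEMORY */
                                {14, 14} };    /* default */
        for (int i = 0; i < 3; i++) {
            long soon_size  = 1L << bits[i][0];
            long later_size = 1L << bits[i][1];
            long soon_msec  = soon_size / (res / 1000);
            long later_msec = later_size * soon_msec / 2;
            printf("soon ~%ld s, later ~%ld min\n",
                   soon_msec / 1000, later_msec / 60000);
        }
        return 0;
    }

This prints roughly 1 s / 8 min, 4 s / 139 min, and 16 s / 2236 min, matching the "about 8 minutes", "2 hours and 19 minutes", and "37 hours and 16 minutes" figures in the comments.
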
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 6aa2a7500f..f6bb52dde1 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -678,7 +678,6 @@ check_time_correction(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_correction,
- NULL,
(void *) esdp,
timeout_pos);
}
@@ -729,7 +728,6 @@ check_time_offset(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_offset,
- NULL,
vesdp,
timeout_pos);
}
@@ -836,7 +834,6 @@ late_init_time_correction(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_func,
- NULL,
(quick_init_drift_adj
? NULL
: esdp),
@@ -957,8 +954,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
- erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
- erts_smp_mtx_init(&erts_get_time_mtx, "get_time");
+ erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
time_sup.r.o.correction = time_correction;
time_sup.r.o.warp_mode = time_warp_mode;
@@ -1123,8 +1122,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx,
- &rwmtx_opts, "get_corrected_time");
+ erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts,
+ "get_corrected_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
cdatap = &time_sup.inf.c.parmon.cdata;
@@ -1289,56 +1289,62 @@ erts_finalize_time_offset(void)
/* info functions */
void
-elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff)
+elapsed_time_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff)
{
- UWord prev_total_user, prev_total_sys;
- UWord total_user, total_sys;
+ ErtsMonotonicTime prev_total_user, prev_total_sys;
+ ErtsMonotonicTime total_user, total_sys;
SysTimes now;
sys_times(&now);
- total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
- total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+ total_user = (ErtsMonotonicTime) ((now.tms_utime * 1000) / SYS_CLK_TCK);
+ total_sys = (ErtsMonotonicTime) ((now.tms_stime * 1000) / SYS_CLK_TCK);
if (ms_user != NULL)
*ms_user = total_user;
if (ms_sys != NULL)
*ms_sys = total_sys;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
+ if (ms_user_diff || ms_sys_diff) {
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
- prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
- prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
- t_start = now;
+ prev_total_user = (ErtsMonotonicTime) ((t_start.tms_utime * 1000) / SYS_CLK_TCK);
+ prev_total_sys = (ErtsMonotonicTime) ((t_start.tms_stime * 1000) / SYS_CLK_TCK);
+ t_start = now;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
- if (ms_user_diff != NULL)
- *ms_user_diff = total_user - prev_total_user;
+ if (ms_user_diff != NULL)
+ *ms_user_diff = total_user - prev_total_user;
- if (ms_sys_diff != NULL)
- *ms_sys_diff = total_sys - prev_total_sys;
+ if (ms_sys_diff != NULL)
+ *ms_sys_diff = total_sys - prev_total_sys;
+ }
}
/* wall clock routines */
void
-wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
+wall_clock_elapsed_time_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_diff)
{
ErtsMonotonicTime now, elapsed;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
now = time_sup.r.o.get_time();
update_last_mtime(NULL, now);
elapsed = ERTS_MONOTONIC_TO_MSEC(now);
- *ms_total = (UWord) elapsed;
- *ms_diff = (UWord) (elapsed - prev_wall_clock_elapsed);
- prev_wall_clock_elapsed = elapsed;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ *ms_total = elapsed;
+
+ if (ms_diff) {
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ *ms_diff = elapsed - prev_wall_clock_elapsed;
+ prev_wall_clock_elapsed = elapsed;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ }
}
/* get current time */
@@ -1502,7 +1508,7 @@ static time_t gregday(int year, int month, int day)
pyear = gyear - 1;
ndays = (pyear/4) - (pyear/100) + (pyear/400) + pyear*365 + 366;
}
- /* number of days in all months preceeding month */
+ /* number of days in all months preceding month */
for (m = 1; m < month; m++)
ndays += mdays[m];
/* Extra day if leap year and March or later */
@@ -1831,7 +1837,10 @@ erts_demonitor_time_offset(Eterm ref)
ErtsMonitor *mon;
ASSERT(is_internal_ref(ref));
erts_smp_mtx_lock(&erts_get_time_mtx);
- mon = erts_remove_monitor(&time_offset_monitors, ref);
+ if (is_internal_ordinary_ref(ref))
+ mon = erts_remove_monitor(&time_offset_monitors, ref);
+ else
+ mon = NULL;
if (!mon)
res = 0;
else {
@@ -1848,7 +1857,7 @@ erts_demonitor_time_offset(Eterm ref)
typedef struct {
Eterm pid;
Eterm ref;
- Eterm heap[REF_THING_SIZE];
+ Eterm heap[ERTS_REF_THING_SIZE];
} ErtsTimeOffsetMonitorInfo;
typedef struct {
@@ -1866,14 +1875,14 @@ save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
cntxt = (ErtsTimeOffsetMonitorContext *) vcntxt;
mix = (cntxt->ix)++;
- cntxt->to_mon_info[mix].pid = mon->pid;
+ cntxt->to_mon_info[mix].pid = mon->u.pid;
to_hp = &cntxt->to_mon_info[mix].heap[0];
- ASSERT(is_internal_ref(mon->ref));
+ ASSERT(is_internal_ordinary_ref(mon->ref));
from_hp = internal_ref_val(mon->ref);
- ASSERT(thing_arityval(*from_hp) + 1 == REF_THING_SIZE);
+ ASSERT(thing_arityval(*from_hp) + 1 == ERTS_REF_THING_SIZE);
- for (hix = 0; hix < REF_THING_SIZE; hix++)
+ for (hix = 0; hix < ERTS_REF_THING_SIZE; hix++)
to_hp[hix] = from_hp[hix];
cntxt->to_mon_info[mix].ref
@@ -1933,7 +1942,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
hp = (Eterm *) (tmp + no_monitors*sizeof(ErtsTimeOffsetMonitorInfo));
hsz = 6; /* 5-tuple */
- hsz += REF_THING_SIZE;
+ hsz += ERTS_REF_THING_SIZE;
hsz += ERTS_SINT64_HEAP_SIZE(new_offset);
if (IS_SSMALL(new_offset))
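
Both reworked time functions above now share one shape: the totals are computed from a fresh reading with no shared state, so the timeofday mutex is only taken when the caller actually asks for a diff against the previous call. A sketch of that lock-avoidance pattern with a hypothetical pthread mutex in place of erts_timeofday_mtx:

    #include <pthread.h>

    static pthread_mutex_t tod_mtx = PTHREAD_MUTEX_INITIALIZER;
    static long prev_elapsed;      /* last reading, guarded by tod_mtx */

    /* ms_total is always produced; the mutex is only taken when the
     * caller wants the diff against the previous call. */
    static void wall_clock_elapsed(long now_ms, long *ms_total, long *ms_diff)
    {
        *ms_total = now_ms;
        if (ms_diff) {
            pthread_mutex_lock(&tod_mtx);
            *ms_diff = now_ms - prev_elapsed;
            prev_elapsed = now_ms;
            pthread_mutex_unlock(&tod_mtx);
        }
    }

    int main(void)
    {
        long total, diff;
        wall_clock_elapsed(1000, &total, &diff);   /* takes the mutex */
        wall_clock_elapsed(2500, &total, NULL);    /* lock-free fast path */
        return 0;
    }
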
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 00bb114c26..db7d0ac449 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -336,7 +336,8 @@ void erts_init_trace(void) {
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+ erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
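Editor's note: throughout this series, erts_smp_rwmtx_init_opt() and erts_smp_mtx_init() gain two extra arguments: a term to disambiguate instances (NIL here) and lock flags that classify the lock for the reworked lock-counting code in erl_lock_flags.h. A rough standalone analogue of carrying that metadata alongside the mutex; all names below are invented for illustration:

#include <pthread.h>

enum {
    LOCK_PROPERTY_STATIC = 1 << 0,   /* lock is never destroyed at runtime */
    LOCK_CATEGORY_DEBUG  = 1 << 4    /* reporting category for lcnt */
};

typedef struct {
    pthread_mutex_t mtx;
    const char *name;                /* shown by lock-count reports */
    unsigned flags;
} NamedMutex;

static void named_mutex_init(NamedMutex *m, const char *name, unsigned flags)
{
    pthread_mutex_init(&m->mtx, NULL);
    m->name = name;
    m->flags = flags;                /* classification only, not behaviour */
}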
@@ -781,7 +782,8 @@ trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
tmp = make_small(0);
} else {
hp = HAlloc(p, 4);
- tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
+ tmp = TUPLE3(hp,p->current->module,p->current->function,
+ make_small(p->current->arity));
hp += 4;
}
@@ -1027,14 +1029,14 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
{
Eterm mfa;
- BeamInstr *code_ptr = find_function_from_pc(pc);
+ ErtsCodeMFA *cmfa = find_function_from_pc(pc);
-
- if (!code_ptr) {
+ if (!cmfa) {
mfa = am_undefined;
} else {
Eterm *hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
}
send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
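Editor's note: most of the churn in erl_trace.c is mechanical. Bare BeamInstr[3] arrays holding module/function/arity give way to the ErtsCodeMFA struct from code_ix.h, so the magic indices 0/1/2 become named fields. A compilable illustration of the before/after shape; Eterm and the struct here are simplified stand-ins for the real types:

#include <stdio.h>

typedef unsigned long Eterm;

typedef struct {
    Eterm module;    /* tagged atom */
    Eterm function;  /* tagged atom */
    Eterm arity;     /* untagged integer */
} CodeMFA;

/* Before: callers index a bare array and must remember the layout. */
static void print_mfa_old(const Eterm mfa[3])
{
    printf("{%lu,%lu,%lu}\n", mfa[0], mfa[1], mfa[2]);
}

/* After: the same words behind named, type-checked fields. */
static void print_mfa_new(const CodeMFA *mfa)
{
    printf("{%lu,%lu,%lu}\n", mfa->module, mfa->function, mfa->arity);
}

int main(void)
{
    CodeMFA mfa = { 10, 20, 3 };
    print_mfa_old((const Eterm *)&mfa);  /* layouts coincide in this sketch */
    print_mfa_new(&mfa);
    return 0;
}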
@@ -1046,11 +1048,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
+erts_trace_return(Process* p, ErtsCodeMFA *mfa,
+ Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa, mod, name;
- int arity;
+ Eterm mfa_tuple;
Uint meta_flags, *tracee_flags;
ASSERT(tracer);
@@ -1084,15 +1086,13 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
tracee_flags = &meta_flags;
}
- mod = fi[0];
- name = fi[1];
- arity = fi[2];
-
hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, mod, name, make_small(arity));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function,
+ make_small(mfa->arity));
hp += 4;
send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
- NULL, TRACE_FUN_T_CALL, am_return_from, mfa, retval, am_true);
+ NULL, TRACE_FUN_T_CALL, am_return_from, mfa_tuple,
+ retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1103,7 +1103,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
* Where Class is atomic but Value is any term.
*/
void
-erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
+erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer)
{
Eterm* hp;
@@ -1142,7 +1142,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
}
hp = HAlloc(p, 7);
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function, make_small(mfa->arity));
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
@@ -1165,7 +1165,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
* if it is a pid or port we do a meta trace.
*/
Uint32
-erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
+erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
@@ -1244,7 +1244,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* such as size_object() and copy_struct(), we must make sure that we
* temporarily convert any match contexts to sub binaries.
*/
- arity = (Eterm) mfa[2];
+ arity = info->mfa.arity;
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
@@ -1339,7 +1339,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
hp += 2;
}
}
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
+ mfa_tuple = TUPLE3(hp, info->mfa.module, info->mfa.function, mfa_tuple);
hp += 4;
/*
@@ -1462,7 +1462,8 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
}
void
-monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint time)
+monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
+ ErtsCodeMFA *out_fp, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
@@ -1493,11 +1494,13 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
tmo = erts_bld_uint(&hp, NULL, time);
if (in_fp != NULL) {
- in_mfa = TUPLE3(hp,(Eterm) in_fp[0], (Eterm) in_fp[1], make_small(in_fp[2]));
+ in_mfa = TUPLE3(hp, in_fp->module, in_fp->function,
+ make_small(in_fp->arity));
hp +=4;
}
if (out_fp != NULL) {
- out_mfa = TUPLE3(hp,(Eterm) out_fp[0], (Eterm) out_fp[1], make_small(out_fp[2]));
+ out_mfa = TUPLE3(hp, out_fp->module, out_fp->function,
+ make_small(out_fp->arity));
hp +=4;
}
tmo_tpl = TUPLE2(hp,am_timeout, tmo);
@@ -1873,7 +1876,6 @@ trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
} else {
ProcBin* pb = (ProcBin *)*hp;
Binary *bptr = erts_bin_nrml_alloc(sz);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, bin, sz);
pb->thing_word = HEADER_PROC_BIN;
pb->size = sz;
@@ -1998,8 +2000,8 @@ trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
TRACE_FUN_T_RECEIVE,
am_receive, data, THE_NON_VALUE, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
if (orig_hp)
erts_free(ERTS_ALC_T_TMP, orig_hp);
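Editor's note: this hunk and the next replace the open-coded "decrement, test for zero, free" sequence with erts_bin_release(). Folding the three steps into one helper removes a class of copy/paste leaks and keeps the decrement and the free atomic with respect to each other. The shape of such a helper, sketched with C11 atomics rather than the erts_refc API:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_long refc;
    /* payload follows in the real structure */
} RcBin;

static void bin_release(RcBin *bp)
{
    /* fetch_sub returns the previous value: 1 means we held the last ref */
    if (atomic_fetch_sub_explicit(&bp->refc, 1, memory_order_acq_rel) == 1)
        free(bp);
}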
@@ -2049,8 +2051,8 @@ void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
am_send, msg, to, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -2132,7 +2134,7 @@ profile_runnable_proc(Process *p, Eterm status){
Eterm *hp, msg;
Eterm where = am_undefined;
ErlHeapFragment *bp = NULL;
- BeamInstr *current = NULL;
+ ErtsCodeMFA *cmfa = NULL;
#ifndef ERTS_SMP
#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
@@ -2154,14 +2156,14 @@ profile_runnable_proc(Process *p, Eterm status){
if (!ERTS_PROC_IS_EXITING(p)) {
if (p->current) {
- current = p->current;
+ cmfa = p->current;
} else {
- current = find_function_from_pc(p->i);
+ cmfa = find_function_from_pc(p->i);
}
}
#ifdef ERTS_SMP
- if (!current) {
+ if (!cmfa) {
hsz -= 4;
}
@@ -2169,8 +2171,10 @@ profile_runnable_proc(Process *p, Eterm status){
hp = bp->mem;
#endif
- if (current) {
- where = TUPLE3(hp, current[0], current[1], make_small(current[2])); hp += 4;
+ if (cmfa) {
+ where = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
+ hp += 4;
} else {
where = make_small(0);
}
@@ -2622,7 +2626,8 @@ init_sys_msg_dispatcher(void)
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
- erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+ erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
@@ -3182,7 +3187,9 @@ static void init_tracer_nif()
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+
+ erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_tracer_nif_clear();
@@ -3226,7 +3233,7 @@ static int tracer_cmp_fun(void* a, void* b)
static HashValue tracer_hash_fun(void* obj)
{
- return make_internal_hash(((ErtsTracerNif*)obj)->module);
+ return make_internal_hash(((ErtsTracerNif*)obj)->module, 0);
}
static void *tracer_alloc_fun(void* tmpl)
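Editor's note: make_internal_hash() grows a salt parameter (see the erl_utils.h hunk below); call sites wanting the old behaviour pass 0, as tracer_hash_fun does here. The point of a salt is that two tables hashing the same keys stop sharing collision patterns. An illustrative salted mixer, not the VM's actual hash function:

#include <stdint.h>

static uint32_t salted_hash(uint32_t key, uint32_t salt)
{
    uint32_t h = key ^ (salt * 0x9E3779B9u);  /* fold the salt in first */
    h ^= h >> 16;
    h *= 0x85EBCA6Bu;
    h ^= h >> 13;
    return h;
}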
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 0095d4386b..01fe1e5e23 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -101,11 +101,11 @@ void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
void trace_send(Process*, Eterm, Eterm);
void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
+Uint32 erts_call_trace(Process *p, ErtsCodeInfo *info, struct binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
+void erts_trace_return(Process* p, ErtsCodeMFA *mfa, Eterm retval,
ErtsTracer *tracer);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
+void erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
void trace_sched(Process*, ErtsProcLocks, Eterm);
@@ -134,7 +134,8 @@ void erts_system_profile_setup_active_schedulers(void);
/* system_monitor */
void monitor_long_gc(Process *p, Uint time);
-void monitor_long_schedule_proc(Process *p, BeamInstr *in_i, BeamInstr *out_i, Uint time);
+void monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_i,
+ ErtsCodeMFA *out_i, Uint time);
void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time);
void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
@@ -142,6 +143,11 @@ Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
@@ -176,7 +182,7 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index bd5e1482fb..2d1d1443a7 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -123,19 +123,16 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
-static RestartContext *get_rc_from_bin(Eterm bin)
+static RestartContext *get_rc_from_bin(Eterm mref)
{
- Binary *mbp;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin));
-
- mbp = ((ProcBin *) binary_val(bin))->val;
-
+ Binary *mbp = erts_magic_ref2bin(mref);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
return (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
@@ -148,8 +145,8 @@ static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc)
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
Eterm *hp;
memcpy(restartp,rc,sizeof(RestartContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(p), mbp);
}
@@ -1890,74 +1887,57 @@ binary_to_atom(Process* proc, Eterm bin, Eterm enc, int must_exist)
byte* bytes;
byte *temp_alloc = NULL;
Uint bin_size;
+ Eterm a;
if ((bytes = erts_get_aligned_binary_bytes(bin, &temp_alloc)) == 0) {
BIF_ERROR(proc, BADARG);
}
bin_size = binary_size(bin);
+
if (enc == am_latin1) {
- Eterm a;
- if (bin_size > MAX_ATOM_CHARACTERS) {
- system_limit:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, SYSTEM_LIMIT);
- }
if (!must_exist) {
- a = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_LATIN1,
- 0);
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(a))
- goto badarg;
- BIF_RET(a);
- } else if (erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_RET(a);
- } else {
+ int lix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_LATIN1,
+ 0);
+ if (lix == ATOM_BAD_ENCODING_ERROR) {
+ badarg:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, BADARG);
+ } else if (lix == ATOM_MAX_CHARS_ERROR) {
+ system_limit:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, SYSTEM_LIMIT);
+ }
+
+ a = make_atom(lix);
+ } else if (!erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
goto badarg;
}
- } else if (enc == am_utf8 || enc == am_unicode) {
- Eterm res;
- Uint num_chars = 0;
- const byte* p = bytes;
- Uint left = bin_size;
- while (left) {
- if (++num_chars > MAX_ATOM_CHARACTERS) {
+ } else if (enc == am_utf8 || enc == am_unicode) {
+ if (!must_exist) {
+ int uix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_UTF8,
+ 0);
+ if (uix == ATOM_BAD_ENCODING_ERROR) {
+ goto badarg;
+ } else if (uix == ATOM_MAX_CHARS_ERROR) {
goto system_limit;
}
- if ((p[0] & 0x80) == 0) {
- ++p;
- --left;
- }
- else if (left >= 2
- && (p[0] & 0xFE) == 0xC2 /* only allow latin1 subset */
- && (p[1] & 0xC0) == 0x80) {
- p += 2;
- left -= 2;
- }
- else goto badarg;
- }
- if (!must_exist) {
- res = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_UTF8,
- 0);
+ a = make_atom(uix);
}
- else if (!erts_atom_get((char*)bytes, bin_size, &res, ERTS_ATOM_ENC_UTF8)) {
+ else if (!erts_atom_get((char*)bytes, bin_size, &a, ERTS_ATOM_ENC_UTF8)) {
goto badarg;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(res))
- goto badarg;
- BIF_RET(res);
} else {
- badarg:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, BADARG);
+ goto badarg;
}
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
}
BIF_RETTYPE binary_to_atom_2(BIF_ALIST_2)
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 81800752f0..3d28b05752 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -117,11 +117,10 @@ int erts_fit_in_bits_int32(Sint32);
int erts_fit_in_bits_uint(Uint);
Sint erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
-Uint32 make_internal_hash(Eterm);
+Uint32 make_internal_hash(Eterm, Uint32 salt);
void erts_save_emu_args(int argc, char **argv);
Eterm erts_get_emu_args(struct process *c_p);
@@ -132,6 +131,7 @@ Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+#define erts_bld_monotonic_time erts_bld_sint64
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 3d8c647386..0b8d78c469 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -36,7 +36,7 @@
#define EMULATOR "BEAM"
#define SEQ_TRACE 1
-#define CONTEXT_REDS 2000 /* Swap process out after this number */
+#define CONTEXT_REDS 4000 /* Swap process out after this number */
#define MAX_ARG 255 /* Max number of arguments allowed */
#define MAX_REG 1024 /* Max number of x(N) registers used */
@@ -159,11 +159,12 @@ typedef struct op_entry {
int sz; /* Number of loaded words. */
char* pack; /* Instructions for packing engine. */
char* sign; /* Signature string. */
- unsigned count; /* Number of times executed. */
} OpEntry;
-extern OpEntry opc[]; /* Description of all instructions. */
-extern int num_instructions; /* Number of instruction in opc[]. */
+extern const OpEntry opc[]; /* Description of all instructions. */
+extern const int num_instructions; /* Number of instructions in opc[]. */
+
+extern Uint erts_instr_count[];
/* some constants for various table sizes etc */
@@ -198,4 +199,11 @@ extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#include "erl_term.h"
+#ifdef NO_JUMP_TABLE
+#define BeamOp(Op) (Op)
+#else
+extern void** beam_ops;
+#define BeamOp(Op) beam_ops[(Op)]
+#endif
+
#endif /* __ERL_VM_H__ */
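Editor's note: the BeamOp() macro moved here abstracts over the two dispatch strategies the emulator supports. With a jump table (GCC's labels-as-values extension) the loader stores label addresses directly in loaded code; under NO_JUMP_TABLE it stores plain opcodes for a switch. A self-contained toy interpreter showing both, assuming only standard C plus the GCC extension:

#include <stdio.h>

enum { OP_PUSH, OP_ADD, OP_HALT };

static int run(const int *code)
{
    int stack[16], sp = 0;
#if defined(__GNUC__) && !defined(NO_JUMP_TABLE)
    /* Token-threaded: opcodes index a table of label addresses. The real
     * loader goes one step further and stores BeamOp(op), i.e. the label
     * address itself, in the code stream. */
    static void *ops[] = { &&op_push, &&op_add, &&op_halt };
#define DISPATCH() goto *ops[*code++]
    DISPATCH();
op_push: stack[sp++] = *code++; DISPATCH();
op_add:  sp--; stack[sp-1] += stack[sp]; DISPATCH();
op_halt: return stack[sp-1];
#undef DISPATCH
#else
    for (;;) {
        switch (*code++) {
        case OP_PUSH: stack[sp++] = *code++; break;
        case OP_ADD:  sp--; stack[sp-1] += stack[sp]; break;
        case OP_HALT: return stack[sp-1];
        }
    }
#endif
}

int main(void)
{
    const int prog[] = { OP_PUSH, 2, OP_PUSH, 3, OP_ADD, OP_HALT };
    printf("%d\n", run(prog)); /* prints 5 */
    return 0;
}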
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
index 6c33b12dd0..64c08b1570 100644
--- a/erts/emulator/beam/error.h
+++ b/erts/emulator/beam/error.h
@@ -21,6 +21,8 @@
#ifndef __ERROR_H__
#define __ERROR_H__
+#include "code_ix.h"
+
/*
* There are three primary exception classes:
*
@@ -37,14 +39,11 @@
*/
/*
- * Bits 0-1 index the 'exception class tag' table.
- */
-#define EXC_CLASSBITS 3
-#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
-
-/*
* Exception class tags (indices into the 'exception_tag' array)
*/
+#define EXTAG_OFFSET 0
+#define EXTAG_BITS 2
+
#define EXTAG_ERROR 0
#define EXTAG_EXIT 1
#define EXTAG_THROWN 2
@@ -52,20 +51,31 @@
#define NUMBER_EXC_TAGS 3 /* The number of exception class tags */
/*
- * Exit code flags (bits 2-7)
+ * Index to the 'exception class tag' table.
+ */
+#define EXC_CLASSBITS ((1<<EXTAG_BITS)-1)
+#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
+
+/*
+ * Exit code flags
*
* These flags make it easier and quicker to decide what to do with the
* exception in the early stages, before a handler is found, and also
* maintain some separation between the class tag and the actions.
*/
-#define EXF_PANIC (1<<2) /* ignore catches */
-#define EXF_THROWN (1<<3) /* nonlocal return */
-#define EXF_LOG (1<<4) /* write to logger on termination */
-#define EXF_NATIVE (1<<5) /* occurred in native code */
-#define EXF_SAVETRACE (1<<6) /* save stack trace in internal form */
-#define EXF_ARGLIST (1<<7) /* has arglist for top of trace */
+#define EXF_OFFSET EXTAG_BITS
+#define EXF_BITS 7
-#define EXC_FLAGBITS 0x00fc
+#define EXF_PANIC (1<<(0+EXF_OFFSET)) /* ignore catches */
+#define EXF_THROWN (1<<(1+EXF_OFFSET)) /* nonlocal return */
+#define EXF_LOG (1<<(2+EXF_OFFSET)) /* write to logger on termination */
+#define EXF_NATIVE (1<<(3+EXF_OFFSET)) /* occurred in native code */
+#define EXF_SAVETRACE (1<<(4+EXF_OFFSET)) /* save stack trace in internal form */
+#define EXF_ARGLIST (1<<(5+EXF_OFFSET)) /* has arglist for top of trace */
+#define EXF_RESTORE_NIF (1<<(6+EXF_OFFSET)) /* restore original bif/nif */
+
+#define EXC_FLAGBITS (((1<<(EXF_BITS+EXF_OFFSET))-1) \
+ & ~((1<<(EXF_OFFSET))-1))
/*
* The primary fields of an exception code
@@ -75,11 +85,16 @@
#define NATIVE_EXCEPTION(x) ((x) | EXF_NATIVE)
/*
- * Bits 8-12 of the error code are used for indexing into
+ * Error code used for indexing into
* the short-hand error descriptor table.
*/
-#define EXC_INDEXBITS 0x1f00
-#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> 8)
+#define EXC_OFFSET (EXF_OFFSET+EXF_BITS)
+#define EXC_BITS 5
+
+#define EXC_INDEXBITS (((1<<(EXC_BITS+EXC_OFFSET))-1) \
+ & ~((1<<(EXC_OFFSET))-1))
+
+#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> EXC_OFFSET)
/*
* Exit codes used for raising a fresh exception. The primary exceptions
@@ -105,46 +120,46 @@
/* Error with given arglist term
* (exit reason in p->fvalue) */
-#define EXC_NORMAL ((1 << 8) | EXC_EXIT)
+#define EXC_NORMAL ((1 << EXC_OFFSET) | EXC_EXIT)
/* Normal exit (reason 'normal') */
-#define EXC_INTERNAL_ERROR ((2 << 8) | EXC_ERROR | EXF_PANIC)
+#define EXC_INTERNAL_ERROR ((2 << EXC_OFFSET) | EXC_ERROR | EXF_PANIC)
/* Things that shouldn't happen */
-#define EXC_BADARG ((3 << 8) | EXC_ERROR)
+#define EXC_BADARG ((3 << EXC_OFFSET) | EXC_ERROR)
/* Bad argument to a BIF */
-#define EXC_BADARITH ((4 << 8) | EXC_ERROR)
+#define EXC_BADARITH ((4 << EXC_OFFSET) | EXC_ERROR)
/* Bad arithmetic */
-#define EXC_BADMATCH ((5 << 8) | EXC_ERROR)
+#define EXC_BADMATCH ((5 << EXC_OFFSET) | EXC_ERROR)
/* Bad match in function body */
-#define EXC_FUNCTION_CLAUSE ((6 << 8) | EXC_ERROR)
+#define EXC_FUNCTION_CLAUSE ((6 << EXC_OFFSET) | EXC_ERROR)
/* No matching function head */
-#define EXC_CASE_CLAUSE ((7 << 8) | EXC_ERROR)
+#define EXC_CASE_CLAUSE ((7 << EXC_OFFSET) | EXC_ERROR)
/* No matching case clause */
-#define EXC_IF_CLAUSE ((8 << 8) | EXC_ERROR)
+#define EXC_IF_CLAUSE ((8 << EXC_OFFSET) | EXC_ERROR)
/* No matching if clause */
-#define EXC_UNDEF ((9 << 8) | EXC_ERROR)
+#define EXC_UNDEF ((9 << EXC_OFFSET) | EXC_ERROR)
/* No function/arity that matches */
-#define EXC_BADFUN ((10 << 8) | EXC_ERROR)
+#define EXC_BADFUN ((10 << EXC_OFFSET) | EXC_ERROR)
/* Not an existing fun */
-#define EXC_BADARITY ((11 << 8) | EXC_ERROR)
+#define EXC_BADARITY ((11 << EXC_OFFSET) | EXC_ERROR)
/* Attempt to call fun with
* wrong number of arguments. */
-#define EXC_TIMEOUT_VALUE ((12 << 8) | EXC_ERROR)
+#define EXC_TIMEOUT_VALUE ((12 << EXC_OFFSET) | EXC_ERROR)
/* Bad time out value */
-#define EXC_NOPROC ((13 << 8) | EXC_ERROR)
+#define EXC_NOPROC ((13 << EXC_OFFSET) | EXC_ERROR)
/* No process or port */
-#define EXC_NOTALIVE ((14 << 8) | EXC_ERROR)
+#define EXC_NOTALIVE ((14 << EXC_OFFSET) | EXC_ERROR)
/* Not distributed */
-#define EXC_SYSTEM_LIMIT ((15 << 8) | EXC_ERROR)
+#define EXC_SYSTEM_LIMIT ((15 << EXC_OFFSET) | EXC_ERROR)
/* Ran out of something */
-#define EXC_TRY_CLAUSE ((16 << 8) | EXC_ERROR)
+#define EXC_TRY_CLAUSE ((16 << EXC_OFFSET) | EXC_ERROR)
/* No matching try clause */
-#define EXC_NOTSUP ((17 << 8) | EXC_ERROR)
+#define EXC_NOTSUP ((17 << EXC_OFFSET) | EXC_ERROR)
/* Not supported */
-#define EXC_BADMAP ((18 << 8) | EXC_ERROR)
+#define EXC_BADMAP ((18 << EXC_OFFSET) | EXC_ERROR)
/* Bad map */
-#define EXC_BADKEY ((19 << 8) | EXC_ERROR)
+#define EXC_BADKEY ((19 << EXC_OFFSET) | EXC_ERROR)
/* Bad key in map */
#define NUMBER_EXIT_CODES 20 /* The number of exit code indices */
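Editor's note: the renumbering above is pure mechanics once the masks are derived. With 2 class bits, 7 flag bits (the new EXF_RESTORE_NIF widens the field from 6) and 5 index bits, the old literal masks 0x3/0x00fc/0x1f00 become 0x3/0x1fc/0x3e00, with everything above the flags shifted up one bit. A quick self-check of that arithmetic, mirroring the macros in this hunk:

#include <assert.h>
#include <stdio.h>

#define EXTAG_BITS 2
#define EXF_OFFSET EXTAG_BITS
#define EXF_BITS 7
#define EXC_OFFSET (EXF_OFFSET + EXF_BITS)
#define EXC_BITS 5

#define EXC_CLASSBITS ((1 << EXTAG_BITS) - 1)
#define EXC_FLAGBITS  (((1 << (EXF_BITS + EXF_OFFSET)) - 1) \
                       & ~((1 << EXF_OFFSET) - 1))
#define EXC_INDEXBITS (((1 << (EXC_BITS + EXC_OFFSET)) - 1) \
                       & ~((1 << EXC_OFFSET) - 1))

int main(void)
{
    /* The old hard-coded masks were 0x3, 0x00fc and 0x1f00; the extra
     * flag bit widens the flag field and shifts the index field up,
     * exactly what the derived form absorbs automatically. */
    assert(EXC_CLASSBITS == 0x3);
    assert(EXC_FLAGBITS == 0x1fc);
    assert(EXC_INDEXBITS == 0x3e00);
    printf("class %#x flags %#x index %#x\n",
           EXC_CLASSBITS, EXC_FLAGBITS, EXC_INDEXBITS);
    return 0;
}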
@@ -152,7 +167,7 @@
/*
* Internal pseudo-error codes.
*/
-#define TRAP (1 << 8) /* BIF Trap to erlang code */
+#define TRAP (1 << EXC_OFFSET) /* BIF Trap to erlang code */
/*
* Aliases for some common exit codes.
@@ -197,7 +212,7 @@ struct StackTrace {
Eterm header; /* bignum header - must be first in struct */
Eterm freason; /* original exception reason is saved in the struct */
BeamInstr* pc;
- BeamInstr* current;
+ ErtsCodeMFA* current;
int depth; /* number of saved pointers in trace[] */
BeamInstr *trace[1]; /* varying size - must be last in struct */
};
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 6cf6fed31a..828c833ffc 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -103,7 +103,8 @@ static HashValue
export_hash(struct export_entry* ee)
{
Export* x = ee->ep;
- return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
+ return EXPORT_HASH(x->info.mfa.module, x->info.mfa.function,
+ x->info.mfa.arity);
}
static int
@@ -111,9 +112,9 @@ export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
Export* tmpl = tmpl_e->ep;
Export* obj = obj_e->ep;
- return !(tmpl->code[0] == obj->code[0] &&
- tmpl->code[1] == obj->code[1] &&
- tmpl->code[2] == obj->code[2]);
+ return !(tmpl->info.mfa.module == obj->info.mfa.module &&
+ tmpl->info.mfa.function == obj->info.mfa.function &&
+ tmpl->info.mfa.arity == obj->info.mfa.arity);
}
@@ -130,21 +131,23 @@ export_alloc(struct export_entry* tmpl_e)
blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
obj = &blob->exp;
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
+ obj->info.op = 0;
+ obj->info.u.gen_bp = NULL;
+ obj->info.mfa.module = tmpl->info.mfa.module;
+ obj->info.mfa.function = tmpl->info.mfa.function;
+ obj->info.mfa.arity = tmpl->info.mfa.arity;
+ obj->beam[0] = (BeamInstr) em_call_error_handler;
+ obj->beam[1] = 0;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
- obj->addressv[ix] = obj->code+3;
+ obj->addressv[ix] = obj->beam;
blob->entryv[ix].slot.index = -1;
blob->entryv[ix].ep = &blob->exp;
}
ix = 0;
+
+ DBG_TRACE_MFA_P(&obj->info.mfa, "export allocation at %p", obj);
}
else { /* Existing entry in another table, use free entry in blob */
blob = entry_to_blob(tmpl_e);
@@ -163,9 +166,12 @@ export_free(struct export_entry* obj)
obj->slot.index = -1;
for (i=0; i < ERTS_NUM_CODE_IX; i++) {
if (blob->entryv[i].slot.index >= 0) {
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export entry slot %u freed for %p",
+ (obj - blob->entryv), &blob->exp);
return;
}
}
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp);
erts_free(ERTS_ALC_T_EXPORT, blob);
erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
@@ -176,7 +182,8 @@ init_export_table(void)
HashFunctions f;
int i;
- erts_smp_mtx_init(&export_staging_lock, "export_tab");
+ erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
@@ -224,7 +231,9 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
while (b != (HashBucket*) 0) {
Export* ep = ((struct export_entry*) b)->ep;
- if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
+ if (ep->info.mfa.module == m &&
+ ep->info.mfa.function == f &&
+ ep->info.mfa.arity == a) {
return ep;
}
b = b->next;
@@ -237,9 +246,9 @@ static struct export_entry* init_template(struct export_templ* templ,
{
templ->entry.ep = &templ->exp;
templ->entry.slot.index = -1;
- templ->exp.code[0] = m;
- templ->exp.code[1] = f;
- templ->exp.code[2] = a;
+ templ->exp.info.mfa.module = m;
+ templ->exp.info.mfa.function = f;
+ templ->exp.info.mfa.arity = a;
return &templ->entry;
}
@@ -263,8 +272,8 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
if (ee == NULL ||
- (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ (ee->ep->addressv[code_ix] == ee->ep->beam &&
+ ee->ep->beam[0] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 17fc4828ca..7c812b306c 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -33,22 +33,20 @@ typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ ErtsCodeInfo info; /* MUST be just before beam[] */
+
/*
- * code[0]: Tagged atom for module.
- * code[1]: Tagged atom for function.
- * code[2]: Arity (untagged integer).
- * code[3]: This entry is 0 unless the 'address' field points to it.
+ * beam[0]: This entry is 0 unless the 'addressv' field points to it.
* Threaded code instruction to load function
* (em_call_error_handler), execute BIF (em_apply_bif),
* or a breakpoint instruction (op_i_generic_breakpoint).
- * code[4]: Function pointer to BIF function (for BIFs only),
+ * beam[1]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
* on_load function that has not been run yet, or pointer
- * to code for function code[3] is a breakpont instruction.
+ * to code if function beam[0] is a breakpoint instruction.
* Otherwise: 0.
*/
- BeamInstr code[5];
+ BeamInstr beam[2];
} Export;
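Editor's note: this is the heart of the series. The five-word code[] array splits into a proper ErtsCodeInfo header (op word, breakpoint pointer, MFA) followed by a two-word beam[] trampoline, so an export entry begins with the same header as a regularly loaded function and pc-to-MFA lookups need no special case. Roughly, with stand-in types:

typedef unsigned long BeamInstr;
typedef unsigned long Eterm;

typedef struct { Eterm module, function; unsigned arity; } CodeMFA;

typedef struct {
    BeamInstr op;      /* op slot; takes over fake_op_func_info_for_hipe[0] */
    void *gen_bp;      /* breakpoint data; takes over the second fake word */
    CodeMFA mfa;       /* replaces code[0..2] */
} CodeInfo;

typedef struct {
    void *addressv[2]; /* one code pointer per code index (active/staging) */
    CodeInfo info;     /* MUST directly precede beam[], as in the real type */
    BeamInstr beam[2]; /* replaces code[3..4]: trampoline instr + argument */
} ExportSketch;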
@@ -74,8 +72,8 @@ extern erts_smp_mtx_t export_staging_lock;
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
- ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->beam) && \
+ ((EntryPtr)->beam[0] == (BeamInstr) em_apply_bif))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 9436259011..1560844521 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1079,7 +1079,7 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
Eterm *tp = tuple_val(BIF_ARG_1);
Eterm Term = tp[1];
Eterm bt = tp[2];
- Binary *bin = ((ProcBin *) binary_val(bt))->val;
+ Binary *bin = erts_magic_ref2bin(bt);
Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
if (is_tuple(res)) {
ASSERT(BIF_P->flags & F_DISABLE_GC);
@@ -1129,8 +1129,11 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
case 0:
flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS;
break;
- case 1:
+ case 1: /* Current default... */
flags = TERM_TO_BINARY_DFLAGS;
+ break;
+ case 2:
+ flags = TERM_TO_BINARY_DFLAGS | DFLAG_UTF8_ATOMS;
break;
default:
goto error;
@@ -1398,17 +1401,18 @@ static void b2t_destroy_context(B2TContext* context)
}
}
-static void b2t_context_destructor(Binary *context_bin)
+static int b2t_context_destructor(Binary *context_bin)
{
B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
b2t_destroy_context(ctx);
+ return 1;
}
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
{
- Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
@@ -1442,8 +1446,8 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) {
ctx->u.dc.next = &ctx->u.dc.res;
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), context_b);
return ctx;
}
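Editor's note: both conversion paths now park their continuation state behind a magic reference (erts_mk_magic_ref) instead of a zero-sized ProcBin; the destructor registered on the binary still reclaims the state if the process dies mid-trap. The general shape of this yield/resume pattern, sketched without any ERTS types:

#include <stdlib.h>
#include <string.h>

typedef struct {
    size_t done, total;       /* progress through the input */
    unsigned char *out;       /* partially built result */
} TrapCtx;

TrapCtx *ctx_new(size_t total)
{
    TrapCtx *ctx = malloc(sizeof *ctx);
    ctx->done = 0;
    ctx->total = total;
    ctx->out = malloc(total);
    return ctx;
}

void ctx_destroy(TrapCtx *ctx)   /* the "magic destructor" analogue */
{
    free(ctx->out);
    free(ctx);
}

/* Do a bounded slice of work; nonzero means "trap": call again later. */
int encode_slice(TrapCtx *ctx, const unsigned char *in, size_t budget)
{
    size_t n = ctx->total - ctx->done;
    if (n > budget)
        n = budget;
    memcpy(ctx->out + ctx->done, in + ctx->done, n);
    ctx->done += n;
    return ctx->done < ctx->total;
}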
@@ -1808,7 +1812,7 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
#endif
#define TERM_TO_BINARY_MEMCPY_FACTOR 8
-static void ttb_context_destructor(Binary *context_bin)
+static int ttb_context_destructor(Binary *context_bin)
{
TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
if (context->alive) {
@@ -1820,7 +1824,7 @@ static void ttb_context_destructor(Binary *context_bin)
case TTBEncode:
DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.ec.result_bin->intern.refc),1));
erts_bin_free(context->s.ec.result_bin);
context->s.ec.result_bin = NULL;
}
@@ -1829,19 +1833,20 @@ static void ttb_context_destructor(Binary *context_bin)
erl_zlib_deflate_finish(&(context->s.cc.stream));
if (context->s.cc.destination_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.destination_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.destination_bin->intern.refc),1));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
}
if (context->s.cc.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.result_bin->intern.refc),1));
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
}
break;
}
}
+ return 1;
}
static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
@@ -1871,8 +1876,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, PROC_BIN_SIZE+3); \
- c_term = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
return res; \
@@ -1918,7 +1923,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
result_bin = erts_bin_nrml_alloc(size);
- erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
/* Next state immediately, no need to export context */
context->state = TTBEncode;
@@ -1958,8 +1962,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -1978,7 +1981,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->s.cc.result_bin = result_bin;
result_bin = erts_bin_nrml_alloc(real_size);
- erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
context->s.cc.destination_bin = result_bin;
@@ -2026,15 +2028,15 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->next = MSO(p).first;
MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = result_bin;
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2053,13 +2055,13 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
erl_zlib_deflate_finish(&(context->s.cc.stream));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2174,12 +2176,8 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
* We use this atom as sysname in local pid/port/refs
* for the ETS compressed format (DFLAG_INTERNAL_TAGS).
*
- * We used atom '' earlier but that turned out to cause problems
- * for buggy erl_interface/ic usage of c-nodes with empty node names.
- * A long atom reduces risk of nodes actually called this and the length
- * does not matter anyway as it's encoded with atom index (ATOM_INTERNAL_REF2).
*/
-#define INTERNAL_LOCAL_SYSNAME am_await_microstate_accounting_modifications
+#define INTERNAL_LOCAL_SYSNAME am_ErtsSecretAtom
static byte*
enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
@@ -2583,7 +2581,9 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ erts_magic_ref_save_bin(obj);
+
+ i = ref_no_numbers(obj);
put_int16(i, ep);
ep += 2;
ep = enc_atom(acmp, sysname, ep, dflags);
@@ -2776,7 +2776,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
erts_emasculate_writable_binary(pb);
bytes += (pb->val->orig_bytes - before_realloc);
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
sys_memcpy(&tmp, pb, sizeof(ProcBin));
tmp.next = *off_heap;
@@ -2837,9 +2837,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Export* exp = *((Export **) (export_val(obj) + 1));
if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
*ep++ = EXPORT_EXT;
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
- ep = enc_term(acmp, make_small(exp->code[2]), ep, dflags, off_heap);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
+ ep = enc_term(acmp, make_small(exp->info.mfa.arity),
+ ep, dflags, off_heap);
} else {
/* Tag, arity */
*ep++ = SMALL_TUPLE_EXT;
@@ -2847,10 +2848,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 1;
/* Module name */
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
/* Function name */
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
}
break;
}
@@ -3438,26 +3439,35 @@ dec_term_atom_common:
r0 = get_int32(ep); /* allow full word */
ep += 4;
- ref_ext_common:
+ ref_ext_common: {
+ ErtsORefThing *rtp;
+
if (ref_words > ERTS_MAX_REF_NUMBERS)
goto error;
node = dec_get_node(sysname, cre);
if(node == erts_this_node) {
- RefThing *rtp = (RefThing *) hp;
- ref_num = (Uint32 *) (hp + REF_THING_HEAD_SIZE);
+
+ rtp = (ErtsORefThing *) hp;
+ ref_num = &rtp->num[0];
+ if (ref_words != ERTS_REF_NUMBERS) {
+ int i;
+ if (ref_words > ERTS_REF_NUMBERS)
+ goto error; /* Not a ref that we created... */
+ for (i = ref_words; i < ERTS_REF_NUMBERS; i++)
+ ref_num[i] = 0;
+ }
-#if defined(ARCH_64)
- hp += REF_THING_HEAD_SIZE + ref_words/2 + 1;
- rtp->header = make_ref_thing_header(ref_words/2 + 1);
-#else
- hp += REF_THING_HEAD_SIZE + ref_words;
- rtp->header = make_ref_thing_header(ref_words);
+#ifdef ERTS_ORDINARY_REF_MARKER
+ rtp->marker = ERTS_ORDINARY_REF_MARKER;
#endif
+ hp += ERTS_REF_THING_SIZE;
+ rtp->header = ERTS_REF_THING_HEADER;
*objp = make_internal_ref(rtp);
}
else {
ExternalThing *etp = (ExternalThing *) hp;
+ rtp = NULL;
#if defined(ARCH_64)
hp += EXTERNAL_THING_HEAD_SIZE + ref_words/2 + 1;
#else
@@ -3475,12 +3485,13 @@ dec_term_atom_common:
factory->off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
- }
-
#if defined(ARCH_64)
- *(ref_num++) = ref_words /* 32-bit arity */;
+ *(ref_num++) = ref_words /* 32-bit arity */;
#endif
+ }
+
ref_num[0] = r0;
+
for(i = 1; i < ref_words; i++) {
ref_num[i] = get_int32(ep);
ep += 4;
@@ -3489,8 +3500,26 @@ dec_term_atom_common:
if ((1 + ref_words) % 2)
ref_num[ref_words] = 0;
#endif
+ if (node == erts_this_node) {
+ /* Check if it was a magic reference... */
+ ErtsMagicBinary *mb = erts_magic_ref_lookup_bin(ref_num);
+ if (mb) {
+ /*
+ * Was a magic ref; adjust it...
+ *
+ * Refc on binary was increased by lookup above...
+ */
+ ASSERT(rtp);
+ hp = (Eterm *) rtp;
+ write_magic_ref_thing(hp, factory->off_heap, mb);
+ OH_OVERHEAD(factory->off_heap,
+ mb->orig_size / sizeof(Eterm));
+ hp += ERTS_MAGIC_REF_THING_SIZE;
+ }
+ }
break;
}
+ }
case BINARY_EXT:
{
n = get_int32(ep);
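Editor's note: decoding a local ref now ends with a side-table probe. If the number words match a live magic binary (registered by the erts_magic_ref_save_bin() call in the encoder above), the freshly written ordinary ref thing is rewritten in place as a magic ref bound to that binary. The lookup-then-upgrade step, reduced to plain C with a stubbed table:

#include <stdint.h>
#include <string.h>

typedef struct { uint32_t num[3]; } OrdRef;
typedef struct { uint32_t num[3]; void *bin; } MagicRef;

/* Stub for erts_magic_ref_lookup_bin(); the real table also bumps the
 * binary's refcount on a hit, as the comment in the hunk notes. */
static void *magic_lookup(const uint32_t num[3])
{
    (void)num;
    return NULL;
}

/* Writes an ordinary or magic ref from the decoded number words;
 * returns 1 if the ref turned out to be magic. */
static int finish_ref(const uint32_t num[3], OrdRef *ord, MagicRef *mag)
{
    void *bin = magic_lookup(num);
    if (bin) {
        memcpy(mag->num, num, sizeof mag->num);
        mag->bin = bin;
        return 1;
    }
    memcpy(ord->num, num, sizeof ord->num);
    return 0;
}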
@@ -3507,7 +3536,6 @@ dec_term_atom_common:
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
@@ -3560,7 +3588,6 @@ dec_term_atom_common:
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
@@ -3758,9 +3785,8 @@ dec_term_atom_common:
funp->arity = arity;
#ifdef HIPE
if (funp->fe->native_address == NULL) {
- hipe_set_closure_stub(funp->fe, num_free);
+ hipe_set_closure_stub(funp->fe);
}
- funp->native_address = funp->fe->native_address;
#endif
hp = factory->hp;
@@ -3832,9 +3858,6 @@ dec_term_atom_common:
funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
funp->arity = funp->fe->address[-1] - num_free;
-#ifdef HIPE
- funp->native_address = funp->fe->native_address;
-#endif
hp = factory->hp;
/* Environment */
@@ -3867,7 +3890,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
@@ -3885,7 +3908,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
@@ -3985,7 +4008,7 @@ error:
factory->hp = hp; /* the largest must be the freshest */
}
}
- else ASSERT(factory->hp == hp);
+ else ASSERT(!factory->hp || factory->hp == hp);
error_hamt:
erts_factory_undo(factory);
@@ -4116,7 +4139,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
/*fall through*/
case REF_DEF:
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ i = ref_no_numbers(obj);
result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
1 + 4*i);
break;
@@ -4273,9 +4296,9 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
{
Export* ep = *((Export **) (export_val(obj) + 1));
result += 1;
- result += encode_size_struct2(acmp, ep->code[0], dflags);
- result += encode_size_struct2(acmp, ep->code[1], dflags);
- result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.module, dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.function, dflags);
+ result += encode_size_struct2(acmp, make_small(ep->info.mfa.arity), dflags);
}
break;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 19286e1310..182d3aa44e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -42,9 +42,16 @@
#include "erl_utils.h"
#include "erl_port.h"
#include "erl_gc.h"
+#include "erl_nif.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
struct enif_func_t;
+#ifdef DEBUG
+# define ERTS_NIF_ASSERT_IN_ENV
+#endif
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -57,16 +64,59 @@ struct enif_environment_t /* ErlNifEnv */
int exception_thrown; /* boolean */
Process *tracee;
int exiting; /* boolean (dirty nifs might return in exiting state) */
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int dbg_disable_assert_in_env;
+#endif
};
+struct enif_resource_type_t
+{
+ struct enif_resource_type_t* next; /* list of all resource types */
+ struct enif_resource_type_t* prev;
+ struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
+ ErlNifResourceDtor* dtor; /* user destructor function */
+ ErlNifResourceStop* stop;
+ ErlNifResourceDown* down;
+ erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+ +1 for active erl_module_nif */
+ Eterm module;
+ Eterm name;
+};
+
+typedef struct
+{
+ erts_smp_mtx_t lock;
+ ErtsMonitor* root;
+ int pending_failed_fire;
+ int is_dying;
+
+ size_t user_data_sz;
+} ErtsResourceMonitors;
+
+typedef struct ErtsResource_
+{
+ struct enif_resource_type_t* type;
+ ErtsResourceMonitors* monitors;
+#ifdef DEBUG
+ erts_refc_t nif_refc;
+#else
+# ifdef ARCH_32
+ byte align__[4];
+# endif
+#endif
+ char data[1];
+} ErtsResource;
+
+#define DATA_TO_RESOURCE(PTR) ErtsContainerStruct(PTR, ErtsResource, data)
+#define erts_resource_ref_size(P) ERTS_MAGIC_REF_THING_SIZE
+
+extern Eterm erts_bld_resource_ref(Eterm** hp, ErlOffHeap*, ErtsResource*);
+
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern void erts_pre_dirty_nif(ErtsSchedulerData *,
- struct enif_environment_t*, Process*,
- struct erl_module_nif*);
-extern void erts_post_dirty_nif(struct enif_environment_t* env);
-#endif
+extern void erts_resource_stop(ErtsResource*, ErlNifEvent, int is_direct_call);
+void erts_fire_nif_monitor(ErtsResource*, Eterm pid, Eterm ref);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
@@ -78,6 +128,12 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee,
struct enif_func_t *,
int argc, Eterm *argv);
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
#define ERL_DE_UNLOAD 1
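Editor's note: ErtsResource above embeds the user payload at its tail, and DATA_TO_RESOURCE recovers the header from a payload pointer with a container_of-style offset, so NIF code only ever handles the data pointer. The idiom in miniature, with a simplified header and no refcounting:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct Resource {
    void (*dtor)(void *data);
    char data[1];          /* user payload starts here */
} Resource;

#define DATA_TO_RESOURCE(ptr) \
    ((Resource *)((char *)(ptr) - offsetof(Resource, data)))

static void *resource_alloc(size_t user_sz, void (*dtor)(void *))
{
    Resource *r = malloc(offsetof(Resource, data) + user_sz);
    r->dtor = dtor;
    return r->data;        /* callers only ever see the payload */
}

int main(void)
{
    void *obj = resource_alloc(64, NULL);
    assert(DATA_TO_RESOURCE(obj)->dtor == NULL); /* header recovered */
    free(DATA_TO_RESOURCE(obj));
    return 0;
}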
@@ -119,7 +175,7 @@ typedef struct de_proc_entry {
PROC_AWAIT_LOAD == Wants to be notified when we
reloaded the driver (old was locked) */
Uint flags; /* ERL_FL_DE_DEREFERENCED when reload in progress */
- Eterm heap[REF_THING_SIZE]; /* "ref heap" */
+ Eterm heap[ERTS_REF_THING_SIZE]; /* "ref heap" */
struct de_proc_entry *next;
} DE_ProcEntry;
@@ -207,118 +263,6 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
ErtsProcLocks plocks);
/*
-** Just like the driver binary but with initial flags
-** Note that the two structures Binary and ErlDrvBinary HAVE to
-** be equal except for extra fields in the beginning of the struct.
-** ErlDrvBinary is defined in erl_driver.h.
-** When driver_alloc_binary is called, a Binary is allocated, but
-** the pointer returned is to the address of the first element that
-** also occurs in the ErlDrvBinary struct (driver.*binary takes care if this).
-** The driver need never know about additions to the internal Binary of the
-** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
-** and Binary, the macros below can convert one type to the other, as they both
-** in reality are equal.
-*/
-
-#ifdef ARCH_32
- /* *DO NOT USE* only for alignment. */
-#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
-#else
-#define ERTS_BINARY_STRUCT_ALIGNMENT
-#endif
-
-/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
-#define ERTS_INTERNAL_BINARY_FIELDS \
- UWord flags; \
- erts_refc_t refc; \
- ERTS_BINARY_STRUCT_ALIGNMENT
-
-typedef struct binary {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- char orig_bytes[1]; /* to be continued */
-} Binary;
-
-#define ERTS_SIZEOF_Binary(Sz) \
- (offsetof(Binary,orig_bytes) + (Sz))
-
-typedef struct {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- void (*destructor)(Binary *);
- union {
- struct {
- ERTS_BINARY_STRUCT_ALIGNMENT
- char data[1];
- } aligned;
- struct {
- char data[1];
- } unaligned;
- } u;
-} ErtsMagicBinary;
-
-#ifdef ARCH_32
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 4
-#else
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 0
-#endif
-
-typedef union {
- Binary binary;
- ErtsMagicBinary magic_binary;
- struct {
- ERTS_INTERNAL_BINARY_FIELDS
- ErlDrvBinary binary;
- } driver;
-} ErtsBinary;
-
-/*
- * 'Binary' alignment:
- * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
- * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
- * 32-bits architectures and 8 bytes on 64-bits architectures.
- */
-
-#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
- ((ErtsBinary *) (BP))->magic_binary.destructor
-#define ERTS_MAGIC_BIN_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
-#define ERTS_MAGIC_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
-
-/* On 32-bit arch these macro variants will save memory
- by not forcing 8-byte alignment for the magic payload.
-*/
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
-#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
- ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
-#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
-#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
- ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
-
-
-#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
-#define ErlDrvBinary2Binary(D) ((Binary *) \
- (((char *) (D)) \
- - offsetof(ErtsBinary, driver.binary)))
-
-/* A "magic" binary flag */
-#define BIN_FLAG_MAGIC 1
-#define BIN_FLAG_USR1 2 /* Reserved for use by different modules too mark */
-#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
-#define BIN_FLAG_DRV 8
-
-/*
* This structure represents one type of a binary in a process.
*/
@@ -339,46 +283,12 @@ typedef struct proc_bin {
*/
#define PROC_BIN_SIZE (sizeof(ProcBin)/sizeof(Eterm))
-ERTS_GLB_INLINE Eterm erts_mk_magic_binary_term(Eterm **hpp,
- ErlOffHeap *ohp,
- Binary *mbp);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
-{
- ProcBin *pb = (ProcBin *) *hpp;
- *hpp += PROC_BIN_SIZE;
-
- ASSERT(mbp->flags & BIN_FLAG_MAGIC);
-
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = 0;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*) pb;
- pb->val = mbp;
- pb->bytes = (byte *) mbp->orig_bytes;
- pb->flags = 0;
-
- erts_refc_inc(&mbp->refc, 1);
-
- return make_binary(pb);
-}
-
-#endif
-
-#define ERTS_TERM_IS_MAGIC_BINARY(T) \
- (is_binary((T)) \
- && (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
- && (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
-
-
union erl_off_heap_ptr {
struct erl_off_heap_header* hdr;
ProcBin *pb;
struct erl_fun_thing* fun;
struct external_thing_* ext;
+ ErtsMRefThing *mref;
Eterm* ep;
void* voidp;
};
@@ -773,8 +683,8 @@ do { \
typedef struct ErtsPStack_ {
byte* pstart;
- byte* psp;
- byte* pend;
+ int offs; /* "stack pointer" as byte offset from pstart */
+ int size; /* allocated size in bytes */
ErtsAlcType_t alloc_type;
}ErtsPStack;
@@ -785,8 +695,8 @@ void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes);
#define PSTACK_DECLARE(s, DEF_PSTACK_SIZE) \
PSTACK_TYPE PSTK_DEF_STACK(s)[DEF_PSTACK_SIZE]; \
ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
- (byte*)(PSTK_DEF_STACK(s) - 1), /* psp */ \
- (byte*)(PSTK_DEF_STACK(s) + (DEF_PSTACK_SIZE)), /* pend */\
+ -(int)sizeof(PSTACK_TYPE), /* offs */ \
+ DEF_PSTACK_SIZE*sizeof(PSTACK_TYPE), /* size */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
@@ -806,19 +716,21 @@ do { \
} \
} while(0)
-#define PSTACK_IS_EMPTY(s) (s.psp < s.pstart)
+#define PSTACK_IS_EMPTY(s) (s.offs < 0)
-#define PSTACK_COUNT(s) (((PSTACK_TYPE*)s.psp + 1) - (PSTACK_TYPE*)s.pstart)
+#define PSTACK_COUNT(s) ((s.offs + sizeof(PSTACK_TYPE)) / sizeof(PSTACK_TYPE))
-#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp))
+#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
-#define PSTACK_PUSH(s) \
- (s.psp += sizeof(PSTACK_TYPE), \
- ((s.psp == s.pend) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
- sizeof(PSTACK_TYPE)) : (void)0), \
- ((PSTACK_TYPE*) s.psp))
+#define PSTACK_PUSH(s) \
+ (s.offs += sizeof(PSTACK_TYPE), \
+ ((s.offs == s.size) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
+ sizeof(PSTACK_TYPE)) : (void)0), \
+ ((PSTACK_TYPE*) (s.pstart + s.offs)))
-#define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE)))
+#define PSTACK_POP(s) ((s.offs -= sizeof(PSTACK_TYPE)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
/*
* Do not free the stack after this, it may have pointers into what
@@ -831,8 +743,8 @@ do {\
(dst)->pstart = erts_alloc(s.alloc_type,\
sizeof(PSTK_DEF_STACK(s)));\
sys_memcpy((dst)->pstart, s.pstart, _pbytes);\
- (dst)->psp = (dst)->pstart + _pbytes - sizeof(PSTACK_TYPE);\
- (dst)->pend = (dst)->pstart + sizeof(PSTK_DEF_STACK(s));\
+ (dst)->offs = s.offs;\
+ (dst)->size = s.size;\
(dst)->alloc_type = s.alloc_type;\
} else\
*(dst) = s;\
@@ -847,8 +759,8 @@ do { \
ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \
s = *(src); /* struct copy */ \
(src)->pstart = NULL; \
- ASSERT(s.psp >= (s.pstart - sizeof(PSTACK_TYPE))); \
- ASSERT(s.psp < s.pend); \
+ ASSERT(s.offs >= -(int)sizeof(PSTACK_TYPE)); \
+ ASSERT(s.offs < s.size); \
} while (0)
#define PSTACK_DESTROY_SAVED(pstack)\
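Editor's note: the PSTACK rework swaps the raw psp/pend pointers for a byte offset plus size. The win is that erl_grow_pstack() may realloc and move the buffer, and an offset stays valid across the move where a cached top pointer would dangle; it also removes the pointer-one-before-start empty marker, which was formally undefined behaviour. The same scheme on a toy int stack, error handling elided:

#include <assert.h>
#include <stdlib.h>

typedef struct {
    char *pstart;
    int offs;   /* byte offset of top element; -sizeof(int) when empty */
    int size;   /* allocated bytes */
} PStack;

static void pstack_init(PStack *s)
{
    s->size = 4 * sizeof(int);
    s->pstart = malloc(s->size);
    s->offs = -(int)sizeof(int);
}

static int *pstack_push(PStack *s)
{
    s->offs += sizeof(int);
    if (s->offs == s->size) {
        s->size *= 2;
        s->pstart = realloc(s->pstart, s->size); /* offs survives the move */
    }
    return (int *)(s->pstart + s->offs);
}

static int *pstack_pop(PStack *s)
{
    int *top = (int *)(s->pstart + s->offs);
    s->offs -= sizeof(int);
    return top;
}

int main(void)
{
    PStack s;
    pstack_init(&s);
    for (int i = 0; i < 100; i++)
        *pstack_push(&s) = i;       /* grows several times, no dangling top */
    for (int i = 99; i >= 0; i--)
        assert(*pstack_pop(&s) == i);
    assert(s.offs < 0);             /* the PSTACK_IS_EMPTY analogue */
    free(s.pstart);
    return 0;
}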
@@ -969,21 +881,6 @@ void erts_bif_info_init(void);
/* bif.c */
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-#endif
-
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
@@ -991,7 +888,7 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(Process*,Eterm*));
+ Eterm (*bif)(Process*, Eterm*, BeamInstr*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
@@ -1000,12 +897,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* beam_bif_load.c */
-#define ERTS_CPC_ALLOW_GC (1 << 0)
-#define ERTS_CPC_ALL ERTS_CPC_ALLOW_GC
-Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls);
-#ifdef ERTS_NEW_PURGE_STRATEGY
+Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls);
Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed);
-#endif
typedef struct ErtsLiteralArea_ {
struct erl_off_heap_header *off_heap;
@@ -1019,10 +912,7 @@ typedef struct ErtsLiteralArea_ {
extern erts_smp_atomic_t erts_copy_literal_area__;
#define ERTS_COPY_LITERAL_AREA() \
((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__))
-
-#ifdef ERTS_NEW_PURGE_STRATEGY
extern Process *erts_literal_area_collector;
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
extern Process *erts_dirty_process_code_checker;
#endif
@@ -1031,7 +921,7 @@ extern Process *erts_code_purger;
/* beam_load.c */
typedef struct {
- BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ ErtsCodeMFA* mfa; /* Pointer to: Mod, Name, Arity */
Uint needed; /* Heap space needed for entire tuple */
Uint32 loc; /* Location in source code */
Eterm* fname_ptr; /* Pointer to fname table */
@@ -1048,13 +938,14 @@ Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
-BeamInstr* find_function_from_pc(BeamInstr* pc);
+ErtsCodeMFA* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
+void erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin);
/* beam_ranges.c */
void erts_init_ranges(void);
@@ -1074,6 +965,9 @@ void print_process_info(fmtfn_t, void *, Process*);
void info(fmtfn_t, void *);
void loaded(fmtfn_t, void *);
+/* sighandler sys.c */
+int erts_set_signal(Eterm signal, Eterm type);
+
/* erl_arith.c */
double erts_get_positive_zero_float(void);
@@ -1105,26 +999,26 @@ typedef struct {
Eterm* shtable_start;
ErtsAlcType_t shtable_alloc_type;
Uint literal_size;
- Eterm *range_ptr;
- Uint range_sz;
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
} erts_shcopy_t;
-#define INITIALIZE_SHCOPY(info) \
-do { \
- ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA();\
- info.queue_start = info.queue_default; \
- info.bitstore_start = info.bitstore_default; \
- info.shtable_start = info.shtable_default; \
- info.literal_size = 0; \
- if (larea__) { \
- info.range_ptr = &larea__->start[0]; \
- info.range_sz = larea__->end - info.range_ptr; \
- } \
- else { \
- info.range_ptr = NULL; \
- info.range_sz = 0; \
- } \
-} while(0)
+#define INITIALIZE_SHCOPY(info) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ info.queue_start = info.queue_default; \
+ info.bitstore_start = info.bitstore_default; \
+ info.shtable_start = info.shtable_default; \
+ info.literal_size = 0; \
+ if (larea__) { \
+ info.lit_purge_ptr = &larea__->start[0]; \
+ info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
+ } \
+ else { \
+ info.lit_purge_ptr = NULL; \
+ info.lit_purge_sz = 0; \
+ } \
+ } while(0)
#define DESTROY_SHCOPY(info) \
do { \
@@ -1140,18 +1034,42 @@ do { \
} while(0)
/* copy.c */
+typedef struct {
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_literal_area_t;
+
+#define INITIALIZE_LITERAL_PURGE_AREA(Area) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ if (larea__) { \
+ (Area).lit_purge_ptr = &larea__->start[0]; \
+ (Area).lit_purge_sz = larea__->end - (Area).lit_purge_ptr; \
+ } \
+ else { \
+ (Area).lit_purge_ptr = NULL; \
+ (Area).lit_purge_sz = 0; \
+ } \
+ } while(0)
+
Eterm copy_object_x(Eterm, Process*, Uint);
#define copy_object(Term, Proc) copy_object_x(Term,Proc,0)
-Uint size_object(Eterm);
+Uint size_object_x(Eterm, erts_literal_area_t*);
+#define size_object(Term) size_object_x(Term,NULL)
+#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
+
Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
Uint size_shared(Eterm);
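For orientation, the sharing-preserving copy API above is used in two
passes; a minimal sketch (assuming a process pointer p and the usual
HAlloc/MSO helpers) might look like:

    erts_shcopy_t info;
    Uint sz;
    Eterm *hp, copy;

    INITIALIZE_SHCOPY(info);                  /* snapshots the literal purge range */
    sz = copy_shared_calculate(term, &info);  /* pass 1: size, preserving sharing */
    hp = HAlloc(p, sz);
    copy = copy_shared_perform(term, sz, &info, &hp, &MSO(p)); /* pass 2: copy */
    DESTROY_SHCOPY(info);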
-Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz);
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
#define copy_struct(Obj,Sz,HPP,OH) \
- copy_struct_x(Obj,Sz,HPP,OH,NULL)
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
+
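A hedged sketch of how the new _litopt variants pair up during a
literal-area purge (the allocation around them is illustrative):

    erts_literal_area_t la;
    Uint sz;
    Eterm *hp, copy;

    INITIALIZE_LITERAL_PURGE_AREA(la);     /* range being purged, if any */
    sz = size_object_litopt(term, &la);    /* counts literals inside the range */
    hp = HAlloc(p, sz);
    copy = copy_struct_litopt(term, sz, &hp, &p->off_heap, &la);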
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
@@ -1180,7 +1098,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
void init_emulator(void);
-void process_main(void);
+void process_main(Eterm* x_reg_array, FloatDef* f_reg_array);
void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1214,10 +1132,14 @@ extern erts_tid_t erts_main_thread;
#endif
extern int erts_compat_rel;
extern int erts_use_sender_punish;
-void erts_short_init(void);
void erl_start(int, char**);
void erts_usage(void);
Eterm erts_preloaded(Process* p);
+
+#ifndef ERTS_SMP
+extern void *erts_scheduler_stack_limit;
+#endif
+
/* erl_md5.c */
typedef struct {
@@ -1257,9 +1179,12 @@ void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
void erts_emergency_close_ports(void);
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
+Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_io_lock_count(int enable);
+void erts_lcnt_update_driver_locks(int enable);
+void erts_lcnt_update_port_locks(int enable);
#endif
/* driver_tab.c */
@@ -1278,6 +1203,11 @@ Uint64 erts_timestamp_millis(void);
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
+void *erts_calc_stacklimit(char *prev_c, UWord stacksize);
+int erts_check_below_limit(char *ptr, char *limit);
+int erts_check_above_limit(char *ptr, char *limit);
+void *erts_ptr_id(void *ptr);
+
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -1313,6 +1243,11 @@ void erts_init_external(void);
/* erl_map.c */
void erts_init_map(void);
+/* beam_debug.c */
+UWord erts_check_stack_recursion_downwards(char *start_c);
+UWord erts_check_stack_recursion_upwards(char *start_c);
+int erts_is_above_stack_limit(char *ptr);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
@@ -1353,6 +1288,8 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
void bin_write(fmtfn_t, void*, byte*, size_t);
Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
+int erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written);
+Sint erts_unicode_list_to_buf_len(Eterm list);
struct Sint_buf {
#if defined(ARCH_64)
@@ -1453,18 +1390,6 @@ Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live);
-
Uint erts_current_reductions(Process* current, Process *p);
int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p);
@@ -1501,21 +1426,20 @@ Eterm erts_msacc_request(Process *c_p, int action, Eterm *threads);
#define MatchSetRef(MPSP) \
do { \
if ((MPSP) != NULL) { \
- erts_refc_inc(&(MPSP)->refc, 1); \
+ erts_refc_inc(&(MPSP)->intern.refc, 1); \
} \
} while (0)
#define MatchSetUnref(MPSP) \
do { \
- if (((MPSP) != NULL) && erts_refc_dectest(&(MPSP)->refc, 0) <= 0) { \
- erts_bin_free(MPSP); \
+ if (((MPSP) != NULL)) { \
+ erts_bin_release(MPSP); \
} \
} while(0)
#define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP)
extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA);
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
extern void erts_match_set_release_result(Process* p);
ERTS_GLB_INLINE void erts_match_set_release_result_trace(Process* p, Eterm);
@@ -1573,8 +1497,7 @@ int erts_beam_jump_table(void);
ERTS_GLB_INLINE void dtrace_pid_str(Eterm pid, char *process_buf);
ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf);
ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf);
-ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+ERTS_GLB_INLINE void dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1608,8 +1531,7 @@ dtrace_port_str(Port *port, char *port_buf)
}
ERTS_GLB_INLINE void
-dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf)
{
if (process_buf) {
@@ -1617,7 +1539,7 @@ dtrace_fun_decode(Process *process,
}
erts_snprintf(mfa_buf, DTRACE_TERM_BUF_SIZE, "%T:%T/%d",
- module, function, arity);
+ mfa->module, mfa->function, mfa->arity);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index c3eb610fdc..b609f6de39 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -258,14 +258,7 @@ static ERTS_INLINE void port_init_instr(Port *prt
#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
- char *lock_str = "port_lock";
- erts_mtx_init_locked_x(prt->lock, lock_str, id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 0
-#endif
- );
+ erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -1447,7 +1440,7 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_unblock_fpe(sp->fpe_was_unmasked);
}
-#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (ERTS_REF_THING_SIZE + 3)
static ERTS_INLINE void
queue_port_sched_op_reply(Process *rp,
@@ -1462,7 +1455,7 @@ queue_port_sched_op_reply(Process *rp,
ref= make_internal_ref(hp);
write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
msg = TUPLE2(hp, ref, msg);
@@ -1511,7 +1504,7 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt)
}
-ErtsPortOpResult
+static ErtsPortOpResult
erts_schedule_proc2port_signal(Process *c_p,
Port *prt,
Eterm caller,
@@ -1968,7 +1961,6 @@ int
erts_port_output_async(Port *prt, Eterm from, Eterm list)
{
- ErtsPortOpResult res;
ErtsProc2PortSigData *sigdp;
erts_driver_t *drv = prt->drv_ptr;
size_t size;
@@ -2102,26 +2094,18 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
sigdp->u.output.size = size;
port_sig_callback = port_sig_output;
}
- sigdp->flags = 0;
ns_pthp = NULL;
task_flags = 0;
- res = erts_schedule_proc2port_signal(NULL,
- prt,
- ERTS_INVALID_PID,
- NULL,
- sigdp,
- task_flags,
- ns_pthp,
- port_sig_callback);
+ erts_schedule_proc2port_signal(NULL,
+ prt,
+ ERTS_INVALID_PID,
+ NULL,
+ sigdp,
+ task_flags,
+ ns_pthp,
+ port_sig_callback);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
- return 1;
- }
return 1;
bad_value:
@@ -2554,10 +2538,6 @@ erts_port_output(Process *c_p,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
return res;
}
@@ -2736,21 +2716,14 @@ erts_port_exit(Process *c_p,
&bp->off_heap);
}
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p ? c_p->common.id : from,
- refp,
- sigdp,
- 0,
- NULL,
- port_sig_exit);
-
- if (res == ERTS_PORT_OP_DROPPED) {
- if (bp)
- free_message_buffer(bp);
- }
-
- return res;
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ NULL,
+ port_sig_exit);
}
static ErtsPortOpResult
@@ -3101,7 +3074,7 @@ port_monitor(Port *prt, erts_aint32_t state, Eterm origin,
ASSERT(is_pid(origin));
ASSERT(is_atom(name) || is_port(name) || name == NIL);
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
@@ -3126,7 +3099,7 @@ static int
port_sig_monitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
+ Eterm hp[ERTS_REF_THING_SIZE];
Eterm ref = make_internal_ref(&hp);
write_ref_thing(hp, sigdp->ref[0], sigdp->ref[1], sigdp->ref[2]);
@@ -3247,7 +3220,7 @@ static int
port_sig_demonitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
+ Eterm hp[ERTS_REF_THING_SIZE];
Eterm ref = make_internal_ref(&hp);
write_ref_thing(hp, sigdp->u.demonitor.ref[0],
sigdp->u.demonitor.ref[1],
@@ -3304,10 +3277,10 @@ ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
sigdp->u.demonitor.origin = origin->common.id;
sigdp->u.demonitor.name = target->common.id;
{
- RefThing *reft = ref_thing_ptr(ref);
+ Uint32 *nums = internal_ref_numbers(ref);
/* Start from 1 to skip ref arity */
sys_memcpy(sigdp->u.demonitor.ref,
- internal_thing_ref_numbers(reft),
+ nums,
sizeof(sigdp->u.demonitor.ref));
}
@@ -3419,9 +3392,8 @@ void erts_init_io(int port_tab_size,
else if (port_tab_size < ERTS_MIN_PORTS)
port_tab_size = ERTS_MIN_PORTS;
- erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
- &drv_list_rwmtx_opts,
- "driver_list");
+ erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
driver_list = NULL;
erts_smp_tsd_key_create(&driver_list_lock_status_key,
"erts_driver_list_lock_status_key");
@@ -3433,7 +3405,7 @@ void erts_init_io(int port_tab_size,
NULL,
(ErtsPTabElementCommon *) &erts_invalid_port.common,
port_tab_size,
- common_element_size, /* Doesn't need to be excact */
+ common_element_size, /* Doesn't need to be exact */
"port_table",
legacy_port_tab,
1);
@@ -3458,67 +3430,94 @@ void erts_init_io(int port_tab_size,
}
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
- if (enable)
- erts_lcnt_init_lock_x(&dp->lock->lcnt,
- "driver_lock",
- ERTS_LCNT_LT_MUTEX,
- erts_atom_put((byte*)dp->name,
- sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1,
- 1));
-
- else
- erts_lcnt_destroy_lock(&dp->lock->lcnt);
+ if (enable) {
+ Eterm name_as_atom = erts_atom_put((byte*)dp->name, sys_strlen(dp->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock", name_as_atom,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&dp->lock->lcnt);
+ }
}
}
-static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+static void lcnt_enable_port_lock_count(Port *prt, int enable)
{
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
- if (!enable) {
- erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_destroy_lock(&prt->lock->lcnt);
+
+ if(enable) {
+ ErlDrvPDL pdl = prt->port_data_lock;
+
+ erts_lcnt_install_new_lock_info(&prt->sched.mtx.lcnt, "port_sched_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+
+ if(pdl) {
+ erts_lcnt_install_new_lock_info(&pdl->mtx.lcnt, "port_data_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_install_new_lock_info(&prt->lock->lcnt, "port_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+ } else {
+ erts_lcnt_uninstall(&prt->sched.mtx.lcnt);
+
+ if(prt->port_data_lock) {
+ erts_lcnt_uninstall(&prt->port_data_lock->mtx.lcnt);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_uninstall(&prt->lock->lcnt);
+ }
}
- else {
- erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
- "port_sched_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_init_lock_x(&prt->lock->lcnt,
- "port_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
+}
+
+void erts_lcnt_update_driver_locks(int enable) {
+ erts_driver_t *driver;
+
+ lcnt_enable_driver_lock_count(&vanilla_driver, enable);
+ lcnt_enable_driver_lock_count(&spawn_driver, enable);
+#ifndef __WIN32__
+ lcnt_enable_driver_lock_count(&forker_driver, enable);
+#endif
+ lcnt_enable_driver_lock_count(&fd_driver, enable);
+
+ erts_rwmtx_rlock(&erts_driver_list_lock);
+
+ for (driver = driver_list; driver; driver = driver->next) {
+ lcnt_enable_driver_lock_count(driver, enable);
}
+
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
-void erts_lcnt_enable_io_lock_count(int enable) {
- erts_driver_t *dp;
- int ix, max = erts_ptab_max(&erts_port);
- Port *prt;
+void erts_lcnt_update_port_locks(int enable) {
+ int i, max;
- for (ix = 0; ix < max; ix++) {
- if ((prt = erts_pix2port(ix)) != NULL) {
- lcnt_enable_port_lock_count(prt, enable);
+ max = erts_ptab_max(&erts_port);
+
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Port *port;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ port = erts_pix2port(i);
+
+ if(port != NULL) {
+ lcnt_enable_port_lock_count(port, enable);
}
- } /* for all ports */
- lcnt_enable_drv_lock_count(&vanilla_driver, enable);
- lcnt_enable_drv_lock_count(&spawn_driver, enable);
-#ifndef __WIN32__
- lcnt_enable_drv_lock_count(&forker_driver, enable);
-#endif
- lcnt_enable_drv_lock_count(&fd_driver, enable);
- /* enable lock counting in all drivers */
- for (dp = driver_list; dp; dp = dp->next) {
- lcnt_enable_drv_lock_count(dp, enable);
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
}
-} /* enable/disable lock counting of ports */
+}
+
#endif /* defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */
/*
* Buffering of data when using line oriented I/O on ports
@@ -3701,7 +3700,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
ASSERT(!prt || prt->common.id == sender);
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ASSERT(!prt || erts_lc_is_port_locked(prt));
#endif
@@ -3793,7 +3792,6 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
Binary* bptr;
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, buf, len);
pb = (ProcBin *) hp;
@@ -4194,8 +4192,8 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
ErtsMonitor *rmon;
Process *rp;
- ASSERT(is_internal_pid(mon->pid));
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ ASSERT(is_internal_pid(mon->u.pid));
+ rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
@@ -4296,7 +4294,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
Process *origin;
ErtsProcLocks origin_locks;
- if (mon->type != MON_TARGET || ! is_pid(mon->pid)) {
+ if (mon->type != MON_TARGET || ! is_pid(mon->u.pid)) {
return;
}
/*
@@ -4305,7 +4303,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
*/
origin_locks = ERTS_PROC_LOCKS_MSG_SEND | ERTS_PROC_LOCK_LINK;
- origin = erts_pid2proc(NULL, 0, mon->pid, origin_locks);
+ origin = erts_pid2proc(NULL, 0, mon->u.pid, origin_locks);
if (origin) {
DeclareTmpHeapNoproc(lhp,3);
SweepContext *ctx = (SweepContext *)ctx0;
@@ -4560,8 +4558,7 @@ static void
cleanup_scheduled_control(Binary *binp, char *bufp)
{
if (binp) {
- if (erts_refc_dectest(&binp->refc, 0) == 0)
- erts_bin_free(binp);
+ erts_bin_release(binp);
}
else {
if (bufp)
@@ -4905,7 +4902,7 @@ erts_port_control(Process* c_p,
ASSERT(bufp <= bufp + size);
ASSERT(binp->orig_bytes <= bufp
&& bufp + size <= binp->orig_bytes + binp->orig_size);
- erts_refc_inc(&binp->refc, 1);
+ erts_refc_inc(&binp->intern.refc, 1);
}
}
@@ -4932,10 +4929,9 @@ erts_port_control(Process* c_p,
0,
NULL,
port_sig_control);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_control(binp, bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5225,10 +5221,9 @@ erts_port_call(Process* c_p,
0,
NULL,
port_sig_call);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_call(bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5429,7 +5424,7 @@ reply_io_bytes(void *vreq)
rp_locks = ERTS_PROC_LOCK_MAIN;
}
- hsz = 5 /* 4-tuple */ + REF_THING_SIZE;
+ hsz = 5 /* 4-tuple */ + ERTS_REF_THING_SIZE;
erts_bld_uint64(NULL, &hsz, in);
erts_bld_uint64(NULL, &hsz, out);
@@ -5438,7 +5433,7 @@ reply_io_bytes(void *vreq)
ref = make_internal_ref(hp);
write_ref_thing(hp, req->refn[0], req->refn[1], req->refn[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
ein = erts_bld_uint64(&hp, NULL, in);
eout = erts_bld_uint64(&hp, NULL, out);
@@ -5467,7 +5462,7 @@ erts_request_io_bytes(Process *c_p)
ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
sizeof(ErtsIOBytesReq));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
refn = internal_ref_numbers(ref);
@@ -5501,7 +5496,7 @@ typedef struct {
static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
{
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
- erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->pid,mon->ref);
+ erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->u.pid, mon->ref);
}
static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
@@ -6402,7 +6397,6 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
ProcBin* pbp;
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
- erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
pbp = (ProcBin *) erts_produce_heap(&factory,
PROC_BIN_SIZE, HEAP_EXTRA);
@@ -6912,21 +6906,21 @@ ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->intern.refc, 1);
}
ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->intern.refc, 2);
}
ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->intern.refc, 1);
}
@@ -6942,7 +6936,6 @@ driver_alloc_binary(ErlDrvSizeT size)
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
- erts_refc_init(&bin->refc, 1);
return Binary2ErlDrvBinary(bin);
}
@@ -6972,8 +6965,7 @@ void driver_free_binary(ErlDrvBinary* dbin)
return;
bin = ErlDrvBinary2Binary(dbin);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
@@ -7098,7 +7090,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
+ erts_mtx_init(&pdl->mtx, "port_data_lock", pp->common.id, ERTS_LOCK_FLAGS_CATEGORY_IO);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7623,22 +7615,29 @@ erl_drv_convert_time_unit(ErlDrvTime val,
(int) to);
}
-static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
- RefThing *refp;
- ASSERT(is_internal_ref(ref));
- ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
- refp = ref_thing_ptr(ref);
- memset(mon,0,sizeof(ErlDrvMonitor));
- memcpy(mon,refp,sizeof(RefThing));
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ ASSERT(is_internal_ordinary_ref(ref));
+ sys_memcpy((void *) mon, (void *) internal_ref_val(ref),
+ ERTS_REF_THING_SIZE*sizeof(Uint));
}
+Eterm erts_driver_monitor_to_ref(Eterm *hp, const ErlDrvMonitor *mon)
+{
+ Eterm ref;
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ sys_memcpy((void *) hp, (void *) mon, ERTS_REF_THING_SIZE*sizeof(Uint));
+ ref = make_internal_ref(hp);
+ ASSERT(is_internal_ordinary_ref(ref));
+ return ref;
+}
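A round-trip sketch of the new pair (the ref value and surrounding
code are illustrative, not part of the patch):

    ErlDrvMonitor mon;
    Eterm heap[ERTS_REF_THING_SIZE];
    Eterm ref2;

    erts_ref_to_driver_monitor(ref, &mon);          /* ref -> opaque monitor  */
    ref2 = erts_driver_monitor_to_ref(heap, &mon);  /* monitor -> ref on heap */
    ASSERT(eq(ref, ref2));                          /* same ref value         */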
static int do_driver_monitor_process(Port *prt,
- Eterm *buf,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
+ Eterm buf[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
@@ -7657,7 +7656,7 @@ static int do_driver_monitor_process(Port *prt,
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- ref_to_driver_monitor(ref,monitor);
+ erts_ref_to_driver_monitor(ref,monitor);
return 0;
}
@@ -7681,32 +7680,27 @@ int driver_monitor_process(ErlDrvPort drvport,
/* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_monitor_process(prt,buf,process,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_monitor_process(prt,process,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static int do_driver_demonitor_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor)
{
+ Eterm heap[ERTS_REF_THING_SIZE];
Process *rp;
Eterm ref;
ErtsMonitor *mon;
Eterm to;
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
+ ref = erts_driver_monitor_to_ref(heap, monitor);
+
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return 1;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
rp = erts_pid2proc_opt(NULL,
0,
@@ -7744,31 +7738,26 @@ int driver_demonitor_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_demonitor_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_demonitor_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static ErlDrvTermData do_driver_get_monitored_process(Port *prt,const ErlDrvMonitor *monitor)
{
Eterm ref;
ErtsMonitor *mon;
Eterm to;
+ Eterm heap[ERTS_REF_THING_SIZE];
+
+ ref = erts_driver_monitor_to_ref(heap, monitor);
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return driver_term_nil;
}
ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
+ to = mon->u.pid;
ASSERT(is_internal_pid(to));
return (ErlDrvTermData) to;
}
@@ -7790,21 +7779,16 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_get_monitored_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ret = do_driver_get_monitored_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-
int driver_compare_monitors(const ErlDrvMonitor *monitor1,
const ErlDrvMonitor *monitor2)
{
- return memcmp(monitor1,monitor2,sizeof(ErlDrvMonitor));
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
}
void erts_fire_port_monitor(Port *prt, Eterm ref)
@@ -7824,7 +7808,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
}
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
- ref_to_driver_monitor(ref,&drv_monitor);
+ erts_ref_to_driver_monitor(ref,&drv_monitor);
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
@@ -8273,23 +8257,16 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->flags = de->driver_flags;
drv->handle = handle;
#ifdef ERTS_SMP
- if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
- drv->lock = NULL;
- else {
- drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_mtx_t));
- erts_mtx_init_x(drv->lock,
- "driver_lock",
-#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1),
-#else
- NIL,
-#endif
- 1
- );
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
+ drv->lock = NULL;
+ } else {
+ Eterm driver_id = erts_atom_put((byte *) drv->name,
+ sys_strlen(drv->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
+
+ erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
drv->entry = de;
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 5978478323..7987cb2eb5 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -26,6 +26,7 @@
#include "erl_vm.h"
#include "global.h"
#include "module.h"
+#include "beam_catches.h"
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -67,6 +68,18 @@ static int module_cmp(Module* tmpl, Module* obj)
return tmpl->module != obj->module;
}
+void erts_module_instance_init(struct erl_module_instance* modi)
+{
+ modi->code_hdr = 0;
+ modi->code_length = 0;
+ modi->catches = BEAM_CATCHES_NIL;
+ modi->nif = NULL;
+ modi->num_breakpoints = 0;
+ modi->num_traced_exports = 0;
+#ifdef HIPE
+ modi->hipe_code = NULL;
+#endif
+}
static Module* module_alloc(Module* tmpl)
{
@@ -74,18 +87,11 @@ static Module* module_alloc(Module* tmpl)
erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->curr.code_hdr = 0;
- obj->old.code_hdr = 0;
- obj->curr.code_length = 0;
- obj->old.code_length = 0;
obj->slot.index = -1;
- obj->curr.nif = NULL;
- obj->old.nif = NULL;
- obj->curr.num_breakpoints = 0;
- obj->old.num_breakpoints = 0;
- obj->curr.num_traced_exports = 0;
- obj->old.num_traced_exports = 0;
+ erts_module_instance_init(&obj->curr);
+ erts_module_instance_init(&obj->old);
obj->on_load = 0;
+ DBG_TRACE_MFA(make_atom(obj->module), 0, 0, "module_alloc");
return obj;
}
@@ -114,11 +120,13 @@ void init_module_table(void)
}
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
+
Module*
erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
@@ -139,19 +147,14 @@ erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
}
}
-Module*
-erts_put_module(Eterm mod)
+
+static Module* put_module(Eterm mod, IndexTable* mod_tab)
{
Module e;
- IndexTable* mod_tab;
int oldsz, newsz;
Module* res;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_has_code_write_permission());
-
- mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
oldsz = index_table_sz(mod_tab);
res = (Module*) index_put_entry(mod_tab, (void*) &e);
@@ -160,6 +163,15 @@ erts_put_module(Eterm mod)
return res;
}
+Module*
+erts_put_module(Eterm mod)
+{
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_has_code_write_permission());
+
+ return put_module(mod, &module_tables[erts_staging_code_ix()]);
+}
+
Module *module_code(int i, ErtsCodeIndex code_ix)
{
return (Module*) erts_index_lookup(&module_tables[code_ix], i);
@@ -181,6 +193,13 @@ static ErtsCodeIndex dbg_load_code_ix = 0;
static int entries_at_start_staging = 0;
+static ERTS_INLINE void copy_module(Module* dst_mod, Module* src_mod)
+{
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ dst_mod->on_load = src_mod->on_load;
+}
+
void module_start_staging(void)
{
IndexTable* src = &module_tables[erts_active_code_ix()];
@@ -199,10 +218,7 @@ void module_start_staging(void)
src_mod = (Module*) erts_index_lookup(src, i);
dst_mod = (Module*) erts_index_lookup(dst, i);
ASSERT(src_mod->module == dst_mod->module);
-
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
/*
@@ -214,9 +230,7 @@ void module_start_staging(void)
dst_mod = (Module*) index_put_entry(dst, src_mod);
ASSERT(dst_mod != src_mod);
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
newsz = index_table_sz(dst);
erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index f105b3f401..9d258d5dbf 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -23,6 +23,10 @@
#include "index.h"
+#ifdef HIPE
+#include "hipe_module.h"
+#endif
+
struct erl_module_instance {
BeamCodeHeader* code_hdr;
int code_length; /* Length of loaded code in bytes. */
@@ -30,6 +34,9 @@ struct erl_module_instance {
struct erl_module_nif* nif;
int num_breakpoints;
int num_traced_exports;
+#ifdef HIPE
+ HipeModule *hipe_code;
+#endif
};
typedef struct erl_module {
@@ -42,6 +49,7 @@ typedef struct erl_module {
struct erl_module_instance* on_load;
} Module;
+void erts_module_instance_init(struct erl_module_instance* modi);
Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 879daaca0a..44613c7d85 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2017. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -182,15 +182,7 @@ i_jump_on_val x f I I
i_jump_on_val y f I I
%macro: get_list GetList -pack
-get_list x x x
-get_list x x y
-get_list x y x
-get_list x y y
-
-get_list y x x
-get_list y x y
-get_list y y x
-get_list y y y
+get_list xy xy xy
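# (An "xy" operand is shorthand that the ops.tab processing expands to
#  both an x and a y register variant, so the single line above covers
#  the eight explicit combinations removed here.)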
# The following get_list instructions using x(0) are frequently used.
get_list r x x
@@ -218,12 +210,10 @@ set_tuple_element s d P
# Get tuple element
%macro: i_get_tuple_element GetTupleElement -pack
-i_get_tuple_element x P x
-i_get_tuple_element y P x
+i_get_tuple_element xy P x
%cold
-i_get_tuple_element x P y
-i_get_tuple_element y P y
+i_get_tuple_element xy P y
%hot
%macro: i_get_tuple_element2 GetTupleElement2 -pack
@@ -270,11 +260,7 @@ system_limit j
move C=cxy x==0 | jump Lbl => move_jump Lbl C
%macro: move_jump MoveJump -nonext
-move_jump f n
-move_jump f c
-move_jump f x
-move_jump f y
-
+move_jump f ncxy
# Movement to and from the stack is common
# Try to pack as much as we can into one instruction
@@ -326,12 +312,10 @@ swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \
is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D
%macro: swap_temp SwapTemp -pack
-swap_temp x x x
-swap_temp x y x
+swap_temp x xy x
%macro: swap Swap -pack
-swap x x
-swap x y
+swap x xy
move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2
move Src=x SD=x | move SD=x D=x => move_dup Src SD D
@@ -381,10 +365,7 @@ move_shift x y x
move_shift x x y
%macro: move_dup MoveDup -pack
-move_dup x x x
-move_dup x x y
-move_dup y x x
-move_dup y x y
+move_dup xy x xy
%macro: move2_par Move2Par -pack
@@ -409,7 +390,7 @@ move3 x x x x x x
move S=n D=y => init D
move S=c D=y => move S x | move x D
-%macro:move Move -pack -gen_dest
+%macro:move Move -pack
move x x
move x y
move y x
@@ -483,8 +464,7 @@ i_is_ne_exact_literal f y c
is_eq_exact Lbl Y=y X=x => is_eq_exact Lbl X Y
%macro: is_eq_exact EqualExact -fail_action -pack
-is_eq_exact f x x
-is_eq_exact f x y
+is_eq_exact f x xy
is_eq_exact f s s
%macro: is_lt IsLessThan -fail_action
@@ -538,7 +518,7 @@ i_put_tuple y I
#
put_list Const=c n Dst => move Const x | put_list x n Dst
-%macro:put_list PutList -pack -gen_dest
+%macro:put_list PutList -pack
put_list x n x
put_list y n x
@@ -620,6 +600,18 @@ test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y
%macro: test_heap_1_put_list TestHeapPutList -pack
test_heap_1_put_list I y
+#
+# is_tagged_tuple Fail=f Src=rxy Arity Atom=a
+#
+
+is_tagged_tuple Fail Literal=q Arity Atom => \
+ move Literal x | is_tagged_tuple Fail x Arity Atom
+is_tagged_tuple Fail=f c Arity Atom => jump Fail
+
+%macro:is_tagged_tuple IsTaggedTuple -fail_action
+
+is_tagged_tuple f rxy A a
+
# Test tuple & arity (head)
is_tuple Fail Literal=q => move Literal x | is_tuple Fail x
@@ -628,21 +620,16 @@ is_tuple Fail=f S=xy | test_arity Fail=f S=xy Arity => is_tuple_of_arity Fail S
%macro:is_tuple_of_arity IsTupleOfArity -fail_action
-is_tuple_of_arity f r A
-is_tuple_of_arity f x A
-is_tuple_of_arity f y A
+is_tuple_of_arity f rxy A
%macro: is_tuple IsTuple -fail_action
-is_tuple f r
-is_tuple f x
-is_tuple f y
+is_tuple f rxy
test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity
test_arity Fail=f c Arity => jump Fail
%macro: test_arity IsArity -fail_action
-test_arity f x A
-test_arity f y A
+test_arity f xy A
get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
get_tuple_element Reg=x P3 D3=x | \
@@ -667,8 +654,7 @@ is_integer Fail=f S=x | allocate Need Regs => is_integer_allocate Fail S Need Re
is_integer_allocate f x I I
%macro: is_integer IsInteger -fail_action
-is_integer f x
-is_integer f y
+is_integer f xy
is_list Fail=f n =>
is_list Fail Literal=q => move Literal x | is_list Fail x
@@ -682,8 +668,7 @@ is_list f y
is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack
-is_nonempty_list_allocate f r I t
-is_nonempty_list_allocate f x I t
+is_nonempty_list_allocate f rx I t
is_nonempty_list F=f x==0 | test_heap I1 I2 => is_non_empty_list_test_heap F I1 I2
@@ -694,12 +679,10 @@ is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \
is_nonempty_list_get_list Fail S D1 D2
%macro: is_nonempty_list_get_list IsNonemptyListGetList -fail_action -pack
-is_nonempty_list_get_list f r x x
-is_nonempty_list_get_list f x x x
+is_nonempty_list_get_list f rx x x
%macro: is_nonempty_list IsNonemptyList -fail_action
-is_nonempty_list f x
-is_nonempty_list f y
+is_nonempty_list f xy
%macro: is_atom IsAtom -fail_action
is_atom f x
@@ -721,8 +704,7 @@ is_nil Fail=f n =>
is_nil Fail=f qia => jump Fail
%macro: is_nil IsNil -fail_action
-is_nil f x
-is_nil f y
+is_nil f xy
is_binary Fail Literal=q => move Literal x | is_binary Fail x
is_binary Fail=f c => jump Fail
@@ -770,8 +752,7 @@ is_boolean Fail=f ac => jump Fail
%cold
%macro: is_boolean IsBoolean -fail_action
-is_boolean f x
-is_boolean f y
+is_boolean f xy
%hot
is_function2 Fail=f acq Arity => jump Fail
@@ -792,24 +773,23 @@ allocate_init t I y
#################################################################
#
-# The BIFs erts_internal:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/1 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0 must be called like a function,
+# The BIF erts_internal:garbage_collect/1 must be called like a function,
# to allow it to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
-
-call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
-call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
-call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
@@ -1066,8 +1046,7 @@ i_get_hash c I d
i_get s d
%macro: self Self
-self x
-self y
+self xy
%macro: node Node
node x
@@ -1078,8 +1057,7 @@ node y
i_fast_element j x I d
i_fast_element j y I d
-i_element j x s d
-i_element j y s d
+i_element j xy s d
bif1 f b s d
bif1_body b s d
@@ -1098,8 +1076,7 @@ i_move_call c f
%macro:move_call MoveCall -arg_f -size -nonext
move_call/2
-move_call x f
-move_call y f
+move_call xy f
move S=c x==0 | call_last Ar P=f D => i_move_call_last P D S
move S x==0 | call_last Ar P=f D => move_call_last S P D
@@ -1109,8 +1086,7 @@ i_move_call_last f P c
%macro:move_call_last MoveCallLast -arg_f -nonext -pack
move_call_last/3
-move_call_last x f Q
-move_call_last y f Q
+move_call_last xy f Q
move S=c x==0 | call_only Ar P=f => i_move_call_only P S
move S=x x==0 | call_only Ar P=f => move_call_only S P
@@ -1154,8 +1130,7 @@ i_make_fun I t
%hot
%macro: is_function IsFunction -fail_action
-is_function f x
-is_function f y
+is_function f xy
is_function Fail=f c => jump Fail
func_info M F A => i_func_info u M F A
@@ -1167,8 +1142,7 @@ func_info M F A => i_func_info u M F A
%cold
bs_start_match2 Fail=f ica X Y D => jump Fail
bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
-i_bs_start_match2 x f I I d
-i_bs_start_match2 y f I I d
+i_bs_start_match2 xy f I I d
bs_save2 Reg Index => gen_bs_save(Reg, Index)
i_bs_save2 x I
@@ -1196,13 +1170,13 @@ i_bs_get_integer_32 x f I d
bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action -gen_dest
-%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action -gen_dest
-%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action -gen_dest
+%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action
+%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action
+%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action
-i_bs_get_binary_imm2 f x I I I d
-i_bs_get_binary2 f x I s I d
-i_bs_get_binary_all2 f x I I d
+i_bs_get_binary_imm2 f x I I I x
+i_bs_get_binary2 f x I s I x
+i_bs_get_binary_all2 f x I I x
i_bs_get_binary_all_reuse x f I
# Fetching float from binaries.
@@ -1211,8 +1185,8 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
-%macro: i_bs_get_float2 BsGetFloat2 -fail_action -gen_dest
-i_bs_get_float2 f x I s I d
+%macro: i_bs_get_float2 BsGetFloat2 -fail_action
+i_bs_get_float2 f x I s I x
# Miscellaneous
@@ -1223,8 +1197,7 @@ bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \
i_bs_skip_bits_imm2 f x I
%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action
-i_bs_skip_bits2 f x x I
-i_bs_skip_bits2 f x y I
+i_bs_skip_bits2 f x xy I
%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action
i_bs_skip_bits_all2 f x I
@@ -1289,8 +1262,7 @@ bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \
bs_init2 Fail Sz Words Regs Flags Dst => \
i_bs_init_fail_heap Sz Words Fail Regs Dst
-i_bs_init_fail x j I d
-i_bs_init_fail y j I d
+i_bs_init_fail xy j I d
i_bs_init_fail_heap s I j I d
@@ -1311,8 +1283,7 @@ bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \
bs_init_bits Fail Sz Words Regs Flags Dst => \
i_bs_init_bits_fail_heap Sz Words Fail Regs Dst
-i_bs_init_bits_fail x j I d
-i_bs_init_bits_fail y j I d
+i_bs_init_bits_fail xy j I d
i_bs_init_bits_fail_heap s I j I d
@@ -1480,8 +1451,7 @@ is_map Fail Lit=q | literal_is_map(Lit) =>
is_map Fail cq => jump Fail
%macro: is_map IsMap -fail_action
-is_map f x
-is_map f y
+is_map f xy
## Transform has_map_fields #{ K1 := _, K2 := _ } to has_map_elements
@@ -1501,16 +1471,10 @@ i_get_map_element Fail Src=xy Key=y Dst => \
move Key x | i_get_map_element Fail Src x Dst
%macro: i_get_map_element_hash GetMapElementHash -fail_action
-i_get_map_element_hash f x c I x
-i_get_map_element_hash f y c I x
-i_get_map_element_hash f x c I y
-i_get_map_element_hash f y c I y
+i_get_map_element_hash f xy c I xy
%macro: i_get_map_element GetMapElement -fail_action
-i_get_map_element f x x x
-i_get_map_element f y x x
-i_get_map_element f x x y
-i_get_map_element f y x y
+i_get_map_element f xy x xy
#
# Convert the plus operations to a generic plus instruction.
@@ -1576,12 +1540,9 @@ gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \
gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
-i_increment r I I d
-i_increment x I I d
-i_increment y I I d
+i_increment rxy I I d
-i_plus j I x x d
-i_plus j I x y d
+i_plus j I x xy d
i_plus j I s s d
i_minus j I x x d
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index acda51c9fc..bf3267cff1 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -145,7 +145,8 @@ void init_register_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
@@ -272,13 +273,13 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
int ix;
HashBucket* b;
#ifdef ERTS_SMP
- ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
-#endif
-
+ ErtsProcLocks c_p_locks = 0;
+ if (c_p) {
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ }
reg_safe_read_lock(c_p, &c_p_locks);
+
if (c_p && !c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#endif
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 30b26a7296..527c9efeca 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -155,7 +155,8 @@ int safe_hash_table_sz(SafeHash *h)
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
*/
-SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
+ int size, SafeHashFunctions fun)
{
int i, bytes;
@@ -170,7 +171,8 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
erts_smp_atomic_init_nob(&h->is_rehashing, 0);
erts_smp_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL,
+ flags);
}
return h;
}
@@ -273,5 +275,22 @@ void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_ar
}
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
+ int i;
+
+ for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) {
+ erts_smp_mtx_t *lock = &h->lock_vec[i].mtx;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL,
+ ERTS_LOCK_TYPE_MUTEX | flags);
+ } else {
+ erts_lcnt_uninstall(&lock->lcnt);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index 285103cb17..dde48a6de8 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -28,6 +28,7 @@
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_lock_flags.h"
typedef unsigned long SafeHashValue;
@@ -85,7 +86,7 @@ typedef struct
/* A: Lockless atomics */
} SafeHash;
-SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, erts_lock_flags_t, int, SafeHashFunctions);
void safe_hash_get_info(SafeHashInfo*, SafeHash*);
int safe_hash_table_sz(SafeHash *);
@@ -96,5 +97,9 @@ void* safe_hash_erase(SafeHash*, void*);
void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
+#endif
+
#endif /* __SAFE_HASH_H__ */
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index d49edad6dc..b6c77794d2 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -99,6 +99,12 @@
#define ErtsContainerStruct(ptr, type, member) \
((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
+/* Use this variant when the member is an array */
+#define ErtsContainerStruct_(ptr, type, memberv) \
+ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv)))
+
+#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member)
+
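As an illustrative sketch (the struct and pointer names here are
hypothetical), the array variant recovers the enclosing struct from a
pointer to an array member, and ErtsSizeofMember sizes a member
without needing an instance:

    typedef struct { int id; char tag[8]; } Thing;

    char *p = thing_ptr->tag;                       /* points at the array member */
    Thing *t = ErtsContainerStruct_(p, Thing, tag); /* back to the Thing          */
    size_t n = ErtsSizeofMember(Thing, tag);        /* == 8                       */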
#if defined (__WIN32__)
# include "erl_win_sys.h"
#else
@@ -481,18 +487,10 @@ extern volatile int erts_break_requested;
void erts_do_break_handling(void);
#endif
-#ifdef ERTS_WANT_GOT_SIGUSR1
-# ifndef UNIX
-# define ERTS_GOT_SIGUSR1 0
-# else
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
-# else
-extern volatile int erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
-# endif
-# endif
+#if !defined(ERTS_SMP) && !defined(__WIN32__)
+extern volatile Uint erts_signal_state;
+#define ERTS_SIGNAL_STATE erts_signal_state
+void erts_handle_signal_state(void);
#endif
#ifdef ERTS_SMP
@@ -604,6 +602,8 @@ __decl_noreturn void __noreturn erts_exit(int n, char*, ...);
Eterm erts_check_io_info(void *p);
+UWord erts_sys_get_page_size(void);
+
/* Size of misc memory allocated from system dependent code */
Uint erts_sys_misc_mem_sz(void);
@@ -784,10 +784,10 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff);
-void wall_clock_elapsed_time_both(UWord *ms_total,
- UWord *ms_diff);
+void elapsed_time_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff);
+void wall_clock_elapsed_time_both(ErtsMonotonicTime *ms_total,
+ ErtsMonotonicTime *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
@@ -901,6 +901,9 @@ typedef erts_atomic_t erts_refc_t;
ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
@@ -934,6 +937,30 @@ erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
}
ERTS_GLB_INLINE erts_aint_t
+erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_atomic_cmpxchg_nob((erts_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
+}
+
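A hedged usage sketch (the refc field is illustrative): the CAS loop
lets a caller take a new reference only if the count has not already
reached some terminal value, typically zero:

    /* Try to grab a reference unless the object is already dying. */
    if (erts_refc_inc_unless(&bp->intern.refc, 0, 1) == 0) {
        /* Count was zero: we lost the race and must not revive it. */
    }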
+ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
@@ -1006,6 +1033,9 @@ typedef erts_smp_atomic_t erts_smp_refc_t;
ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inc_unless(erts_smp_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val);
@@ -1039,6 +1069,31 @@ erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val)
}
ERTS_GLB_INLINE erts_aint_t
+erts_smp_refc_inc_unless(erts_smp_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_smp_atomic_cmpxchg_nob((erts_smp_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
+}
+
+
+ERTS_GLB_INLINE erts_aint_t
erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val)
{
erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 6f15082130..a3069e419a 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,57 +17,157 @@
*
* %CopyrightEnd%
*/
-
+
/*
- * TIMING WHEEL
+ * TIMER WHEEL
+ *
+ *
+ * The time scale used for timers is Erlang monotonic time. The
+ * time unit used is ERTS-specific clock ticks. A clock tick is
+ * currently defined as 1 millisecond. That is, the resolution of
+ * timers triggered by the runtime system is 1 millisecond.
*
- * Timeouts kept in an wheel. A timeout is measured relative to the
- * current slot (tiw_pos) in the wheel, and inserted at slot
- * (tiw_pos + timeout) % TIW_SIZE. Each timeout also has a count
- * equal to timeout/TIW_SIZE, which is needed since the time axis
- * is wrapped arount the wheel.
+ * When a timer is set, it is determined at what Erlang monotonic
+ * time, in clock ticks, it should be triggered.
*
- * Several slots may be processed in one operation. If the number of
- * slots is greater that the wheel size, the wheel is only traversed
- * once,
+ * The 'pos' field of the wheel corresponds to the current time of
+ * the wheel. That is, it corresponds to Erlang monotonic time in
+ * clock tick time unit. The 'pos' field of the wheel is
+ * monotonically increased when erts_bump_timers() is called. All
+ * timers in the wheel that have a time less than or equal to
+ * 'pos' are triggered by the bump operation. The bump operation
+ * may however be spread over multiple calls to erts_bump_timers()
+ * if there are a lot of timers to trigger.
*
- * The following example shows a time axis where there is one timeout
- * at each "tick", and where 1, 2, 3 ... wheel slots are released in
- * one operation. The notation "<x" means "release all items with
- * counts less than x".
+ * Each scheduler thread maintains its own timer wheel. The timer
+ * wheel of a scheduler, however, actually consists of two wheels:
+ * a soon wheel and a later wheel.
+ *
+ *
+ * -- The Soon Wheel --
+ *
+ * The soon wheel contains timers that are soon to be triggered.
+ * Each slot in the soon
+ * wheel is 1 clock tick wide. The number of slots in the soon
+ * wheel is currently 2¹⁴. That is, it contains timers in the
+ * range ('pos', 'pos' + 2¹⁴] which corresponds to a bit more
+ * than 16 seconds.
+ *
+ * When the bump operation is started, 'pos' is moved forward to a
+ * position that corresponds to the current Erlang monotonic time. Then
+ * all timers that are in the range (old 'pos', new 'pos'] are
+ * triggered. During a bump operation, the soon wheel may contain
+ * timers in the two, possibly overlapping, ranges (old 'pos',
+ * old 'pos' + 2¹⁴], and (new 'pos', new 'pos' + 2¹⁴]. This may
+ * occur even if the bump operation doesn't yield, due to timeout
+ * callbacks inserting new timers.
+ *
+ *
+ * -- The Later Wheel --
+ *
+ * The later wheel contains timers that are further away from 'pos'
+ * than the width of the soon timer wheel. That is, currently
+ * timers further away from 'pos' than 2¹⁴ clock ticks. The width
+ * of each slot in the later wheel is half the width of the soon
+ * wheel. That is, each slot is currently 2¹³ clock ticks wide
+ * which corresponds to about 8 seconds. If three timers of the
+ * times 'pos' + 17000, 'pos' + 18000, and 'pos' + 19000 are
+ * inserted, they will all end up in the same slot in the later
+ * wheel.
+ *
+ * The number of slots in the later wheel is currently the same as
+ * in the soon wheel, i.e. 2¹⁴. That is, one revolution of the later
+ * wheel currently corresponds to 2¹⁴×2¹³ clock ticks which is
+ * almost 37 ½ hours. Timers even further away than that are put in
+ * the later slot identified by their time modulo the size of the later
+ * wheel. Such timers are however very uncommon. Most timers used
+ * by the runtime system will utilize the high level timer API.
+ * The high level timer implementation will not insert timers
+ * further away then one revolution into the later wheel. It will
+ * instead keep such timers in a tree of very long timers. The
+ * high level timer implementation utilize one timer wheel timer
+ * for the management of this tree of timers. This timer is set to
+ * the closest timeout in the tree. This timer may however be
+ * further away than one revolution in the later wheel.
+ *
+ * The 'later.pos' field identifies the next position in the later
+ * wheel. 'later.pos' is always increased by the width of a later
+ * wheel slot, that is, currently by 2¹³ clock ticks. When 'pos' is
+ * moved (during a bump operation) closer to 'later.pos' than the
+ * width of a later wheel slot, i.e. currently when
+ * 'pos' + 2¹³ ≥ 'later.pos', we inspect the slot identified by
+ * 'later.pos' and then move 'later.pos' forward. When inspecting
+ * the later slot we move all timers in the slot that are in the
+ * soon wheel range from the later wheel to the soon wheel. Timers
+ * one or more revolutions of the later wheel away are kept in the
+ * slot.
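+ *
+ * For example (illustrative numbers): if 'later.pos' currently
+ * is 16×2¹³, the slot it identifies will be inspected, and
+ * 'later.pos' will be moved forward to 17×2¹³, once a bump
+ * operation moves 'pos' up to 15×2¹³ or beyond, since at that
+ * point 'pos' + 2¹³ ≥ 'later.pos'.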
+ *
+ * During normal operation, timers originally located in the later
+ * wheel will currently be moved into the soon wheel about 8 to
+ * 16 seconds before they should be triggered. During extremely
+ * heavy load, the scheduler might however be heavily delayed, so
+ * the code must be prepared for situations where the time for
+ * triggering the timer has already passed when we inspect the
+ * later wheel slot, and in that case trigger the timer
+ * immediately. We must also be prepared to inspect multiple later
+ * wheel slots at once due to the delay.
+ *
+ *
+ * -- Slot Management --
+ *
+ * All timers of a slot are placed in a circular double linked
+ * list. This makes insertion and removal of a timer O(1).
+ *
+ * While bumping timers in a slot, we move the circular list
+ * away from the slot, and refer to it from the 'sentinel'
+ * field. The list will stay there until we are done with it
+ * even if the bump operation should yield. The cancel operation
+ * can remove the timer from this position as well as from the
+ * slot position by just removing it from the circular double
+ * linked list that it is in.
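+ *
+ * For example, unlinking a timer 'p' is the usual constant
+ * time circular list operation, no matter whether a wheel
+ * slot or the 'sentinel' field currently refers to the list
+ * that 'p' is in:
+ *
+ *   p->prev->next = p->next;
+ *   p->next->prev = p->prev;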
+ *
+ * -- At Once Slot --
+ *
+ * If a timer is set that has a time earlier than or equal to
+ * 'pos', it is not inserted into the wheel. It is instead inserted
+ * into a circular double linked list referred to by the "at
+ * once" slot. When the bump operation is performed, these timers
+ * will be triggered at once. The circular list of the slot will
+ * be moved to the 'sentinel' field while bumping these timers,
+ * as when bumping an ordinary wheel slot. A yielding bump
+ * operation and cancellation of timers are handled the same way
+ * as if the timer were in a wheel slot.
+ *
+ * -- Searching for Next Timeout --
+ *
+ * In order to limit the amount of work needed to find the next
+ * timeout, we keep track of the total number of timers in the
+ * wheels, the number of timers in the later wheel, the number of
+ * timers in the soon wheel, and the number of timers in each
+ * range of slots. Each slot range currently contains 512 slots.
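+ *
+ * That is, with the current sizes there are
+ * (2¹⁴ + 2¹⁴) / 2⁹ = 64 slot range counters, covering the soon
+ * and the later wheel together, each counter guarding 512
+ * slots.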
+ *
+ * When the next timeout is less than the soon wheel width away,
+ * we determine the exact timeout. Thanks to the timer counts of
+ * the slot ranges, we currently need to search at most 1024 slots
+ * in the soon wheel. This is besides inspecting slot range counts
+ * and two slots in the later wheel which potentially might trigger
+ * timeouts for moving timers from the later wheel to the soon wheel
+ * earlier than timeouts in the soon wheel. We also keep track
+ * of the latest known minimum timeout position in each wheel,
+ * which makes it possible to avoid scanning from the current
+ * position each time.
+ *
+ * When the next timeout is further away than the soon wheel width,
+ * we settle for the earliest possible timeout in the first
+ * non-empty slot range. The further away the next timeout is, the
+ * more likely it is that the next timeout changes before we
+ * actually get there; that is, another timer is set to an earlier
+ * time and/or the timer is cancelled. In this case there is
+ * therefore no point in determining the next timeout exactly. If
+ * the state does not change, we will wake up a bit early,
+ * recalculate the next timeout, and eventually be so close to it
+ * that we determine it exactly.
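+ *
+ * For example (illustrative): if everything up to the first
+ * non-empty slot range is known to be empty, we simply report
+ * the start of that range as the next timeout; when we wake up
+ * there we recalculate, and once the real timeout is close
+ * enough we pin it down exactly.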
*
- * Size of wheel: 4
- *
- * --|----|----|----|----|----|----|----|----|----|----|----|----|----
- * 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0 2.1 2.2 2.3 3.0
- *
- * 1 [ )
- * <1 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0 2.1 2.2 2.3 2.0
- *
- * 2 [ )
- * <1 <1 0.2 0.3 0.0 0.1 1.2 1.3 1.0 1.1 2.2 2.3 2.0
- *
- * 3 [ )
- * <1 <1 <1 0.3 0.0 0.1 0.2 1.3 1.0 1.1 1.2 2.3 2.0
- *
- * 4 [ )
- * <1 <1 <1 <1 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0
- *
- * 5 [ )
- * <2 <1 <1 <1. 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0
- *
- * 6 [ )
- * <2 <2 <1 <1. 0.2 0.3 0.0 0.1 1.2 1.3 1.0
- *
- * 7 [ )
- * <2 <2 <2 <1. 0.3 0.0 0.1 0.2 1.3 1.0
- *
- * 8 [ )
- * <2 <2 <2 <2. 0.0 0.1 0.2 0.3 1.0
- *
- * 9 [ )
- * <3 <2 <2 <2. 0.1 0.2 0.3 0.0
- *
*/
#ifdef HAVE_CONFIG_H
@@ -80,8 +180,11 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
-#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
-#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
+#define ERTS_MAX_CLKTCKS \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_TIME_MAX)
+
+#define ERTS_CLKTCKS_WEEK \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_SEC_TO_MONOTONIC(7*60*60*24))
#ifdef ERTS_ENABLE_LOCK_CHECK
#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
@@ -90,6 +193,10 @@
#endif
#if 0
+# define ERTS_TW_HARD_DEBUG
+#endif
+
+#if defined(ERTS_TW_HARD_DEBUG) && !defined(ERTS_TW_DEBUG)
# define ERTS_TW_DEBUG
#endif
#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
@@ -97,16 +204,62 @@
#endif
#undef ERTS_TW_ASSERT
-#if defined(ERTS_TW_DEBUG)
+#if defined(ERTS_TW_DEBUG)
# define ERTS_TW_ASSERT(E) ERTS_ASSERT(E)
#else
# define ERTS_TW_ASSERT(E) ((void) 1)
#endif
#ifdef ERTS_TW_DEBUG
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 5
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 500
#else
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 100
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 10000
+#endif
+#define ERTS_TW_COST_SLOT 1
+#define ERTS_TW_COST_SLOT_MOVE 5
+#define ERTS_TW_COST_TIMEOUT 100
+
+/*
+ * Every slot in the soon wheel is a clock tick (as defined
+ * by ERTS) wide. A clock tick is currently 1 millisecond.
+ */
+
+#define ERTS_TW_SOON_WHEEL_FIRST_SLOT 0
+#define ERTS_TW_SOON_WHEEL_END_SLOT \
+ (ERTS_TW_SOON_WHEEL_FIRST_SLOT + ERTS_TW_SOON_WHEEL_SIZE)
+
+#define ERTS_TW_SOON_WHEEL_MASK (ERTS_TW_SOON_WHEEL_SIZE-1)
+
+/*
+ * Every slot in the later wheel is as wide as half the size
+ * of the soon wheel.
+ */
+
+#define ERTS_TW_LATER_WHEEL_SHIFT (ERTS_TW_SOON_WHEEL_BITS - 1)
+#define ERTS_TW_LATER_WHEEL_SLOT_SIZE \
+ ((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT))
+#define ERTS_TW_LATER_WHEEL_POS_MASK \
+ (~((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT)-1))
+
+#define ERTS_TW_LATER_WHEEL_FIRST_SLOT ERTS_TW_SOON_WHEEL_SIZE
+#define ERTS_TW_LATER_WHEEL_END_SLOT \
+ (ERTS_TW_LATER_WHEEL_FIRST_SLOT + ERTS_TW_LATER_WHEEL_SIZE)
+
+#define ERTS_TW_LATER_WHEEL_MASK (ERTS_TW_LATER_WHEEL_SIZE-1)
+
+#define ERTS_TW_SCNT_BITS 9
+#define ERTS_TW_SCNT_SHIFT
+#define ERTS_TW_SCNT_SIZE \
+ ((ERTS_TW_SOON_WHEEL_SIZE + ERTS_TW_LATER_WHEEL_SIZE) \
+ >> ERTS_TW_SCNT_BITS)
+
+#ifdef __GNUC__
+#if ERTS_TW_SOON_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger soon timer wheel
+#endif
+#if ERTS_TW_LATER_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger later timer wheel
+#endif
#endif
/* Actual interval time chosen by sys_init_time() */
@@ -119,95 +272,360 @@ static int tiw_itime; /* Constant after init */
# define TIW_ITIME tiw_itime
#endif
+const int etp_tw_soon_wheel_size = ERTS_TW_SOON_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_soon_wheel_mask = ERTS_TW_SOON_WHEEL_MASK;
+const int etp_tw_soon_wheel_first_slot = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+
+const int etp_tw_later_wheel_size = ERTS_TW_LATER_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_later_wheel_slot_size = ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+const int etp_tw_later_wheel_shift = ERTS_TW_LATER_WHEEL_SHIFT;
+const ErtsMonotonicTime etp_tw_later_wheel_mask = ERTS_TW_LATER_WHEEL_MASK;
+const ErtsMonotonicTime etp_tw_later_wheel_pos_mask = ERTS_TW_LATER_WHEEL_POS_MASK;
+const int etp_tw_later_wheel_first_slot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
struct ErtsTimerWheel_ {
- ErtsTWheelTimer *w[ERTS_TIW_SIZE];
+ ErtsTWheelTimer *slots[1 /* At Once Slot */
+ + ERTS_TW_SOON_WHEEL_SIZE /* Soon Wheel Slots */
+ + ERTS_TW_LATER_WHEEL_SIZE]; /* Later Wheel Slots */
+ ErtsTWheelTimer **w;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ Sint bump_scnt[ERTS_TW_SCNT_SIZE];
ErtsMonotonicTime pos;
Uint nto;
struct {
- ErtsTWheelTimer *head;
- ErtsTWheelTimer *tail;
Uint nto;
} at_once;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ Uint nto;
+ } soon;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ int min_tpos_slot;
+ ErtsMonotonicTime pos;
+ Uint nto;
+ } later;
int yield_slot;
int yield_slots_left;
- int yield_start_pos;
ErtsTWheelTimer sentinel;
int true_next_timeout_time;
+ ErtsMonotonicTime next_timeout_pos;
ErtsMonotonicTime next_timeout_time;
};
-static ERTS_INLINE ErtsMonotonicTime
-find_next_timeout(ErtsSchedulerData *esdp,
- ErtsTimerWheel *tiw,
- int search_all,
- ErtsMonotonicTime curr_time, /* When !search_all */
- ErtsMonotonicTime max_search_time) /* When !search_all */
+#define ERTS_TW_SLOT_AT_ONCE (-1)
+
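+/*
+ * True when 'pos' has caught up close enough to 'later.pos' that
+ * the next later wheel slot needs to be inspected; see the later
+ * wheel description at the top of this file.
+ */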
+#define ERTS_TW_BUMP_LATER_WHEEL(TIW) \
+    ((TIW)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
+
+static int bump_later_wheel(ErtsTimerWheel *tiw, int *yield_count_p);
+
+#ifdef ERTS_TW_DEBUG
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_soon_slots((TIW), (TO_POS))
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_later_slots((TIW), (TO_POS))
+void dbg_verify_empty_soon_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+void dbg_verify_empty_later_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+#else
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS)
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS)
+#endif
+
+static ERTS_INLINE int
+scnt_get_ix(int slot)
{
- int start_ix, tiw_pos_ix;
- ErtsTWheelTimer *p;
+ return slot >> ERTS_TW_SCNT_BITS;
+}
+
+static ERTS_INLINE void
+scnt_inc(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]++;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static ERTS_INLINE void
+scnt_ix_inc(Sint *scnt, int six)
+{
+ scnt[six]++;
+}
+
+#endif
+
+static ERTS_INLINE void
+scnt_dec(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]--;
+ ERTS_TW_ASSERT(scnt[slot >> ERTS_TW_SCNT_BITS] >= 0);
+}
+
+static ERTS_INLINE void
+scnt_ix_dec(Sint *scnt, int six)
+{
+ scnt[six]--;
+ ERTS_TW_ASSERT(scnt[six] >= 0);
+}
+
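+/*
+ * Advance '*slotp' to the next slot, skipping over whole slot
+ * ranges whose counters in 'scnt' are zero, and wrapping from
+ * 'end_slot' back to 'first_slot'. '*leftp' is decremented by the
+ * number of slots advanced, '*posp' (when non-NULL) is advanced
+ * by 'slot_sz' clock ticks per slot, and '*sixp' (when non-NULL)
+ * is set to the slot range index of the new slot.
+ */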
+static ERTS_INLINE void
+scnt_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt, int first_slot,
+ int end_slot, ErtsMonotonicTime slot_sz)
+{
+ int slot = *slotp;
+ int left = *leftp;
+ int ix;
+
+ ERTS_TW_ASSERT(*leftp >= 0);
+
+ left--;
+ slot++;
+ if (slot == end_slot)
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+
+ while (!scnt[ix] && left > 0) {
+ int diff, old_slot = slot;
+ ix++;
+ slot = (ix << ERTS_TW_SCNT_BITS);
+ diff = slot - old_slot;
+ if (left < diff) {
+ slot = old_slot + left;
+ diff = left;
+ }
+ if (slot < end_slot)
+ left -= diff;
+ else {
+ left -= end_slot - old_slot;
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+ }
+ }
+
+ ERTS_TW_ASSERT(left >= -1);
+
+ if (posp)
+ *posp += slot_sz * ((ErtsMonotonicTime) (*leftp - left));
+ if (sixp)
+ *sixp = slot >> ERTS_TW_SCNT_BITS;
+ *leftp = left;
+ *slotp = slot;
+}
+
+
+static ERTS_INLINE void
+scnt_soon_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_SOON_WHEEL_FIRST_SLOT,
+ ERTS_TW_SOON_WHEEL_END_SLOT, 1);
+}
+
+static ERTS_INLINE void
+scnt_later_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_LATER_WHEEL_FIRST_SLOT,
+ ERTS_TW_LATER_WHEEL_END_SLOT,
+ ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+}
+
+
+static ERTS_INLINE int
+soon_slot(ErtsMonotonicTime soon_pos)
+{
+ ErtsMonotonicTime slot = soon_pos;
+ slot &= ERTS_TW_SOON_WHEEL_MASK;
+
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+static ERTS_INLINE int
+later_slot(ErtsMonotonicTime later_pos)
+{
+ ErtsMonotonicTime slot = later_pos;
+ slot >>= ERTS_TW_LATER_WHEEL_SHIFT;
+ slot &= ERTS_TW_LATER_WHEEL_MASK;
+ slot += ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
+ ERTS_TW_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS) \
+ hrd_dbg_check_wheels((TIW), (CHK_MIN_TPOS))
+static void hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos);
+#else
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS)
+#endif
+
+static ErtsMonotonicTime
+find_next_timeout(ErtsSchedulerData *esdp, ErtsTimerWheel *tiw)
+{
+ int slot, slots;
int true_min_timeout = 0;
- ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos;
+ ErtsMonotonicTime min_timeout_pos;
+
+ ERTS_TW_ASSERT(tiw->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE < tiw->later.pos
+ && tiw->later.pos <= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ ERTS_TW_ASSERT(tiw->yield_slot == ERTS_TW_SLOT_INACTIVE);
if (tiw->nto == 0) { /* no timeouts in wheel */
- if (!search_all)
- min_timeout_pos = tiw->pos;
- else {
- curr_time = erts_get_monotonic_time(esdp);
- tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
- }
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- goto found_next;
+ ErtsMonotonicTime curr_time = erts_get_monotonic_time(esdp);
+ tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->later.pos = min_timeout_pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ min_timeout_pos += ERTS_CLKTCKS_WEEK;
+ goto done;
}
- slot_timeout_pos = min_timeout_pos = tiw->pos;
- if (search_all)
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- else
- min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
+ ERTS_TW_ASSERT(tiw->soon.nto || tiw->later.nto);
- start_ix = tiw_pos_ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
+ if (!tiw->soon.nto) {
+ ErtsMonotonicTime tpos, min_tpos;
- do {
- if (++slot_timeout_pos >= min_timeout_pos)
- break;
-
- p = tiw->w[tiw_pos_ix];
-
- if (p) {
- ErtsTWheelTimer *end = p;
-
- do {
- ErtsMonotonicTime timeout_pos;
- timeout_pos = p->timeout_pos;
- if (min_timeout_pos > timeout_pos) {
- true_min_timeout = 1;
- min_timeout_pos = timeout_pos;
- if (min_timeout_pos <= slot_timeout_pos)
- goto found_next;
- }
- p = p->next;
- } while (p != end);
- }
+ /* Search later wheel... */
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ if (min_tpos <= tiw->later.pos) {
+ tpos = tiw->later.pos;
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = min_tpos - tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp >= ERTS_TW_LATER_WHEEL_SIZE) {
+ /* Timeout more than one revolution ahead... */
+
+ /* Pre-timeout for move from later to soon wheel... */
+ min_timeout_pos = min_tpos - ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ goto done;
+ }
+ tpos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, min_tpos);
+ slots = ERTS_TW_LATER_WHEEL_SIZE - ((int) tmp);
+ }
+
+ slot = later_slot(tpos);
+
+ /*
+ * We never search for an exact timeout in the
+ * later wheel, but instead settle for the first
+ * scnt range used.
+ */
+ if (tiw->w[slot])
+ true_min_timeout = 1;
+ else
+ scnt_later_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ ERTS_TW_ASSERT(slot == later_slot(tpos));
+
+ /* Pre-timeout for move from later to soon wheel... */
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ min_timeout_pos = tpos;
+ }
+ else {
+ ErtsMonotonicTime tpos;
+ /* Search soon wheel... */
+
+ min_timeout_pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+
+	/*
+	 * Besides inspecting the soon wheel, we may
+	 * also have to inspect two slots in the later
+	 * wheel which potentially can trigger timeouts
+	 * before the timeouts in the soon wheel...
+	 */
+ if (tiw->later.min_tpos > (tiw->later.pos
+ + 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE)) {
+	    ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(
+		tiw, tiw->later.pos + 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ }
+ else {
+ int fslot;
+ tpos = tiw->later.pos;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ fslot = later_slot(tiw->later.pos);
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ else {
+ tpos += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos < min_timeout_pos) {
+ fslot++;
+ if (fslot == ERTS_TW_LATER_WHEEL_END_SLOT)
+ fslot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ }
+ }
+ }
+
+ if (tiw->soon.min_tpos <= tiw->pos) {
+ tpos = tiw->pos;
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = tiw->soon.min_tpos - tiw->pos;
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_SIZE > tmp);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, tiw->soon.min_tpos);
+ slots = ERTS_TW_SOON_WHEEL_SIZE - ((int) tmp);
+ tpos = tiw->soon.min_tpos;
+ }
+
+ slot = soon_slot(tpos);
+
+ /* find next non-empty slot */
+ while (tpos < min_timeout_pos) {
+ if (tiw->w[slot]) {
+ ERTS_TW_ASSERT(tiw->w[slot]->timeout_pos == tpos);
+ min_timeout_pos = tpos;
+ break;
+ }
+ scnt_soon_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+ }
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- } while (start_ix != tiw_pos_ix);
+ tiw->soon.min_tpos = min_timeout_pos;
+ true_min_timeout = 1;
+ }
+
+done: {
+ ErtsMonotonicTime min_timeout;
-found_next:
+ min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
+ tiw->next_timeout_pos = min_timeout_pos;
+ tiw->next_timeout_time = min_timeout;
+ tiw->true_next_timeout_time = true_min_timeout;
- min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
- tiw->next_timeout_time = min_timeout;
- tiw->true_next_timeout_time = true_min_timeout;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 1);
- return min_timeout;
+ return min_timeout;
+ }
}
static ERTS_INLINE void
insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
{
- ERTS_TW_ASSERT(slot >= 0);
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
p->slot = slot;
if (!tiw->w[slot]) {
tiw->w[slot] = p;
@@ -223,55 +641,89 @@ insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
prev->next = p;
next->prev = p;
}
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ tiw->at_once.nto++;
+ else {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->soon.nto++;
+ if (tiw->soon.min_tpos > tpos)
+ tiw->soon.min_tpos = tpos;
+ }
+ else {
+ ERTS_TW_ASSERT(p->timeout_pos >= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->later.nto++;
+ if (tiw->later.min_tpos > tpos) {
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ }
+ }
+ scnt_inc(tiw->scnt, slot);
+ }
}
static ERTS_INLINE void
remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
int slot = p->slot;
- ERTS_TW_ASSERT(slot != ERTS_TWHEEL_SLOT_INACTIVE);
-
- if (slot >= 0) {
- /*
- * Timer in wheel or in circular
- * list of timers currently beeing
- * triggered (referred by sentinel).
- */
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
-
- if (p->next == p) {
- ERTS_TW_ASSERT(tiw->w[slot] == p);
- tiw->w[slot] = NULL;
- }
- else {
- if (tiw->w[slot] == p)
- tiw->w[slot] = p->next;
- p->prev->next = p->next;
- p->next->prev = p->prev;
- }
+ int empty_slot;
+ ERTS_TW_ASSERT(slot != ERTS_TW_SLOT_INACTIVE);
+
+    /*
+     * Timer is in a circular list referred to either
+     * by the at once slot, a slot in the soon wheel,
+     * a slot in the later wheel, or by the sentinel
+     * (timers currently being triggered).
+     */
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ if (p->next == p) {
+	/* Cannot be referred to by sentinel, i.e. must be referred to by slot... */
+ ERTS_TW_ASSERT(tiw->w[slot] == p);
+ tiw->w[slot] = NULL;
+ empty_slot = 1;
}
else {
- /* Timer in "at once" queue... */
- ERTS_TW_ASSERT(slot == ERTS_TWHEEL_SLOT_AT_ONCE);
- if (p->prev)
- p->prev->next = p->next;
- else {
- ERTS_TW_ASSERT(tiw->at_once.head == p);
- tiw->at_once.head = p->next;
- }
- if (p->next)
- p->next->prev = p->prev;
- else {
- ERTS_TW_ASSERT(tiw->at_once.tail == p);
- tiw->at_once.tail = p->prev;
- }
+ if (tiw->w[slot] == p)
+ tiw->w[slot] = p->next;
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
+ empty_slot = 0;
+ }
+ if (slot == ERTS_TW_SLOT_AT_ONCE) {
ERTS_TW_ASSERT(tiw->at_once.nto > 0);
tiw->at_once.nto--;
}
-
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
-
- tiw->nto--;
+ else {
+ scnt_dec(tiw->scnt, slot);
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && p->timeout_pos == tiw->next_timeout_pos) {
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ }
+ else {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && tiw->later.min_tpos_slot == slot) {
+ ErtsMonotonicTime tpos = tiw->later.min_tpos;
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos == tiw->next_timeout_pos)
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ }
+ }
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ErtsMonotonicTime
@@ -280,58 +732,26 @@ erts_check_next_timeout_time(ErtsSchedulerData *esdp)
ErtsTimerWheel *tiw = esdp->timer_wheel;
ErtsMonotonicTime time;
ERTS_MSACC_DECLARE_CACHE_X();
+ ERTS_TW_ASSERT(tiw->next_timeout_time
+ == ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos));
if (tiw->true_next_timeout_time)
- return tiw->next_timeout_time;
+ return tiw->next_timeout_time; /* known timeout... */
+ if (tiw->next_timeout_pos > tiw->pos + ERTS_TW_SOON_WHEEL_SIZE)
+	return tiw->next_timeout_time; /* sufficiently far away... */
ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(ERTS_MSACC_STATE_TIMERS);
- time = find_next_timeout(esdp, tiw, 1, 0, 0);
+ time = find_next_timeout(esdp, tiw);
ERTS_MSACC_POP_STATE_M_X();
return time;
}
-#ifndef ERTS_TW_DEBUG
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) ((void) 0)
-#else
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) debug_check_safe_to_skip_to((TIW), (TO))
-static void
-debug_check_safe_to_skip_to(ErtsTimerWheel *tiw, ErtsMonotonicTime skip_to_pos)
-{
- int slots, ix;
- ErtsTWheelTimer *tmr;
- ErtsMonotonicTime tmp;
-
- ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
- tmp = skip_to_pos - tiw->pos;
- ERTS_TW_ASSERT(tmp >= 0);
- if (tmp < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp;
- else
- slots = ERTS_TIW_SIZE;
-
- while (slots > 0) {
- tmr = tiw->w[ix];
- if (tmr) {
- ErtsTWheelTimer *end = tmr;
- do {
- ERTS_TW_ASSERT(tmr->timeout_pos > skip_to_pos);
- tmr = tmr->next;
- } while (tmr != end);
- }
- ix++;
- if (ix == ERTS_TIW_SIZE)
- ix = 0;
- slots--;
- }
-}
-#endif
-
static ERTS_INLINE void
timeout_timer(ErtsTWheelTimer *p)
{
ErlTimeoutProc timeout;
void *arg;
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
- timeout = p->u.func.timeout;
- arg = p->u.func.arg;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
+ timeout = p->timeout;
+ arg = p->arg;
(*timeout)(arg);
ASSERT_NO_LOCKED_LOCKS;
}
@@ -339,73 +759,108 @@ timeout_timer(ErtsTWheelTimer *p)
void
erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- int tiw_pos_ix, slots, yielded_slot_restarted, yield_count;
- ErtsMonotonicTime bump_to, tmp_slots, old_pos;
+ int slot, restarted, yield_count, slots, scnt_ix;
+ ErtsMonotonicTime bump_to;
+ Sint *scnt, *bump_scnt;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
/*
* In order to be fair we always continue with work
* where we left off when restarting after a yield.
*/
- if (tiw->yield_slot >= 0) {
- yielded_slot_restarted = 1;
- tiw_pos_ix = tiw->yield_slot;
- slots = tiw->yield_slots_left;
+ slot = tiw->yield_slot;
+ restarted = slot != ERTS_TW_SLOT_INACTIVE;
+ if (restarted) {
bump_to = tiw->pos;
- old_pos = tiw->yield_start_pos;
- goto restart_yielded_slot;
+ if (slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT)
+ goto restart_yielded_later_slot;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ goto restart_yielded_at_once_slot;
+ scnt_ix = scnt_get_ix(slot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_SOON_WHEEL_SIZE);
+ goto restart_yielded_soon_slot;
}
do {
- yielded_slot_restarted = 0;
-
+ restarted = 0;
bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
while (1) {
ErtsTWheelTimer *p;
- old_pos = tiw->pos;
-
if (tiw->nto == 0) {
empty_wheel:
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, bump_to);
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = bump_to + ERTS_CLKTCKS_WEEK;
+	    tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->pos = bump_to;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->later.pos = bump_to + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
ERTS_MSACC_POP_STATE_M_X();
return;
}
- p = tiw->at_once.head;
- while (p) {
- if (--yield_count <= 0) {
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- ERTS_MSACC_POP_STATE_M_X();
- return;
- }
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[ERTS_TW_SLOT_AT_ONCE] = NULL;
+
+ while (1) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->nto--;
+ tiw->at_once.nto--;
+
+ timeout_timer(p);
+
+ yield_count -= ERTS_TW_COST_TIMEOUT;
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->nto--;
- tiw->at_once.nto--;
- tiw->at_once.head = p->next;
- if (p->next)
- p->next->prev = NULL;
- else
- tiw->at_once.tail = NULL;
+ restart_yielded_at_once_slot:
- timeout_timer(p);
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (yield_count <= 0) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->yield_slot = ERTS_TW_SLOT_AT_ONCE;
+ ERTS_MSACC_POP_STATE_M_X();
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
- p = tiw->at_once.head;
}
if (tiw->pos >= bump_to) {
@@ -416,39 +871,66 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
if (tiw->nto == 0)
goto empty_wheel;
- if (tiw->true_next_timeout_time) {
- ErtsMonotonicTime skip_until_pos;
+	    /*
+	     * Save slot counts in a bump operation local
+	     * array.
+	     *
+	     * The number of timers to trigger (or move)
+	     * will only decrease from now until we have
+	     * completed this bump operation (even if we
+	     * yield in the middle of it).
+	     *
+	     * The number of timers in the wheels may
+	     * however increase due to timers being set
+	     * by timeout callbacks.
+	     */
+ sys_memcpy((void *) bump_scnt, (void *) scnt,
+ sizeof(Sint) * ERTS_TW_SCNT_SIZE);
+
+ if (tiw->soon.min_tpos > tiw->pos) {
+ ErtsMonotonicTime skip_until_pos = tiw->soon.min_tpos;
+
/*
* No need inspecting slots where we know no timeouts
* to trigger should reside.
*/
- skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
if (skip_until_pos > bump_to)
skip_until_pos = bump_to;
skip_until_pos--;
if (skip_until_pos > tiw->pos) {
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
-
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, skip_until_pos);
tiw->pos = skip_until_pos;
}
}
- tiw_pos_ix = (int) ((tiw->pos+1) & (ERTS_TIW_SIZE-1));
- tmp_slots = (bump_to - tiw->pos);
- if (tmp_slots < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp_slots;
- else
- slots = ERTS_TIW_SIZE;
+ {
+	    ErtsMonotonicTime tmp_slots = bump_to - tiw->pos;
+ if (tmp_slots < ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ slot = soon_slot(tiw->pos+1);
tiw->pos = bump_to;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
+
+ scnt_ix = scnt_get_ix(slot);
+
+ /* Timeout timers in soon wheel */
while (slots > 0) {
- p = tiw->w[tiw_pos_ix];
+ yield_count -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[slot];
if (p) {
+		/* timeout callbacks need tiw->pos to be up to date */
if (p->next == p) {
ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
@@ -459,22 +941,28 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->sentinel.next->prev = &tiw->sentinel;
tiw->sentinel.prev->next = &tiw->sentinel;
}
- tiw->w[tiw_pos_ix] = NULL;
+ tiw->w[slot] = NULL;
while (1) {
- if (p->timeout_pos > bump_to) {
- /* Very unusual case... */
- ++yield_count;
- insert_timer_into_slot(tiw, tiw_pos_ix, p);
- }
- else {
- /* Normal case... */
- timeout_timer(p);
- tiw->nto--;
- }
-
- restart_yielded_slot:
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= p->slot
+ && p->slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ scnt_ix_dec(scnt, scnt_ix);
+ if (p->timeout_pos <= bump_to) {
+ timeout_timer(p);
+ tiw->nto--;
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ yield_count -= ERTS_TW_COST_TIMEOUT;
+ }
+ else {
+ /* uncommon case */
+ insert_timer_into_slot(tiw, slot, p);
+ yield_count -= ERTS_TW_COST_SLOT_MOVE;
+ }
+
+ restart_yielded_soon_slot:
p = tiw->sentinel.next;
if (p == &tiw->sentinel) {
@@ -482,12 +970,9 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
break;
}
- if (--yield_count <= 0) {
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- tiw->yield_slot = tiw_pos_ix;
+ if (yield_count <= 0) {
+ tiw->yield_slot = slot;
tiw->yield_slots_left = slots;
- tiw->yield_start_pos = old_pos;
ERTS_MSACC_POP_STATE_M_X();
return; /* Yield! */
}
@@ -496,24 +981,166 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
p->next->prev = &tiw->sentinel;
}
}
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- slots--;
+
+ scnt_soon_wheel_next(&slot, &slots, NULL, &scnt_ix, bump_scnt);
}
+
+ if (ERTS_TW_BUMP_LATER_WHEEL(tiw)) {
+ restart_yielded_later_slot:
+ if (bump_later_wheel(tiw, &yield_count))
+ return; /* Yield! */
+ }
}
- } while (yielded_slot_restarted);
+ } while (restarted);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ ERTS_TW_ASSERT(tiw->next_timeout_pos == bump_to);
- /* Search at most two seconds ahead... */
- (void) find_next_timeout(NULL, tiw, 0, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+ (void) find_next_timeout(NULL, tiw);
ERTS_MSACC_POP_STATE_M_X();
}
+static int
+bump_later_wheel(ErtsTimerWheel *tiw, int *ycount_p)
+{
+ ErtsMonotonicTime cpos = tiw->pos;
+ ErtsMonotonicTime later_pos = tiw->later.pos;
+ int ycount = *ycount_p;
+ int slots, fslot, scnt_ix;
+ Sint *scnt, *bump_scnt;
+
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ if (tiw->yield_slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ fslot = tiw->yield_slot;
+ scnt_ix = scnt_get_ix(fslot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_LATER_WHEEL_SIZE);
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ goto restart_yielded_slot;
+ }
+ else {
+ ErtsMonotonicTime end_later_pos, tmp_slots, min_tpos;
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+ end_later_pos = cpos + ERTS_TW_SOON_WHEEL_SIZE;
+ end_later_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ /* Skip known empty slots... */
+ if (min_tpos > later_pos) {
+ if (min_tpos > end_later_pos) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, end_later_pos);
+ tiw->later.pos = end_later_pos;
+ goto done;
+ }
+ later_pos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, later_pos);
+ }
+
+ tmp_slots = end_later_pos;
+ tmp_slots -= later_pos;
+ tmp_slots /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp_slots < ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+
+ fslot = later_slot(later_pos);
+ scnt_ix = scnt_get_ix(fslot);
+
+ tiw->later.pos = end_later_pos;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *p;
+
+ ycount -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[fslot];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[fslot] = NULL;
+
+ while (1) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+
+ ERTS_TW_ASSERT(p->slot == fslot);
+
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ scnt_ix_dec(scnt, scnt_ix);
+
+ if (tpos >= tiw->later.pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE) {
+ /* keep in later slot; very uncommon... */
+ insert_timer_into_slot(tiw, fslot, p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ ERTS_TW_ASSERT(tpos < cpos + ERTS_TW_SOON_WHEEL_SIZE);
+ if (tpos > cpos) {
+ /* move into soon wheel */
+ insert_timer_into_slot(tiw, soon_slot(tpos), p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ /* trigger at once */
+ timeout_timer(p);
+ tiw->nto--;
+ ycount -= ERTS_TW_COST_TIMEOUT;
+ }
+ }
+
+ restart_yielded_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (ycount < 0) {
+ tiw->yield_slot = fslot;
+ tiw->yield_slots_left = slots;
+ *ycount_p = 0;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+ return 1; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+
+ scnt_later_wheel_next(&fslot, &slots, NULL, &scnt_ix, bump_scnt);
+ }
+
+done:
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ *ycount_p = ycount;
+
+ return 0;
+}
+
Uint
erts_timer_wheel_memory_size(void)
{
@@ -526,25 +1153,51 @@ erts_create_timer_wheel(ErtsSchedulerData *esdp)
ErtsMonotonicTime mtime;
int i;
ErtsTimerWheel *tiw;
+
+ /* Some compile time sanity checks... */
+
+ /* Slots... */
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE == -1);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_INACTIVE < ERTS_TW_SLOT_AT_ONCE);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE + 1 == ERTS_TW_SOON_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT < ERTS_TW_SOON_WHEEL_END_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_END_SLOT == ERTS_TW_LATER_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+    /* Both wheel sizes should be powers of 2 */
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_SIZE
+ && !(ERTS_TW_SOON_WHEEL_SIZE & (ERTS_TW_SOON_WHEEL_SIZE-1)));
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_SIZE
+ && !(ERTS_TW_LATER_WHEEL_SIZE & (ERTS_TW_LATER_WHEEL_SIZE-1)));
+
tiw = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_WHEEL,
sizeof(ErtsTimerWheel));
- for(i = 0; i < ERTS_TIW_SIZE; i++)
+ tiw->w = &tiw->slots[1];
+ for(i = ERTS_TW_SLOT_AT_ONCE; i < ERTS_TW_LATER_WHEEL_END_SLOT; i++)
tiw->w[i] = NULL;
+ for (i = 0; i < ERTS_TW_SCNT_SIZE; i++)
+ tiw->scnt[i] = 0;
+
mtime = erts_get_monotonic_time(esdp);
tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
tiw->nto = 0;
- tiw->at_once.head = NULL;
- tiw->at_once.tail = NULL;
tiw->at_once.nto = 0;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->soon.nto = 0;
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ tiw->later.pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->later.nto = 0;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = tiw->pos + ERTS_CLKTCKS_WEEK;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->sentinel.next = &tiw->sentinel;
tiw->sentinel.prev = &tiw->sentinel;
- tiw->sentinel.u.func.timeout = NULL;
- tiw->sentinel.u.func.cancel = NULL;
- tiw->sentinel.u.func.arg = NULL;
+ tiw->sentinel.timeout = NULL;
+ tiw->sentinel.arg = NULL;
return tiw;
}
@@ -577,53 +1230,56 @@ erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
void
erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos)
+ void *arg, ErtsMonotonicTime timeout_pos)
{
- ErtsMonotonicTime timeout_time;
+ int slot;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
- p->u.func.timeout = timeout;
- p->u.func.cancel = cancel;
- p->u.func.arg = arg;
+ p->timeout = timeout;
+ p->arg = arg;
+
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_INACTIVE);
- ERTS_TW_ASSERT(p->slot == ERTS_TWHEEL_SLOT_INACTIVE);
+ tiw->nto++;
+ /* calculate slot */
if (timeout_pos <= tiw->pos) {
- tiw->nto++;
- tiw->at_once.nto++;
- p->next = NULL;
- p->prev = tiw->at_once.tail;
- if (tiw->at_once.tail) {
- ERTS_TW_ASSERT(tiw->at_once.head);
- tiw->at_once.tail->next = p;
- }
- else {
- ERTS_TW_ASSERT(!tiw->at_once.head);
- tiw->at_once.head = p;
- }
- tiw->at_once.tail = p;
- p->timeout_pos = tiw->pos;
- p->slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->pos);
+ /* at once */
+ p->timeout_pos = timeout_pos = tiw->pos;
+ slot = ERTS_TW_SLOT_AT_ONCE;
+ }
+ else if (timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE) {
+ /* soon wheel */
+ p->timeout_pos = timeout_pos;
+ slot = soon_slot(timeout_pos);
+ if (tiw->soon.min_tpos > timeout_pos)
+ tiw->soon.min_tpos = timeout_pos;
}
else {
- int slot;
-
- /* calculate slot */
- slot = (int) (timeout_pos & (ERTS_TIW_SIZE-1));
-
- insert_timer_into_slot(tiw, slot, p);
-
- tiw->nto++;
-
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
- p->timeout_pos = timeout_pos;
+ /* later wheel */
+ p->timeout_pos = timeout_pos;
+ slot = later_slot(timeout_pos);
+
+	/*
+	 * The next timeout due to this timer
+	 * should come in good time before the
+	 * actual timeout (one later wheel slot
+	 * size earlier), in order to move it
+	 * from the later wheel to the soon
+	 * wheel.
+	 */
+ timeout_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ timeout_pos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
}
- if (timeout_time < tiw->next_timeout_time) {
+ insert_timer_into_slot(tiw, slot, p);
+
+ if (timeout_pos <= tiw->next_timeout_pos) {
tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = timeout_time;
+ if (timeout_pos < tiw->next_timeout_pos) {
+ tiw->next_timeout_pos = timeout_pos;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ }
}
ERTS_MSACC_POP_STATE_M_X();
}
@@ -631,15 +1287,10 @@ erts_twheel_set_timer(ErtsTimerWheel *tiw,
void
erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
- if (p->slot != ERTS_TWHEEL_SLOT_INACTIVE) {
- ErlCancelProc cancel;
- void *arg;
+ if (p->slot != ERTS_TW_SLOT_INACTIVE) {
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
remove_timer(tiw, p);
- cancel = p->u.func.cancel;
- arg = p->u.func.arg;
- if (cancel)
- (*cancel)(arg);
+ tiw->nto--;
ERTS_MSACC_POP_STATE_M_X();
}
}
@@ -657,22 +1308,17 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
tmr = tiw->sentinel.next;
while (tmr != &tiw->sentinel) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
}
- for (tmr = tiw->at_once.head; tmr; tmr = tmr->next) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
- }
-
- for (ix = 0; ix < ERTS_TIW_SIZE; ix++) {
+ for (ix = ERTS_TW_SLOT_AT_ONCE; ix < ERTS_TW_LATER_WHEEL_END_SLOT; ix++) {
tmr = tiw->w[ix];
if (tmr) {
do {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
} while (tmr != tiw->w[ix]);
}
@@ -680,35 +1326,206 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
}
#ifdef ERTS_TW_DEBUG
-void erts_p_slpq(void)
+
+void
+dbg_verify_empty_soon_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
{
- erts_printf("Not yet implemented...\n");
-#if 0
- ErtsMonotonicTime current_time = erts_get_monotonic_time(NULL);
- int i;
- ErtsTWheelTimer* p;
-
- /* print the whole wheel, starting at the current position */
- erts_printf("\ncurrent time = %bps tiw_pos = %d tiw_nto %d\n",
- current_time, tiw->pos, tiw->nto);
- i = tiw->pos;
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos),
- p->slot);
- }
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = soon_slot(tiw->pos);
+ tmp = to_pos;
+ if (tmp > tiw->pos) {
+ int slots;
+ tmp -= tiw->pos;
+ ERTS_TW_ASSERT(tmp > 0);
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+
+ while (slots > 0) {
+ ERTS_TW_ASSERT(!tiw->w[ix]);
+ ix++;
+ if (ix == ERTS_TW_SOON_WHEEL_END_SLOT)
+ ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+void
+dbg_verify_empty_later_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
+{
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = later_slot(tiw->later.pos);
+ tmp = to_pos;
+ tmp &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ if (tmp > tiw->later.pos) {
+ ErtsMonotonicTime pos_min;
+ int slots;
+ tmp -= tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(tmp > 0);
+
+ pos_min = tiw->later.pos;
+
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp;
+ else {
+ pos_min += ((tmp / ERTS_TW_LATER_WHEEL_SIZE)
+ * ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *tmr = tiw->w[ix];
+ pos_min += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmr) {
+ ErtsTWheelTimer *end = tmr;
+ do {
+ ERTS_TW_ASSERT(tmr->timeout_pos >= pos_min);
+ tmr = tmr->next;
+ } while (tmr != end);
+ }
+ ix++;
+ if (ix == ERTS_TW_LATER_WHEEL_END_SLOT)
+ ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+#endif /* ERTS_TW_DEBUG */
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static void
+hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos)
+{
+ int ix, six, soon_tmo, later_tmo, at_once_tmo,
+ scnt_slot, scnt_slots, scnt_six;
+ ErtsMonotonicTime min_tpos;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ ErtsTWheelTimer *p;
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ scnt[six] = 0;
+
+ min_tpos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+
+ at_once_tmo = 0;
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ at_once_tmo++;
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_AT_ONCE);
+ ERTS_TW_ASSERT(p->timeout_pos <= tiw->pos);
+ ERTS_TW_ASSERT(!check_min_tpos || tiw->pos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
}
- for(i = ((i+1) & (ERTS_TIW_SIZE-1)); i != (tiw->pos & (ERTS_TIW_SIZE-1)); i = ((i+1) & (ERTS_TIW_SIZE-1))) {
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos), p->slot);
- }
- }
+
+ soon_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_SOON_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ soon_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(ix == soon_slot(tpos));
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
}
-#endif
+
+ later_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_LATER_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ six = scnt_get_ix(ix);
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ later_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(later_slot(tpos) == ix);
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ }
+
+ if (tiw->yield_slot != ERTS_TW_SLOT_INACTIVE) {
+ p = tiw->sentinel.next;
+ ix = tiw->yield_slot;
+ while (p != &tiw->sentinel) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ ERTS_TW_ASSERT(ix == p->slot);
+ if (ix == ERTS_TW_SLOT_AT_ONCE)
+ at_once_tmo++;
+ else {
+ scnt_inc(scnt, ix);
+ if (ix >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ later_tmo++;
+ ERTS_TW_ASSERT(ix == later_slot(tpos));
+ }
+ else {
+ soon_tmo++;
+ ERTS_TW_ASSERT(ix == (tpos & ERTS_TW_SOON_WHEEL_MASK));
+ ERTS_TW_ASSERT(tpos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ }
+	    }
+	    p = p->next;
+ }
+ }
+
+
+ ERTS_TW_ASSERT(tiw->at_once.nto == at_once_tmo);
+ ERTS_TW_ASSERT(tiw->soon.nto == soon_tmo);
+ ERTS_TW_ASSERT(tiw->later.nto == later_tmo);
+ ERTS_TW_ASSERT(tiw->nto == soon_tmo + later_tmo + at_once_tmo);
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ ERTS_TW_ASSERT(scnt[six] == tiw->scnt[six]);
}
-#endif /* ERTS_TW_DEBUG */
+
+#endif /* ERTS_TW_HARD_DEBUG */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index d90c282c7e..bab7352479 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -42,6 +42,7 @@
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
+#include "erl_lock_count.h"
#include "erl_smp.h"
#include "erl_time.h"
#include "erl_thr_progress.h"
@@ -56,6 +57,8 @@
#ifdef HIPE
# include "hipe_mode_switch.h"
#endif
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -204,9 +207,8 @@ erl_grow_wstack(ErtsWStack* s, Uint need)
void
erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
{
- Uint old_size = s->pend - s->pstart;
+ Uint old_size = s->size;
Uint new_size;
- Uint sp_offs = s->psp - s->pstart;
if (need_bytes < old_size)
new_size = 2 * old_size;
@@ -220,8 +222,7 @@ erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
sys_memcpy(new_ptr, s->pstart, old_size);
s->pstart = new_ptr;
}
- s->pend = s->pstart + new_size;
- s->psp = s->pstart + sp_offs;
+ s->size = new_size;
}
/*
@@ -700,12 +701,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
/* make a hash index from an erlang term */
/*
-** There are three hash functions.
-** make_broken_hash: the one used for backward compatibility
-** is called from the bif erlang:hash/2. Should never be used
-** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
-** c) hashes bignums differently on different endian processors and d) hashes
-** small integers with different weights on different bytes.
+** There are two hash functions.
**
** make_hash: A hash function that will give the same values for the same
** terms regardless of the internal representation. Small integers are
@@ -894,11 +890,11 @@ tail_recur:
{
Export* ep = *((Export **) (export_val(term) + 1));
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
+ hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
break;
}
@@ -1045,6 +1041,10 @@ tail_recur:
DESTROY_WSTACK(stack);
return hash;
+#undef MAKE_HASH_TUPLE_OP
+#undef MAKE_HASH_TERM_ARRAY_OP
+#undef MAKE_HASH_CDR_PRE_OP
+#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
@@ -1331,11 +1331,11 @@ make_hash2(Eterm term)
{
Export* ep = *((Export **) (export_val(term) + 1));
UINT32_HASH_2
- (ep->code[2],
- atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
+ (ep->info.mfa.arity,
+ atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
HCONST_14);
goto hash2_common;
}
@@ -1579,16 +1579,13 @@ do { /* Lightweight mixing of constant (type info) */ \
} while (0)
Uint32
-make_internal_hash(Eterm term)
+make_internal_hash(Eterm term, Uint32 salt)
{
Uint32 hash;
- Uint32 hash_xor_pairs;
-
- ERTS_UNDEF(hash_xor_pairs, 0);
/* Optimization. Simple cases before declaration of estack. */
if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
- hash = 0;
+ hash = salt;
#if ERTS_SIZEOF_ETERM == 8
UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
#elif ERTS_SIZEOF_ETERM == 4
@@ -1602,7 +1599,7 @@ make_internal_hash(Eterm term)
Eterm tmp;
DECLARE_ESTACK(s);
- hash = 0;
+ hash = salt;
for (;;) {
switch (primary_tag(term)) {
case TAG_PRIMARY_LIST:
@@ -1665,6 +1662,11 @@ make_internal_hash(Eterm term)
Eterm* ptr = boxed_val(term) + 1;
Uint size;
int i;
+
+ /*
+ * We rely on key-value iteration order being constant
+ * for identical maps (in this VM instance).
+ */
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
case HAMT_SUBTAG_HEAD_FLATMAP:
{
@@ -1676,26 +1678,10 @@ make_internal_hash(Eterm term)
if (size == 0)
goto pop_next;
- /* We want a hash function that is *independent* of
- * the order in which keys and values are encountered.
- * We therefore calculate context independent hashes for all .
- * key-value pairs and then xor them together.
- *
- * We *do* need to use an initial seed for each pair, i.e. the
- * hash value, so the hash value is reset for each pair with 'hash'.
- * If we don't, no additional entropy is given to the system and the
- * hash collision resolution in maps:from_list/1 would fail.
- */
- ESTACK_PUSH(s, hash_xor_pairs);
- ESTACK_PUSH(s, hash);
- ESTACK_PUSH(s, HASH_MAP_TAIL);
for (i = size - 1; i >= 0; i--) {
- ESTACK_PUSH(s, hash); /* initial seed for all pairs */
- ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, vs[i]);
ESTACK_PUSH(s, ks[i]);
}
- hash_xor_pairs = 0;
goto pop_next;
}
case HAMT_SUBTAG_HEAD_ARRAY:
@@ -1704,10 +1690,6 @@ make_internal_hash(Eterm term)
UINT32_HASH(size, HCONST_16);
if (size == 0)
goto pop_next;
- ESTACK_PUSH(s, hash_xor_pairs);
- ESTACK_PUSH(s, hash);
- ESTACK_PUSH(s, HASH_MAP_TAIL);
- hash_xor_pairs = 0;
}
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
case HAMT_SUBTAG_HEAD_ARRAY:
@@ -1723,8 +1705,6 @@ make_internal_hash(Eterm term)
while (i) {
if (is_list(*ptr)) {
Eterm* cons = list_val(*ptr);
- ESTACK_PUSH(s, hash); /* initial seed for all pairs */
- ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, CDR(cons));
ESTACK_PUSH(s, CAR(cons));
}
@@ -1740,7 +1720,7 @@ make_internal_hash(Eterm term)
case EXPORT_SUBTAG:
{
Export* ep = *((Export **) (export_val(term) + 1));
- /* Assumes Export entries never moves */
+ /* Assumes Export entries never move */
POINTER_HASH(ep, HCONST_14);
goto pop_next;
}
@@ -1839,7 +1819,7 @@ make_internal_hash(Eterm term)
break;
case REF_SUBTAG:
UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
- ASSERT(internal_ref_no_of_numbers(term) == 3);
+ ASSERT(internal_ref_no_numbers(term) == 3);
UINT32_HASH_2(internal_ref_numbers(term)[1],
internal_ref_numbers(term)[2], HCONST_8);
goto pop_next;
@@ -1848,7 +1828,7 @@ make_internal_hash(Eterm term)
{
ExternalThing* thing = external_thing_ptr(term);
- ASSERT(external_thing_ref_no_of_numbers(thing) == 3);
+ ASSERT(external_thing_ref_no_numbers(thing) == 3);
/* See limitation #2 */
#ifdef ARCH_64
POINTER_HASH(thing->node, HCONST_7);
@@ -1895,7 +1875,7 @@ make_internal_hash(Eterm term)
goto pop_next;
}
default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
}
}
break;
@@ -1908,7 +1888,7 @@ make_internal_hash(Eterm term)
goto pop_next;
default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
pop_next:
if (ESTACK_ISEMPTY(s)) {
@@ -1920,17 +1900,6 @@ make_internal_hash(Eterm term)
term = ESTACK_POP(s);
switch (term) {
- case HASH_MAP_TAIL: {
- hash = (Uint32) ESTACK_POP(s);
- UINT32_HASH(hash_xor_pairs, HCONST_19);
- hash_xor_pairs = (Uint32) ESTACK_POP(s);
- goto pop_next;
- }
- case HASH_MAP_PAIR:
- hash_xor_pairs ^= hash;
- hash = (Uint32) ESTACK_POP(s); /* initial seed for all pairs */
- goto pop_next;
-
case HASH_CDR:
CONST_HASH(HCONST_18); /* Hash CDR i cons cell */
goto pop_next;
@@ -1954,259 +1923,6 @@ make_internal_hash(Eterm term)
#undef HCONST
#undef MIX
-
-Uint32 make_broken_hash(Eterm term)
-{
- Uint32 hash = 0;
- DECLARE_WSTACK(stack);
- unsigned op;
-tail_recur:
- op = tag_val_def(term);
- for (;;) {
- switch (op) {
- case NIL_DEF:
- hash = hash*FUNNY_NUMBER3 + 1;
- break;
- case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(term))->slot.bucket.hvalue);
- break;
- case SMALL_DEF:
-#if defined(ARCH_64)
- {
- Sint y1 = signed_val(term);
- Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
- Uint32 y3 = (Uint32) (y2 >> 32);
- int arity = 1;
-
-#if defined(WORDS_BIGENDIAN)
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- Uint32 y4 = (Uint32) y2;
- hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
- if (y3) {
- hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#else
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
- if (y3)
- {
- hash = hash*FUNNY_NUMBER2 + y3;
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#endif
- }
-#else
- hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
-#endif
- break;
-
- case BINARY_DEF:
- {
- size_t sz = binary_size(term);
- size_t i = (sz < 15) ? sz : 15;
-
- hash = hash_binary_bytes(term, i, hash);
- hash = hash*FUNNY_NUMBER4 + sz;
- break;
- }
-
- case EXPORT_DEF:
- {
- Export* ep = *((Export **) (export_val(term) + 1));
-
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
- break;
- }
-
- case FUN_DEF:
- {
- ErlFunThing* funp = (ErlFunThing *) fun_val(term);
- Uint num_free = funp->num_free;
-
- hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
- if (num_free > 0) {
- if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
- }
- term = funp->env[0];
- goto tail_recur;
- }
- break;
- }
-
- case PID_DEF:
- hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
- break;
- case EXTERNAL_PID_DEF:
- hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
- break;
- case PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
- break;
- case EXTERNAL_PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + external_port_number(term);
- break;
- case REF_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
- break;
- case EXTERNAL_REF_DEF:
- hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
- break;
- case FLOAT_DEF:
- {
- FloatDef ff;
- GET_DOUBLE(term, ff);
- if (ff.fd == 0.0f) {
- /* ensure positive 0.0 */
- ff.fd = erts_get_positive_zero_float();
- }
- hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
- }
- break;
- case MAKE_HASH_CDR_PRE_OP:
- term = (Eterm) WSTACK_POP(stack);
- if (is_not_list(term)) {
- WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
- goto tail_recur;
- }
- /*fall through*/
- case LIST_DEF:
- {
- Eterm* list = list_val(term);
- WSTACK_PUSH2(stack, (UWord) CDR(list),
- (UWord) MAKE_HASH_CDR_PRE_OP);
- term = CAR(list);
- goto tail_recur;
- }
-
- case MAKE_HASH_CDR_POST_OP:
- hash *= FUNNY_NUMBER8;
- break;
-
- case BIG_DEF:
- {
- Eterm* ptr = big_val(term);
- int is_neg = BIG_SIGN(ptr);
- Uint arity = BIG_ARITY(ptr);
- Uint i = arity;
- ptr++;
-#if D_EXP == 16
- /* hash over 32 bit LE */
-
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#elif D_EXP == 32
-
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
- }
-#else
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#endif
-
-#elif D_EXP == 64
- {
- Uint32 h = 0, l;
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
- }
-#else
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + l;
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + h;
- }
-#endif
- /* adjust arity to match 32 bit mode */
- arity = (arity << 1) - (h == 0);
- }
-
-#else
-#error "unsupported D_EXP size"
-#endif
- hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- }
- break;
-
- case MAP_DEF:
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
- break;
- case TUPLE_DEF:
- {
- Eterm* ptr = tuple_val(term);
- Uint arity = arityval(*ptr);
-
- WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
- }/*fall through*/
- case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_TERM_ARRAY_OP:
- {
- Uint i = (Uint) WSTACK_POP(stack);
- Eterm* ptr = (Eterm*) WSTACK_POP(stack);
- if (i != 0) {
- term = *ptr;
- WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
- goto tail_recur;
- }
- if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = (UWord) WSTACK_POP(stack);
- hash = hash*FUNNY_NUMBER9 + arity;
- }
- break;
- }
-
- default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_broken_hash\n");
- return 0;
- }
- if (WSTACK_ISEMPTY(stack)) break;
- op = (Uint) WSTACK_POP(stack);
- }
-
- DESTROY_WSTACK(stack);
- return hash;
-
-#undef MAKE_HASH_TUPLE_OP
-#undef MAKE_HASH_TERM_ARRAY_OP
-#undef MAKE_HASH_CDR_PRE_OP
-#undef MAKE_HASH_CDR_POST_OP
-}
-
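For reference, the removed make_broken_hash() was a classic
multiplicative hash: the running value is repeatedly multiplied by a
per-type constant (the FUNNY_NUMBERs) before each element is added,
with the arity folded in last. The core recurrence, sketched with
illustrative constants in place of the real FUNNY_NUMBER values:

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins; the real FUNNY_NUMBER constants differ. */
#define EXAMPLE_FUNNY_A UINT32_C(268440163)
#define EXAMPLE_FUNNY_B UINT32_C(268439161)

uint32_t multiplicative_hash(const uint32_t *elems, size_t n)
{
    uint32_t hash = 0;
    size_t i;
    for (i = 0; i < n; i++)
        hash = hash * EXAMPLE_FUNNY_B + elems[i];  /* per-element step */
    return hash * EXAMPLE_FUNNY_A + (uint32_t) n;  /* fold in the arity */
}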
static Eterm
do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
ErlHeapFragment **bp, Process **p, Uint sz)
@@ -2740,22 +2456,20 @@ tailrecur_ne:
anum = external_thing_ref_numbers(athing);
bnum = external_thing_ref_numbers(bthing);
- alen = external_thing_ref_no_of_numbers(athing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ alen = external_thing_ref_no_numbers(athing);
+ blen = external_thing_ref_no_numbers(bthing);
goto ref_common;
+
case REF_SUBTAG:
- if (!is_internal_ref(b))
- goto not_equal;
- {
- RefThing* athing = ref_thing_ptr(a);
- RefThing* bthing = ref_thing_ptr(b);
- alen = internal_thing_ref_no_of_numbers(athing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- anum = internal_thing_ref_numbers(athing);
- bnum = internal_thing_ref_numbers(bthing);
- }
+ if (!is_internal_ref(b))
+ goto not_equal;
+
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
ref_common:
ASSERT(alen > 0 && blen > 0);
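Both branches above now jump to ref_common, which compares the two
number arrays for equality: when the lengths differ, the surplus
most-significant words of the longer array must all be zero. A
standalone sketch of that check, assuming (as the accessors above
return them) that the numbers are stored least-significant word first:

#include <stdint.h>

int ref_numbers_eq(const uint32_t *anum, int alen,
                   const uint32_t *bnum, int blen)
{
    int i;
    while (alen > blen)            /* surplus high words must be zero */
        if (anum[--alen] != 0)
            return 0;
    while (blen > alen)
        if (bnum[--blen] != 0)
            return 0;
    for (i = 0; i < alen; i++)
        if (anum[i] != bnum[i])
            return 0;
    return 1;
}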
@@ -3307,13 +3021,15 @@ tailrecur_ne:
Export* a_exp = *((Export **) (export_val(a) + 1));
Export* b_exp = *((Export **) (export_val(b) + 1));
- if ((j = erts_cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
+ b_exp->info.mfa.module)) != 0) {
RETURN_NEQ(j);
}
- if ((j = erts_cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
+ b_exp->info.mfa.function)) != 0) {
RETURN_NEQ(j);
}
- ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
+ ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
}
break;
case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
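The export comparison above is a three-level lexicographic order over
the MFA triple: module first, then function, then arity. Reduced to a
sketch with simplified types (the real code orders atoms with
erts_cmp_atoms() rather than integer subtraction):

typedef struct { Sint module, function, arity; } mfa_sketch;

static Sint cmp_mfa_sketch(const mfa_sketch *a, const mfa_sketch *b)
{
    Sint j;
    if ((j = a->module - b->module) != 0)     /* erts_cmp_atoms() in ERTS */
        return j;
    if ((j = a->function - b->function) != 0)
        return j;
    return a->arity - b->arity;
}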
@@ -3385,25 +3101,21 @@ tailrecur_ne:
*/
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if(is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = REF_DEF;
goto mixed_types;
}
- {
- RefThing* athing = ref_thing_ptr(a);
- anode = erts_this_node;
- anum = internal_thing_ref_numbers(athing);
- alen = internal_thing_ref_no_of_numbers(athing);
- }
+ anode = erts_this_node;
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
ref_common:
CMP_NODES(anode, bnode);
@@ -3433,15 +3145,14 @@ tailrecur_ne:
goto pop_next;
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if (is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = EXTERNAL_REF_DEF;
goto mixed_types;
@@ -3450,7 +3161,7 @@ tailrecur_ne:
ExternalThing* athing = external_thing_ptr(a);
anode = athing->node;
anum = external_thing_ref_numbers(athing);
- alen = external_thing_ref_no_of_numbers(athing);
+ alen = external_thing_ref_no_numbers(athing);
}
goto ref_common;
default:
@@ -3827,40 +3538,41 @@ not_equal:
Eterm
store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
+ struct erl_off_heap_header *ohhp;
Uint i;
Uint size;
- Uint *from_hp;
- Uint *to_hp = *hpp;
+ Eterm *from_hp;
+ Eterm *to_hp = *hpp;
ASSERT(is_external(ns) || is_internal_ref(ns));
- if(is_external(ns)) {
- from_hp = external_val(ns);
- size = thing_arityval(*from_hp) + 1;
- *hpp += size;
-
- for(i = 0; i < size; i++)
- to_hp[i] = from_hp[i];
-
- erts_smp_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
-
- ((struct erl_off_heap_header*) to_hp)->next = oh->first;
- oh->first = (struct erl_off_heap_header*) to_hp;
-
- return make_external(to_hp);
- }
-
- /* Internal ref */
- from_hp = internal_ref_val(ns);
-
+ from_hp = boxed_val(ns);
size = thing_arityval(*from_hp) + 1;
-
*hpp += size;
for(i = 0; i < size; i++)
to_hp[i] = from_hp[i];
- return make_internal_ref(to_hp);
+ if (is_external_header(*from_hp)) {
+ ExternalThing *etp = (ExternalThing *) from_hp;
+ ASSERT(is_external(ns));
+ erts_smp_refc_inc(&etp->node->refc, 2);
+ }
+ else if (is_ordinary_ref_thing(from_hp))
+ return make_internal_ref(to_hp);
+ else {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
+ ErtsMagicBinary *mb = mreft->mb;
+ ASSERT(is_magic_ref_thing(from_hp));
+ erts_refc_inc(&mb->intern.refc, 2);
+ OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
+ }
+
+ ohhp = (struct erl_off_heap_header*) to_hp;
+ ohhp->next = oh->first;
+ oh->first = ohhp;
+
+ return make_boxed(to_hp);
}
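All three cases of the rewritten store_external_or_ref_() now share one
tail: bump the reference count of whatever the thing points at, then
prepend the copy to the off-heap list so the GC can release that
resource later (ordinary internal refs own nothing off-heap and return
early instead). The list bookkeeping, sketched with stand-in types:

#include <stddef.h>

struct oh_header { struct oh_header *next; };                  /* stand-in */
struct off_heap  { struct oh_header *first; size_t overhead; };

static void oh_link(struct off_heap *oh, struct oh_header *copy,
                    size_t words_overhead)
{
    copy->next = oh->first;          /* O(1) prepend, newest entry first */
    oh->first = copy;
    oh->overhead += words_overhead;  /* what OH_OVERHEAD() accounts for */
}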
Eterm
@@ -3923,6 +3635,122 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -2; /* not enough space */
}
+/** @brief Fill buf with the UTF-8 contents of the Unicode list
+ * @param len Max number of characters (code points) to encode.
+ * @param written NULL, or where to store the number of bytes written.
+ * @return 0 on success,
+ *        -1 on a type error,
+ *        -2 if the list was too long; only \c len characters were written
+ */
+int
+erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+ Sint val;
+ int res;
+
+ while (1) {
+ if (is_nil(list)) {
+ res = 0;
+ break;
+ }
+ if (is_not_list(list)) {
+ res = -1;
+ break;
+ }
+ listptr = list_val(list);
+
+ if (len-- <= 0) {
+ res = -2;
+ break;
+ }
+
+ if (is_not_small(CAR(listptr))) {
+ res = -1;
+ break;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ buf[sz] = val;
+ sz++;
+ } else if (val < 0x800) {
+ buf[sz+0] = 0xC0 | (val >> 6);
+ buf[sz+1] = 0x80 | (val & 0x3F);
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ res = -1;
+ break;
+ }
+ buf[sz+0] = 0xE0 | (val >> 12);
+ buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+2] = 0x80 | (val & 0x3F);
+ sz += 3;
+ } else if (val < 0x110000) {
+ buf[sz+0] = 0xF0 | (val >> 18);
+ buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
+ buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+3] = 0x80 | (val & 0x3F);
+ sz += 4;
+ } else {
+ res = -1;
+ break;
+ }
+ list = CDR(listptr);
+ }
+
+ if (written)
+ *written = sz;
+ return res;
+}
+
+Sint
+erts_unicode_list_to_buf_len(Eterm list)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+
+ if (is_nil(list)) {
+ return 0;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+
+ while (1) {
+ Sint val;
+
+ if (is_not_small(CAR(listptr))) {
+ return -1;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ sz++;
+ } else if (val < 0x800) {
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ return -1;
+ }
+ sz += 3;
+ } else if (val < 0x110000) {
+ sz += 4;
+ } else {
+ return -1;
+ }
+ list = CDR(listptr);
+ if (is_nil(list)) {
+ return sz;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+ }
+}
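A hypothetical caller of the two helpers added above (the helper name
and the allocator type are illustrative, not part of this change): size
the buffer exactly first, then encode into it. Passing the byte count
as the character budget is safe because a list never contains more
characters than its UTF-8 encoding has bytes:

static byte *
list_to_utf8_example(Eterm list, Sint *bytes_out)
{
    Sint sz = erts_unicode_list_to_buf_len(list);
    Sint written;
    byte *buf;

    if (sz < 0)
        return NULL;               /* not a proper list of code points */

    buf = erts_alloc(ERTS_ALC_T_TMP, sz);

    /* the list holds at most sz characters, so sz is a safe budget */
    if (erts_unicode_list_to_buf(list, buf, sz, &written) != 0) {
        erts_free(ERTS_ALC_T_TMP, buf);
        return NULL;
    }
    ASSERT(written == sz);
    *bytes_out = written;
    return buf;
}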
+
/*
** Convert an integer to a byte list
** return pointer to converted stuff (need not be at the start of buf!)
@@ -5031,6 +4859,53 @@ Uint64 erts_timestamp_millis(void)
#endif
}
+void *
+erts_calc_stacklimit(char *prev_c, UWord stacksize)
+{
+    /*
+     * We *don't* want this function inlined; if it were
+     * called from another function in utils.c the compiler
+     * could inline it, merging the stack frames and defeating
+     * the growth-direction test below.
+     */
+
+ UWord pagesize = erts_sys_get_page_size();
+ char c;
+ char *start;
+ if (&c > prev_c) {
+ start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
+ return (void *) (start + stacksize);
+ }
+ else {
+ start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
+ return (void *) (start - stacksize);
+ }
+}
+
+/*
+ * erts_check_below_limit() and
+ * erts_check_above_limit() are put
+ * in utils.c in order to prevent
+ * inlining.
+ */
+
+int
+erts_check_below_limit(char *ptr, char *limit)
+{
+ return ptr < limit;
+}
+
+int
+erts_check_above_limit(char *ptr, char *limit)
+{
+ return ptr > limit;
+}
+
+void *
+erts_ptr_id(void *ptr)
+{
+ return ptr;
+}
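A hypothetical use of the four helpers added above (all names below are
illustrative): compute a limit from an address taken near the base of
the stack, then test against it before each recursive step, choosing
the check that matches the stack's growth direction. erts_ptr_id()
serves the same purpose as keeping the checks out-of-line: it stops the
compiler from reasoning about the compared addresses.

#define EXAMPLE_RESERVE (64*1024)          /* illustrative safety margin */

static int stack_grows_downwards(char *prev_c)
{
    char c;
    return (char *) erts_ptr_id(&c) < prev_c;
}

static int deep_work(char *limit, int down, int depth)
{
    char c;
    if (down ? erts_check_below_limit(&c, limit)
             : erts_check_above_limit(&c, limit))
        return -1;                         /* about to overflow: bail out */
    return depth == 0 ? 0 : deep_work(limit, down, depth - 1);
}

int run_with_guard(UWord stacksize)        /* thread stack size in bytes */
{
    char c;
    char *limit = (char *) erts_calc_stacklimit(&c, stacksize - EXAMPLE_RESERVE);
    return deep_work(limit, stack_grows_downwards(&c), 1000000);
}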
+
#ifdef DEBUG
/*
* Handy functions when using a debugger - don't use in the code!