Diffstat (limited to 'erts/emulator/beam')
 erts/emulator/beam/atom.names | 4
 erts/emulator/beam/beam_bif_load.c | 796
 erts/emulator/beam/beam_bp.c | 2002
 erts/emulator/beam/beam_bp.h | 233
 erts/emulator/beam/beam_catches.c | 136
 erts/emulator/beam/beam_catches.h | 11
 erts/emulator/beam/beam_debug.c | 30
 erts/emulator/beam/beam_emu.c | 415
 erts/emulator/beam/beam_load.c | 600
 erts/emulator/beam/beam_load.h | 49
 erts/emulator/beam/beam_ranges.c | 349
 erts/emulator/beam/benchmark.c | 34
 erts/emulator/beam/benchmark.h | 13
 erts/emulator/beam/bif.c | 49
 erts/emulator/beam/bif.h | 18
 erts/emulator/beam/bif.tab | 11
 erts/emulator/beam/break.c | 112
 erts/emulator/beam/code_ix.c | 168
 erts/emulator/beam/code_ix.h | 141
 erts/emulator/beam/copy.c | 433
 erts/emulator/beam/dist.c | 42
 erts/emulator/beam/dtrace-wrapper.h | 2
 erts/emulator/beam/erl_alloc.c | 29
 erts/emulator/beam/erl_alloc.types | 41
 erts/emulator/beam/erl_alloc_util.c | 130
 erts/emulator/beam/erl_async.c | 16
 erts/emulator/beam/erl_bif_binary.c | 69
 erts/emulator/beam/erl_bif_chksum.c | 13
 erts/emulator/beam/erl_bif_info.c | 151  (mode -rw-r--r-- -> -rwxr-xr-x)
 erts/emulator/beam/erl_bif_op.c | 5
 erts/emulator/beam/erl_bif_os.c | 110
 erts/emulator/beam/erl_bif_port.c | 16
 erts/emulator/beam/erl_bif_re.c | 11
 erts/emulator/beam/erl_bif_trace.c | 636
 erts/emulator/beam/erl_bits.c | 30
 erts/emulator/beam/erl_cpu_topology.c | 2
 erts/emulator/beam/erl_db.c | 54
 erts/emulator/beam/erl_db_hash.c | 63
 erts/emulator/beam/erl_db_tree.c | 12
 erts/emulator/beam/erl_db_util.c | 16
 erts/emulator/beam/erl_debug.c | 270
 erts/emulator/beam/erl_debug.h | 20
 erts/emulator/beam/erl_driver.h | 11
 erts/emulator/beam/erl_fun.h | 8
 erts/emulator/beam/erl_gc.c | 15
 erts/emulator/beam/erl_init.c | 144
 erts/emulator/beam/erl_lock_check.c | 5
 erts/emulator/beam/erl_lock_check.h | 4
 erts/emulator/beam/erl_lock_count.c | 31
 erts/emulator/beam/erl_lock_count.h | 5
 erts/emulator/beam/erl_message.c | 102
 erts/emulator/beam/erl_mtrace.c | 2
 erts/emulator/beam/erl_nif.c | 1167
 erts/emulator/beam/erl_nif.h | 17
 erts/emulator/beam/erl_nif_api_funcs.h | 4
 erts/emulator/beam/erl_nmgc.c | 1401
 erts/emulator/beam/erl_nmgc.h | 364
 erts/emulator/beam/erl_node_tables.c | 14
 erts/emulator/beam/erl_port.h | 6
 erts/emulator/beam/erl_port_task.c | 3
 erts/emulator/beam/erl_port_task.h | 9
 erts/emulator/beam/erl_posix_str.c | 1
 erts/emulator/beam/erl_printf_term.c | 5
 erts/emulator/beam/erl_process.c | 611
 erts/emulator/beam/erl_process.h | 113
 erts/emulator/beam/erl_process_dump.c | 2
 erts/emulator/beam/erl_process_lock.c | 201
 erts/emulator/beam/erl_process_lock.h | 8
 erts/emulator/beam/erl_ptab.c | 11
 erts/emulator/beam/erl_resolv_dns.c | 23
 erts/emulator/beam/erl_resolv_nodns.c | 23
 erts/emulator/beam/erl_sched_spec_pre_alloc.c | 113
 erts/emulator/beam/erl_sched_spec_pre_alloc.h | 7
 erts/emulator/beam/erl_term.c | 2
 erts/emulator/beam/erl_time_sup.c | 16
 erts/emulator/beam/erl_trace.c | 181
 erts/emulator/beam/erl_trace.h | 9
 erts/emulator/beam/erl_unicode.c | 185
 erts/emulator/beam/erl_utils.h | 1
 erts/emulator/beam/erl_vm.h | 21
 erts/emulator/beam/erlang_dtrace.d | 2916
 erts/emulator/beam/export.c | 354
 erts/emulator/beam/export.h | 49
 erts/emulator/beam/external.c | 12
 erts/emulator/beam/global.h | 250  (mode -rw-r--r-- -> -rwxr-xr-x)
 erts/emulator/beam/index.c | 28
 erts/emulator/beam/index.h | 14
 erts/emulator/beam/io.c | 179
 erts/emulator/beam/module.c | 159
 erts/emulator/beam/module.h | 64
 erts/emulator/beam/ops.tab | 18
 erts/emulator/beam/sys.h | 140
 erts/emulator/beam/utils.c | 24
93 files changed, 9536 insertions, 6858 deletions
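
The recurring idea in the diffs below is a staged code index: loaders prepare a staging copy of the code tables (erts_start_staging_code_ix / erts_staging_code_ix) while schedulers keep reading the active index without taking locks, and the new index is published only after thread progress guarantees every scheduler has passed a memory barrier (see smp_code_ix_commiter in beam_bif_load.c). The following is a minimal stand-alone sketch of that two-slot publish pattern, using C11 atomics and hypothetical names (CodeTab, staging_slot, commit_staged); it deliberately omits the abort path and the thread-progress wait that the real erts_*_code_ix API performs before committing.

#include <stdatomic.h>

typedef struct {
    int dummy;                 /* per-slot module/export tables would live here */
} CodeTab;

static CodeTab code_table[2];  /* two complete copies: one active, one staging */
static atomic_int active_ix;   /* slot currently read by schedulers            */

static CodeTab *staging_slot(void)        /* writer side: slot being prepared  */
{
    return &code_table[atomic_load(&active_ix) ^ 1];
}

static void commit_staged(void)           /* publish with a single atomic store */
{
    atomic_store(&active_ix, atomic_load(&active_ix) ^ 1);
}

static const CodeTab *active_slot(void)   /* reader side: lock-free lookup      */
{
    return &code_table[atomic_load(&active_ix)];
}

With this layout a failed load simply abandons the staging slot, which is what erts_abort_staging_code_ix does in the patch.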
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 30b891be5d..c47a608215 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -248,7 +248,6 @@ atom gather_sched_wall_time_result
atom getting_linked
atom getting_unlinked
atom global
-atom global_heaps_size
atom Gt='>'
atom grun
atom group_leader
@@ -257,13 +256,13 @@ atom heap_block_size
atom heap_size
atom heap_sizes
atom heap_type
+atom heart_port
atom heir
atom hidden
atom hide
atom high
atom hipe_architecture
atom http httph https http_response http_request http_header http_eoh http_error http_bin httph_bin
-atom hybrid
atom id
atom if_clause
atom imports
@@ -395,6 +394,7 @@ atom opt
atom or
atom ordered_set
atom orelse
+atom os_pid
atom os_type
atom os_version
atom ose_bg_proc
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 29367c057a..e0a4f86d2d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -37,25 +37,83 @@
static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp);
-static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
-static void delete_export_references(Eterm module);
-static int purge_module(int module);
+static void delete_code(Module* modp);
static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
-static void remove_from_address_table(BeamInstr* code);
-Eterm
-load_module_2(BIF_ALIST_2)
+
+
+BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
{
- Eterm reason;
- Eterm* hp;
- int sz;
- byte* code;
+ Module* modp;
Eterm res;
+ ErtsCodeIndex code_ix;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ code_ix = erts_active_code_ix();
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ return am_undefined;
+ }
+ erts_rlock_old_code(code_ix);
+ res = ((modp->curr.code && is_native(modp->curr.code)) ||
+ (modp->old.code != 0 && is_native(modp->old.code))) ?
+ am_true : am_false;
+ erts_runlock_old_code(code_ix);
+ return res;
+}
+
+BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
+{
+ Module* modp;
+ Eterm res;
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+
+ if (modp && modp->curr.num_breakpoints > 0) {
+ ASSERT(modp->curr.code != NULL);
+ erts_clear_module_break(modp);
+ ASSERT(modp->curr.num_breakpoints == 0);
+ }
+
+ erts_start_staging_code_ix();
+
+ res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ if (res == BIF_ARG_1) {
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
+ }
+ else {
+ erts_abort_staging_code_ix();
+ }
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
+ return res;
+}
+
+BIF_RETTYPE
+prepare_loading_2(BIF_ALIST_2)
+{
byte* temp_alloc = NULL;
- struct LoaderState* stp;
+ byte* code;
+ Uint sz;
+ Binary* magic;
+ Eterm reason;
+ Eterm* hp;
+ Eterm res;
if (is_not_atom(BIF_ARG_1)) {
error:
@@ -65,108 +123,307 @@ load_module_2(BIF_ALIST_2)
if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
goto error;
}
- hp = HAlloc(BIF_P, 3);
- /*
- * Read the BEAM file and prepare the module for loading.
- */
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
sz = binary_size(BIF_ARG_2);
- reason = erts_prepare_loading(stp, BIF_P, BIF_P->group_leader,
+ reason = erts_prepare_loading(magic, BIF_P, BIF_P->group_leader,
&BIF_ARG_1, code, sz);
erts_free_aligned_binary_bytes(temp_alloc);
if (reason != NIL) {
+ hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->refc, 1);
+ BIF_RET(res);
+}
+
+struct m {
+ Binary* code;
+ Eterm module;
+ Module* modp;
+ Uint exception;
+};
+
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
+#ifdef ERTS_SMP
+static void smp_code_ix_commiter(void*);
+
+static struct /* Protected by code_write_permission */
+{
+ Process* stager;
+ ErtsThrPrgrLaterOp lop;
+}commiter_state;
+#endif
+
+static Eterm
+exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
+{
+ Eterm* hp = HAlloc(p, 3 + 2*exceptions);
+ Eterm res = NIL;
+
+ mp += exceptions - 1;
+ while (exceptions > 0) {
+ if (mp->exception) {
+ res = CONS(hp, mp->module, res);
+ hp += 2;
+ exceptions--;
+ }
+ mp--;
+ }
+ return TUPLE2(hp, tag, res);
+}
+
+
+BIF_RETTYPE
+finish_loading_1(BIF_ALIST_1)
+{
+ int i;
+ int n;
+ struct m* p = NULL;
+ Uint exceptions;
+ Eterm res;
+ int is_blocking = 0;
+ int do_commit = 0;
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1);
+ }
/*
- * Stop all other processes and finish the loading of the module.
+ * Validate the argument before we start loading; it must be a
+ * proper list where each element is a magic binary containing
+ * prepared (not previously loaded) code.
+ *
+ * First count the number of elements and allocate an array
+ * to keep the elements in.
*/
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- reason = erts_finish_loading(stp, BIF_P, 0, &BIF_ARG_1);
- if (reason != NIL) {
- res = TUPLE2(hp, am_error, reason);
- } else {
- set_default_trace_pattern(BIF_ARG_1);
- res = TUPLE2(hp, am_module, BIF_ARG_1);
+ n = list_length(BIF_ARG_1);
+ if (n == -1) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
}
+ p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
-}
+ /*
+ * We now know that the argument is a proper list. Validate
+ * and collect the binaries into the array.
+ */
-BIF_RETTYPE purge_module_1(BIF_ALIST_1)
-{
- int purge_res;
+ for (i = 0; i < n; i++) {
+ Eterm* cons = list_val(BIF_ARG_1);
+ Eterm term = CAR(cons);
+ ProcBin* pb;
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
+ }
+ pb = (ProcBin*) binary_val(term);
+ p[i].code = pb->val;
+ p[i].module = erts_module_for_prepared_code(p[i].code);
+ if (p[i].module == NIL) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
+ }
+ BIF_ARG_1 = CDR(cons);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ /*
+ * Since we cannot handle atomic loading of a group of modules
+ * if one or more of them uses on_load, we will only allow one
+ * element in the list. This limitation is intended to be
+ * lifted in the future.
+ */
- erts_export_consolidate();
- purge_res = purge_module(atom_val(BIF_ARG_1));
+ if (n > 1) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
+ goto done;
+ }
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ /*
+ * All types are correct. There cannot be a BADARG from now on.
+ * Before we can start loading, we must check whether any of
+ * the modules already has old code. To avoid a race, we must
+ * not allow other process to initiate a code loading operation
+ * from now on.
+ */
- if (purge_res < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ res = am_ok;
+ erts_start_staging_code_ix();
+
+ for (i = 0; i < n; i++) {
+ p[i].modp = erts_put_module(p[i].module);
+ }
+ for (i = 0; i < n; i++) {
+ if (p[i].modp->curr.num_breakpoints > 0 ||
+ p[i].modp->curr.num_traced_exports > 0 ||
+ erts_is_default_trace_enabled()) {
+ /* tracing involved, fallback with thread blocking */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ break;
+ }
}
- BIF_RET(am_true);
-}
-BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
-{
- Module* modp;
+ if (is_blocking) {
+ for (i = 0; i < n; i++) {
+ if (p[i].modp->curr.num_breakpoints) {
+ erts_clear_module_break(p[i].modp);
+ ASSERT(p[i].modp->curr.num_breakpoints == 0);
+ }
+ }
+ }
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ p[i].exception = 0;
+ if (p[i].modp->curr.code && p[i].modp->old.code) {
+ p[i].exception = 1;
+ exceptions++;
+ }
}
- if ((modp = erts_get_module(BIF_ARG_1)) == NULL) {
- return am_undefined;
+
+ if (exceptions) {
+ res = exception_list(BIF_P, am_not_purged, p, exceptions);
+ } else {
+ /*
+ * Now we can load all code. This can't fail.
+ */
+
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ Eterm mod;
+ Eterm retval;
+
+ erts_refc_inc(&p[i].code->refc, 1);
+ retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
+ ASSERT(retval == NIL || retval == am_on_load);
+ if (retval == am_on_load) {
+ p[i].exception = 1;
+ exceptions++;
+ }
+ }
+
+ /*
+ * Check whether any module has an on_load_handler.
+ */
+
+ if (exceptions) {
+ res = exception_list(BIF_P, am_on_load, p, exceptions);
+ }
+ do_commit = 1;
}
- return ((modp->code && is_native(modp->code)) ||
- (modp->old_code != 0 && is_native(modp->old_code))) ?
- am_true : am_false;
+
+done:
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
}
-BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
-{
- Eterm res;
+static Eterm
+staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
+ struct m* loaded, int nloaded)
+{
+#ifdef ERTS_SMP
+ if (is_blocking || !commit)
+#endif
+ {
+ if (commit) {
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
+ if (loaded) {
+ int i;
+ for (i=0; i < nloaded; i++) {
+ set_default_trace_pattern(loaded[i].module);
+ }
+ }
+ }
+ else {
+ erts_abort_staging_code_ix();
+ }
+ if (loaded) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ }
+ if (is_blocking) {
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ erts_release_code_write_permission();
+ return res;
+ }
+#ifdef ERTS_SMP
+ else {
+ ASSERT(is_value(res));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (loaded) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ }
+ erts_end_staging_code_ix();
+ /*
+ * Now we must wait for all schedulers to do a memory barrier before
+ * we can commit and let them access the new staged code. This allows
+ * schedulers to read active code_ix in a safe way while executing
+ * without any memory barriers at all.
+ */
+ ASSERT(commiter_state.stager == NULL);
+ commiter_state.stager = c_p;
+ erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop);
+ erts_smp_proc_inc_refc(c_p);
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ /*
+ * smp_code_ix_commiter() will do the rest "later"
+ * and resume this process to return 'res'.
+ */
+ ERTS_BIF_YIELD_RETURN(c_p, res);
+ }
+#endif
+}
- erts_export_consolidate();
- res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- return res;
+#ifdef ERTS_SMP
+static void smp_code_ix_commiter(void* null)
+{
+ Process* p = commiter_state.stager;
+
+ erts_commit_staging_code_ix();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+#ifdef DEBUG
+ commiter_state.stager = NULL;
+#endif
+ erts_release_code_write_permission();
}
+#endif /* ERTS_SMP */
+
+
BIF_RETTYPE
check_old_code_1(BIF_ALIST_1)
{
+ ErtsCodeIndex code_ix;
Module* modp;
+ Eterm res = am_false;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
- modp = erts_get_module(BIF_ARG_1);
- if (modp == NULL) { /* Doesn't exist. */
- BIF_RET(am_false);
- } else if (modp->old_code == NULL) { /* No old code. */
- BIF_RET(am_false);
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+ if (modp != NULL) {
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code != NULL) {
+ res = am_true;
+ }
+ erts_runlock_old_code(code_ix);
}
- BIF_RET(am_true);
+ BIF_RET(res);
}
Eterm
@@ -180,12 +437,19 @@ check_process_code_2(BIF_ALIST_2)
}
if (is_internal_pid(BIF_ARG_1)) {
Eterm res;
- modp = erts_get_module(BIF_ARG_2);
+ ErtsCodeIndex code_ix;
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_2, code_ix);
if (modp == NULL) { /* Doesn't exist. */
return am_false;
- } else if (modp->old_code == NULL) { /* No old code. */
+ }
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code == NULL) { /* No old code. */
+ erts_runlock_old_code(code_ix);
return am_false;
}
+ erts_runlock_old_code(code_ix);
#ifdef ERTS_SMP
rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
@@ -200,7 +464,14 @@ check_process_code_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
BIF_ARG_1, BIF_ARG_2);
}
- res = check_process_code(rp, modp);
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code != NULL) { /* must check again */
+ res = check_process_code(rp, modp);
+ }
+ else {
+ res = am_false;
+ }
+ erts_runlock_old_code(code_ix);
#ifdef ERTS_SMP
if (BIF_P != rp) {
erts_resume(rp, ERTS_PROC_LOCK_MAIN);
@@ -221,56 +492,71 @@ check_process_code_2(BIF_ALIST_2)
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
- int res;
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ int is_blocking = 0;
+ int success = 0;
+ Eterm res = NIL;
- if (is_not_atom(BIF_ARG_1))
- goto badarg;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_delete_module_1], BIF_P, BIF_ARG_1);
+ }
{
- Module *modp = erts_get_module(BIF_ARG_1);
+ erts_start_staging_code_ix();
+ code_ix = erts_staging_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
if (!modp) {
res = am_undefined;
}
- else if (modp->old_code != 0) {
+ else if (modp->old.code != 0) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
- res = am_badarg;
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
}
else {
- delete_export_references(BIF_ARG_1);
- delete_code(BIF_P, 0, modp);
+ if (modp->curr.num_breakpoints > 0 ||
+ modp->curr.num_traced_exports > 0) {
+ /* we have tracing, retry single threaded */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ if (modp->curr.num_breakpoints) {
+ erts_clear_module_break(modp);
+ ASSERT(modp->curr.num_breakpoints == 0);
+ }
+ }
+ delete_code(modp);
res = am_true;
+ success = 1;
}
}
-
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- if (res == am_badarg) {
- badarg:
- BIF_ERROR(BIF_P, BADARG);
- }
- BIF_RET(res);
+ return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
{
Module* modp;
+ ErtsCodeIndex code_ix;
+ Eterm res = am_false;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
- if ((modp = erts_get_module(BIF_ARG_1)) == NULL ||
- modp->code == NULL ||
- modp->code[MI_ON_LOAD_FUNCTION_PTR] != 0) {
- BIF_RET(am_false);
+ code_ix = erts_active_code_ix();
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) != NULL) {
+ if (modp->curr.code != NULL
+ && modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] == 0) {
+ res = am_true;
+ }
}
- BIF_RET(am_true);
+ BIF_RET(res);
}
BIF_RETTYPE pre_loaded_0(BIF_ALIST_0)
@@ -280,27 +566,28 @@ BIF_RETTYPE pre_loaded_0(BIF_ALIST_0)
BIF_RETTYPE loaded_0(BIF_ALIST_0)
{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Module* modp;
Eterm previous = NIL;
Eterm* hp;
int i;
int j = 0;
-
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
+
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp = module_code(i, code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
j++;
}
}
if (j > 0) {
hp = HAlloc(BIF_P, j*2);
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- previous = CONS(hp, make_atom(module_code(i)->module),
- previous);
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp=module_code(i,code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ previous = CONS(hp, make_atom(modp->module), previous);
hp += 2;
}
}
@@ -310,54 +597,65 @@ BIF_RETTYPE loaded_0(BIF_ALIST_0)
BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
{
- Module* modp = erts_get_module(BIF_ARG_1);
- Eterm on_load;
+ Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
- if (!modp || modp->code == 0) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ if (modp && modp->curr.code) {
+ BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]);
}
- if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
- goto error;
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
- BIF_TRAP_CODE_PTR_0(BIF_P, on_load);
}
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
- Module* modp = erts_get_module(BIF_ARG_1);
+ ErtsCodeIndex code_ix;
+ Module* modp;
Eterm on_load;
- if (!modp || modp->code == 0) {
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ /* ToDo: Use code_ix staging instead of thread blocking */
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+
+ if (!modp || modp->curr.code == 0) {
error:
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
+ if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
goto error;
}
if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
if (BIF_ARG_2 == am_true) {
int i;
/*
* The on_load function succeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(); i++) {
- Export *ep = export_list(i);
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i,code_ix);
if (ep != NULL &&
ep->code[0] == BIF_ARG_1 &&
ep->code[4] != 0) {
- ep->address = (void *) ep->code[4];
+ ep->addressv[code_ix] = (void *) ep->code[4];
ep->code[4] = 0;
}
}
- modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
BeamInstr* code;
@@ -368,19 +666,21 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
* This is an combination of delete and purge. We purge
* the current code; the old code is not touched.
*/
- erts_total_code_size -= modp->code_length;
- code = modp->code;
- end = (BeamInstr *)((char *)code + modp->code_length);
+ erts_total_code_size -= modp->curr.code_length;
+ code = modp->curr.code;
+ end = (BeamInstr *)((char *)code + modp->curr.code_length);
erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->catches, code, modp->code_length);
+ beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
+ erts_active_code_ix());
erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->code = NULL;
- modp->code_length = 0;
- modp->catches = BEAM_CATCHES_NIL;
- remove_from_address_table(code);
+ modp->curr.code = NULL;
+ modp->curr.code_length = 0;
+ modp->curr.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
}
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
BIF_RET(am_true);
}
@@ -401,11 +701,11 @@ set_default_trace_pattern(Eterm module)
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
- (void) erts_set_trace_pattern(mfa, 1,
+ (void) erts_set_trace_pattern(0, mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid);
+ meta_tracer_pid, 1);
}
}
@@ -417,20 +717,18 @@ check_process_code(Process* rp, Module* modp)
Uint mod_size;
BeamInstr* end;
Eterm* sp;
-#ifndef HYBRID /* FIND ME! */
struct erl_off_heap_header* oh;
int done_gc = 0;
-#endif
#define INSIDE(a) (start <= (a) && (a) < end)
/*
* Pick up limits for the module.
*/
- start = modp->old_code;
- end = (BeamInstr *)((char *)start + modp->old_code_length);
+ start = modp->old.code;
+ end = (BeamInstr *)((char *)start + modp->old.code_length);
mod_start = (char *) start;
- mod_size = modp->old_code_length;
+ mod_size = modp->old.code_length;
/*
* Check if current instruction or continuation pointer points into module.
@@ -479,7 +777,6 @@ check_process_code(Process* rp, Module* modp)
* See if there are funs that refer to the old version of the module.
*/
-#ifndef HYBRID /* FIND ME! */
rescan:
for (oh = MSO(rp).first; oh; oh = oh->next) {
if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
@@ -505,7 +802,6 @@ check_process_code(Process* rp, Module* modp)
}
}
}
-#endif
/*
* See if there are constants inside the module referenced by the process.
@@ -564,10 +860,10 @@ check_process_code(Process* rp, Module* modp)
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
(void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- literals = (Eterm *) modp->old_code[MI_LITERALS_START];
- lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals;
+ literals = (Eterm *) modp->old.code[MI_LITERALS_START];
+ lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
oh = (struct erl_off_heap_header *)
- modp->old_code[MI_LITERALS_OFF_HEAP];
+ modp->old.code[MI_LITERALS_OFF_HEAP];
erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
@@ -626,53 +922,82 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
#undef in_area
-
-static int
-purge_module(int module)
+BIF_RETTYPE purge_module_1(BIF_ALIST_1)
{
+ ErtsCodeIndex code_ix;
BeamInstr* code;
BeamInstr* end;
Module* modp;
+ int is_blocking = 0;
+ Eterm ret;
- /*
- * Correct module?
- */
-
- if ((modp = erts_get_module(make_atom(module))) == NULL) {
- return -2;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
}
- /*
- * Any code to purge?
- */
- if (modp->old_code == 0) {
- return -1;
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1);
}
+ code_ix = erts_active_code_ix();
+
/*
- * Unload any NIF library
+ * Correct module?
*/
- if (modp->old_nif != NULL) {
- erts_unload_nif(modp->old_nif);
- modp->old_nif = NULL;
+
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
}
+ else {
+ erts_rwlock_old_code(code_ix);
- /*
- * Remove the old code.
- */
- ASSERT(erts_total_code_size >= modp->old_code_length);
- erts_total_code_size -= modp->old_code_length;
- code = modp->old_code;
- end = (BeamInstr *)((char *)code + modp->old_code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
- decrement_refc(code);
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->old_code = NULL;
- modp->old_code_length = 0;
- modp->old_catches = BEAM_CATCHES_NIL;
- remove_from_address_table(code);
- return 0;
+ /*
+ * Any code to purge?
+ */
+ if (modp->old.code == 0) {
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ }
+ else {
+ /*
+ * Unload any NIF library
+ */
+ if (modp->old.nif != NULL) {
+ /* ToDo: Do unload nif without blocking */
+ erts_rwunlock_old_code(code_ix);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ erts_rwlock_old_code(code_ix);
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
+
+ /*
+ * Remove the old code.
+ */
+ ASSERT(erts_total_code_size >= modp->old.code_length);
+ erts_total_code_size -= modp->old.code_length;
+ code = modp->old.code;
+ end = (BeamInstr *)((char *)code + modp->old.code_length);
+ erts_cleanup_funs_on_purge(code, end);
+ beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
+ code_ix);
+ decrement_refc(code);
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->old.code = NULL;
+ modp->old.code_length = 0;
+ modp->old.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
+ ERTS_BIF_PREP_RET(ret, am_true);
+ }
+ erts_rwunlock_old_code(code_ix);
+ }
+ if (is_blocking) {
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+ erts_release_code_write_permission();
+ return ret;
}
static void
@@ -692,92 +1017,48 @@ decrement_refc(BeamInstr* code)
}
}
-static void
-remove_from_address_table(BeamInstr* code)
-{
- int i;
-
- for (i = 0; i < num_loaded_modules; i++) {
- if (modules[i].start == code) {
- num_loaded_modules--;
- while (i < num_loaded_modules) {
- modules[i] = modules[i+1];
- i++;
- }
- mid_module = &modules[num_loaded_modules/2];
- return;
- }
- }
- ASSERT(0); /* Not found? */
-}
-
-
/*
- * Move code from current to old.
+ * Move code from current to old and null all export entries for the module
*/
-static void
-delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
-{
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#ifdef ERTS_SMP
- if (c_p && c_p_locks)
- erts_proc_lc_chk_only_proc_main(c_p);
- else
-#endif
- erts_lc_check_exact(NULL, 0);
-#endif
-
- /*
- * Clear breakpoints if any
- */
- if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
- if (c_p && c_p_locks)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- erts_clear_module_break(modp);
- modp->code[MI_NUM_BREAKPOINTS] = 0;
- erts_smp_thr_progress_unblock();
- if (c_p && c_p_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
- }
- modp->old_code = modp->code;
- modp->old_code_length = modp->code_length;
- modp->old_catches = modp->catches;
- modp->old_nif = modp->nif;
- modp->code = NULL;
- modp->code_length = 0;
- modp->catches = BEAM_CATCHES_NIL;
- modp->nif = NULL;
-}
-
-
-/* null all references on the export table for the module called with the
- atom index below */
-
static void
-delete_export_references(Eterm module)
+delete_code(Module* modp)
{
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ Eterm module = make_atom(modp->module);
int i;
- ASSERT(is_atom(module));
-
- for (i = 0; i < export_list_size(); i++) {
- Export *ep = export_list(i);
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->code[0] == module)) {
- if (ep->address == ep->code+3 &&
- (ep->code[3] == (BeamInstr) em_apply_bif)) {
- continue;
+ if (ep->addressv[code_ix] == ep->code+3) {
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(modp->curr.num_traced_exports > 0);
+ erts_clear_export_break(modp, ep->code+3);
+ }
+ else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
+ || !erts_initialized);
}
- ep->address = ep->code+3;
+ ep->addressv[code_ix] = ep->code+3;
ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
}
}
+
+ ASSERT(modp->curr.num_breakpoints == 0);
+ ASSERT(modp->curr.num_traced_exports == 0);
+ modp->old = modp->curr;
+ modp->curr.code = NULL;
+ modp->curr.code_length = 0;
+ modp->curr.catches = BEAM_CATCHES_NIL;
+ modp->curr.nif = NULL;
}
-
+
Eterm
beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
@@ -789,11 +1070,10 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->code != NULL && modp->old_code != NULL) {
+ if (modp->curr.code != NULL && modp->old.code != NULL) {
return am_not_purged;
- } else if (modp->old_code == NULL) { /* Make the current version old. */
- delete_code(c_p, c_p_locks, modp);
- delete_export_references(module);
+ } else if (modp->old.code == NULL) { /* Make the current version old. */
+ delete_code(modp);
}
return NIL;
}
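
The beam_bp.c rewrite that follows drops the doubly linked ring of per-scheduler BpData records in favour of a single GenericBp pointer stored at pc[-4]. That object holds two GenericBpData slots (active and staging, selected by erts_active_bp_index / erts_staging_bp_index in the same way as the code index above) plus a flag word built from the ERTS_BPF_* bits. A rough sketch of that layout, with simplified and partly hypothetical field types (the real structs live in beam_bp.h):

typedef struct {
    unsigned flags;      /* ERTS_BPF_* bits: local/global/meta trace, count, time, debug */
    void    *local_ms;   /* match spec for local/global call trace                       */
    void    *meta_ms;    /* match spec for meta trace                                    */
    /* plus ref-counted count, time and meta_pid objects, one per facility              */
} GenericBpData_sketch;

typedef struct {
    GenericBpData_sketch data[2];  /* [active] read by schedulers, [staging] edited     */
                                   /* under the code write permission, then swapped by  */
                                   /* erts_commit_staged_bp()                           */
    unsigned long orig_instr;      /* instruction the i_generic_breakpoint op replaces  */
} GenericBp_sketch;

erts_generic_breakpoint() in the diff below tests the active slot's flag bits and, when nothing applies, falls through to orig_instr, so an installed but disabled breakpoint costs one indirection.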
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 34628b4708..2168d410b5 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -44,67 +44,34 @@
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-/*
-** Doubly linked ring macros
-*/
-
-#define BpInit(a,i) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (a); \
- (a)->prev = (a); \
-} while (0)
-
-#define BpSpliceNext(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->next->prev; \
- c->next->prev = d->next->prev; \
- d->next->prev = e; \
- e = c->next; \
- c->next = d->next; \
- d->next = e; \
-} while (0)
-
-#define BpSplicePrev(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->prev->next; \
- c->prev->next = d->prev->next; \
- d->prev->next = e; \
- e = c->prev; \
- c->prev = d->prev; \
- d->prev = e; \
-} while (0)
-
-#ifdef DEBUG
-# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a))
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define BpSingleton(a) ((a)->next == (a))
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
#endif
-#define BpInitAndSpliceNext(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->prev = (b); \
- (b)->next->prev = (a); \
- (a)->next = (b)->next; \
- (b)->next = (a); \
-} while (0)
+#define ERTS_BPF_LOCAL_TRACE 0x01
+#define ERTS_BPF_META_TRACE 0x02
+#define ERTS_BPF_COUNT 0x04
+#define ERTS_BPF_COUNT_ACTIVE 0x08
+#define ERTS_BPF_DEBUG 0x10
+#define ERTS_BPF_TIME_TRACE 0x20
+#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
+#define ERTS_BPF_GLOBAL_TRACE 0x80
-#define BpInitAndSplicePrev(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (b); \
- (b)->prev->next = (a); \
- (a)->prev = (b)->prev; \
- (b)->prev = (a); \
-} while (0)
+#define ERTS_BPF_ALL 0xFF
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-#define BREAK_IS_BIF (1)
-#define BREAK_IS_ERL (0)
-
+erts_smp_atomic32_t erts_active_bp_index;
+erts_smp_atomic32_t erts_staging_bp_index;
/* *************************************************************************
** Local prototypes
@@ -113,26 +80,30 @@ do { \
/*
** Helpers
*/
-
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-
-static int clear_break(Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
- BeamInstr break_op);
-
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid);
+static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static void set_function_break(BeamInstr *pc,
+ Binary *match_spec,
+ Uint break_flags,
+ enum erts_break_op count_op,
+ Eterm tracer_pid);
+
+static void clear_break(BpFunctions* f, Uint break_flags);
+static int clear_function_break(BeamInstr *pc, Uint break_flags);
+
+static BpDataTime* get_time_break(BeamInstr *pc);
+static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static void bp_time_diff(bp_data_time_item_t *item,
+ process_breakpoint_time_t *pbt,
+ Uint ms, Uint s, Uint us);
+
+static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_count_unref(BpCount* bcp);
+static void bp_time_unref(BpDataTime* bdt);
+static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
+static void uninstall_breakpoint(BeamInstr* pc);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da
static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static void bp_hash_delete(bp_time_hash_t *hash);
-
/* *************************************************************************
** External interfaces
*/
-erts_smp_spinlock_t erts_bp_lock;
-
void
erts_bp_init(void) {
- erts_smp_spinlock_init(&erts_bp_lock, "breakpoints");
+ erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
}
-int
-erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+
+void
+erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Uint max_funcs = 0;
+ int current;
+ int max_modules = module_code_size(code_ix);
+ int num_modules = 0;
+ Module* modp;
+ Module** module;
+ Uint i;
+
+ module = (Module **) Alloc(max_modules*sizeof(Module *));
+ num_modules = 0;
+ for (current = 0; current < max_modules; current++) {
+ modp = module_code(current, code_ix);
+ if (modp->curr.code) {
+ max_funcs += modp->curr.code[MI_NUM_FUNCTIONS];
+ module[num_modules++] = modp;
+ }
+ }
+
+ f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
+ i = 0;
+ for (current = 0; current < num_modules; current++) {
+ BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
+ BeamInstr* code;
+ Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS];
+ Uint fi;
+
+ if (specified > 0) {
+ if (mfa[0] != make_atom(module[current]->module)) {
+ /* Wrong module name */
+ continue;
+ }
+ }
+
+ for (fi = 0; fi < num_functions; fi++) {
+ Eterm* pc;
+ int wi;
+
+ code = code_base[MI_FUNCTIONS+fi];
+ ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ pc = code+5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ if (is_nil(code[3])) { /* Ignore BIF stub */
+ continue;
+ }
+ for (wi = 0;
+ wi < specified && (Eterm) code[2+wi] == mfa[wi];
+ wi++) {
+ /* Empty loop body */
+ }
+ if (wi == specified) {
+ /* Store match */
+ f->matching[i].pc = pc;
+ f->matching[i].mod = module[current];
+ i++;
+ }
+ }
+ }
+ f->matched = i;
+ Free(module);
}
-int
-erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+void
+erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ int i;
+ int num_exps = export_list_size(code_ix);
+ int ne;
+
+ f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
+ ne = 0;
+ for (i = 0; i < num_exps; i++) {
+ Export* ep = export_list(i, code_ix);
+ BeamInstr* pc;
+ int j;
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j < specified) {
+ continue;
+ }
+ pc = ep->code+3;
+ if (ep->addressv[code_ix] == pc) {
+ if ((*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) em_call_error_handler)) {
+ continue;
+ }
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ continue;
+ }
+
+ f->matching[ne].pc = pc;
+ f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ ne++;
+
+ }
+ f->matched = ne;
}
-/* set breakpoint data for on exported bif entry */
+void
+erts_bp_free_matched_functions(BpFunctions* f)
+{
+ Free(f->matching);
+}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+erts_consolidate_bp_data(BpFunctions* f, int local)
+{
+ BpFunction* fs = f->matching;
+ Uint i;
+ Uint n = f->matched;
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+
+ for (i = 0; i < n; i++) {
+ consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ }
}
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
- set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_consolidate_bif_bp_data(void)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ for (i = 0; i < BIF_SIZE; i++) {
+ Export *ep = bif_export[i];
+ consolidate_bp_data(0, ep->code+3, 0);
+ }
}
-void erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static void
+consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+ GenericBpData* src;
+ GenericBpData* dst;
+ Uint flags;
+
+ if (g == 0) {
+ return;
+ }
+
+ src = &g->data[erts_active_bp_ix()];
+ dst = &g->data[erts_staging_bp_ix()];
+
+ /*
+ * The contents of the staging area may be out of date.
+ * Decrement all reference pointers.
+ */
+
+ flags = dst->flags;
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ bp_meta_unref(dst->meta_pid);
+ MatchSetUnref(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ bp_count_unref(dst->count);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ bp_time_unref(dst->time);
+ }
+
+ /*
+ * If all flags are zero, deallocate all breakpoint data.
+ */
+
+ flags = dst->flags = src->flags;
+ if (flags == 0) {
+ if (modp) {
+ if (local) {
+ modp->curr.num_breakpoints--;
+ } else {
+ modp->curr.num_traced_exports--;
+ }
+ ASSERT(modp->curr.num_breakpoints >= 0);
+ ASSERT(modp->curr.num_traced_exports >= 0);
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ }
+ pc[-4] = 0;
+ Free(g);
+ return;
+ }
+
+ /*
+ * Copy the active data to the staging area (making it ready
+ * for the next time it will be used).
+ */
+
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ dst->local_ms = src->local_ms;
+ MatchSetRef(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ dst->meta_pid = src->meta_pid;
+ erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_ms = src->meta_ms;
+ MatchSetRef(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ dst->count = src->count;
+ erts_refc_inc(&dst->count->refc, 1);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ dst->time = src->time;
+ erts_refc_inc(&dst->time->refc, 1);
+ ASSERT(dst->time->hash);
+ }
}
-int
-erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
+void
+erts_commit_staged_bp(void)
+{
+ ErtsBpIndex staging = erts_staging_bp_ix();
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
}
-int
-erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
+void
+erts_install_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+ BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (*pc != br && g) {
+ Module* modp = f->matching[i].mod;
+
+ /*
+ * The breakpoint must be disabled in the active data
+ * (it will enabled later by switching bp indices),
+ * and enabled in the staging data.
+ */
+ ASSERT(g->data[erts_active_bp_ix()].flags == 0);
+ ASSERT(g->data[erts_staging_bp_ix()].flags != 0);
+
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that a write of an
+ * aligned word-size (or half-word) writes is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = br;
+ modp->curr.num_breakpoints++;
+ }
+ }
}
-int
-erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_uninstall_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ uninstall_breakpoint(pc);
+ }
}
-int
-erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_trace_breakpoint));
+static void
+uninstall_breakpoint(BeamInstr* pc)
+{
+ if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g->data[erts_active_bp_ix()].flags == 0) {
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that a write of an
+ * aligned word-size (or half-word) writes is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = g->orig_instr;
+ }
+ }
}
-int
-erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+void
+erts_set_trace_break(BpFunctions* f, Binary *match_spec)
+{
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
}
void
-erts_clear_mtrace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+{
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_debug_breakpoint));
+void
+erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+
+ set_function_break(pc, match_spec, flags, 0, NIL);
}
-int
-erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_count_breakpoint));
+void
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+{
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_time_breakpoint));
+void
+erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+{
+ set_function_break(pc, NULL,
+ ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
}
-int
-erts_clear_break(Eterm mfa[3], int specified) {
+void
+erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_set_debug_break(BpFunctions* f) {
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+}
+
+void
+erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_clear_trace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_LOCAL_TRACE);
+}
+
+void
+erts_clear_call_trace_bif(BeamInstr *pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+
+ if (g) {
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ if (g->data[erts_staging_bp_ix()].flags & flags) {
+ clear_function_break(pc, flags);
+ }
+ }
+}
+
+void
+erts_clear_mtrace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_mtrace_bif(BeamInstr *pc)
+{
+ clear_function_break(pc, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_debug_break(BpFunctions* f)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified, 0);
+ clear_break(f, ERTS_BPF_DEBUG);
+}
+
+void
+erts_clear_count_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
+}
+
+void
+erts_clear_time_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_clear_all_breaks(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_ALL);
}
int
erts_clear_module_break(Module *modp) {
+ BeamInstr** code_base;
+ Uint n;
+ Uint i;
+
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
- return clear_module_break(modp, NULL, 0, 0);
+ code_base = (BeamInstr **) modp->curr.code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ clear_function_break(pc, ERTS_BPF_ALL);
+ }
+
+ erts_commit_staged_bp();
+
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ uninstall_breakpoint(pc);
+ consolidate_bp_data(modp, pc, 1);
+ ASSERT(pc[-4] == 0);
+ }
+ return n;
}
-int
-erts_clear_function_break(Module *modp, BeamInstr *pc) {
+void
+erts_clear_export_break(Module* modp, BeamInstr* pc)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(modp);
- return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
+
+ clear_function_break(pc, ERTS_BPF_ALL);
+ erts_commit_staged_bp();
+ *pc = (BeamInstr) 0;
+ consolidate_bp_data(modp, pc, 0);
+ ASSERT(pc[-4] == 0);
}
+BeamInstr
+erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags;
+ ErtsBpIndex ix = erts_active_bp_ix();
+
+ g = (GenericBp *) I[-4];
+ bp = &g->data[ix];
+ bp_flags = bp->flags;
+ ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE) &&
+ !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE);
+ if (bp_flags == 0) { /* Quick exit */
+ return g->orig_instr;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
+ ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ }
+
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm old_pid;
+ Eterm new_pid;
+
+ old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
+ if (new_pid != old_pid) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
+ erts_smp_atomic_inc_nob(&bp->count->acount);
+ }
+
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ Eterm w;
+ erts_trace_time_call(c_p, I, bp->time);
+ w = (BeamInstr) *c_p->cp;
+ if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
+ w == (BeamInstr) BeamOp(op_return_trace) ||
+ w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ Eterm* E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < c_p->htop) {
+ (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ }
+ E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = beam_return_time_trace;
+ c_p->stop = E;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_DEBUG) {
+ return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ } else {
+ return g->orig_instr;
+ }
+}
/*
- * SMP NOTE: Process p may have become exiting on return!
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
*/
-BeamInstr
-erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid) {
- Eterm tpid1, tpid2;
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+Eterm
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
+{
+ Eterm result;
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ BeamInstr *cp = p->cp;
+ GenericBp* g;
+ GenericBpData* bp = NULL;
+ Uint bp_flags = 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ bp = &g->data[erts_active_bp_ix()];
+ bp_flags = bp->flags;
+ }
- ASSERT(bds);
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
- ASSERT(bdt);
- bdt = (BpDataTrace *) bdt->next;
- ASSERT(bdt);
- ASSERT(ret_flags);
- ASSERT(tracer_pid);
-
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
-
- *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- 1, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
- }
- bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
- return bdt->orig_instr;
+ /*
+ * Make continuation pointer OK, it is not during direct BIF calls,
+ * but it is correct during apply of bif.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
+ flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ local, &ERTS_TRACER_PROC(p));
+ }
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm tpid1, tpid2;
+
+ tpid1 = tpid2 =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ 0, &tpid2);
+ meta_tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ }
+ }
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+ erts_trace_time_call(p, pc, bp->time);
+ }
+
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, args, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ if (*cp == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &ERTS_TRACER_PROC(p));
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ UnUseTmpHeapNoproc(3);
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
+}
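erts_bif_trace() above has a simple wrap shape: emit the call trace, invoke the real BIF through a function pointer, then emit either a return trace or an exception trace depending on whether the call produced a value. A stripped-down illustration of that shape, where traced_call() and half() are made-up stand-ins for the BIF machinery (a sketch, not ERTS code):

#include <stdio.h>

typedef int (*bif_fn)(int);

/* Wrap a call with before/after hooks, as erts_bif_trace() does. */
static int traced_call(bif_fn real, int arg)
{
    printf("call(%d)\n", arg);                 /* "call" trace */
    int result = real(arg);
    if (result < 0)
        printf("exception(%d)\n", result);     /* exception trace */
    else
        printf("return(%d)\n", result);        /* return trace */
    return result;
}

static int half(int x) { return (x % 2) ? -1 : x / 2; }

int main(void)
{
    traced_call(half, 8);   /* call(8)  return(4)     */
    traced_call(half, 7);   /* call(7)  exception(-1) */
    return 0;
}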
+
+static Eterm
+do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid)
+{
+ Eterm* cpp;
+ int return_to_trace = 0;
+ BeamInstr w;
+ BeamInstr *cp_save;
+ Uint32 flags;
+ Uint need = 0;
+ Eterm* E = c_p->stop;
+
+ w = *c_p->cp;
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp = &E[2];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp = &E[0];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp += 3;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp += 1;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ cp_save = c_p->cp;
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ if (cpp) {
+ c_p->cp = cp_save;
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ need += 1;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ need += 3;
+ }
+ if (need) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - need < c_p->htop) {
+ (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ E = c_p->stop;
+ }
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) {
+ E -= 1;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ E[0] = make_cp(c_p->cp);
+ c_p->cp = (BeamInstr *) beam_return_to_trace;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(am_true == tracer_pid ||
+ is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ E[2] = make_cp(c_p->cp);
+ E[1] = tracer_pid;
+ E[0] = make_cp(I - 3); /* We ARE at the beginning of an
+ instruction,
+ the funcinfo is above i. */
+ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
+ beam_exception_trace : beam_return_trace;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ c_p->stop = E;
+ return tracer_pid;
}
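When MATCH_SET_RX_TRACE is set, do_call_trace() above reserves stack space (garbage collecting first if needed) and pushes a three-word frame — function info, tracer, and the saved continuation pointer — before redirecting cp to the return/exception trace handler. A toy model of that frame layout, using a plain word array for the downward-growing stack and made-up marker values (a sketch, not ERTS code):

#include <stdio.h>

#define STACK_WORDS 64

static unsigned long stk[STACK_WORDS];
static unsigned long *sp = stk + STACK_WORDS;   /* stack grows downward */
static unsigned long cp = 0x1000;               /* caller's return address */

static void push_return_trace_frame(unsigned long mfa, unsigned long tracer)
{
    sp -= 3;
    sp[2] = cp;        /* original return address, restored by the handler */
    sp[1] = tracer;    /* who to send the return trace message to */
    sp[0] = mfa;       /* which function the frame belongs to */
    cp = 0x2000;       /* pretend address of the return_trace handler */
}

int main(void)
{
    push_return_trace_frame(0xAAA, 0xBBB);
    printf("cp=%#lx frame=[%#lx %#lx %#lx]\n", cp, sp[0], sp[1], sp[2]);
    return 0;
}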
+void
+erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+ ASSERT(c_p);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
-/*
- * SMP NOTE: Process p may have become exiting on return!
- */
-Uint32
-erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
- Eterm *tracer_pid) {
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+ pbt = ERTS_PROC_GET_CALL_TIME(c_p);
+ get_sys_now(&ms, &s, &us);
- ASSERT(tracer_pid);
- if (bds) {
- Eterm tpid1, tpid2;
- Uint32 flags;
- bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ if (pbt == 0) {
+ /* First call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt);
+ } else {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = c_p->common.id;
+ sitem.count = 0;
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
- flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- local, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
}
- return flags;
}
- *tracer_pid = NIL;
- return 0;
+
+ /* Add count to this code */
+ sitem.pid = c_p->common.id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = I;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
}
+void
+erts_trace_time_return(Process *p, BeamInstr *pc)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+ * ack diff (t1, t0) to item
+ */
+
+ if (pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->common.id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+}
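erts_trace_time_call() and erts_trace_time_return() above share one accounting rule: at each call or return event, the time elapsed since the previous event is credited to the breakpoint recorded in the process's psd, and the psd is then re-stamped with the current location and timestamp. A simplified single-threaded model of that rule (the real code keys its accumulators per scheduler and per pid; the names below are made up — a sketch, not ERTS code):

#include <stdio.h>

struct proc_bp_time { int have; int pc; double t0; };   /* per-process record */

static double acc[8];   /* accumulated time per (toy) function id */

static void time_event(struct proc_bp_time *pbt, int pc, double now)
{
    if (pbt->have)
        acc[pbt->pc] += now - pbt->t0;   /* credit the previous breakpoint */
    pbt->have = 1;
    pbt->pc = pc;                        /* remember where we are now */
    pbt->t0 = now;                       /* ... and when we got here */
}

int main(void)
{
    struct proc_bp_time pbt = {0};
    time_event(&pbt, 1, 0.0);   /* call f1                      */
    time_event(&pbt, 2, 2.5);   /* f1 calls f2: f1 credited 2.5 */
    time_event(&pbt, 1, 3.0);   /* f2 returns:  f2 credited 0.5 */
    printf("f1=%.1f f2=%.1f\n", acc[1], acc[2]);
    return 0;
}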
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint));
-
- if (bdt) {
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ GenericBpData* bp = check_break(pc, flags);
+
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->local_ms;
}
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
- }
- return !0;
+ return 1;
}
return 0;
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
- if (bdt) {
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->meta_ms;
}
if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *tracer_pid_ret =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) {
}
int
-erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
- BpDataCount *bdc =
- (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
+erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
- if (bdc) {
+ if (bp) {
if (count_ret) {
- *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
+ *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ BpDataTime *bdt = get_time_break(pc);
if (bdt) {
if (retval) {
@@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
bp_hash_delete(&hash);
}
- return !0;
+ return 1;
}
return 0;
@@ -478,15 +1205,16 @@ erts_find_local_func(Eterm mfa[3]) {
BeamInstr* code_ptr;
Uint i,n;
- if ((modp = erts_get_module(mfa[0])) == NULL)
+ if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
return NULL;
- if ((code_base = (BeamInstr **) modp->code) == NULL)
+ if ((code_base = (BeamInstr **) modp->curr.code) == NULL)
return NULL;
n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
code_ptr = code_base[MI_FUNCTIONS+i];
ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]));
+ ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
+ is_nil((Eterm) code_ptr[2]));
if (mfa[1] == ((Eterm) code_ptr[3]) &&
((BeamInstr) mfa[2]) == code_ptr[4]) {
return code_ptr + 5;
@@ -654,7 +1382,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ pbdt = get_time_break(pbt->pc);
if (pbdt) {
get_sys_now(&ms,&s,&us);
bp_time_diff(&sitem, pbt, ms, s, us);
@@ -692,669 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
} /* pbt */
}
-/* call_time breakpoint
- * Accumulated times are added to the previous bp,
- * not the current one. The current one is saved
- * for future reference.
- * The previous breakpoint is stored in the process it self, the psd.
- * We do not need to store in a stack frame.
- * There is no need for locking, each thread has its own
- * area in each bp to save data.
- * Since we need to diffrentiate between processes for each bp,
- * every bp has a hash (per thread) to process-bp statistics.
- * - egil
- */
-
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
- Uint ms,s,us;
- process_breakpoint_time_t *pbt = NULL;
- bp_data_time_item_t sitem, *item = NULL;
- bp_time_hash_t *h = NULL;
- BpDataTime *pbdt = NULL;
-
- ASSERT(p);
- ASSERT(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&p->state));
-
- /* get previous timestamp and breakpoint
- * from the process psd */
-
- pbt = ERTS_PROC_GET_CALL_TIME(p);
- get_sys_now(&ms,&s,&us);
-
- switch(type) {
- /* get pbt
- * timestamp = t0
- * lookup bdt from code
- * set ts0 to pbt
- * add call count here?
- */
- case ERTS_BP_CALL_TIME_CALL:
- case ERTS_BP_CALL_TIME_TAIL_CALL:
-
- if (pbt) {
- ASSERT(pbt->pc);
- /* add time to previous code */
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->common.id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* if null then the breakpoint was removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- } else {
- /* first call of process to instrumented function */
- pbt = Alloc(sizeof(process_breakpoint_time_t));
- (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
- }
- /* add count to this code */
- sitem.pid = p->common.id;
- sitem.count = 1;
- sitem.s_time = 0;
- sitem.us_time = 0;
-
- /* this breakpoint */
- ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- break;
-
- case ERTS_BP_CALL_TIME_RETURN:
- /* get pbt
- * lookup bdt from code
- * timestamp = t1
- * get ts0 from pbt
- * get item from bdt->hash[bp_hash(p->common.id)]
- * ack diff (t1, t0) to item
- */
-
- if(pbt) {
- /* might have been removed due to
- * trace_pattern(false)
- */
- ASSERT(pbt->pc);
-
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->common.id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* beware, the trace_pattern might have been removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- }
- break;
- default :
- ASSERT(0);
- /* will never happen */
- break;
- }
-}
-
-
/* *************************************************************************
** Local helpers
*/
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid)
+static void
+set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
{
- Module *modp;
- int num_processed = 0;
- if (!specified) {
- /* Find and process all modules in the system... */
- int current;
- int last = module_code_size();
- for (current = 0; current < last; current++) {
- modp = module_code(current);
- ASSERT(modp != NULL);
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0])) != NULL) {
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- }
- return num_processed;
-}
-
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i,n;
+ Uint i;
+ Uint n;
- ASSERT(break_op);
- ASSERT(modp);
- code_base = (BeamInstr **) modp->code;
- if (code_base == NULL) {
- return 0;
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ set_function_break(pc, match_spec, break_flags,
+ count_op, tracer_pid);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
-
- num_processed +=
- set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
- break_op, count_op, tracer_pid);
- }
- }
- return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
-
- BeamInstr **code_base = NULL;
- BpData *bd, **r, ***rs;
- size_t size;
- Uint ix = 0;
-
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)modp->code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(modp == NULL);
+static void
+set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ g = (GenericBp *) pc[-4];
+ if (g == 0) {
+ int i;
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return;
+ }
+ g = Alloc(sizeof(GenericBp));
+ g->orig_instr = *pc;
+ for (i = 0; i < ERTS_NUM_BP_IX; i++) {
+ g->data[i].flags = 0;
+ }
+ pc[-4] = (BeamInstr) g;
}
+ bp = &g->data[ix];
/*
- * Currently no trace support for native code.
+ * If we are changing an existing breakpoint, clean up old data.
*/
- if (erts_is_native_break(pc)) {
- return 0;
- }
- /* Do not allow two breakpoints of the same kind */
- if ( (bd = is_break(pc, break_op))) {
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
- || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- Binary *old_match_spec;
-
- /* Update match spec and tracer */
- MatchSetRef(match_spec);
- ErtsSmpBPLock(bdt);
- old_match_spec = bdt->match_spec;
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- ErtsSmpBPUnlock(bdt);
- MatchSetUnref(old_match_spec);
- } else {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_aint_t count = 0;
- erts_aint_t res = 0;
-
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
-
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_stop) {
- count = erts_smp_atomic_read_nob(&bdc->acount);
- if (count >= 0) {
- while(1) {
- res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count);
- if ((res == count) || count < 0) break;
- count = res;
- }
- }
- } else {
- /* Reset call counter */
- erts_smp_atomic_set_nob(&bdc->acount, 0);
- }
-
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- if (count_op == erts_break_stop) {
- bdt->pause = 1;
- } else {
- bdt->pause = 0;
- for (i = 0; i < bdt->n; i++) {
- bp_hash_delete(&(bdt->hash[i]));
- bp_hash_init(&(bdt->hash[i]), 32);
- }
- }
- } else {
- ASSERT (! count_op);
- }
- }
- return 1;
- }
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
- size = sizeof(BpDataTrace);
- } else {
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataCount);
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataTime);
+ common = break_flags & bp->flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
+ } else if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ bp_meta_unref(bp->meta_pid);
+ } else if (common & ERTS_BPF_COUNT) {
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
- ASSERT(! count_op);
- ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
- size = sizeof(BpDataDebug);
+ bp->flags |= ERTS_BPF_COUNT_ACTIVE;
+ erts_smp_atomic_set_nob(&bp->count->acount, 0);
}
- }
- rs = (BpData ***) (pc-4);
- if (! *rs) {
- size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
- *rs = (BpData **) Alloc(ssize);
- sys_memzero(*rs, ssize);
- }
-
- r = &((*rs)[0]);
-
- if (! *r) {
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
- /* First breakpoint; create singleton ring */
- bd = Alloc(size);
- BpInit(bd, *pc);
- *r = bd;
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else {
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
- *pc == (BeamInstr) em_apply_bif);
- if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp must be last, so if it is also first;
- * it must be singleton. */
- ASSERT(BpSingleton(*r));
- /* Insert new bp first in the ring, i.e second to last. */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, *pc, *r);
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else if ((*r)->prev->orig_instr
- == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp last in the ring; insert new second to last. */
- bd = Alloc(size);
- BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
- (*r)->prev->orig_instr = break_op;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
+ } else if (common & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt = bp->time;
+ Uint i = 0;
+
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
- /* Just insert last in the ring */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, (*r)->orig_instr, *r);
- (*r)->orig_instr = break_op;
- *r = bd;
+ bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
}
- }
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
}
- bd->this_instr = break_op;
- /* Init the bp type specific data */
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
-
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- bdt->pause = 0;
- bdt->n = erts_no_schedulers;
- bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+ /*
+ * Initialize the new breakpoint data.
+ */
+ if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetRef(match_spec);
+ bp->local_ms = match_spec;
+ } else if (break_flags & ERTS_BPF_META_TRACE) {
+ BpMetaPid* bmp;
+ MatchSetRef(match_spec);
+ bp->meta_ms = match_spec;
+ bmp = Alloc(sizeof(BpMetaPid));
+ erts_refc_init(&bmp->refc, 1);
+ erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
+ bp->meta_pid = bmp;
+ } else if (break_flags & ERTS_BPF_COUNT) {
+ BpCount* bcp;
+
+ ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
+ bcp = Alloc(sizeof(BpCount));
+ erts_refc_init(&bcp->refc, 1);
+ erts_smp_atomic_init_nob(&bcp->acount, 0);
+ bp->count = bcp;
+ } else if (break_flags & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt;
+ int i;
+
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
+ bdt = Alloc(sizeof(BpDataTime));
+ erts_refc_init(&bdt->refc, 1);
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
}
- } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_smp_atomic_init_nob(&bdc->acount, 0);
+ bp->time = bdt;
}
- if (bif == BREAK_IS_ERL) {
- ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
+ bp->flags |= break_flags;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+}
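set_function_break() above only ever writes g->data[erts_staging_bp_ix()]; running code keeps reading g->data[erts_active_bp_ix()] until the staged slot is committed by flipping the index. A minimal double-buffer sketch of that idea using C11 atomics, with two slots like ERTS_NUM_BP_IX and made-up names (a sketch, not ERTS code):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_IX 2

struct bp_data { unsigned flags; };
struct bp { struct bp_data data[NUM_IX]; };

static atomic_uint active_ix;   /* slot read by executing code */

static unsigned staging_ix(void) { return (atomic_load(&active_ix) + 1) % NUM_IX; }

static void commit(void)        /* make the staged slot the active one */
{
    atomic_store(&active_ix, staging_ix());
}

int main(void)
{
    struct bp b = {{{0}, {0}}};
    b.data[staging_ix()].flags = 1;   /* stage a breakpoint: readers unaffected */
    commit();                         /* flip the index: readers now see it */
    printf("active flags = %u\n", b.data[atomic_load(&active_ix)].flags);
    return 0;
}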
+
+static void
+clear_break(BpFunctions* f, Uint break_flags)
+{
+ Uint i;
+ Uint n;
+
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ clear_function_break(pc, break_flags);
}
- return 1;
}
-static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op)
+static int
+clear_function_break(BeamInstr *pc, Uint break_flags)
{
- int num_processed = 0;
- Module *modp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
- if (!specified) {
- /* Iterate over all modules */
- int current;
- int last = module_code_size();
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- for (current = 0; current < last; current++) {
- modp = module_code(current);
- ASSERT(modp != NULL);
- num_processed += clear_module_break(modp, mfa, specified, break_op);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0])) != NULL) {
- num_processed +=
- clear_module_break(modp, mfa, specified, break_op);
- }
+ if ((g = (GenericBp *) pc[-4]) == 0) {
+ return 1;
}
- return num_processed;
-}
-static int clear_module_break(Module *m, Eterm mfa[3], int specified,
- BeamInstr break_op) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i;
- BeamInstr n;
-
- ASSERT(m);
- code_base = (BeamInstr **) m->code;
- if (code_base == NULL) {
- return 0;
+ bp = &g->data[ix];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ common = bp->flags & break_flags;
+ bp->flags &= ~break_flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr + 5;
-
- num_processed +=
- clear_function_break(m, pc, BREAK_IS_ERL, break_op);
- }
+ if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ }
+ if (common & ERTS_BPF_COUNT) {
+ ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
+ bp_count_unref(bp->count);
+ }
+ if (common & ERTS_BPF_TIME_TRACE) {
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
+ bp_time_unref(bp->time);
}
- return num_processed;
-}
-static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
- BpData *bd;
- Uint ix = 0;
- BeamInstr **code_base = NULL;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return 1;
+}
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)m->code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(m == NULL);
+static void
+bp_meta_unref(BpMetaPid* bmp)
+{
+ if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
+ Free(bmp);
}
+}
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(pc)) {
- return 0;
+static void
+bp_count_unref(BpCount* bcp)
+{
+ if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
+ Free(bcp);
}
+}
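bp_meta_unref() and bp_count_unref() above, and bp_time_unref() below, all follow the same dec-and-test rule: whoever drops the last reference frees the shared block. A minimal version of that pattern with C11 atomics and made-up counted_* names (a sketch, not ERTS code; allocation-failure handling omitted):

#include <stdatomic.h>
#include <stdlib.h>

struct counted { atomic_long refc; /* ... payload ... */ };

static struct counted *counted_new(void)
{
    struct counted *p = malloc(sizeof *p);
    atomic_init(&p->refc, 1);                 /* creator holds one reference */
    return p;
}

static void counted_ref(struct counted *p)   { atomic_fetch_add(&p->refc, 1); }

static void counted_unref(struct counted *p)
{
    if (atomic_fetch_sub(&p->refc, 1) == 1)   /* we dropped the last reference */
        free(p);
}

int main(void)
{
    struct counted *p = counted_new();   /* refc == 1 */
    counted_ref(p);                      /* refc == 2 */
    counted_unref(p);                    /* refc == 1, still alive */
    counted_unref(p);                    /* refc == 0, freed here */
    return 0;
}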
- while ( (bd = is_break(pc, break_op))) {
- /* Remove all breakpoints of this type.
- * There should be only one of each type,
- * but break_op may be 0 which matches any type.
+static void
+bp_time_unref(BpDataTime* bdt)
+{
+ if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t* item = NULL;
+ process_breakpoint_time_t* pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
*/
- BeamInstr op;
- BpData ***rs = (BpData ***) (pc - 4);
- BpData **r = NULL;
-
-#ifdef DEBUG
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- ASSERT((*rs)[ix] == (*rs)[0]);
- }
-#endif
-
- r = &((*rs)[0]);
-
- ASSERT(*r);
- /* Find opcode for this breakpoint */
- if (break_op) {
- op = break_op;
- } else {
- if (bd == (*r)->next) {
- /* First breakpoint in ring */
- op = *pc;
- } else {
- op = bd->prev->orig_instr;
- }
- }
- if (BpSingleton(bd)) {
- ASSERT(*r == bd);
- /* Only one breakpoint to remove */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- Free(*rs);
- *rs = NULL;
- } else {
- BpData *bd_prev = bd->prev;
-
- BpSpliceNext(bd, bd_prev);
- ASSERT(BpSingleton(bd));
- if (bd == *r) {
- /* We removed the last breakpoint in the ring */
- *r = bd_prev;
- bd_prev->orig_instr = bd->orig_instr;
- } else if (bd_prev == *r) {
- /* We removed the first breakpoint in the ring */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- } else {
- bd_prev->orig_instr = bd->orig_instr;
- }
- }
- if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- MatchSetUnref(bdt->match_spec);
- }
- if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
- Uint j = 0;
- Process *h_p = NULL;
- bp_data_time_item_t *item = NULL;
- process_breakpoint_time_t *pbt = NULL;
-
- /* remove all psd associated with the hash
- * and then delete the hash.
- * ... sigh ...
- */
- for( i = 0; i < bdt->n; ++i) {
- if (bdt->hash[i].used) {
- for (j = 0; j < bdt->hash[i].n; ++j) {
- item = &(bdt->hash[i].item[j]);
- if (item->pid != NIL) {
- h_p = erts_proc_lookup(item->pid);
- if (h_p) {
- pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
- if (pbt) {
- Free(pbt);
- }
+ for (i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = erts_pid2proc(NULL, 0, item->pid,
+ ERTS_PROC_LOCK_MAIN);
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p,
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
+ if (pbt) {
+ Free(pbt);
}
+ erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
- bp_hash_delete(&(bdt->hash[i]));
}
- Free(bdt->hash);
- bdt->hash = NULL;
- bdt->n = 0;
+ bp_hash_delete(&(bdt->hash[i]));
}
- Free(bd);
- if (bif == BREAK_IS_ERL) {
- ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
- --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
- }
- if (*rs) {
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
- }
- }
- } /* while bd != NULL */
- return 1;
+ Free(bdt->hash);
+ Free(bdt);
+ }
}
-
-
-/*
-** Searches (linear forward) the breakpoint ring for a specified opcode
-** and returns a pointer to the breakpoint data structure or NULL if
-** not found. If the specified opcode is 0, the last breakpoint is
-** returned. The program counter must point to the first executable
-** (breakpoint) instruction of the function.
-*/
-
-BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
- return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static BpDataTime*
+get_time_break(BeamInstr *pc)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ return bp ? bp->time : 0;
}
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (! erts_is_native_break(pc)) {
- BpData **rs = (BpData **) pc[-4];
- BpData *bd = NULL, *ebd = NULL;
-
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[bp_sched2ix_proc(p)];
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- return bd;
- }
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
- }
- bd = bd->next;
- }
- }
- return NULL;
-}
+static GenericBpData*
+check_break(BeamInstr *pc, Uint break_flags)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
- BpData **rs;
- BpData *bd = NULL, *ebd = NULL;
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
-
if (erts_is_native_break(pc)) {
- return NULL;
- }
- rs = (BpData **) pc[-4];
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[erts_bp_sched2ix()];
- ASSERT(bd);
- if ( (break_op == 0) || (bd->this_instr == break_op)) {
- return bd;
+ return 0;
}
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
+ if (g) {
+ GenericBpData* bp = &g->data[erts_active_bp_ix()];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ if (bp->flags & break_flags) {
+ return bp;
}
- bd = bd->next;
}
- return NULL;
+ return 0;
}
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 167069552f..28aaaa462a 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -25,77 +25,6 @@
#include "erl_vm.h"
#include "global.h"
-
-
-/* A couple of gotchas:
- *
- * The breakpoint structure from BeamInstr,
- * In beam_emu where the instruction counter pointer, I (or pc),
- * points to the *current* instruction. At that time, if the instruction
- * is a breakpoint instruction the pc looks like the following,
- *
- * I[-5] | op_i_func_info_IaaI | scheduler specific entries
- * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
- * I[-3] | Tagged Module | | |
- * I[-2] | Tagged Function | V V
- * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
- * I[0] | The bp instruction | ^ * the bp wheel * |
- * |------------------------------
- *
- * Common struct to all bp_data_*
- *
- * 1) The type of bp_data structure in the ring is deduced from the
- * orig_instr field of the structure _before_ in the ring, except for
- * the first structure in the ring that has its instruction in
- * pc[0] of the code to execute.
- * This is valid as long as you don't search for the function while it is
- * being executed by something else. Or is in the middle of its rotation for
- * any other reason.
- * A key, the bp beam instruction, is included for this reason.
- *
- * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
- * breakpoints are being executed.
- *
- * So, as an example, when a breakpointed function starts to execute,
- * the first instruction that is a breakpoint instruction at pc[0] finds
- * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
- * to the correct bp_data type.
-*/
-
-typedef struct bp_data {
- struct bp_data *next; /* Doubly linked ring pointers */
- struct bp_data *prev; /* -"- */
- BeamInstr orig_instr; /* The original instruction to execute */
- BeamInstr this_instr; /* key */
-} BpData;
-/*
-** All the following bp_data_.. structs must begin the same way
-*/
-
-typedef struct bp_data_trace {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Binary *match_spec;
- Eterm tracer_pid;
-} BpDataTrace;
-
-typedef struct bp_data_debug {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
-} BpDataDebug;
-
-typedef struct bp_data_count { /* Call count */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- erts_smp_atomic_t acount;
-} BpDataCount;
-
typedef struct {
Eterm pid;
Sint count;
@@ -110,13 +39,9 @@ typedef struct {
} bp_time_hash_t;
typedef struct bp_data_time { /* Call time */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Uint pause;
- Uint n;
- bp_time_hash_t *hash;
+ Uint n;
+ bp_time_hash_t *hash;
+ erts_refc_t refc;
} BpDataTime;
typedef struct {
@@ -126,64 +51,42 @@ typedef struct {
BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
-extern erts_smp_spinlock_t erts_bp_lock;
+typedef struct {
+ erts_smp_atomic_t acount;
+ erts_refc_t refc;
+} BpCount;
+
+typedef struct {
+ erts_smp_atomic_t pid;
+ erts_refc_t refc;
+} BpMetaPid;
+
+typedef struct generic_bp_data {
+ Uint flags;
+ Binary* local_ms; /* Match spec for local call trace */
+ Binary* meta_ms; /* Match spec for meta trace */
+ BpMetaPid* meta_pid; /* Meta trace pid */
+ BpCount* count; /* For call count */
+ BpDataTime* time; /* For time trace */
+} GenericBpData;
+
+#define ERTS_NUM_BP_IX 2
+
+typedef struct generic_bp {
+ BeamInstr orig_instr;
+ GenericBpData data[ERTS_NUM_BP_IX];
+} GenericBp;
#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#define ERTS_BP_CALL_TIME_CALL (0)
-#define ERTS_BP_CALL_TIME_RETURN (1)
-#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
-
-#ifdef ERTS_SMP
-#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
-#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
-#else
-#define ErtsSmpBPLock(BDC)
-#define ErtsSmpBPUnlock(BDC)
-#endif
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
-#define ErtsCountBreak(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpDataCount *bdc = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- erts_aint_t count = 0; \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bdc = (BpDataCount *) bds[ix]; \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- bds[ix] = (BpData *) bdc; \
- count = erts_smp_atomic_read_nob(&bdc->acount); \
- if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \
- *(instr_result) = bdc->orig_instr; \
-} while (0)
-
-#define ErtsBreakSkip(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpData *bd = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bd = bds[ix]; \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- bds[ix] = bd; \
- *(instr_result) = bd->orig_instr; \
-} while (0)
-
enum erts_break_op{
erts_break_nop = 0, /* Must be false */
erts_break_set = !0, /* Must be true */
@@ -191,7 +94,17 @@ enum erts_break_op{
erts_break_stop
};
+typedef Uint32 ErtsBpIndex;
+typedef struct {
+ BeamInstr* pc;
+ Module* mod;
+} BpFunction;
+
+typedef struct {
+ Uint matched; /* Number matched */
+ BpFunction* matching; /* Matching functions */
+} BpFunctions;
/*
** Function interface exported from beam_bp.c
@@ -199,49 +112,66 @@ enum erts_break_op{
void erts_bp_init(void);
-int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid);
-int erts_clear_trace_break(Eterm mfa[3], int specified);
-int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+void erts_prepare_bp_staging(void);
+void erts_commit_staged_bp(void);
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
+
+void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_free_matched_functions(BpFunctions* f);
+
+void erts_install_breakpoints(BpFunctions* f);
+void erts_uninstall_breakpoints(BpFunctions* f);
+void erts_consolidate_bp_data(BpFunctions* f, int local);
+void erts_consolidate_bif_bp_data(void);
+
+void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
+void erts_clear_trace_break(BpFunctions *f);
+
+void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+
+void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
Eterm tracer_pid);
-int erts_clear_mtrace_break(Eterm mfa[3], int specified);
+void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_bif(BeamInstr *pc);
-int erts_set_debug_break(Eterm mfa[3], int specified);
-int erts_clear_debug_break(Eterm mfa[3], int specified);
-int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_count_break(Eterm mfa[3], int specified);
+void erts_set_debug_break(BpFunctions *f);
+void erts_clear_debug_break(BpFunctions *f);
+void erts_set_count_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_count_break(BpFunctions *f);
-int erts_clear_break(Eterm mfa[3], int specified);
+
+void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-int erts_clear_function_break(Module *modp, BeamInstr *pc);
+void erts_clear_export_break(Module *modp, BeamInstr* pc);
+BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
- int local, Eterm *tracer_pid);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, BeamInstr* pc);
void erts_schedule_time_break(Process *p, Uint out);
-int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_time_break(Eterm mfa[3], int specified);
+void erts_set_time_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_time_break(BpFunctions *f);
int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
void erts_clear_time_trace_bif(BeamInstr *pc);
-BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
@@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
return 0;
#endif
}
+
+extern erts_smp_atomic32_t erts_active_bp_index;
+extern erts_smp_atomic32_t erts_staging_bp_index;
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+}
+
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+}
#endif
#endif /* _BEAM_BP_H */
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index 406ef1db5f..92f7ffe5a2 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -31,78 +31,144 @@ typedef struct {
unsigned cdr;
} beam_catch_t;
-static int free_list;
-static unsigned high_mark;
-static unsigned tabsize;
-static beam_catch_t *beam_catches;
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
+
+struct bc_pool {
+ int free_list;
+ unsigned high_mark;
+ unsigned tabsize;
+ beam_catch_t *beam_catches;
+ /*
+ * Note that the 'beam_catches' area is shared by pools. Used slots
+ * are read-only as long as the module is not purgeable. The free-list is
+ * protected by the code_ix lock.
+ */
+
+ IF_DEBUG(int is_staging;)
+};
+
+static struct bc_pool bccix[ERTS_NUM_CODE_IX];
void beam_catches_init(void)
{
- tabsize = DEFAULT_TABSIZE;
- free_list = -1;
- high_mark = 0;
+ int i;
+
+ bccix[0].tabsize = DEFAULT_TABSIZE;
+ bccix[0].free_list = -1;
+ bccix[0].high_mark = 0;
+ bccix[0].beam_catches = erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(beam_catch_t)*DEFAULT_TABSIZE);
+ IF_DEBUG(bccix[0].is_staging = 0);
+ for (i=1; i<ERTS_NUM_CODE_IX; i++) {
+ bccix[i] = bccix[i-1];
+ }
+ /* For initial load: */
+ IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 1);
+}
- beam_catches = erts_alloc(ERTS_ALC_T_CODE, sizeof(beam_catch_t)*DEFAULT_TABSIZE);
+
+static void gc_old_vec(beam_catch_t* vec)
+{
+ int i;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ if (bccix[i].beam_catches == vec) {
+ return;
+ }
+ }
+ erts_free(ERTS_ALC_T_CODE, vec);
+}
+
+
+void beam_catches_start_staging(void)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+ ErtsCodeIndex src = erts_active_code_ix();
+ beam_catch_t* prev_vec = bccix[dst].beam_catches;
+
+ ASSERT(!bccix[src].is_staging && !bccix[dst].is_staging);
+
+ bccix[dst] = bccix[src];
+ gc_old_vec(prev_vec);
+ IF_DEBUG(bccix[dst].is_staging = 1);
+}
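beam_catches_start_staging() above copies the active pool into the staging slot and then asks gc_old_vec() whether the staging slot's previous table can be freed; it cannot be if any other code index still shares it. A minimal sketch of that check against a small fixed table of per-index pointers (a sketch, not ERTS code):

#include <stdlib.h>

#define NUM_IX 3

static int *tables[NUM_IX];   /* one table pointer per code index */

static void gc_old_table(int *old)
{
    for (int i = 0; i < NUM_IX; i++)
        if (tables[i] == old)
            return;           /* still referenced by some index: keep it */
    free(old);                /* unreachable from every index: safe to free */
}

int main(void)
{
    tables[0] = malloc(4 * sizeof(int));
    tables[1] = tables[0];            /* two indices share the same table */
    int *old = tables[0];
    tables[0] = malloc(4 * sizeof(int));
    gc_old_table(old);                /* kept: index 1 still points at it */
    tables[1] = tables[0];
    gc_old_table(old);                /* no index uses it any more: freed */
    free(tables[0]);
    return 0;
}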
+
+void beam_catches_end_staging(int commit)
+{
+ IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 0);
}
unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
{
int i;
+ struct bc_pool* p = &bccix[erts_staging_code_ix()];
+ ASSERT(p->is_staging);
/*
* Allocate from free_list while it is non-empty.
* If free_list is empty, allocate at high_mark.
- *
- * This avoids the need to initialise the free list in
- * beam_catches_init(), which would cost O(TABSIZ) time.
*/
- if( free_list >= 0 ) {
- i = free_list;
- free_list = beam_catches[i].cdr;
- } else if( high_mark < tabsize ) {
- i = high_mark;
- high_mark++;
- } else {
- /* No free slots and table is full: realloc table */
- tabsize = 2*tabsize;
- beam_catches = erts_realloc(ERTS_ALC_T_CODE, beam_catches, sizeof(beam_catch_t)*tabsize);
- i = high_mark;
- high_mark++;
+ if (p->free_list >= 0) {
+ i = p->free_list;
+ p->free_list = p->beam_catches[i].cdr;
+ }
+ else {
+ if (p->high_mark >= p->tabsize) {
+ /* No free slots and table is full: realloc table */
+ beam_catch_t* prev_vec = p->beam_catches;
+ unsigned newsize = p->tabsize*2;
+
+ p->beam_catches = erts_alloc(ERTS_ALC_T_CODE,
+ newsize*sizeof(beam_catch_t));
+ sys_memcpy(p->beam_catches, prev_vec,
+ p->tabsize*sizeof(beam_catch_t));
+ gc_old_vec(prev_vec);
+ p->tabsize = newsize;
+ }
+ i = p->high_mark++;
}
- beam_catches[i].cp = cp;
- beam_catches[i].cdr = cdr;
+ p->beam_catches[i].cp = cp;
+ p->beam_catches[i].cdr = cdr;
return i;
}
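beam_catches_cons() above takes a slot from the free list when one is available, otherwise bumps high_mark, and grows the table only when it is full. A simplified sketch of that allocator — note that the real code above copies into a freshly allocated vector and keeps the old one around until gc_old_vec() says no code index uses it, whereas this sketch just calls realloc and omits error handling (a sketch, not ERTS code):

#include <stdio.h>
#include <stdlib.h>

struct slot { void *cp; int cdr; };   /* cdr doubles as the free-list link */

struct pool {
    int free_list;                    /* -1 when the free list is empty */
    unsigned high_mark, tabsize;
    struct slot *tab;
};

static unsigned pool_cons(struct pool *p, void *cp, int cdr)
{
    unsigned i;
    if (p->free_list >= 0) {                  /* reuse a freed slot first */
        i = (unsigned) p->free_list;
        p->free_list = p->tab[i].cdr;
    } else {
        if (p->high_mark >= p->tabsize) {     /* full: double the table */
            p->tabsize *= 2;
            p->tab = realloc(p->tab, p->tabsize * sizeof(struct slot));
        }
        i = p->high_mark++;
    }
    p->tab[i].cp = cp;
    p->tab[i].cdr = cdr;
    return i;
}

int main(void)
{
    static int dummy;
    struct pool p = { -1, 0, 4, NULL };
    p.tab = calloc(p.tabsize, sizeof(struct slot));
    unsigned head = pool_cons(&p, &dummy, -1);    /* like BEAM_CATCHES_NIL */
    head = pool_cons(&p, &dummy, (int) head);     /* chain a second slot   */
    printf("head slot = %u\n", head);
    free(p.tab);
    return 0;
}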
BeamInstr *beam_catches_car(unsigned i)
{
- if( i >= tabsize ) {
+ struct bc_pool* p = &bccix[erts_active_code_ix()];
+
+ if (i >= p->tabsize ) {
erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
- return beam_catches[i].cp;
+ return p->beam_catches[i].cp;
}
-void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes)
+void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes,
+ ErtsCodeIndex code_ix)
{
+ struct bc_pool* p = &bccix[code_ix];
unsigned i, cdr;
+ ASSERT((code_ix == erts_active_code_ix()) != bccix[erts_staging_code_ix()].is_staging);
for(i = head; i != (unsigned)-1;) {
- if( i >= tabsize ) {
+ if (i >= p->tabsize) {
erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
- if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) {
+ if( (char*)p->beam_catches[i].cp - (char*)code >= code_bytes ) {
erl_exit(1,
"beam_catches_delmod: item %#x has cp %#lx which is not "
"in module's range [%#lx,%#lx[\r\n",
- i, (long)beam_catches[i].cp,
+ i, (long)p->beam_catches[i].cp,
(long)code, (long)((char*)code + code_bytes));
}
- beam_catches[i].cp = 0;
- cdr = beam_catches[i].cdr;
- beam_catches[i].cdr = free_list;
- free_list = i;
+ p->beam_catches[i].cp = 0;
+ cdr = p->beam_catches[i].cdr;
+ p->beam_catches[i].cdr = p->free_list;
+ p->free_list = i;
i = cdr;
}
}
diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h
index 6223427f0d..b2bd2351a5 100644
--- a/erts/emulator/beam/beam_catches.h
+++ b/erts/emulator/beam/beam_catches.h
@@ -20,12 +20,21 @@
#ifndef __BEAM_CATCHES_H
#define __BEAM_CATCHES_H
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "code_ix.h"
+
#define BEAM_CATCHES_NIL (-1)
void beam_catches_init(void);
+void beam_catches_start_staging(void);
+void beam_catches_end_staging(int commit);
unsigned beam_catches_cons(BeamInstr* cp, unsigned cdr);
BeamInstr *beam_catches_car(unsigned i);
-void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes);
+void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes,
+ ErtsCodeIndex);
#define catch_pc(x) beam_catches_car(catch_val((x)))
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 8041c92162..a609ed8c71 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
int i;
int specified = 0;
Eterm res;
+ BpFunctions f;
if (bool != am_true && bool != am_false)
goto error;
@@ -114,18 +115,30 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
mfa[2] = signed_val(mfa[2]);
}
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ erts_bp_match_functions(&f, mfa, specified);
if (bool == am_true) {
- res = make_small(erts_set_debug_break(mfa, specified));
+ erts_set_debug_break(&f);
+ erts_install_breakpoints(&f);
+ erts_commit_staged_bp();
} else {
- res = make_small(erts_clear_debug_break(mfa, specified));
+ erts_clear_debug_break(&f);
+ erts_commit_staged_bp();
+ erts_uninstall_breakpoints(&f);
}
+ erts_consolidate_bp_data(&f, 1);
+ res = make_small(f.matched);
+ erts_bp_free_matched_functions(&f);
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
-
+ erts_release_code_write_permission();
return res;
error:
@@ -207,6 +220,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
+ ErtsCodeIndex code_ix;
Module* modp;
Eterm mod;
Eterm name;
@@ -225,14 +239,14 @@ erts_debug_disassemble_1(BIF_ALIST_1)
goto error;
}
arity = signed_val(tp[3]);
- modp = erts_get_module(mod);
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(mod, code_ix);
/*
* Try the export entry first to allow disassembly of special functions
* such as erts_debug:apply/4. Then search for it in the module.
*/
-
- if ((ep = erts_find_function(mod, name, arity)) != NULL) {
+ if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
/* XXX: add "&& ep->address != ep->code+3" condition?
* Consider a traced function.
* Its ep will have ep->address == ep->code+3.
@@ -241,9 +255,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->address) - 5;
+ code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
funcinfo = code_ptr+2;
- } else if (modp == NULL || (code_base = modp->code) == NULL) {
+ } else if (modp == NULL || (code_base = modp->curr.code) == NULL) {
BIF_RET(am_undef);
} else {
n = code_base[MI_NUM_FUNCTIONS];
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 106f742272..0e9d140908 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -26,7 +26,6 @@
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
-#include "erl_nmgc.h"
#include "error.h"
#include "bif.h"
#include "big.h"
@@ -218,7 +217,6 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
-BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -233,11 +231,6 @@ BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
- * We should warn only once for tuple funs.
- */
-static erts_smp_atomic_t warned_for_tuple_funs;
-
-/*
* All Beam instructions in numerical order.
*/
@@ -249,20 +242,6 @@ void** beam_ops;
extern int count_instructions;
#endif
-#if defined(HYBRID)
-#define SWAPIN \
- g_htop = global_htop; \
- g_hend = global_hend; \
- HTOP = HEAP_TOP(c_p); \
- E = c_p->stop
-
-#define SWAPOUT \
- global_htop = g_htop; \
- global_hend = g_hend; \
- HEAP_TOP(c_p) = HTOP; \
- c_p->stop = E
-
-#else
#define SWAPIN \
HTOP = HEAP_TOP(c_p); \
E = c_p->stop
@@ -290,8 +269,6 @@ extern int count_instructions;
#define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
-#endif
-
#ifdef FORCE_HEAP_FRAGS
# define HEAP_SPACE_VERIFIED(Words) do { \
c_p->space_verified = (Words); \
@@ -453,36 +430,6 @@ extern int count_instructions;
CHECK_TERM(r(0)); \
} while (0)
-#ifdef HYBRID
-#ifdef INCREMENTAL
-#define TestGlobalHeap(Nh, Live, hp) \
- do { \
- unsigned need = (Nh); \
- ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
- SWAPOUT; \
- reg[0] = r(0); \
- FCALLS -= need; \
- (hp) = IncAlloc(c_p,need,reg,(Live)); \
- r(0) = reg[0]; \
- SWAPIN; \
- } while (0)
-#else
-#define TestGlobalHeap(Nh, Live, hp) \
- do { \
- unsigned need = (Nh); \
- ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
- if (g_hend - g_htop < need) { \
- SWAPOUT; \
- reg[0] = r(0); \
- FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
- r(0) = reg[0]; \
- SWAPIN; \
- } \
- (hp) = global_htop; \
- } while (0)
-#endif
-#endif /* HYBRID */
-
#define Init(N) make_blank(yb(N))
#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
@@ -538,7 +485,7 @@ extern int count_instructions;
do { \
if (FCALLS > 0) { \
Eterm* dis_next; \
- SET_I(((Export *) Arg(0))->address); \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
dis_next = (Eterm *) *I; \
FCALLS--; \
CHECK_ARGS(I); \
@@ -547,7 +494,7 @@ extern int count_instructions;
&& FCALLS > neg_o_reds) { \
goto save_calls1; \
} else { \
- SET_I(((Export *) Arg(0))->address); \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
CHECK_ARGS(I); \
goto context_switch; \
} \
@@ -1008,17 +955,9 @@ static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
-#if defined(VXWORKS)
-static int init_done;
-#endif
-
void
init_emulator(void)
{
-#if defined(VXWORKS)
- init_done = 0;
-#endif
- erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0);
process_main();
}
@@ -1150,9 +1089,7 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
*/
void process_main(void)
{
-#if !defined(VXWORKS)
static int init_done = 0;
-#endif
Process* c_p = NULL;
int reds_used;
#ifdef DEBUG
@@ -1174,12 +1111,6 @@ void process_main(void)
*/
register Eterm* HTOP REG_htop = NULL;
-
-#ifdef HYBRID
- Eterm *g_htop;
- Eterm *g_hend;
-#endif
-
/* Stack pointer. Grows downwards; points
* to last item pushed (normally a saved
* continuation pointer).
@@ -1556,7 +1487,7 @@ void process_main(void)
*/
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -1571,7 +1502,7 @@ void process_main(void)
SET_CP(c_p, I+2);
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -1584,7 +1515,7 @@ void process_main(void)
OpCase(i_call_ext_only_e):
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -4624,63 +4555,6 @@ void process_main(void)
* Trace and debugging support.
*/
- /*
- * At this point, I points to the code[3] in the export entry for
- * a trace-enabled function.
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&call_traced_function
- * code[4]: Address of function.
- */
- OpCase(call_traced_function): {
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- Export* ep = (Export *) (((char *)I)-offset);
- Uint32 flags;
-
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &ERTS_TRACER_PROC(c_p));
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SWAPIN;
-
- if (flags & MATCH_SET_RX_TRACE) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 3 < HTOP) {
- /* SWAPOUT, SWAPIN was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(c_p)) ||
- is_internal_port(ERTS_TRACER_PROC(c_p)));
- E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
- E[1] = am_true; /* Process tracer */
- E[0] = make_cp(ep->code);
- c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- SET_I((BeamInstr *)Arg(0));
- Dispatch();
- }
-
OpCase(return_trace): {
BeamInstr* code = (BeamInstr *) (UWord) E[0];
@@ -4695,80 +4569,22 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_count_breakpoint): {
+ OpCase(i_generic_breakpoint): {
BeamInstr real_I;
-
- ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
+ ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ SWAPOUT;
+ reg[0] = r(0);
+ real_I = erts_generic_breakpoint(c_p, I, reg);
+ r(0) = reg[0];
+ SWAPIN;
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
}
- /* need to send mfa instead of bdt pointer
- * the pointer might be deallocated.
- */
-
- OpCase(i_time_breakpoint): {
- BeamInstr real_I;
- BpData **bds = (BpData **) (I)[-4];
- BpDataTime *bdt = NULL;
- Uint ix = 0;
-#ifdef ERTS_SMP
- ix = c_p->scheduler_data->no - 1;
-#else
- ix = 0;
-#endif
- bdt = (BpDataTime *)bds[ix];
-
- ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- ASSERT(bdt);
- bdt = (BpDataTime *) bdt->next;
- ASSERT(bdt);
- bds[ix] = (BpData *) bdt;
- real_I = bdt->orig_instr;
- ASSERT(VALID_INSTR(real_I));
-
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
- if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
- /* This _IS_ a tail recursive call */
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
- SWAPIN;
- } else {
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
-
- /* r register needs to be copied to the array
- * for the garbage collector
- */
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 2 < HTOP) {
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- }
- SWAPIN;
-
- ASSERT(c_p->htop <= E && E <= c_p->hend);
-
- E -= 2;
- E[0] = make_cp(I);
- E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = beam_return_time_trace;
- }
- }
-
- Goto(real_I);
- }
-
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ erts_trace_time_return(c_p, pc);
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4776,114 +4592,6 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_trace_breakpoint):
- if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- BeamInstr real_I;
-
- ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
- Goto(real_I);
- }
- /* Fall through to next case */
- OpCase(i_mtrace_breakpoint): {
- BeamInstr real_I;
- Uint32 flags;
- Eterm tracer_pid;
- Uint* cpp;
- int return_to_trace = 0, need = 0;
- flags = 0;
- SWAPOUT;
- reg[0] = r(0);
-
- if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = &E[2];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- /* This _IS_ a tail recursive call, if there are
- * return_trace and/or i_return_to_trace stackframes
- * on the stack, they are not intermixed with y registers
- */
- BeamInstr *cp_save = c_p->cp;
- for (;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- cpp += 3;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp += 1;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
- cpp += 2;
- } else
- break;
- }
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- c_p->cp = cp_save;
- } else {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
-
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- need += 1;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
- }
- if (need) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - need < HTOP) {
- /* SWAPOUT was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- E -= 1;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- E[0] = make_cp(c_p->cp);
- c_p->cp = (BeamInstr *) beam_return_to_trace;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
- E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
- the funcinfo is above i. */
- c_p->cp =
- (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- Goto(real_I);
- }
-
OpCase(i_return_to_trace): {
if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
Uint *cpp = (Uint*) E;
@@ -5233,7 +4941,6 @@ void process_main(void)
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
- em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
@@ -5274,7 +4981,7 @@ void process_main(void)
save_calls(c_p, (Export *) Arg(0));
- SET_I(((Export *) Arg(0))->address);
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
dis_next = (Eterm *) *I;
FCALLS--;
@@ -5951,7 +5658,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Search for the error_handler module.
*/
- ep = erts_find_function(erts_proc_get_error_handler(p), func, 3);
+ ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
+ erts_active_code_ix());
if (ep == NULL) { /* No error handler */
p->current = fi;
p->freason = EXC_UNDEF;
@@ -5981,7 +5689,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
reg[0] = fi[0];
reg[1] = fi[1];
reg[2] = args;
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
@@ -5995,7 +5703,7 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
* there is no error handler module.
*/
- if ((ep = erts_find_export_entry(erts_proc_get_error_handler(p),
+ if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p),
am_undefined_function, 3)) == NULL) {
return NULL;
} else {
@@ -6102,7 +5810,7 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
* Note: All BIFs have export entries; thus, no special case is needed.
*/
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error;
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
@@ -6110,11 +5818,11 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->address;
+ BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
}
#endif
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
@@ -6156,7 +5864,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
* Note: All BIFs have export entries; thus, no special case is needed.
*/
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
goto error;
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
@@ -6165,11 +5873,11 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->address;
+ BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
}
#endif
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
int
@@ -6276,7 +5984,6 @@ call_fun(Process* p, /* Current process. */
Eterm fun = reg[arity];
Eterm hdr;
int i;
- Eterm function;
Eterm* hp;
if (!is_boxed(fun)) {
@@ -6347,7 +6054,7 @@ call_fun(Process* p, /* Current process. */
Export* ep;
Module* modp;
Eterm module;
-
+ ErtsCodeIndex code_ix = erts_active_code_ix();
/*
* No arity. There is no module loaded that defines the fun,
@@ -6355,9 +6062,9 @@ call_fun(Process* p, /* Current process. */
* representation (the module has never been loaded),
* or the module defining the fun has been unloaded.
*/
-
module = fe->module;
- if ((modp = erts_get_module(module)) != NULL && modp->code != NULL) {
+ if ((modp = erts_get_module(module, code_ix)) != NULL
+ && modp->curr.code != NULL) {
/*
* There is a module loaded, but obviously the fun is not
* defined in it. We must not call the error_handler
@@ -6372,7 +6079,7 @@ call_fun(Process* p, /* Current process. */
*/
ep = erts_find_function(erts_proc_get_error_handler(p),
- am_undefined_lambda, 3);
+ am_undefined_lambda, 3, code_ix);
if (ep == NULL) { /* No error handler */
p->current = NULL;
p->freason = EXC_UNDEF;
@@ -6382,7 +6089,7 @@ call_fun(Process* p, /* Current process. */
reg[1] = fun;
reg[2] = args;
reg[3] = NIL;
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
}
} else if (is_export_header(hdr)) {
@@ -6394,7 +6101,7 @@ call_fun(Process* p, /* Current process. */
if (arity == actual_arity) {
DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
} else {
/*
* Wrong arity. First build a list of the arguments.
@@ -6414,63 +6121,6 @@ call_fun(Process* p, /* Current process. */
p->fvalue = TUPLE2(hp, fun, args);
return NULL;
}
- } else if (hdr == make_arityval(2)) {
- Eterm* tp;
- Export* ep;
- Eterm module;
-
- tp = tuple_val(fun);
- module = tp[1];
- function = tp[2];
- if (!is_atom(module) || !is_atom(function)) {
- goto badfun;
- }
-
- /*
- * If this is the first time a tuple fun is used,
- * send a warning to the logger.
- */
- if (erts_smp_atomic_xchg_nob(&warned_for_tuple_funs,
- (erts_aint_t) 1) == 0) {
- erts_dsprintf_buf_t* dsbufp;
-
- dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Call to tuple fun {%T,%T}.\n\n"
- "Tuple funs are deprecated and will be removed "
- "in R16. Use \"fun M:F/A\" instead, for example "
- "\"fun %T:%T/%d\".\n\n"
- "(This warning will only be shown the first time "
- "a tuple fun is called.)\n",
- module, function, module, function, arity);
- erts_send_warning_to_logger(p->group_leader, dsbufp);
- }
-
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
- ep = erts_find_export_entry(erts_proc_get_error_handler(p),
- am_undefined_function, 3);
- if (ep == NULL) {
- p->freason = EXC_UNDEF;
- return 0;
- }
- if (is_non_value(args)) {
- Uint sz = 2 * arity;
- if (HeapWordsLeft(p) < sz) {
- erts_garbage_collect(p, sz, reg, arity);
- }
- hp = HEAP_TOP(p);
- HEAP_TOP(p) += sz;
- args = NIL;
- while (arity-- > 0) {
- args = CONS(hp, reg[arity], args);
- hp += 2;
- }
- }
- reg[0] = module;
- reg[1] = function;
- reg[2] = args;
- }
- DTRACE_GLOBAL_CALL(p, module, function, arity);
- return ep->address;
} else {
badfun:
p->current = NULL;
@@ -6532,10 +6182,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
hp = funp->env;
erts_refc_inc(&fe->refc, 2);
funp->thing_word = HEADER_FUN;
-#ifndef HYBRID /* FIND ME! */
funp->next = MSO(p).first;
MSO(p).first = (struct erl_off_heap_header*) funp;
-#endif
funp->fe = fe;
funp->num_free = num_free;
funp->creator = p->common.id;
@@ -6575,7 +6223,8 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->address == ep->code+3 && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->code+3
+ && (ep->code[3] == (BeamInstr) em_apply_bif);
}
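
The beam_emu.c changes above consistently replace the single ep->address field with ep->addressv[erts_active_code_ix()]. A minimal illustrative C sketch of that dispatch pattern follows; export_sketch, code_ix_t and NUM_CODE_IX are simplified stand-ins for this example, not the emulator's own definitions.

/* Sketch only: per-code-index dispatch through an export entry. */
typedef unsigned code_ix_t;
#define NUM_CODE_IX 3                 /* assumed number of code indices */

struct export_sketch {
    void *addressv[NUM_CODE_IX];      /* one entry point per code index */
};

static void *export_target(struct export_sketch *ep, code_ix_t active_ix)
{
    /* Callers always dereference through the currently active index, so
     * loading new code only has to fill in the staging slot and then
     * switch the active index; no call site needs to be patched. */
    return ep->addressv[active_ix];
}
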
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index dd788df6e4..b51f076a5d 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -352,27 +352,6 @@ typedef struct LoaderState {
int loc_size; /* Size of location info in bytes (2/4) */
} LoaderState;
-/*
- * Layout of the line table.
- */
-
-#define MI_LINE_FNAME_PTR 0
-#define MI_LINE_LOC_TAB 1
-#define MI_LINE_LOC_SIZE 2
-#define MI_LINE_FUNC_TAB 3
-
-#define LINE_INVALID_LOCATION (0)
-
-/*
- * Macros for manipulating locations.
- */
-
-#define IS_VALID_LOCATION(File, Line) \
- ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
-#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
-#define LOC_FILE(Loc) ((Loc) >> 24)
-#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
-
#define GetTagAndValue(Stp, Tag, Val) \
do { \
BeamInstr __w; \
@@ -496,7 +475,8 @@ typedef struct LoaderState {
} while (0)
-static void free_state(LoaderState* stp);
+static void free_loader_state(Binary* magic);
+static void loader_state_dtor(Binary* magic);
static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
BeamInstr* code, Uint size);
@@ -507,6 +487,7 @@ static int verify_chunks(LoaderState* stp);
static int load_atom_table(LoaderState* stp);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
+static int is_bif(Eterm mod, Eterm func, unsigned arity);
static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
static int read_line_table(LoaderState* stp);
@@ -548,22 +529,9 @@ static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
-static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
- BeamInstr* modp, int idx);
-
static int must_swap_floats;
-/*
- * The following variables keep a sorted list of address ranges for
- * each module. It allows us to quickly find a function given an
- * instruction pointer.
- */
-Range* modules = NULL; /* Sorted lists of module addresses. */
-int num_loaded_modules; /* Number of loaded modules. */
-int allocated_modules; /* Number of slots allocated. */
-Range* mid_module = NULL; /* Cached search start point */
-
Uint erts_total_code_size;
/**********************************************************************/
@@ -579,11 +547,7 @@ void init_load(void)
f.fd = 1.0;
must_swap_floats = (f.fw[0] == 0);
- allocated_modules = 128;
- modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
- allocated_modules*sizeof(Range));
- mid_module = modules;
- num_loaded_modules = 0;
+ erts_init_ranges();
}
static void
@@ -595,7 +559,7 @@ define_file(LoaderState* stp, char* name, int idx)
}
Eterm
-erts_load_module(Process *c_p,
+erts_preload_module(Process *c_p,
ErtsProcLocks c_p_locks,
Eterm group_leader, /* Group leader or NIL if none. */
Eterm* modp, /*
@@ -605,15 +569,16 @@ erts_load_module(Process *c_p,
byte* code, /* Points to the code to load */
Uint size) /* Size of code to load. */
{
- LoaderState* stp = erts_alloc_loader_state();
+ Binary* magic = erts_alloc_loader_state();
Eterm retval;
- retval = erts_prepare_loading(stp, c_p, group_leader, modp,
+ ASSERT(!erts_initialized);
+ retval = erts_prepare_loading(magic, c_p, group_leader, modp,
code, size);
if (retval != NIL) {
return retval;
}
- return erts_finish_loading(stp, c_p, c_p_locks, modp);
+ return erts_finish_loading(magic, c_p, c_p_locks, modp);
}
/* #define LOAD_MEMORY_HARD_DEBUG 1*/
@@ -629,11 +594,13 @@ extern void check_allocated_block(Uint type, void *blk);
#endif
Eterm
-erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
+erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
Eterm* modp, byte* code, Uint unloaded_size)
{
Eterm retval = am_badfile;
+ LoaderState* stp;
+ stp = ERTS_MAGIC_BIN_DATA(magic);
stp->module = *modp;
stp->group_leader = group_leader;
@@ -666,7 +633,7 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
/*
* Initialize code area.
*/
- stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
+ stp->code_buffer_size = 2048 + stp->num_functions;
stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
sizeof(BeamInstr) * stp->code_buffer_size);
@@ -679,8 +646,6 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
stp->code[MI_COMPILE_PTR] = 0;
stp->code[MI_COMPILE_SIZE] = 0;
stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- stp->code[MI_NUM_BREAKPOINTS] = 0;
-
/*
* Read the atom table.
@@ -774,23 +739,24 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
load_error:
if (retval != NIL) {
- free_state(stp);
+ free_loader_state(magic);
}
return retval;
}
Eterm
-erts_finish_loading(LoaderState* stp, Process* c_p,
+erts_finish_loading(Binary* magic, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp)
{
Eterm retval;
+ LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
/*
* No other process may run since we will update the export
* table which is not protected by any locks.
*/
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 ||
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
erts_smp_thr_progress_is_blocking());
/*
@@ -811,7 +777,6 @@ erts_finish_loading(LoaderState* stp, Process* c_p,
* exported and imported functions. This can't fail.
*/
- erts_export_consolidate();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
final_touch(stp);
@@ -837,16 +802,20 @@ erts_finish_loading(LoaderState* stp, Process* c_p,
}
load_error:
- free_state(stp);
+ free_loader_state(magic);
return retval;
}
-LoaderState*
+Binary*
erts_alloc_loader_state(void)
{
LoaderState* stp;
+ Binary* magic;
- stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState));
+ magic = erts_create_magic_binary(sizeof(LoaderState),
+ loader_state_dtor);
+ erts_refc_inc(&magic->refc, 1);
+ stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
stp->arity = 0;
@@ -875,76 +844,123 @@ erts_alloc_loader_state(void)
stp->line_instr = 0;
stp->func_line = 0;
stp->fname = 0;
- return stp;
+ return magic;
+}
+
+/*
+ * Return the module name (a tagged atom) for the prepared code
+ * in the magic binary, or NIL if the binary does not contain
+ * prepared code.
+ */
+Eterm
+erts_module_for_prepared_code(Binary* magic)
+{
+ LoaderState* stp;
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+ return NIL;
+ }
+ stp = ERTS_MAGIC_BIN_DATA(magic);
+ if (stp->code != 0) {
+ return stp->module;
+ } else {
+ return NIL;
+ }
}
static void
-free_state(LoaderState* stp)
+free_loader_state(Binary* magic)
{
+ loader_state_dtor(magic);
+ if (erts_refc_dectest(&magic->refc, 0) == 0) {
+ erts_bin_free(magic);
+ }
+}
+
+/*
+ * This destructor function can safely be called multiple times.
+ */
+static void
+loader_state_dtor(Binary* magic)
+{
+ LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
+
if (stp->bin != 0) {
driver_free_binary(stp->bin);
+ stp->bin = 0;
}
if (stp->code != 0) {
erts_free(ERTS_ALC_T_CODE, stp->code);
+ stp->code = 0;
}
- if (stp->labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->labels);
+ if (stp->labels != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels);
+ stp->labels = 0;
}
- if (stp->atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->atom);
+ if (stp->atom != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->atom);
+ stp->atom = 0;
}
- if (stp->import != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->import);
+ if (stp->import != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->import);
+ stp->import = 0;
}
- if (stp->export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->export);
+ if (stp->export != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->export);
+ stp->export = 0;
}
if (stp->lambdas != stp->def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->lambdas);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->lambdas);
+ stp->lambdas = stp->def_lambdas;
}
- if (stp->literals != NULL) {
+ if (stp->literals != 0) {
int i;
for (i = 0; i < stp->num_literals; i++) {
- if (stp->literals[i].heap != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP,
+ if (stp->literals[i].heap != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->literals[i].heap);
+ stp->literals[i].heap = 0;
}
}
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literals);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals);
+ stp->literals = 0;
}
- while (stp->literal_patches != NULL) {
+ while (stp->literal_patches != 0) {
LiteralPatch* next = stp->literal_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literal_patches);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literal_patches);
stp->literal_patches = next;
}
- while (stp->string_patches != NULL) {
+ while (stp->string_patches != 0) {
StringPatch* next = stp->string_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->string_patches);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->string_patches);
stp->string_patches = next;
}
- while (stp->genop_blocks) {
- GenOpBlock* next = stp->genop_blocks->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
- stp->genop_blocks = next;
- }
if (stp->line_item != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_item);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_item);
+ stp->line_item = 0;
}
if (stp->line_instr != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_instr);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_instr);
+ stp->line_instr = 0;
}
if (stp->func_line != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->func_line);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->func_line);
+ stp->func_line = 0;
}
if (stp->fname != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->fname);
+ stp->fname = 0;
}
- erts_free(ERTS_ALC_T_LOADER_TMP, stp);
+ /*
+ * The following data items should have been freed earlier.
+ */
+
+ ASSERT(stp->genop_blocks == 0);
}
static Eterm
@@ -954,7 +970,6 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
{
Module* modp;
Eterm retval;
- int i;
if ((retval = beam_make_current_old(c_p, c_p_locks, module)) != NIL) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -971,30 +986,15 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_total_code_size += size;
modp = erts_put_module(module);
- modp->code = code;
- modp->code_length = size;
- modp->catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ modp->curr.code = code;
+ modp->curr.code_length = size;
+ modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
/*
- * Update address table (used for finding a function from a PC value).
+ * Update ranges (used for finding a function from a PC value).
*/
- if (num_loaded_modules == allocated_modules) {
- allocated_modules *= 2;
- modules = (Range *) erts_realloc(ERTS_ALC_T_MODULE_REFS,
- (void *) modules,
- allocated_modules * sizeof(Range));
- }
- for (i = num_loaded_modules; i > 0; i--) {
- if (code > modules[i-1].start) {
- break;
- }
- modules[i] = modules[i-1];
- }
- modules[i].start = code;
- modules[i].end = (BeamInstr *) (((byte *)code) + size);
- num_loaded_modules++;
- mid_module = &modules[num_loaded_modules/2];
+ erts_update_ranges(code, size);
return NIL;
}
@@ -1217,9 +1217,8 @@ load_atom_table(LoaderState* stp)
GetInt(stp, 4, stp->num_atoms);
stp->num_atoms++;
- stp->atom = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- erts_next_heap_size((stp->num_atoms*sizeof(Eterm)),
- 0));
+ stp->atom = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_atoms*sizeof(Eterm));
/*
* Read all atoms.
@@ -1263,10 +1262,8 @@ load_import_table(LoaderState* stp)
int i;
GetInt(stp, 4, stp->num_imports);
- stp->import = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- erts_next_heap_size((stp->num_imports *
- sizeof(ImportEntry)),
- 0));
+ stp->import = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_imports * sizeof(ImportEntry));
for (i = 0; i < stp->num_imports; i++) {
int n;
Eterm mod;
@@ -1296,7 +1293,7 @@ load_import_table(LoaderState* stp)
* If the export entry refers to a BIF, get the pointer to
* the BIF function.
*/
- if ((e = erts_find_export_entry(mod, func, arity)) != NULL) {
+ if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
if (e->code[3] == (BeamInstr) em_apply_bif) {
stp->import[i].bf = (BifFunction) e->code[4];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
@@ -1315,16 +1312,8 @@ load_import_table(LoaderState* stp)
static int
read_export_table(LoaderState* stp)
{
- static struct {
- Eterm mod;
- Eterm func;
- int arity;
- } allow_redef[] = {
- /* The BIFs that are allowed to be redefined by Erlang code */
- {am_erlang,am_apply,2},
- {am_erlang,am_apply,3},
- };
int i;
+ BeamInstr* address;
GetInt(stp, 4, stp->num_exps);
if (stp->num_exps > stp->num_functions) {
@@ -1332,7 +1321,7 @@ read_export_table(LoaderState* stp)
stp->num_exps, stp->num_functions);
}
stp->export
- = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
(stp->num_exps * sizeof(ExportEntry)));
for (i = 0; i < stp->num_exps; i++) {
@@ -1340,7 +1329,6 @@ read_export_table(LoaderState* stp)
Uint value;
Eterm func;
Uint arity;
- Export* e;
GetInt(stp, 4, n);
GetAtom(stp, n, func);
@@ -1358,29 +1346,34 @@ read_export_table(LoaderState* stp)
if (value == 0) {
LoadError2(stp, "export table entry %d: label %d not resolved", i, n);
}
- stp->export[i].address = stp->code + value;
+ stp->export[i].address = address = stp->code + value;
/*
- * Check that we are not redefining a BIF (except the ones allowed to
- * redefine).
+ * Find out if there is a BIF with the same name.
*/
- if ((e = erts_find_export_entry(stp->module, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- int j;
- for (j = 0; j < sizeof(allow_redef)/sizeof(allow_redef[0]); j++) {
- if (stp->module == allow_redef[j].mod &&
- func == allow_redef[j].func &&
- arity == allow_redef[j].arity) {
- break;
- }
- }
- if (j == sizeof(allow_redef)/sizeof(allow_redef[0])) {
- LoadError2(stp, "exported function %T/%d redefines BIF",
- func, arity);
- }
- }
+ if (!is_bif(stp->module, func, arity)) {
+ continue;
+ }
+
+ /*
+ * This is a stub for a BIF.
+ *
+ * It should not be exported, and the information in its
+ * func_info instruction should be invalidated so that it
+ * can be filtered out by module_info(functions) and by
+ * any other functions that walk through all local functions.
+ */
+
+ if (stp->labels[n].patches) {
+ LoadError3(stp, "there are local calls to the stub for "
+ "the BIF %T:%T/%d",
+ stp->module, func, arity);
}
+ stp->export[i].address = NULL;
+ address[-1] = 0;
+ address[-2] = NIL;
+ address[-3] = NIL;
}
return 1;
@@ -1388,15 +1381,39 @@ read_export_table(LoaderState* stp)
return 0;
}
+
+static int
+is_bif(Eterm mod, Eterm func, unsigned arity)
+{
+ Export* e = erts_active_export_entry(mod, func, arity);
+ if (e == NULL) {
+ return 0;
+ }
+ if (e->code[3] != (BeamInstr) em_apply_bif) {
+ return 0;
+ }
+ if (mod == am_erlang && func == am_apply && arity == 3) {
+ /*
+ * erlang:apply/3 is a special case -- it is implemented
+ * as an instruction and it is OK to redefine it.
+ */
+ return 0;
+ }
+ return 1;
+}
+
static int
read_lambda_table(LoaderState* stp)
{
int i;
GetInt(stp, 4, stp->num_lambdas);
- stp->lambdas_allocated = stp->num_lambdas;
- stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
- stp->num_lambdas * sizeof(Lambda));
+ if (stp->num_lambdas > stp->lambdas_allocated) {
+ ASSERT(stp->lambdas == stp->def_lambdas);
+ stp->lambdas_allocated = stp->num_lambdas;
+ stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_lambdas * sizeof(Lambda));
+ }
for (i = 0; i < stp->num_lambdas; i++) {
Uint n;
Uint32 Index;
@@ -1446,7 +1463,7 @@ read_literal_table(LoaderState* stp)
stp->file_p = uncompressed;
stp->file_left = (unsigned) uncompressed_sz;
GetInt(stp, 4, stp->num_literals);
- stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_literals * sizeof(Literal));
stp->allocated_literals = stp->num_literals;
@@ -1466,7 +1483,7 @@ read_literal_table(LoaderState* stp)
if ((heap_size = erts_decode_ext_size(p, sz)) < 0) {
LoadError1(stp, "literal %d: bad external format", i);
}
- hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
heap_size*sizeof(Eterm));
stp->literals[i].off_heap.first = 0;
stp->literals[i].off_heap.overhead = 0;
@@ -1538,7 +1555,7 @@ read_line_table(LoaderState* stp)
*/
num_line_items++;
- lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
num_line_items * sizeof(BeamInstr));
stp->line_item = lp;
stp->num_line_items = num_line_items;
@@ -1594,7 +1611,7 @@ read_line_table(LoaderState* stp)
*/
if (stp->num_fnames != 0) {
- stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_fnames *
sizeof(Eterm));
for (i = 0; i < stp->num_fnames; i++) {
@@ -1610,11 +1627,11 @@ read_line_table(LoaderState* stp)
/*
* Allocate the arrays to be filled while code is being loaded.
*/
- stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_line_instrs *
sizeof(LineInstr));
stp->current_li = 0;
- stp->func_line = (int *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->func_line = (int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_functions *
sizeof(int));
@@ -1677,7 +1694,7 @@ read_code_header(LoaderState* stp)
* Initialize label table.
*/
- stp->labels = (Label *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_labels * sizeof(Label));
for (i = 0; i < stp->num_labels; i++) {
stp->labels[i].value = 0;
@@ -1703,9 +1720,9 @@ read_code_header(LoaderState* stp)
#define CodeNeed(w) do { \
ASSERT(ci <= code_buffer_size); \
if (code_buffer_size < ci+(w)) { \
- code_buffer_size = erts_next_heap_size(ci+(w), 0); \
- stp->code = code \
- = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
+ code_buffer_size = 2*ci+(w); \
+ stp->code = code = \
+ (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
(void *) code, \
code_buffer_size * sizeof(BeamInstr)); \
} \
@@ -1731,6 +1748,7 @@ load_code(LoaderState* stp)
GenOp* last_op = NULL;
GenOp** last_op_next = NULL;
int arity;
+ int retval = 1;
/*
* The size of the loaded func_info instruction is needed
@@ -2457,7 +2475,11 @@ load_code(LoaderState* stp)
case op_int_code_end:
stp->code_buffer_size = code_buffer_size;
stp->ci = ci;
- return 1;
+ stp->function = THE_NON_VALUE;
+ stp->genop = NULL;
+ stp->specific_op = -1;
+ retval = 1;
+ goto cleanup;
}
/*
@@ -2471,9 +2493,20 @@ load_code(LoaderState* stp)
}
}
-
load_error:
- return 0;
+ retval = 0;
+
+ cleanup:
+ /*
+ * Clean up everything that is not needed any longer.
+ */
+
+ while (stp->genop_blocks) {
+ GenOpBlock* next = stp->genop_blocks->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
+ stp->genop_blocks = next;
+ }
+ return retval;
}
@@ -4265,24 +4298,31 @@ final_touch(LoaderState* stp)
index = next;
}
modp = erts_put_module(stp->module);
- modp->catches = catches;
+ modp->curr.catches = catches;
/*
* Export functions.
*/
for (i = 0; i < stp->num_exps; i++) {
- Export* ep = erts_export_put(stp->module, stp->export[i].function,
- stp->export[i].arity);
+ Export* ep;
+ BeamInstr* address = stp->export[i].address;
+
+ if (address == NULL) {
+ /* Skip stub for a BIF */
+ continue;
+ }
+ ep = erts_export_put(stp->module, stp->export[i].function,
+ stp->export[i].arity);
if (!on_load) {
- ep->address = stp->export[i].address;
+ ep->addressv[erts_staging_code_ix()] = address;
} else {
/*
* Don't make any of the exported functions
* callable yet.
*/
- ep->address = ep->code+3;
- ep->code[4] = (BeamInstr) stp->export[i].address;
+ ep->addressv[erts_staging_code_ix()] = ep->code+3;
+ ep->code[4] = (BeamInstr) address;
}
}
@@ -4926,7 +4966,7 @@ new_label(LoaderState* stp)
int num = stp->num_labels;
stp->num_labels++;
- stp->labels = (Label *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->labels,
stp->num_labels * sizeof(Label));
stp->labels[num].value = 0;
@@ -4937,7 +4977,8 @@ new_label(LoaderState* stp)
static void
new_literal_patch(LoaderState* stp, int pos)
{
- LiteralPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LiteralPatch));
+ LiteralPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ sizeof(LiteralPatch));
p->pos = pos;
p->next = stp->literal_patches;
stp->literal_patches = p;
@@ -4946,7 +4987,7 @@ new_literal_patch(LoaderState* stp, int pos)
static void
new_string_patch(LoaderState* stp, int pos)
{
- StringPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(StringPatch));
+ StringPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE, sizeof(StringPatch));
p->pos = pos;
p->next = stp->string_patches;
stp->string_patches = p;
@@ -4964,14 +5005,14 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
ASSERT(stp->num_literals == 0);
stp->allocated_literals = 8;
need = stp->allocated_literals * sizeof(Literal);
- stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
need);
} else if (stp->allocated_literals <= stp->num_literals) {
Uint need;
stp->allocated_literals *= 2;
need = stp->allocated_literals * sizeof(Literal);
- stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->literals,
need);
}
@@ -4980,7 +5021,7 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
lit = stp->literals + stp->num_literals;
lit->offset = 0;
lit->heap_size = heap_size;
- lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm));
+ lit->heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm));
lit->term = make_boxed(lit->heap);
lit->off_heap.first = 0;
lit->off_heap.overhead = 0;
@@ -4999,7 +5040,7 @@ erts_module_info_0(Process* p, Eterm module)
return THE_NON_VALUE;
}
- if (erts_get_module(module) == NULL) {
+ if (erts_get_module(module, erts_active_code_ix()) == NULL) {
return THE_NON_VALUE;
}
@@ -5054,32 +5095,43 @@ functions_in_module(Process* p, /* Process whose heap to use. */
BeamInstr* code;
int i;
Uint num_functions;
+ Uint need;
Eterm* hp;
+ Eterm* hp_end;
Eterm result = NIL;
if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
num_functions = code[MI_NUM_FUNCTIONS];
- hp = HAlloc(p, 5*num_functions);
+ need = 5*num_functions;
+ hp = HAlloc(p, need);
+ hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
Eterm name = (Eterm) func_info[3];
int arity = (int) func_info[4];
Eterm tuple;
- ASSERT(is_atom(name));
- tuple = TUPLE2(hp, name, make_small(arity));
- hp += 3;
- result = CONS(hp, tuple, result);
- hp += 2;
+ /*
+ * If the function name is [], this entry is a stub for
+ * a BIF that should be ignored.
+ */
+ ASSERT(is_atom(name) || is_nil(name));
+ if (is_atom(name)) {
+ tuple = TUPLE2(hp, name, make_small(arity));
+ hp += 3;
+ result = CONS(hp, tuple, result);
+ hp += 2;
+ }
}
+ HRelease(p, hp_end, hp);
return result;
}
@@ -5106,12 +5158,12 @@ native_addresses(Process* p, Eterm mod)
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
num_functions = code[MI_NUM_FUNCTIONS];
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
hp = HAlloc(p, need);
@@ -5122,9 +5174,11 @@ native_addresses(Process* p, Eterm mod)
int arity = (int) func_info[4];
Eterm tuple;
- ASSERT(is_atom(name));
+ ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
if (func_info[1] != 0) {
- Eterm addr = erts_bld_uint(&hp, NULL, func_info[1]);
+ Eterm addr;
+ ASSERT(is_atom(name));
+ addr = erts_bld_uint(&hp, NULL, func_info[1]);
tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
@@ -5149,18 +5203,20 @@ exported_from_module(Process* p, /* Process whose heap to use. */
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
+ ErtsCodeIndex code_ix;
if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
- for (i = 0; i < export_list_size(); i++) {
- Export* ep = export_list(i);
+ code_ix = erts_active_code_ix();
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export* ep = export_list(i,code_ix);
if (ep->code[0] == mod) {
Eterm tuple;
- if (ep->address == ep->code+3 &&
+ if (ep->addressv[code_ix] == ep->code+3 &&
ep->code[3] == (BeamInstr) em_call_error_handler) {
/* There is a call to the function, but it does not exist. */
continue;
@@ -5204,11 +5260,11 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
ext = (byte *) code[MI_ATTR_PTR];
if (ext != NULL) {
hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]);
@@ -5244,11 +5300,11 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
ext = (byte *) code[MI_COMPILE_PTR];
if (ext != NULL) {
hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]);
@@ -5263,113 +5319,6 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
}
/*
- * Find a function from the given pc and fill information in
- * the FunctionInfo struct. If the full_info is non-zero, fill
- * in all available information (including location in the
- * source code). If no function is found, the 'current' field
- * will be set to NULL.
- */
-
-void
-erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
-{
- Range* low = modules;
- Range* high = low + num_loaded_modules;
- Range* mid = mid_module;
-
- fi->current = NULL;
- fi->needed = 5;
- fi->loc = LINE_INVALID_LOCATION;
- while (low < high) {
- if (pc < mid->start) {
- high = mid;
- } else if (pc > mid->end) {
- low = mid + 1;
- } else {
- BeamInstr** low1 = (BeamInstr **) (mid->start + MI_FUNCTIONS);
- BeamInstr** high1 = low1 + mid->start[MI_NUM_FUNCTIONS];
- BeamInstr** mid1;
-
- while (low1 < high1) {
- mid1 = low1 + (high1-low1) / 2;
- if (pc < mid1[0]) {
- high1 = mid1;
- } else if (pc < mid1[1]) {
- mid_module = mid;
- fi->current = mid1[0]+2;
- if (full_info) {
- BeamInstr** fp = (BeamInstr **) (mid->start +
- MI_FUNCTIONS);
- int idx = mid1 - fp;
- lookup_loc(fi, pc, mid->start, idx);
- }
- return;
- } else {
- low1 = mid1 + 1;
- }
- }
- return;
- }
- mid = low + (high-low) / 2;
- }
-}
-
-static void
-lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
-{
- Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
- Eterm* low;
- Eterm* high;
- Eterm* mid;
- Eterm pc;
-
- if (line == 0) {
- return;
- }
-
- pc = (Eterm) (BeamInstr) orig_pc;
- fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
- low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
- high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
- while (high > low) {
- mid = low + (high-low) / 2;
- if (pc < mid[0]) {
- high = mid;
- } else if (pc < mid[1]) {
- int file;
- int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
-
- if (line[MI_LINE_LOC_SIZE] == 2) {
- Uint16* loc_table =
- (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- fi->loc = loc_table[index];
- } else {
- Uint32* loc_table =
- (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- ASSERT(line[MI_LINE_LOC_SIZE] == 4);
- fi->loc = loc_table[index];
- }
- if (fi->loc == LINE_INVALID_LOCATION) {
- return;
- }
- fi->needed += 3+2+3+2;
- file = LOC_FILE(fi->loc);
- if (file == 0) {
- /* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
- fi->needed += 2*(mod_atom->len+4);
- } else {
- Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
- fi->needed += 2*ap->len;
- }
- return;
- } else {
- low = mid + 1;
- }
- }
-}
-
-/*
 * Build a single {M,F,A,Location} item to be part of
* a stack trace.
*/
@@ -5449,6 +5398,7 @@ code_get_chunk_2(BIF_ALIST_2)
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
Eterm Chunk = BIF_ARG_2;
+ Binary* magic = 0;
LoaderState* stp;
Uint chunk = 0;
ErlSubBin* sb;
@@ -5461,12 +5411,13 @@ code_get_chunk_2(BIF_ALIST_2)
Eterm real_bin;
byte* temp_alloc = NULL;
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
error:
erts_free_aligned_binary_bytes(temp_alloc);
- if (stp) {
- free_state(stp);
+ if (magic) {
+ free_loader_state(magic);
}
BIF_ERROR(p, BADARG);
}
@@ -5511,7 +5462,7 @@ code_get_chunk_2(BIF_ALIST_2)
done:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return res;
}
@@ -5524,14 +5475,16 @@ code_module_md5_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
+ Binary* magic;
LoaderState* stp;
byte* bytes;
byte* temp_alloc = NULL;
Eterm res;
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if ((bytes = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
- free_state(stp);
+ free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
 stp->module = THE_NON_VALUE; /* Suppress diagnostics */
@@ -5545,7 +5498,7 @@ code_module_md5_1(BIF_ALIST_1)
done:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return res;
}
@@ -5574,7 +5527,8 @@ stub_copy_info(LoaderState* stp,
int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */
byte* info, /* Where to store info. */
BeamInstr* ptr_word, /* Where to store pointer into info. */
- BeamInstr* size_word) /* Where to store size of info. */
+ BeamInstr* size_word, /* Where to store size of info. */
+ BeamInstr* size_on_heap_word) /* Where to store size on heap. */
{
Sint decoded_size;
Uint size = stp->chunks[chunk].size;
@@ -5585,7 +5539,8 @@ stub_copy_info(LoaderState* stp,
if (decoded_size < 0) {
return 0;
}
- *size_word = decoded_size;
+ *size_word = (BeamInstr) size;
+ *size_on_heap_word = decoded_size;
}
return info + size;
}
@@ -5601,7 +5556,7 @@ stub_read_export_table(LoaderState* stp)
stp->num_exps, stp->num_functions);
}
stp->export
- = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_exps * sizeof(ExportEntry));
for (i = 0; i < stp->num_exps; i++) {
@@ -5627,20 +5582,29 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
{
int i;
int n = stp->num_exps;
+ Eterm mod = fp[2];
Eterm function = fp[3];
int arity = fp[4];
#ifdef HIPE
Lambda* lp;
#endif
+ if (is_bif(mod, function, arity)) {
+ fp[1] = 0;
+ fp[2] = 0;
+ fp[3] = 0;
+ fp[4] = 0;
+ return;
+ }
+
/*
* Test if the function should be exported.
*/
for (i = 0; i < n; i++) {
if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(fp[2], function, arity);
- ep->address = fp+5;
+ Export* ep = erts_export_put(mod, function, arity);
+ ep->addressv[erts_staging_code_ix()] = fp+5;
return;
}
}
@@ -5819,6 +5783,7 @@ patch_funentries(Eterm Patchlist)
Eterm
erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
{
+ Binary* magic;
LoaderState* stp;
BeamInstr Funcs;
BeamInstr Patchlist;
@@ -5840,7 +5805,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Must initialize stp->lambdas here because the error handling code
* at label 'error' uses it.
*/
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if (is_not_atom(Mod)) {
goto error;
@@ -5920,7 +5886,6 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code[MI_COMPILE_PTR] = 0;
code[MI_COMPILE_SIZE] = 0;
code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- code[MI_NUM_BREAKPOINTS] = 0;
code[MI_LITERALS_START] = 0;
code[MI_LITERALS_END] = 0;
code[MI_LITERALS_OFF_HEAP] = 0;
@@ -5997,12 +5962,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
info = (byte *) fp;
info = stub_copy_info(stp, ATTR_CHUNK, info,
- code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP);
+ code+MI_ATTR_PTR,
+ code+MI_ATTR_SIZE,
+ code+MI_ATTR_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
info = stub_copy_info(stp, COMPILE_CHUNK, info,
- code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP);
+ code+MI_COMPILE_PTR,
+ code+MI_COMPILE_SIZE,
+ code+MI_COMPILE_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
@@ -6028,13 +5997,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (patch_funentries(Patchlist)) {
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return Mod;
}
error:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
@@ -6051,3 +6020,4 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
return (res / b) == a;
}
}
+
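
The loader state now lives inside a refcounted magic binary, and its destructor, loader_state_dtor(), is documented above as safe to call multiple times, so every freed member is cleared before returning. A minimal sketch of that idempotent-destructor pattern, with made-up member names purely for illustration:

#include <stdlib.h>

struct loader_state_sketch {       /* hypothetical members, not the real LoaderState */
    void *code;
    void *labels;
};

static void loader_state_dtor_sketch(struct loader_state_sketch *stp)
{
    /* Safe to call repeatedly: every freed pointer is reset to NULL,
     * so a second invocation finds NULL and does nothing. */
    free(stp->code);   stp->code   = NULL;
    free(stp->labels); stp->labels = NULL;
}
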
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 997ba197db..1048f258a5 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,11 +49,6 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_traced_function;
-typedef struct {
- BeamInstr* start; /* Pointer to start of module. */
- BeamInstr* end; /* Points one word beyond last function in module. */
-} Range;
/*
* The following variables keep a sorted list of address ranges for
@@ -61,11 +56,6 @@ typedef struct {
* instruction pointer.
*/
-extern Range* modules;
-extern int num_loaded_modules;
-extern int allocated_modules;
-extern Range* mid_module;
-
/* Total code size in bytes */
extern Uint erts_total_code_size;
/*
@@ -94,27 +84,22 @@ extern Uint erts_total_code_size;
#define MI_COMPILE_SIZE_ON_HEAP 6
/*
- * Number of breakpoints in module is stored in this word
- */
-#define MI_NUM_BREAKPOINTS 7
-
-/*
* Literal area (constant pool).
*/
-#define MI_LITERALS_START 8
-#define MI_LITERALS_END 9
-#define MI_LITERALS_OFF_HEAP 10
+#define MI_LITERALS_START 7
+#define MI_LITERALS_END 8
+#define MI_LITERALS_OFF_HEAP 9
/*
* Pointer to the on_load function (or NULL if none).
*/
-#define MI_ON_LOAD_FUNCTION_PTR 11
+#define MI_ON_LOAD_FUNCTION_PTR 10
/*
* Pointer to the line table (or NULL if none).
*/
-#define MI_LINE_TABLE 12
+#define MI_LINE_TABLE 11
/*
* Start of function pointer table. This table contains pointers to
@@ -125,5 +110,27 @@ extern Uint erts_total_code_size;
* this table.
*/
-#define MI_FUNCTIONS 13
+#define MI_FUNCTIONS 12
+
+/*
+ * Layout of the line table.
+ */
+
+#define MI_LINE_FNAME_PTR 0
+#define MI_LINE_LOC_TAB 1
+#define MI_LINE_LOC_SIZE 2
+#define MI_LINE_FUNC_TAB 3
+
+#define LINE_INVALID_LOCATION (0)
+
+/*
+ * Macros for manipulating locations.
+ */
+
+#define IS_VALID_LOCATION(File, Line) \
+ ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
+#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
+#define LOC_FILE(Loc) ((Loc) >> 24)
+#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
#endif /* _BEAM_LOAD_H */
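
The location macros moved into this header pack a file index into the top 8 bits of a location word and the line number into the lower 24 bits. A small self-contained check of that packing, copying the macros exactly as defined above:

#include <assert.h>

#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
#define LOC_FILE(Loc) ((Loc) >> 24)
#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))

int main(void)
{
    unsigned loc = MAKE_LOCATION(2, 117);  /* file #2, line 117 */
    assert(LOC_FILE(loc) == 2);            /* file index in the top 8 bits */
    assert(LOC_LINE(loc) == 117);          /* line number in the low 24 bits */
    return 0;
}
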
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
new file mode 100644
index 0000000000..0f2d5d0c2a
--- /dev/null
+++ b/erts/emulator/beam/beam_ranges.c
@@ -0,0 +1,349 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "beam_load.h"
+
+typedef struct {
+ BeamInstr* start; /* Pointer to start of module. */
+ erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
+} Range;
+
+/* Range 'end' needs to be atomic as we purge module
+ by setting end=start in active code_ix */
+#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end))
+
+static Range* find_range(BeamInstr* pc);
+static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
+ BeamInstr* modp, int idx);
+
+/*
+ * The following variables keep a sorted list of address ranges for
+ * each module. It allows us to quickly find a function given an
+ * instruction pointer.
+ */
+struct ranges {
+ Range* modules; /* Sorted lists of module addresses. */
+ Sint n; /* Number of range entries. */
+ Sint allocated; /* Number of allocated entries. */
+ erts_smp_atomic_t mid; /* Cached search start point */
+};
+static struct ranges r[ERTS_NUM_CODE_IX];
+static erts_smp_atomic_t mem_used;
+
+#ifdef HARD_DEBUG
+static void check_consistency(struct ranges* p)
+{
+ int i;
+
+ ASSERT(p->n <= p->allocated);
+ ASSERT((Uint)(p->mid - p->modules) < p->n ||
+ (p->mid == p->modules && p->n == 0));
+ for (i = 0; i < p->n; i++) {
+ ASSERT(p->modules[i].start <= RANGE_END(&p->modules[i]));
+ ASSERT(!i || RANGE_END(&p->modules[i-1]) < p->modules[i].start);
+ }
+}
+# define CHECK(r) check_consistency(r)
+#else
+# define CHECK(r)
+#endif /* HARD_DEBUG */
+
+
+void
+erts_init_ranges(void)
+{
+ Sint i;
+
+ erts_smp_atomic_init_nob(&mem_used, 0);
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
+ r[i].modules = 0;
+ r[i].n = 0;
+ r[i].allocated = 0;
+ erts_smp_atomic_init_nob(&r[i].mid, 0);
+ }
+}
+
+void
+erts_start_staging_ranges(void)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+
+ if (r[dst].modules) {
+ erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
+ erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules);
+ r[dst].modules = NULL;
+ }
+}
+
+void
+erts_end_staging_ranges(int commit)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+
+ if (commit && r[dst].modules == NULL) {
+ Sint i;
+ Sint n;
+
+ /* No modules added, just clone src and remove purged code. */
+ ErtsCodeIndex src = erts_active_code_ix();
+
+ erts_smp_atomic_add_nob(&mem_used, r[src].n);
+ r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ r[src].n * sizeof(Range));
+ r[dst].allocated = r[src].n;
+ n = 0;
+ for (i = 0; i < r[src].n; i++) {
+ Range* rp = r[src].modules+i;
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n] = *rp;
+ n++;
+ }
+ }
+ r[dst].n = n;
+ erts_smp_atomic_set_nob(&r[dst].mid,
+ (erts_aint_t) (r[dst].modules + n / 2));
+ }
+}
+
+void
+erts_update_ranges(BeamInstr* code, Uint size)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+ ErtsCodeIndex src = erts_active_code_ix();
+ Sint i;
+ Sint n;
+ Sint need;
+
+ if (src == dst) {
+ ASSERT(!erts_initialized);
+
+ /*
+ * During start-up of system, the indices are the same.
+ * Handle this by faking a source area.
+ */
+ src = (src+1) % ERTS_NUM_CODE_IX;
+ if (r[src].modules) {
+ erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
+ erts_free(ERTS_ALC_T_MODULE_REFS, r[src].modules);
+ }
+ r[src] = r[dst];
+ r[dst].modules = 0;
+ }
+
+ CHECK(&r[src]);
+
+ ASSERT(r[dst].modules == NULL);
+ need = r[dst].allocated = r[src].n + 1;
+ erts_smp_atomic_add_nob(&mem_used, need);
+ r[dst].modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ need * sizeof(Range));
+ n = 0;
+ for (i = 0; i < r[src].n; i++) {
+ Range* rp = r[src].modules+i;
+ if (code < rp->start) {
+ r[dst].modules[n].start = code;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(((byte *)code) + size));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
+ n++;
+ break;
+ }
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n].start = rp->start;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(RANGE_END(rp)));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
+ n++;
+ }
+ }
+
+ while (i < r[src].n) {
+ Range* rp = r[src].modules+i;
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n].start = rp->start;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(RANGE_END(rp)));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
+ n++;
+ }
+ i++;
+ }
+
+ if (n == 0 || code > r[dst].modules[n-1].start) {
+ r[dst].modules[n].start = code;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(((byte *)code) + size));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
+ n++;
+ }
+
+ ASSERT(n <= r[src].n+1);
+ r[dst].n = n;
+ erts_smp_atomic_set_nob(&r[dst].mid,
+ (erts_aint_t) (r[dst].modules + n / 2));
+
+ CHECK(&r[dst]);
+ CHECK(&r[src]);
+}
+
+void
+erts_remove_from_ranges(BeamInstr* code)
+{
+ Range* rp = find_range(code);
+ erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
+}
+
+UWord
+erts_ranges_sz(void)
+{
+ return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range);
+}
+
+/*
+ * Find a function from the given pc and fill information in
+ * the FunctionInfo struct. If the full_info is non-zero, fill
+ * in all available information (including location in the
+ * source code). If no function is found, the 'current' field
+ * will be set to NULL.
+ */
+
+void
+erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
+{
+ BeamInstr** low;
+ BeamInstr** high;
+ BeamInstr** mid;
+ Range* rp;
+
+ fi->current = NULL;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
+ rp = find_range(pc);
+ if (rp == 0) {
+ return;
+ }
+
+ low = (BeamInstr **) (rp->start + MI_FUNCTIONS);
+ high = low + rp->start[MI_NUM_FUNCTIONS];
+ while (low < high) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ fi->current = mid[0]+2;
+ if (full_info) {
+ BeamInstr** fp = (BeamInstr **) (rp->start +
+ MI_FUNCTIONS);
+ int idx = mid - fp;
+ lookup_loc(fi, pc, rp->start, idx);
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
+
+static Range*
+find_range(BeamInstr* pc)
+{
+ ErtsCodeIndex active = erts_active_code_ix();
+ Range* low = r[active].modules;
+ Range* high = low + r[active].n;
+ Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid);
+
+ CHECK(&r[active]);
+ while (low < high) {
+ if (pc < mid->start) {
+ high = mid;
+ } else if (pc > RANGE_END(mid)) {
+ low = mid + 1;
+ } else {
+ erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
+ return mid;
+ }
+ mid = low + (high-low) / 2;
+ }
+ return 0;
+}
+
+static void
+lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
+{
+ Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
+ Eterm* low;
+ Eterm* high;
+ Eterm* mid;
+ Eterm pc;
+
+ if (line == 0) {
+ return;
+ }
+
+ pc = (Eterm) (BeamInstr) orig_pc;
+ fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
+ low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
+ high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
+ while (high > low) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ int file;
+ int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
+
+ if (line[MI_LINE_LOC_SIZE] == 2) {
+ Uint16* loc_table =
+ (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ fi->loc = loc_table[index];
+ } else {
+ Uint32* loc_table =
+ (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ ASSERT(line[MI_LINE_LOC_SIZE] == 4);
+ fi->loc = loc_table[index];
+ }
+ if (fi->loc == LINE_INVALID_LOCATION) {
+ return;
+ }
+ fi->needed += 3+2+3+2;
+ file = LOC_FILE(fi->loc);
+ if (file == 0) {
+ /* Special case: Module name with ".erl" appended */
+ Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ fi->needed += 2*(mod_atom->len+4);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ fi->needed += 2*ap->len;
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
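
The new beam_ranges.c above keeps the loaded-module address ranges in a sorted array per code index and looks a pc up with a binary search that starts from a cached midpoint. The stand-alone sketch below shows only that search strategy; SimpleRange, SimpleRangeTab and simple_find_range are simplified stand-ins (single-threaded, no atomics, plain half-open ranges), not the actual ERTS types.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the beam_ranges structures: a sorted array of
 * half-open address ranges plus a cached "guess" used as the first probe. */
typedef struct { char *start; char *end; } SimpleRange;

typedef struct {
    SimpleRange *modules;   /* sorted by start, non-overlapping */
    size_t n;
    SimpleRange *mid;       /* cached hit from the previous lookup */
} SimpleRangeTab;

/* Binary search that begins at the cached midpoint; a hit updates the cache
 * so that repeated lookups into the same module resolve immediately. */
static SimpleRange *simple_find_range(SimpleRangeTab *t, char *pc)
{
    SimpleRange *low = t->modules;
    SimpleRange *high = low + t->n;
    SimpleRange *mid = t->mid;

    while (low < high) {
        if (pc < mid->start)
            high = mid;
        else if (pc >= mid->end)
            low = mid + 1;
        else {
            t->mid = mid;             /* remember the hit for the next call */
            return mid;
        }
        mid = low + (high - low) / 2;
    }
    return NULL;
}

int main(void)
{
    static char code[200];            /* two fake 100-byte module areas */
    SimpleRange mods[2] = {
        { code,       code + 100 },
        { code + 100, code + 200 },
    };
    SimpleRangeTab tab = { mods, 2, &mods[1] };
    SimpleRange *hit = simple_find_range(&tab, code + 10);

    printf("pc hit module %d\n", hit ? (int)(hit - mods) : -1);
    return 0;
}

In the real table above, purged modules are represented by ranges whose end equals their start, so they can never match a pc and are dropped the next time the table is rebuilt.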
diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c
index 7fbf44a03c..8613131176 100644
--- a/erts/emulator/beam/benchmark.c
+++ b/erts/emulator/beam/benchmark.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -33,17 +33,6 @@ unsigned long long messages_copied;
unsigned long long messages_ego;
unsigned long long minor_gc;
unsigned long long major_gc;
-#ifdef HYBRID
-unsigned long long minor_global_gc;
-unsigned long long major_global_gc;
-unsigned long long gc_in_copy;
-#ifdef INCREMENTAL
-unsigned long long minor_gc_cycles;
-unsigned long long major_gc_cycles;
-unsigned long long minor_gc_stages;
-unsigned long long major_gc_stages;
-#endif
-#endif
#endif /* BM_COUNTERS */
#ifdef BM_TIMERS
@@ -191,17 +180,6 @@ void init_benchmarking()
messages_ego = 0;
minor_gc = 0;
major_gc = 0;
-#ifdef HYBRID
- minor_global_gc = 0;
- major_global_gc = 0;
- gc_in_copy = 0;
-#ifdef INCREMENTAL
- minor_gc_cycles = 0;
- major_gc_cycles = 0;
- minor_gc_stages = 0;
- major_gc_stages = 0;
-#endif
-#endif
#endif /* BM_COUNTERS */
#ifdef BM_HEAP_SIZES
@@ -243,16 +221,6 @@ void save_statistics()
erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned);
erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc);
erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc);
-#ifdef HYBRID
- erts_fprintf(file,"Number of global minor GCs: %lld\n",minor_global_gc);
- erts_fprintf(file,"Number of global major GCs: %lld\n",major_global_gc);
-#ifdef INCREMENTAL
- erts_fprintf(file,"Number of minor GC-cycles: %lld\n",minor_gc_cycles);
- erts_fprintf(file,"Number of major GC-cycles: %lld\n",major_gc_cycles);
- erts_fprintf(file,"Number of minor GC-stages: %lld\n",minor_gc_stages);
- erts_fprintf(file,"Number of major GC-stages: %lld\n",major_gc_stages);
-#endif
-#endif
erts_fprintf(file,"Number of messages sent: %lld\n",messages_sent);
erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied);
erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego);
diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h
index eedb06a1b6..766edaac42 100644
--- a/erts/emulator/beam/benchmark.h
+++ b/erts/emulator/beam/benchmark.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -99,17 +99,6 @@ extern unsigned long long messages_copied;
extern unsigned long long messages_ego;
extern unsigned long long minor_gc;
extern unsigned long long major_gc;
-#ifdef HYBRID
-extern unsigned long long minor_global_gc;
-extern unsigned long long major_global_gc;
-extern unsigned long long gc_in_copy;
-#ifdef INCREMENTAL
-extern unsigned long long minor_gc_cycles;
-extern unsigned long long major_gc_cycles;
-extern unsigned long long minor_gc_stages;
-extern unsigned long long major_gc_stages;
-#endif
-#endif
#define BM_COUNT(var) (var)++;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 3ab8fdb2c7..98dc9a3902 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -2837,7 +2837,7 @@ BIF_RETTYPE float_to_list_1(BIF_ALIST_1)
if (is_not_float(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
GET_DOUBLE(BIF_ARG_1, f);
- if ((i = sys_double_to_chars(f.fd, fbuf)) <= 0)
+ if ((i = sys_double_to_chars(f.fd, fbuf, sizeof(fbuf))) <= 0)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
need = i*2;
hp = HAlloc(BIF_P, need);
@@ -3503,23 +3503,6 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
}
/**********************************************************************/
-/* Perform garbage collection of the message area */
-
-BIF_RETTYPE garbage_collect_message_area_0(BIF_ALIST_0)
-{
-#if defined(HYBRID) && !defined(INCREMENTAL)
- int reds = 0;
-
- FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
- reds = erts_global_garbage_collect(BIF_P, 0, NULL, 0);
- BIF_RET2(am_true, reds);
-#else
- BIF_RET(am_false);
-#endif
-}
-
-
-/**********************************************************************/
/*
* The erlang:processes/0 BIF.
*/
@@ -3735,7 +3718,8 @@ BIF_RETTYPE function_exported_3(BIF_ALIST_3)
is_not_small(BIF_ARG_3)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3)) == NULL) {
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3),
+ erts_active_code_ix()) == NULL) {
BIF_RET(am_false);
}
BIF_RET(am_true);
@@ -4469,6 +4453,21 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Export bif_return_trap_export;
+void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
+ Eterm (*bif)(BIF_ALIST_0))
+{
+ int i;
+ sys_memset((void *) ep, 0, sizeof(Export));
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->addressv[i] = &ep->code[3];
+ }
+ ep->code[0] = m;
+ ep->code[1] = f;
+ ep->code[2] = a;
+ ep->code[3] = (BeamInstr) em_apply_bif;
+ ep->code[4] = (BeamInstr) bif;
+}
+
void erts_init_bif(void)
{
reference0 = 0;
@@ -4484,17 +4483,13 @@ void erts_init_bif(void)
* yield the calling process traps to. The only thing it does:
* return the value passed as argument.
*/
- sys_memset((void *) &bif_return_trap_export, 0, sizeof(Export));
- bif_return_trap_export.address = &bif_return_trap_export.code[3];
- bif_return_trap_export.code[0] = am_erlang;
- bif_return_trap_export.code[1] = am_bif_return_trap;
+ erts_init_trap_export(&bif_return_trap_export, am_erlang, am_bif_return_trap,
#ifdef DEBUG
- bif_return_trap_export.code[2] = 2;
+ 2
#else
- bif_return_trap_export.code[2] = 1;
+ 1
#endif
- bif_return_trap_export.code[3] = (BeamInstr) em_apply_bif;
- bif_return_trap_export.code[4] = (BeamInstr) &bif_return_trap;
+ , &bif_return_trap);
flush_monitor_message_trap = erts_export_put(am_erlang,
am_flush_monitor_message,
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 086dd9a5fd..71f232035d 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -125,7 +125,7 @@ do { \
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
(Proc)->arity = 0; \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -135,7 +135,7 @@ do { \
Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
(Proc)->arity = 1; \
reg[0] = (Eterm) (A0); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -146,7 +146,7 @@ do { \
(Proc)->arity = 2; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -158,7 +158,7 @@ do { \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -170,13 +170,13 @@ do { \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
} while (0)
#define BIF_TRAP0(p, Trap_) do { \
(p)->arity = 0; \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -185,7 +185,7 @@ do { \
Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
(p)->arity = 1; \
reg[0] = (A0); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -195,7 +195,7 @@ do { \
(p)->arity = 2; \
reg[0] = (A0); \
reg[1] = (A1); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -206,7 +206,7 @@ do { \
reg[0] = (A0); \
reg[1] = (A1); \
reg[2] = (A2); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
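
The macro changes above replace the single Export 'address' field with an addressv[] entry per code index, read through erts_active_code_ix(). Here is a minimal stand-alone sketch of that idea; MiniExport, mini_init_trap_export and active_code_ix are invented names, not the real Export layout or ERTS API.

#include <stdio.h>

#define NUM_CODE_IX 3

/* MiniExport is an invented stand-in for an Export entry: instead of the old
 * single 'address' field there is one callable address per code index, which
 * is what the BIF_TRAP macros above now index with the active code index. */
typedef struct {
    const char *name;
    void *addressv[NUM_CODE_IX];
} MiniExport;

static unsigned active_code_ix = 0;    /* stand-in for erts_active_code_ix() */

/* Mirrors what erts_init_trap_export() in bif.c does: a trap export points
 * every code index at the same code, so it works whichever index is active. */
static void mini_init_trap_export(MiniExport *ep, const char *name, void *code)
{
    int i;
    ep->name = name;
    for (i = 0; i < NUM_CODE_IX; i++)
        ep->addressv[i] = code;
}

static void *trap_target(MiniExport *ep)
{
    return ep->addressv[active_code_ix];   /* what (Trap)->addressv[...] selects */
}

int main(void)
{
    static int dummy_code;
    MiniExport trap;

    mini_init_trap_export(&trap, "bif_return_trap", &dummy_code);
    printf("%s traps to %p\n", trap.name, trap_target(&trap));
    return 0;
}

This is why erts_init_trap_export() in bif.c fills every slot of addressv[] identically: a trap taken while new code is being staged still jumps to consistent code.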
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index c0a83c6f96..9a1a25031c 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -99,8 +99,6 @@ bif erlang:garbage_collect/0
bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0
bif erlang:garbage_collect/1
bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1
-bif erlang:garbage_collect_message_area/0
-bif 'erl.system':garbage_collect_message_area/0 ebif_garbage_collect_message_area_0
bif erlang:get/0
bif 'erl.lang.proc.pdict':get/0 ebif_get_0
bif erlang:get/1
@@ -144,8 +142,6 @@ bif erlang:list_to_pid/1
bif 'erl.lang.proc':string_to_pid/1 ebif_string_to_pid_1 list_to_pid_1
bif erlang:list_to_tuple/1
bif 'erl.lang.tuple':from_list/1 ebif_list_to_tuple_1
-bif erlang:load_module/2
-bif 'erl.system.code':load/2 ebif_load_module_2
bif erlang:loaded/0
bif 'erl.system.code':loaded/0 ebif_loaded_0
bif erlang:localtime/0
@@ -819,6 +815,13 @@ bif erlang:dt_restore_tag/1
bif erlang:dt_prepend_vm_tag_data/1
bif erlang:dt_append_vm_tag_data/1
+
+#
+# New in R16B.
+#
+bif erlang:prepare_loading/2
+bif erlang:finish_loading/1
+
#
# Obsolete
#
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index ad05316b39..9aa1e5f30d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -385,17 +385,22 @@ loaded(int to, void *to_arg)
int old = 0;
int cur = 0;
BeamInstr* code;
+ Module* modp;
+ ErtsCodeIndex code_ix;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
/*
* Calculate and print totals.
*/
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- cur += module_code(i)->code_length;
- if (module_code(i)->old_code_length != 0) {
- old += module_code(i)->old_code_length;
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp = module_code(i, code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ cur += modp->curr.code_length;
+ if (modp->old.code_length != 0) {
+ old += modp->old.code_length;
}
}
}
@@ -406,21 +411,22 @@ loaded(int to, void *to_arg)
* Print one line per module.
*/
- for (i = 0; i < module_code_size(); i++) {
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ modp = module_code(i, code_ix);
if (!ERTS_IS_CRASH_DUMPING) {
/*
* Interactive dump; keep it brief.
*/
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
- cur += module_code(i)->code_length;
- erts_print(to, to_arg, " %d", module_code(i)->code_length );
- if (module_code(i)->old_code_length != 0) {
+ if (modp != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ erts_print(to, to_arg, "%T", make_atom(modp->module));
+ cur += modp->curr.code_length;
+ erts_print(to, to_arg, " %d", modp->curr.code_length );
+ if (modp->old.code_length != 0) {
erts_print(to, to_arg, " (%d old)",
- module_code(i)->old_code_length );
- old += module_code(i)->old_code_length;
+ modp->old.code_length );
+ old += modp->old.code_length;
}
erts_print(to, to_arg, "\n");
}
@@ -428,15 +434,15 @@ loaded(int to, void *to_arg)
/*
* To crash dump; make it parseable.
*/
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
+ if (modp != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
erts_print(to, to_arg, "=mod:");
- erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
+ erts_print(to, to_arg, "%T", make_atom(modp->module));
erts_print(to, to_arg, "\n");
erts_print(to, to_arg, "Current size: %d\n",
- module_code(i)->code_length);
- code = module_code(i)->code;
+ modp->curr.code_length);
+ code = modp->curr.code;
if (code != NULL && code[MI_ATTR_PTR]) {
erts_print(to, to_arg, "Current attributes: ");
dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
@@ -448,9 +454,9 @@ loaded(int to, void *to_arg)
code[MI_COMPILE_SIZE]);
}
- if (module_code(i)->old_code_length != 0) {
- erts_print(to, to_arg, "Old size: %d\n", module_code(i)->old_code_length);
- code = module_code(i)->old_code;
+ if (modp->old.code_length != 0) {
+ erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length);
+ code = modp->old.code;
if (code[MI_ATTR_PTR]) {
erts_print(to, to_arg, "Old attributes: ");
dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
@@ -465,6 +471,7 @@ loaded(int to, void *to_arg)
}
}
}
+ erts_runlock_old_code(code_ix);
}
@@ -659,10 +666,14 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
#endif
int fd;
+ size_t envsz;
time_t now;
+ char env[21]; /* enough to hold any 64-bit integer */
size_t dumpnamebufsize = MAXPATHLEN;
char dumpnamebuf[MAXPATHLEN];
char* dumpname;
+ int secs;
+ int env_erl_crash_dump_seconds_set = 1;
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
@@ -685,9 +696,54 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_writing_erl_crash_dump = 1;
#endif
- erts_sys_prepare_crash_dump();
+ envsz = sizeof(env);
+ /* ERL_CRASH_DUMP_SECONDS not set
+ * - if we have a heart port, this is the same as ERL_CRASH_DUMP_SECONDS = 0:
+ * do not write a dump, do not set an alarm, break immediately
+ * - otherwise write the dump with no time limit (until it is complete),
+ * the same as a negative ERL_CRASH_DUMP_SECONDS
+ *
+ * ERL_CRASH_DUMP_SECONDS = 0
+ * - do not write dump
+ * - do not set an alarm
+ * - break immediately
+ *
+ * ERL_CRASH_DUMP_SECONDS < 0
+ * - do not set alarm
+ * - write dump until done
+ *
+ * ERL_CRASH_DUMP_SECONDS = S (and S positive)
+ * - do not write the dump forever
+ * - set an alarm of S seconds (set in sys)
+ * - write the dump until the alarm fires or the file is written completely
+ */
+
+ if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) {
+ env_erl_crash_dump_seconds_set = 0;
+ secs = -1;
+ } else {
+ env_erl_crash_dump_seconds_set = 1;
+ secs = atoi(env);
+ }
+
+ if (secs == 0) {
+ return;
+ }
+
+ /* erts_sys_prepare_crash_dump returns 1 if a heart port is found, otherwise 0.
+ * The dump is skipped only when heart is present and ERL_CRASH_DUMP_SECONDS
+ * is not set; in every other case we continue writing the dump.
+ *
+ * beware: secs -1 means no alarm
+ */
+
+ if (erts_sys_prepare_crash_dump(secs) && !env_erl_crash_dump_seconds_set ) {
+ return;
+ }
- if (erts_sys_getenv("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
+ if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
dumpname = "erl_crash.dump";
else
dumpname = &dumpnamebuf[0];
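
The ERL_CRASH_DUMP_SECONDS handling above boils down to a small decision table. The sketch below restates it as a self-contained function; crash_dump_policy and its parameters are illustrative names only and do not exist in ERTS.

#include <stdio.h>
#include <stdlib.h>

/* Returns 1 if a dump should be written; *set_alarm/*alarm_secs describe
 * whether a time limit applies, following the rules in the comment above. */
static int crash_dump_policy(const char *env_secs, int have_heart,
                             int *set_alarm, int *alarm_secs)
{
    int secs = env_secs ? atoi(env_secs) : -1;   /* unset behaves like -1 */

    *set_alarm = 0;
    *alarm_secs = 0;

    if (secs == 0)
        return 0;                     /* = 0: no dump, break immediately */
    if (env_secs == NULL && have_heart)
        return 0;                     /* unset + heart port: same as = 0 */
    if (secs > 0) {
        *set_alarm = 1;               /* positive: dump with a time limit */
        *alarm_secs = secs;
    }
    return 1;                         /* otherwise dump until it is complete */
}

int main(void)
{
    int alarm, secs;

    printf("unset, no heart -> dump=%d\n",
           crash_dump_policy(NULL, 0, &alarm, &secs));
    printf("\"5\", heart -> dump=%d alarm=%d secs=%d\n",
           crash_dump_policy("5", 1, &alarm, &secs), alarm, secs);
    return 0;
}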
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
new file mode 100644
index 0000000000..c66d5a2f05
--- /dev/null
+++ b/erts/emulator/beam/code_ix.c
@@ -0,0 +1,168 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "code_ix.h"
+#include "global.h"
+#include "beam_catches.h"
+
+
+
+#if 0
+# define CIX_TRACE(text) erts_fprintf(stderr, "CIX_TRACE: " text " act=%u load=%u\r\n", erts_active_code_ix(), erts_staging_code_ix())
+#else
+# define CIX_TRACE(text)
+#endif
+
+erts_smp_atomic32_t the_active_code_index;
+erts_smp_atomic32_t the_staging_code_index;
+
+static Process* code_writing_process = NULL;
+struct code_write_queue_item {
+ Process *p;
+ struct code_write_queue_item* next;
+};
+static struct code_write_queue_item* code_write_queue = NULL;
+static erts_smp_mtx_t code_write_permission_mtx;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static erts_tsd_key_t has_code_write_permission;
+#endif
+
+void erts_code_ix_init(void)
+{
+ /* We start the emulator by initializing preloaded modules
+ * single-threaded, with active and staging both set to zero.
+ * Preloading is finished by a commit that will set things straight.
+ */
+ erts_smp_atomic32_init_nob(&the_active_code_index, 0);
+ erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
+ erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_key_create(&has_code_write_permission);
+#endif
+ CIX_TRACE("init");
+}
+
+void erts_start_staging_code_ix(void)
+{
+ beam_catches_start_staging();
+ export_start_staging();
+ module_start_staging();
+ erts_start_staging_ranges();
+ CIX_TRACE("start");
+}
+
+
+void erts_end_staging_code_ix(void)
+{
+ beam_catches_end_staging(1);
+ export_end_staging(1);
+ module_end_staging(1);
+ erts_end_staging_ranges(1);
+ CIX_TRACE("end");
+}
+
+void erts_commit_staging_code_ix(void)
+{
+ ErtsCodeIndex ix;
+ /* We need this lock as we are now making the staging export table active */
+ export_staging_lock();
+ ix = erts_staging_code_ix();
+ erts_smp_atomic32_set_nob(&the_active_code_index, ix);
+ ix = (ix + 1) % ERTS_NUM_CODE_IX;
+ erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
+ export_staging_unlock();
+ CIX_TRACE("activate");
+}
+
+void erts_abort_staging_code_ix(void)
+{
+ beam_catches_end_staging(0);
+ export_end_staging(0);
+ module_end_staging(0);
+ erts_end_staging_ranges(0);
+ CIX_TRACE("abort");
+}
+
+
+/*
+ * Caller _must_ yield if we return 0
+ */
+int erts_try_seize_code_write_permission(Process* c_p)
+{
+ int success;
+#ifdef ERTS_SMP
+ ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
+#endif
+ ASSERT(c_p != NULL);
+
+ erts_smp_mtx_lock(&code_write_permission_mtx);
+ success = (code_writing_process == NULL);
+ if (success) {
+ code_writing_process = c_p;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 1);
+#endif
+ }
+ else { /* Already locked */
+ struct code_write_queue_item* qitem;
+ ASSERT(code_writing_process != c_p);
+ qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_smp_proc_inc_refc(c_p);
+ qitem->next = code_write_queue;
+ code_write_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_smp_mtx_unlock(&code_write_permission_mtx);
+ return success;
+}
+
+void erts_release_code_write_permission(void)
+{
+ erts_smp_mtx_lock(&code_write_permission_mtx);
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ while (code_write_queue != NULL) { /* unleash the entire herd */
+ struct code_write_queue_item* qitem = code_write_queue;
+ erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ code_write_queue = qitem->next;
+ erts_smp_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
+ }
+ code_writing_process = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 0);
+#endif
+ erts_smp_mtx_unlock(&code_write_permission_mtx);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_has_code_write_permission(void)
+{
+ return (code_writing_process != NULL) && erts_tsd_get(has_code_write_permission);
+}
+#endif
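
erts_try_seize_code_write_permission() and erts_release_code_write_permission() above implement a single-writer permission with a queue of suspended waiters that are all resumed on release. Stripped of process locks, suspension and allocator types, the bookkeeping looks roughly like the sketch below; try_seize, release and the plain int "pid" are stand-ins, not ERTS API.

#include <stdio.h>
#include <stdlib.h>

struct waiter { int pid; struct waiter *next; };

static int writer = 0;                 /* 0 means nobody holds the permission */
static struct waiter *waiters = NULL;

/* Returns 1 on success; on 0 the caller has been queued and must yield. */
static int try_seize(int pid)
{
    struct waiter *w;

    if (writer == 0) {
        writer = pid;
        return 1;
    }
    w = malloc(sizeof(*w));
    w->pid = pid;
    w->next = waiters;                 /* pushed on a simple list, as above */
    waiters = w;
    return 0;
}

static void release(void)
{
    while (waiters) {                  /* resume every queued waiter */
        struct waiter *w = waiters;
        printf("resuming process %d\n", w->pid);
        waiters = w->next;
        free(w);
    }
    writer = 0;
}

int main(void)
{
    printf("p1 seized: %d\n", try_seize(1));
    printf("p2 seized: %d\n", try_seize(2));   /* p2 gets queued */
    release();                                 /* wakes p2 */
    return 0;
}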
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
new file mode 100644
index 0000000000..e2bd0da112
--- /dev/null
+++ b/erts/emulator/beam/code_ix.h
@@ -0,0 +1,141 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Description:
+ * This is the interface that facilitates changing the beam code
+ * (load,upgrade,delete) while allowing executing Erlang processes to
+ * access the code without any locks or other expensive memory barriers.
+ *
+ * The basic idea is to maintain several "logical copies" of the code. These
+ * copies are identified by a global 'code index', an integer of 0, 1 or 2.
+ * The code index is used as argument to code access structures like
+ * export, module, beam_catches, beam_ranges.
+ *
+ * The current 'active' code index is used to access the current running
+ * code. The 'staging' code index is used by the process that performs
+ * a code change operation. When a code change operation completes
+ * successfully, the staging code index becomes the new active code index.
+ *
+ * The third code index is not explicitly used. It can be thought of as
+ * the "previous active" or the "next staging" index. It is needed to make
+ * sure that we do not reuse a code index for staging until we are sure
+ * that no executing BIFs are still referencing it.
+ * We could get by with only two (0 and 1), but that would require us to
+ * wait for all schedulers to re-schedule before each code change
+ * operation can start staging.
+ *
+ * Note that the 'code index' is very loosely coupled to the concept of
+ * 'current' and 'old' module versions. You can almost say that they are
+ * orthogonal to each other. Code index is an emulator-global concept, while
+ * 'current' and 'old' are specific to each module.
+ */
+
+#ifndef __CODE_IX_H__
+#define __CODE_IX_H__
+
+#ifndef __SYS_H__
+# ifdef HAVE_CONFIG_H
+# include "config.h"
+# endif
+# include "sys.h"
+#endif
+struct process;
+
+
+#define ERTS_NUM_CODE_IX 3
+typedef unsigned ErtsCodeIndex;
+
+
+/* Called once at emulator initialization.
+ */
+void erts_code_ix_init(void);
+
+/* Return active code index.
+ * Is guaranteed to be valid until the calling BIF returns.
+ * To get a consistent view of the code, only one call to erts_active_code_ix()
+ * should be made and the returned ix reused within the same BIF call.
+ */
+ERTS_GLB_INLINE
+ErtsCodeIndex erts_active_code_ix(void);
+
+/* Return staging code ix.
+ * Only used by a process performing code loading/upgrading/deleting/purging.
+ * code_ix must be locked.
+ */
+ERTS_GLB_INLINE
+ErtsCodeIndex erts_staging_code_ix(void);
+
+/* Try to seize exclusive code write permission. Needed for code staging.
+ * Main process lock (only) must be held.
+ * System thread progress must not be blocked.
+ * Caller is suspended and *must* yield if 0 is returned.
+ */
+int erts_try_seize_code_write_permission(struct process*);
+
+/* Release code write permission.
+ * Will resume any suspended waiters.
+ */
+void erts_release_code_write_permission(void);
+
+/* Prepare the "staging area" to be a complete copy of the active code.
+ * Code write permission must have been seized.
+ * Must be followed by calls to either "end" and "commit" or "abort" before
+ * code write permission can be released.
+ */
+void erts_start_staging_code_ix(void);
+
+/* End the staging.
+ * Preceded by "start" and must be followed by "commit".
+ */
+void erts_end_staging_code_ix(void);
+
+/* Set staging code index as new active code index.
+ * Preceded by "end".
+ */
+void erts_commit_staging_code_ix(void);
+
+/* Abort the staging.
+ * Preceded by "start".
+ */
+void erts_abort_staging_code_ix(void);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_has_code_write_permission(void);
+#endif
+
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+extern erts_smp_atomic32_t the_active_code_index;
+extern erts_smp_atomic32_t the_staging_code_index;
+
+ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&the_active_code_index);
+}
+ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&the_staging_code_index);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* !__CODE_IX_H__ */
+
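
The header comment above describes three logical code copies with an active and a staging index. The arithmetic of erts_commit_staging_code_ix() can be modelled in a few lines; this toy version ignores the atomics and the export staging lock and only shows how the indices rotate among the ERTS_NUM_CODE_IX slots.

#include <stdio.h>

#define NUM_CODE_IX 3

static unsigned active_ix  = 0;   /* as at boot, before the first commit */
static unsigned staging_ix = 0;

/* Same arithmetic as erts_commit_staging_code_ix(): staging becomes active
 * and staging advances to the third, currently unused slot. */
static void commit_staging(void)
{
    active_ix  = staging_ix;
    staging_ix = (staging_ix + 1) % NUM_CODE_IX;
}

int main(void)
{
    int i;

    for (i = 0; i < 4; i++) {
        commit_staging();
        printf("commit %d: active=%u staging=%u\n", i + 1, active_ix, staging_ix);
    }
    return 0;
}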
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 04e1805088..23c0fca6aa 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -26,30 +26,13 @@
#include "global.h"
#include "erl_process.h"
#include "erl_gc.h"
-#include "erl_nmgc.h"
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
#include "dtrace-wrapper.h"
-#ifdef HYBRID
-MA_STACK_DECLARE(src);
-MA_STACK_DECLARE(dst);
-MA_STACK_DECLARE(offset);
-#endif
-
static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*);
-void
-init_copy(void)
-{
-#ifdef HYBRID
- MA_STACK_ALLOC(src);
- MA_STACK_ALLOC(dst);
- MA_STACK_ALLOC(offset);
-#endif
-}
-
/*
* Copy object "obj" to process p.
*/
@@ -432,12 +415,10 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
while (i--) {
*htop++ = *objp++;
}
-#ifndef HYBRID /* FIND ME! */
funp = (ErlFunThing *) tp;
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*) funp;
erts_refc_inc(&funp->fe->refc, 2);
-#endif
*argp = make_fun_rel(tp, dst_base);
}
break;
@@ -500,420 +481,6 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
return res;
}
-#ifdef HYBRID
-
-#ifdef BM_MESSAGE_SIZES
-# define BM_ADD(var,val) (var) += (val);
-#else
-# define BM_ADD(var,val)
-#endif
-
-#ifdef DEBUG
-# define CLEARMEM(PTR,SIZE) memset(PTR,0,SIZE*sizeof(Eterm))
-#else
-# define CLEARMEM(PTR,SIZE)
-#endif
-
-#ifdef INCREMENTAL
-#define GlobalAlloc(p, need, hp) \
-do { \
- Uint n = (need); \
- BM_ADD(words_copied,n); \
- BM_SWAP_TIMER(copy,system); \
- /* If a new collection cycle is started during copy, the message * \
- * will end up in the old generation and all allocations * \
- * thereafter must go directly into the old generation. */ \
- if (alloc_old) { \
- erts_incremental_gc((p),n,&dest,1); \
- (hp) = erts_inc_alloc(n); \
- } else { \
- (hp) = IncAlloc((p),n,&dest,1); \
- if (ma_gc_flags & GC_CYCLE_START) { \
- alloc_old = 1; \
- global_htop = global_heap; \
- (hp) = erts_inc_alloc(n); \
- } \
- } \
- CLEARMEM((hp),(n)); \
- BM_SWAP_TIMER(system,copy); \
-} while(0)
-
-#else /* no INCREMELNTAL */
-
-#define GlobalAlloc(p, need, hp) \
-do { \
- Uint n = (need); \
- total_need += n; \
- if (total_need >= global_heap_sz) \
- erl_exit(ERTS_ABORT_EXIT, "Copying a message (%d words) larger than the nursery simply won't work...\n", total_need); \
- if (global_hend - n < global_htop) { \
- BM_SWAP_TIMER(copy,system); \
- erts_global_garbage_collect((p),total_need,NULL,0); \
- BM_SWAP_TIMER(system,copy); \
- total_need = 0; \
- ma_src_top = 0; \
- ma_dst_top = 0; \
- ma_offset_top = 0; \
- goto copy_start; \
- } \
- (hp) = global_htop; \
- global_htop += n; \
- BM_ADD(words_copied,n); \
-} while(0)
-#endif /* INCREMENTAL */
-
-/* Copy a message to the message area. */
-Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
-{
- Eterm obj;
- Eterm dest;
-#ifdef INCREMENTAL
- int alloc_old = 0;
-#else
- int total_need = 0;
-#endif
-
- VERBOSE(DEBUG_MESSAGES,
- ("COPY START; %T is sending a message @ 0x%016x\n%T\n",
- from->common.id, orig, orig));
-
-#ifndef INCREMENTAL
- copy_start:
-#endif
- MA_STACK_PUSH(src,orig);
- MA_STACK_PUSH(dst,&dest);
- MA_STACK_PUSH(offset,offs);
-
- while (ma_src_top > 0) {
- obj = MA_STACK_POP(src);
-
- /* copy_struct_lazy should never be called with something that
- * do not need to be copied. Within the loop, nothing that do
- * not need copying should be placed in the src-stack.
- */
- ASSERT(!NO_COPY(obj));
-
- switch (primary_tag(obj)) {
- case TAG_PRIMARY_LIST: {
- Eterm *hp;
- Eterm *objp;
-
- GlobalAlloc(from,2,hp);
- objp = list_val(obj);
-
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp));
- MA_STACK_POP(dst);
-
- /* TODO: Change the order below so that the CDR is pushed first. */
-
- if (NO_COPY(*objp)) {
- hp[0] = *objp;
-#ifdef INCREMENTAL
- if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
- INC_STORE(gray,hp,2);
-#endif
- } else {
- MA_STACK_PUSH(src,*objp);
- MA_STACK_PUSH(dst,hp);
- MA_STACK_PUSH(offset,0);
- }
-
- objp++;
-
- if (NO_COPY(*objp)) {
- hp[1] = *objp;
-#ifdef INCREMENTAL
- if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
- INC_STORE(gray,hp,2);
-#endif
- }
- else {
- MA_STACK_PUSH(src,*objp);
- MA_STACK_PUSH(dst,hp);
- MA_STACK_PUSH(offset,1);
- }
- continue;
- }
-
- case TAG_PRIMARY_BOXED: {
- Eterm *objp = boxed_val(obj);
-
- switch (*objp & _TAG_HEADER_MASK) {
- case ARITYVAL_SUBTAG: {
- Uint ari = arityval(*objp);
- Uint i;
- Eterm *hp;
- GlobalAlloc(from,ari + 1,hp);
- /* A GC above might invalidate the value of objp */
- objp = boxed_val(obj);
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
- MA_STACK_POP(dst);
- *hp = *objp++;
- for (i = 1; i <= ari; i++) {
- switch (primary_tag(*objp)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- if (NO_COPY(*objp)) {
- hp[i] = *objp;
-#ifdef INCREMENTAL
- if (ptr_within(ptr_val(*objp),
- inc_fromspc,inc_fromend))
- INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
-#endif
- objp++;
- } else {
- MA_STACK_PUSH(src,*objp++);
- MA_STACK_PUSH(dst,hp);
- MA_STACK_PUSH(offset,i);
- }
- break;
- default:
- hp[i] = *objp++;
- }
- }
- continue;
- }
-
- case REFC_BINARY_SUBTAG: {
- ProcBin *pb;
- Uint i = thing_arityval(*objp) + 1;
- Eterm *hp;
- GlobalAlloc(from,i,hp);
- /* A GC above might invalidate the value of objp */
- objp = boxed_val(obj);
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp));
- MA_STACK_POP(dst);
- pb = (ProcBin*) hp;
- while (i--) {
- *hp++ = *objp++;
- }
- erts_refc_inc(&pb->val->refc, 2);
- pb->next = erts_global_offheap.first;
- erts_global_offheap.first = pb;
- OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
- continue;
- }
-
- case FUN_SUBTAG: {
- ErlFunThing *funp = (ErlFunThing*) objp;
- Uint i = thing_arityval(*objp) + 1;
- Uint j = i + 1 + funp->num_free;
- Uint k = i;
- Eterm *hp, *hp_start;
- GlobalAlloc(from,j,hp);
- /* A GC above might invalidate the value of objp */
- objp = boxed_val(obj);
- hp_start = hp;
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp));
- MA_STACK_POP(dst);
- funp = (ErlFunThing*) hp;
- while (i--) {
- *hp++ = *objp++;
- }
-#ifndef HYBRID /* FIND ME! */
- funp->next = erts_global_offheap.first;
- erts_global_offheap.first = funp;
- erts_refc_inc(&funp->fe->refc, 2);
-#endif
- for (i = k; i < j; i++) {
- switch (primary_tag(*objp)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- if (NO_COPY(*objp)) {
-#ifdef INCREMENTAL
- if (ptr_within(ptr_val(*objp),
- inc_fromspc,inc_fromend))
- INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
-#endif
- *hp++ = *objp++;
- } else {
- MA_STACK_PUSH(src,*objp++);
- MA_STACK_PUSH(dst,hp_start);
- MA_STACK_PUSH(offset,i);
- hp++;
- }
- break;
- default:
- *hp++ = *objp++;
- }
- }
- continue;
- }
-
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG: {
- ExternalThing *etp;
- Uint i = thing_arityval(*objp) + 1;
- Eterm *hp;
- GlobalAlloc(from,i,hp);
- /* A GC above might invalidate the value of objp */
- objp = boxed_val(obj);
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp));
- MA_STACK_POP(dst);
- etp = (ExternalThing*) hp;
- while (i--) {
- *hp++ = *objp++;
- }
-
- etp->next = erts_global_offheap.first;
- erts_global_offheap.first = etp;
- erts_refc_inc(&etp->node->refc, 2);
- continue;
- }
-
- case SUB_BINARY_SUBTAG: {
- ErlSubBin *sb = (ErlSubBin *) objp;
- Eterm *hp;
- Eterm res_binary;
- Eterm real_bin = sb->orig;
- Uint bit_offset = sb->bitoffs;
- Uint bit_size = sb -> bitsize;
- Uint sub_offset = sb->offs;
- size_t size = sb->size;
- Uint extra_bytes;
- Uint real_size;
- Uint sub_binary_heapneed;
- if ((bit_size + bit_offset) > 8) {
- extra_bytes = 2;
- sub_binary_heapneed = ERL_SUB_BIN_SIZE;
- } else if ((bit_size + bit_offset) > 0) {
- extra_bytes = 1;
- sub_binary_heapneed = ERL_SUB_BIN_SIZE;
- } else {
- extra_bytes = 0;
- sub_binary_heapneed = 0;
- }
-
- real_size = size+extra_bytes;
- objp = binary_val(real_bin);
- if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
- ErlHeapBin *from_bin;
- ErlHeapBin *to_bin;
- Uint i = heap_bin_size(real_size);
- GlobalAlloc(from,i+sub_binary_heapneed,hp);
- from_bin = (ErlHeapBin *) objp;
- to_bin = (ErlHeapBin *) hp;
- to_bin->thing_word = header_heap_bin(real_size);
- to_bin->size = real_size;
- sys_memcpy(to_bin->data, ((byte *)from_bin->data) +
- sub_offset, real_size);
- res_binary = make_binary(to_bin);
- hp += i;
- } else {
- ProcBin *from_bin;
- ProcBin *to_bin;
-
- ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
- from_bin = (ProcBin *) objp;
- erts_refc_inc(&from_bin->val->refc, 2);
- GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp);
- to_bin = (ProcBin *) hp;
- to_bin->thing_word = HEADER_PROC_BIN;
- to_bin->size = real_size;
- to_bin->val = from_bin->val;
- to_bin->bytes = from_bin->bytes + sub_offset;
- to_bin->next = erts_global_offheap.first;
- erts_global_offheap.first = to_bin;
- OH_OVERHEAD(&erts_global_offheap, to_bin->size / sizeof(Eterm));
- res_binary=make_binary(to_bin);
- hp += PROC_BIN_SIZE;
- }
- if (extra_bytes != 0) {
- ErlSubBin* res;
- res = (ErlSubBin *) hp;
- res->thing_word = HEADER_SUB_BIN;
- res->size = size;
- res->bitsize = bit_size;
- res->bitoffs = bit_offset;
- res->offs = 0;
- res->is_writable = 0;
- res->orig = res_binary;
- res_binary = make_binary(hp);
- }
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary);
- MA_STACK_POP(dst);
- continue;
- }
-
- case BIN_MATCHSTATE_SUBTAG:
- erl_exit(ERTS_ABORT_EXIT,
- "copy_struct_lazy: matchstate term not allowed");
-
- default: {
- Uint size = thing_arityval(*objp) + 1;
- Eterm *hp;
- GlobalAlloc(from,size,hp);
- /* A GC above might invalidate the value of objp */
- objp = boxed_val(obj);
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp));
- MA_STACK_POP(dst);
- while (size--) {
- *hp++ = *objp++;
- }
- continue;
- }
- }
- continue;
- }
-
- case TAG_PRIMARY_HEADER:
- ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG);
- {
- Eterm *objp = &obj;
- Uint ari = arityval(obj);
- Uint i;
- Eterm *hp;
- GlobalAlloc(from,ari + 1,hp);
- MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
- MA_STACK_POP(dst);
- *hp = *objp++;
- for (i = 1; i <= ari; i++) {
- switch (primary_tag(*objp)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- if (NO_COPY(*objp)) {
-#ifdef INCREMENTAL
- if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
- INC_STORE(gray,hp,ari + 1);
-#endif
- hp[i] = *objp++;
- } else {
- MA_STACK_PUSH(src,*objp++);
- MA_STACK_PUSH(dst,hp);
- MA_STACK_PUSH(offset,i);
- }
- break;
- default:
- hp[i] = *objp++;
- }
- }
- continue;
- }
-
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n",
- __FILE__, __LINE__,obj);
- }
- }
-
- VERBOSE(DEBUG_MESSAGES,
- ("Copy allocated @ 0x%08lx:\n%T\n",
- (unsigned long)ptr_val(dest),dest));
-
- ma_gc_flags &= ~GC_CYCLE_START;
-
- ASSERT(eq(orig, dest));
- ASSERT(ma_src_top == 0);
- ASSERT(ma_dst_top == 0);
- ASSERT(ma_offset_top == 0);
- return dest;
-}
-
-#undef NO_COPY
-#endif /* HYBRID */
-
/*
* Copy a term that is guaranteed to be contained in a single
* heap block. The heap block is copied word by word, and any
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index fd5efd89f1..f169a1a34a 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1051,16 +1051,16 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
#define VALGRIND_PRINTF_XML VALGRIND_PRINTF
#endif
-# define PURIFY_MSG(msg) \
- do { \
- char buf__[1]; size_t bufsz__ = sizeof(buf__); \
- if (erts_sys_getenv("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \
- VALGRIND_PRINTF_XML("<erlang_error_log>" \
- "%s, line %d: %s</erlang_error_log>\n", \
- __FILE__, __LINE__, msg); \
- } else { \
- VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg); \
- } \
+# define PURIFY_MSG(msg) \
+ do { \
+ char buf__[1]; size_t bufsz__ = sizeof(buf__); \
+ if (erts_sys_getenv_raw("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \
+ VALGRIND_PRINTF_XML("<erlang_error_log>" \
+ "%s, line %d: %s</erlang_error_log>\n", \
+ __FILE__, __LINE__, msg); \
+ } else { \
+ VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg); \
+ } \
} while (0)
#else
# define PURIFY_MSG(msg)
@@ -1661,11 +1661,9 @@ int erts_net_message(Port *prt,
}
erts_cleanup_offheap(&off_heap);
-#ifndef HYBRID /* FIND ME! */
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
-#endif
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return 0;
@@ -1678,11 +1676,9 @@ int erts_net_message(Port *prt,
data_error:
PURIFY_MSG("data error");
erts_cleanup_offheap(&off_heap);
-#ifndef HYBRID /* FIND ME! */
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
-#endif
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
erts_deliver_port_exit(prt, dep->cid, am_killed, 0);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -2051,6 +2047,7 @@ erts_dist_command(Port *prt, int reds_limit)
bw(foq.first->extp, size);
#endif
reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
fob = foq.first;
obufsize += size_obuf(fob);
foq.first = foq.first->next;
@@ -2133,6 +2130,7 @@ erts_dist_command(Port *prt, int reds_limit)
bw(oq.first->extp, size);
#endif
reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
fob = oq.first;
obufsize += size_obuf(fob);
oq.first = oq.first->next;
@@ -2530,15 +2528,15 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
goto error;
/* Check that all trap functions are defined !! */
- if (dsend2_trap->address == NULL ||
- dsend3_trap->address == NULL ||
+ if (dsend2_trap->addressv[0] == NULL ||
+ dsend3_trap->addressv[0] == NULL ||
/* dsend_nosuspend_trap->address == NULL ||*/
- dlink_trap->address == NULL ||
- dunlink_trap->address == NULL ||
- dmonitor_node_trap->address == NULL ||
- dgroup_leader_trap->address == NULL ||
- dmonitor_p_trap->address == NULL ||
- dexit_trap->address == NULL) {
+ dlink_trap->addressv[0] == NULL ||
+ dunlink_trap->addressv[0] == NULL ||
+ dmonitor_node_trap->addressv[0] == NULL ||
+ dgroup_leader_trap->addressv[0] == NULL ||
+ dmonitor_p_trap->addressv[0] == NULL ||
+ dexit_trap->addressv[0] == NULL) {
goto error;
}
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
index 1aeb7f9221..6ec0c91e21 100644
--- a/erts/emulator/beam/dtrace-wrapper.h
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -42,6 +42,8 @@
#define DTRACE_CHARBUF(name, size) \
char name##_BUFFER[size], *name = name##_BUFFER
+#define DTRACE_CHARBUF_NAME(name) name##_BUFFER
+
#if defined(USE_DYNAMIC_TRACE) && defined(USE_VM_PROBES)
#include "erlang_dtrace.h"
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index e8a9c11034..04d807b780 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1675,7 +1675,7 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
t_str = type_no_str(n);
if (!t_str) {
- sprintf(buf, "%d", (int) n);
+ erts_snprintf(buf, sizeof(buf), "%d", (int) n);
t_str = buf;
}
@@ -2138,10 +2138,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi, ERTS_ALC_NO_FIXED_SIZES);
tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
}
- tmp += max_processes*sizeof(Process*);
-#ifdef HYBRID
- tmp += max_processes*sizeof(Process*);
-#endif
+ tmp += max_processes*sizeof(erts_smp_atomic_t);
tmp += erts_bif_timer_memory_size();
tmp += erts_tot_link_lh_size();
@@ -2186,9 +2183,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want.code) {
size.code = module_table_sz();
size.code += export_table_sz();
- size.code += export_list_size() * sizeof(Export);
+ size.code += export_entries_sz();
size.code += erts_fun_table_sz();
- size.code += allocated_modules*sizeof(Range);
+ size.code += erts_ranges_sz();
size.code += erts_total_code_size;
}
@@ -2304,13 +2301,9 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "static";
- values[i].ui[0] =
- max_ports*sizeof(Port) /* Port table */
- + erts_timer_wheel_memory_size() /* Timer wheel */
-#ifdef SYS_TMP_BUF_SIZE
- + SYS_TMP_BUF_SIZE /* tmp_buf in sys on vxworks & ose */
-#endif
- ;
+ values[i].ui[0] =
+ max_ports*sizeof(erts_smp_atomic_t) /* Port table */
+ + erts_timer_wheel_memory_size(); /* Timer wheel */
i++;
erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
@@ -2338,7 +2331,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "export_list";
- values[i].ui[0] = export_list_size() * sizeof(Export);
+ values[i].ui[0] = export_entries_sz();
i++;
values[i].arity = 2;
@@ -2353,7 +2346,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "module_refs";
- values[i].ui[0] = allocated_modules*sizeof(Range);
+ values[i].ui[0] = erts_ranges_sz();
i++;
values[i].arity = 2;
@@ -3582,12 +3575,12 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ftype = type_no_str(found_type);
if (!ftype) {
- sprintf(fbuf, "%d", (int) found_type);
+ erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
ftype = fbuf;
}
otype = type_no_str(n);
if (!otype) {
- sprintf(obuf, "%d", (int) n);
+ erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
otype = obuf;
}
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 264cbf8efe..d4de0d076a 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -165,6 +165,7 @@ type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
+type PREPARED_CODE SHORT_LIVED CODE prepared_code
type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table
type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl
type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll
@@ -265,6 +266,8 @@ type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
+type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
@@ -295,7 +298,6 @@ type PORT_LOCK STANDARD SYSTEM port_lock
type DRIVER_LOCK STANDARD SYSTEM driver_lock
type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
-type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
@@ -321,19 +323,6 @@ type ACTIVE_PROCS STANDARD PROCESSES active_procs
+endif
-+if hybrid
-
-type ACTIVE_PROCS STANDARD PROCESSES active_procs
-
-# Used for all memory involved in incremental gc of the message area
-# that is, young (x2) and old generation, forwarding pointers and blackmap
-type MESSAGE_AREA LONG_LIVED PROCESSES message_area
-
-# Used in MA_STACK (global.h) and INC_STORAGE (erl_nmgc.h)
-type OBJECT_STACK STANDARD PROCESSES object_stack
-
-+endif
-
+if smp
type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl
type LL_PTIMER STANDARD SYSTEM ptimer_ll
@@ -434,28 +423,4 @@ type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+endif
-+if vxworks
-
-type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
-type PEND_DATA SYSTEM SYSTEM pending_data
-type FD_TAB LONG_LIVED SYSTEM fd_tab
-type FD_ENTRY_BUF SYSTEM SYSTEM fd_entry_buf
-
-+endif
-
-+if ose
-
-type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
-type PUTENV_STR SYSTEM SYSTEM putenv_string
-type GETENV_STR SYSTEM SYSTEM getenv_string
-type GETENV_STATE SYSTEM SYSTEM getenv_state
-type SIG_ENTRY SYSTEM SYSTEM sig_entry
-type DRIVER_DATA SYSTEM SYSTEM driver_data
-type PGM_TAB SYSTEM SYSTEM pgm_tab
-type PGM_ENTRY SYSTEM SYSTEM pgm_entry
-type PRT_TAB SYSTEM SYSTEM prt_tab
-type PRT_ENTRY SYSTEM SYSTEM prt_entry
-
-+endif
-
# ----------------------------------------------------------------------------
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 9a011e2adc..97ba306a79 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -829,49 +829,83 @@ init_dd_queue(ErtsAllctrDDQueue_t *ddq)
ddq->head.used_marker = 1;
}
-static ERTS_INLINE erts_aint_t
-ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr)
+static ERTS_INLINE int
+ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
{
- erts_aint_t first_ilast, ilast, itmp;
- ErtsAllctrDDBlock_t *this = ptr;
+ erts_aint_t itmp;
+ ErtsAllctrDDBlock_t *enq, *this = ptr;
erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL);
-
/* Enqueue at end of list... */
- first_ilast = ilast = erts_atomic_read_nob(&ddq->tail.data.last);
- while (1) {
- ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
- itmp = erts_atomic_cmpxchg_mb(&last->atmc_next,
- (erts_aint_t) this,
- ERTS_AINT_NULL);
- if (itmp == ERTS_AINT_NULL)
- break;
- ilast = itmp;
+ enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last);
+ itmp = erts_atomic_cmpxchg_relb(&enq->atmc_next,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ /* We are required to move last pointer */
+#ifdef DEBUG
+ ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->atmc_next));
+ ASSERT(((erts_aint_t) enq)
+ == erts_atomic_xchg_relb(&ddq->tail.data.last,
+ (erts_aint_t) this));
+#else
+ erts_atomic_set_relb(&ddq->tail.data.last, (erts_aint_t) this);
+#endif
+ return 1;
}
+ else {
+ /*
+ * We *need* to insert the element somewhere between the
+ * last element we read earlier and the actual last element.
+ */
+ int i = cinit;
- /* Move last pointer forward... */
- while (1) {
- if (erts_atomic_read_rb(&this->atmc_next) != ERTS_AINT_NULL) {
- ilast = erts_atomic_read_rb(&ddq->tail.data.last);
- if (first_ilast != ilast) {
- /* Someone else will move it forward */
- return ilast;
+ while (1) {
+ erts_aint_t itmp2;
+ erts_atomic_set_nob(&this->atmc_next, itmp);
+ itmp2 = erts_atomic_cmpxchg_relb(&enq->atmc_next,
+ (erts_aint_t) this,
+ itmp);
+ if (itmp == itmp2)
+ return 0; /* inserted this */
+ if ((i & 1) == 0)
+ itmp = itmp2;
+ else {
+ enq = (ErtsAllctrDDBlock_t *) itmp2;
+ itmp = erts_atomic_read_acqb(&enq->atmc_next);
+ ASSERT(itmp != ERTS_AINT_NULL);
}
+ i++;
}
- itmp = erts_atomic_cmpxchg_mb(&ddq->tail.data.last,
- (erts_aint_t) this,
- ilast);
- if (ilast == itmp)
- return (erts_aint_t) this;
- ilast = itmp;
}
}
+static ERTS_INLINE erts_aint_t
+check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)
+{
+ if (!ddq->head.used_marker
+ && ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
+ erts_aint_t itmp;
+ ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
+
+ erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL);
+ itmp = erts_atomic_cmpxchg_relb(&last->atmc_next,
+ (erts_aint_t) &ddq->tail.data.marker,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ ilast = (erts_aint_t) &ddq->tail.data.marker;
+ ddq->head.used_marker = !0;
+ erts_atomic_set_relb(&ddq->tail.data.last, ilast);
+ }
+ }
+ return ilast;
+}
+
static ERTS_INLINE int
-ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr)
+ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
{
- erts_aint_t ilast;
+ int last_elem;
int um_refc_ix = 0;
int managed_thread = erts_thr_progress_is_managed_thread();
if (!managed_thread) {
@@ -887,11 +921,11 @@ ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr)
}
}
- ilast = ddq_managed_thread_enqueue(ddq, ptr);
+ last_elem = ddq_managed_thread_enqueue(ddq, ptr, cinit);
if (!managed_thread)
erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
- return ilast == (erts_aint_t) ptr;
+ return last_elem;
}
static ERTS_INLINE void *
@@ -937,20 +971,16 @@ ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
int um_refc_ix;
ddq->head.next.thr_progress_reached = 1;
um_refc_ix = ddq->head.next.um_refc_ix;
- if (erts_atomic_read_acqb(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
+ if (erts_atomic_read_nob(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
/* Move unreferenced end pointer forward... */
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+
ddq->head.unref_end = ddq->head.next.unref_end;
- if (!ddq->head.used_marker
- && ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
- ddq->head.used_marker = 1;
- ilast = ddq_managed_thread_enqueue(ddq, &ddq->tail.data.marker);
- }
+ ilast = check_insert_marker(ddq, ilast);
- if (ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast)
- ERTS_THR_MEMORY_BARRIER;
- else {
+ if (ddq->head.unref_end != (ErtsAllctrDDBlock_t *) ilast) {
ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
ddq->head.next.thr_progress = erts_thr_progress_later(NULL);
erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
@@ -1095,12 +1125,15 @@ handle_delayed_dealloc(Allctr_t *allctr,
}
static ERTS_INLINE void
-enqueue_dealloc_other_instance(ErtsAlcType_t type, Allctr_t *allctr, void *ptr)
+enqueue_dealloc_other_instance(ErtsAlcType_t type,
+ Allctr_t *allctr,
+ void *ptr,
+ int cinit)
{
if (allctr->fix)
((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS] = (UWord) type;
- if (ddq_enqueue(type, &allctr->dd.q, ptr))
+ if (ddq_enqueue(type, &allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
@@ -3616,7 +3649,11 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
get_pref_allctr(extra, &pref_allctr);
ptr = get_used_allctr(extra, p, &used_allctr, NULL);
if (pref_allctr != used_allctr)
- enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ enqueue_dealloc_other_instance(type,
+ used_allctr,
+ ptr,
+ (used_allctr->dd.ix
+ - pref_allctr->dd.ix));
else {
if (used_allctr->thread_safe)
erts_mtx_lock(&used_allctr->mutex);
@@ -3991,7 +4028,11 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
sys_memcpy(res, p, cpy_size);
if (!force_move || used_allctr != pref_allctr)
- enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ enqueue_dealloc_other_instance(type,
+ used_allctr,
+ ptr,
+ (used_allctr->dd.ix
+ - pref_allctr->dd.ix));
else {
do_erts_alcu_free(type, used_allctr, ptr);
ASSERT(pref_allctr == used_allctr);
@@ -4182,6 +4223,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
+ allctr->dd.ix = init->ix;
}
else
#endif
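
The rewritten ddq_managed_thread_enqueue() above appends to a lock-free queue by CAS-ing the new block onto the observed tail's next pointer, and moves the tail pointer only when it actually extended the list; on a lost race it inserts between the observed and the real tail. The sketch below shows just that first, common path with C11 atomics; the names, the missing conflict path and the absent cinit heuristic make it an illustration, not the ERTS code.

#include <stdatomic.h>
#include <stdio.h>

/* A toy queue node and the "happy path" of a CAS-based tail enqueue. */
typedef struct node {
    _Atomic(struct node *) next;
    int value;
} node;

typedef struct {
    _Atomic(node *) tail;
    node head;                          /* dummy first element */
} queue;

static void queue_init(queue *q)
{
    atomic_init(&q->head.next, NULL);
    atomic_init(&q->tail, &q->head);
}

/* Returns 1 when this call extended the list and therefore moved the tail. */
static int enqueue(queue *q, node *n)
{
    node *last = atomic_load(&q->tail);
    node *expected = NULL;

    atomic_store(&n->next, NULL);
    if (atomic_compare_exchange_strong(&last->next, &expected, n)) {
        atomic_store(&q->tail, n);      /* we appended; publish the new tail */
        return 1;
    }
    return 0;                           /* lost the race; the real code then
                                           inserts between 'last' and the tail */
}

int main(void)
{
    queue q;
    node a = {0}, b = {0};

    a.value = 1;
    b.value = 2;
    queue_init(&q);
    printf("a moved tail: %d\n", enqueue(&q, &a));
    printf("b moved tail: %d\n", enqueue(&q, &b));
    return 0;
}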
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index d8adaedc91..f2ca193ace 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -122,6 +122,8 @@ typedef struct {
#endif
} ErtsAsyncData;
+#if defined(USE_THREADS) && defined(USE_VM_PROBES)
+
/*
* Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace
* calls if they're the last thing in the function. :-(
@@ -129,6 +131,7 @@ typedef struct {
* https://github.com/memcached/memcached/commit/6298b3978687530bc9d219b6ac707a1b681b2a46
*/
static unsigned gcc_optimizer_hack = 0;
+#endif
int erts_async_max_threads; /* Initialized by erl_init.c */
int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
@@ -253,7 +256,9 @@ erts_get_async_ready_queue(Uint sched_id)
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
+#ifdef USE_VM_PROBES
int len;
+#endif
if (is_internal_port(a->port)) {
#if ERTS_USE_ASYNC_READY_Q
@@ -279,8 +284,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
len = -1;
DTRACE2(aio_pool_add, port_str, len);
}
-#endif
gcc_optimizer_hack++;
+#endif
}
static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
@@ -291,7 +296,9 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
int saved_fin_deq = 0;
ErtsThrQFinDeQ_t fin_deq;
#endif
+#ifdef USE_VM_PROBES
int len;
+#endif
while (1) {
ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
@@ -399,6 +406,8 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
erts_thr_port_release(p);
#endif
}
+ if (a->pdl)
+ driver_pdl_dec_refc(a->pdl);
if (a->hndl)
erts_ddll_dereference_driver(a->hndl);
}
@@ -408,9 +417,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq;
- if (a->pdl)
- driver_pdl_dec_refc(a->pdl);
-
#if ERTS_ASYNC_PRINT_JOB
erts_fprintf(stderr, "=>> %ld\n", a->async_id);
#endif
@@ -430,8 +436,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
#else /* ERTS_USE_ASYNC_READY_Q */
call_async_ready(a);
- if (a->pdl)
- driver_pdl_dec_refc(a->pdl);
erts_free(ERTS_ALC_T_ASYNC, (void *) a);
#endif /* ERTS_USE_ASYNC_READY_Q */
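
The comment in erl_async.c above notes that some compilers drop a DTrace probe when it is the last statement in a function, hence the gcc_optimizer_hack counter that is now compiled only when both USE_THREADS and USE_VM_PROBES are defined. Below is a stand-alone illustration of the same work-around; DTRACE2 is replaced by a printf stub and async_add_example is an invented name, so none of this is ERTS code.

#include <stdio.h>

#define DTRACE2(probe, a, b) printf("probe %s(%s, %d)\n", #probe, (a), (b))

static volatile unsigned optimizer_hack = 0;

static void async_add_example(const char *port_str, int len)
{
    DTRACE2(aio_pool_add, port_str, len);
    optimizer_hack++;   /* the probe is no longer the last thing in the function */
}

int main(void)
{
    async_add_example("#Port<0.42>", 7);
    return 0;
}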
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index cc4f2be8eb..474151d454 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -72,52 +72,29 @@ binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
void erts_init_bif_binary(void)
{
- sys_memset((void *) &binary_match_trap_export, 0, sizeof(Export));
- binary_match_trap_export.address = &binary_match_trap_export.code[3];
- binary_match_trap_export.code[0] = am_erlang;
- binary_match_trap_export.code[1] = am_binary_match_trap;
- binary_match_trap_export.code[2] = 3;
- binary_match_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_match_trap_export.code[4] = (BeamInstr) &binary_match_trap;
-
- sys_memset((void *) &binary_matches_trap_export, 0, sizeof(Export));
- binary_matches_trap_export.address = &binary_matches_trap_export.code[3];
- binary_matches_trap_export.code[0] = am_erlang;
- binary_matches_trap_export.code[1] = am_binary_matches_trap;
- binary_matches_trap_export.code[2] = 3;
- binary_matches_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_matches_trap_export.code[4] = (BeamInstr) &binary_matches_trap;
-
- sys_memset((void *) &binary_longest_prefix_trap_export, 0, sizeof(Export));
- binary_longest_prefix_trap_export.address = &binary_longest_prefix_trap_export.code[3];
- binary_longest_prefix_trap_export.code[0] = am_erlang;
- binary_longest_prefix_trap_export.code[1] = am_binary_longest_prefix_trap;
- binary_longest_prefix_trap_export.code[2] = 3;
- binary_longest_prefix_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_longest_prefix_trap_export.code[4] = (BeamInstr) &binary_longest_prefix_trap;
-
- sys_memset((void *) &binary_longest_suffix_trap_export, 0, sizeof(Export));
- binary_longest_suffix_trap_export.address = &binary_longest_suffix_trap_export.code[3];
- binary_longest_suffix_trap_export.code[0] = am_erlang;
- binary_longest_suffix_trap_export.code[1] = am_binary_longest_suffix_trap;
- binary_longest_suffix_trap_export.code[2] = 3;
- binary_longest_suffix_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_longest_suffix_trap_export.code[4] = (BeamInstr) &binary_longest_suffix_trap;
-
- sys_memset((void *) &binary_bin_to_list_trap_export, 0, sizeof(Export));
- binary_bin_to_list_trap_export.address = &binary_bin_to_list_trap_export.code[3];
- binary_bin_to_list_trap_export.code[0] = am_erlang;
- binary_bin_to_list_trap_export.code[1] = am_binary_bin_to_list_trap;
- binary_bin_to_list_trap_export.code[2] = 3;
- binary_bin_to_list_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_bin_to_list_trap_export.code[4] = (BeamInstr) &binary_bin_to_list_trap;
- sys_memset((void *) &binary_copy_trap_export, 0, sizeof(Export));
- binary_copy_trap_export.address = &binary_copy_trap_export.code[3];
- binary_copy_trap_export.code[0] = am_erlang;
- binary_copy_trap_export.code[1] = am_binary_copy_trap;
- binary_copy_trap_export.code[2] = 2;
- binary_copy_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_copy_trap_export.code[4] = (BeamInstr) &binary_copy_trap;
+ erts_init_trap_export(&binary_match_trap_export,
+ am_erlang, am_binary_match_trap, 3,
+ &binary_match_trap);
+
+ erts_init_trap_export(&binary_matches_trap_export,
+ am_erlang, am_binary_matches_trap, 3,
+ &binary_matches_trap);
+
+ erts_init_trap_export(&binary_longest_prefix_trap_export,
+ am_erlang, am_binary_longest_prefix_trap, 3,
+ &binary_longest_prefix_trap);
+
+ erts_init_trap_export(&binary_longest_suffix_trap_export,
+ am_erlang, am_binary_longest_suffix_trap, 3,
+ &binary_longest_suffix_trap);
+
+ erts_init_trap_export(&binary_bin_to_list_trap_export,
+ am_erlang, am_binary_bin_to_list_trap, 3,
+ &binary_bin_to_list_trap);
+
+ erts_init_trap_export(&binary_copy_trap_export,
+ am_erlang, am_binary_copy_trap, 2,
+ &binary_copy_trap);
max_loop_limit = 0;
return;
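
This file, erl_bif_chksum.c, erl_bif_re.c, erl_db.c and erl_db_tree.c all replace hand-rolled initialization of trap Export entries with calls to erts_init_trap_export(). The helper itself is not part of this section; the sketch below reconstructs what it is assumed to do from the removed call-site code above (parameter names and types are guesses, and the real helper may additionally have to fill in the per-code-index address vector introduced elsewhere in this patch):

/* Reconstruction from the removed initializers above, not the actual
 * implementation. */
void
erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint arity,
                      BifFunction bif)
{
    sys_memset((void *) ep, 0, sizeof(Export));
    ep->code[0] = m;                        /* module atom             */
    ep->code[1] = f;                        /* function name atom      */
    ep->code[2] = arity;                    /* arity                   */
    ep->code[3] = (BeamInstr) em_apply_bif; /* "apply BIF" instruction */
    ep->code[4] = (BeamInstr) bif;          /* C function to trap to   */
    ep->address = &ep->code[3];             /* entry point             */
}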
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 06b7ffdf32..ff5ce3cc7a 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -42,16 +42,9 @@ static Export chksum_md5_2_exp;
void erts_init_bif_chksum(void)
{
/* Non visual BIF to trap to. */
- memset(&chksum_md5_2_exp, 0, sizeof(Export));
- chksum_md5_2_exp.address =
- &chksum_md5_2_exp.code[3];
- chksum_md5_2_exp.code[0] = am_erlang;
- chksum_md5_2_exp.code[1] = am_atom_put("md5_trap",8);
- chksum_md5_2_exp.code[2] = 2;
- chksum_md5_2_exp.code[3] =
- (BeamInstr) em_apply_bif;
- chksum_md5_2_exp.code[4] =
- (BeamInstr) &md5_2;
+ erts_init_trap_export(&chksum_md5_2_exp,
+ am_erlang, am_atom_put("md5_trap",8), 2,
+ &md5_2);
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index f5a355a336..fabddffc68 100644..100755
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -25,7 +25,6 @@
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
-#include "erl_nmgc.h"
#include "error.h"
#include "erl_driver.h"
#include "bif.h"
@@ -91,12 +90,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef ERTS_ENABLE_KERNEL_POLL
" [kernel-poll:%s]"
#endif
-#ifdef HYBRID
- " [hybrid heap]"
-#endif
-#ifdef INCREMENTAL
- " [incremental GC]"
-#endif
#ifdef ET_DEBUG
#if ET_DEBUG
" [type-assertions]"
@@ -576,9 +569,6 @@ static Eterm pi_args[] = {
am_min_bin_vheap_size,
am_current_location,
am_current_stacktrace,
-#ifdef HYBRID
- am_message_binary
-#endif
};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -626,9 +616,6 @@ pi_arg2ix(Eterm arg)
case am_min_bin_vheap_size: return 28;
case am_current_location: return 29;
case am_current_stacktrace: return 30;
-#ifdef HYBRID
- case am_message_binary: return 31;
-#endif
default: return -1;
}
}
@@ -1079,12 +1066,8 @@ process_info_aux(Process *BIF_P,
if (rp != BIF_P) {
Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
if (is_value(msg)) {
- mq[i].copy_struct_size = (is_immed(msg)
-#ifdef HYBRID
- || NO_COPY(msg)
-#endif
- ? 0
- : size_object(msg));
+ mq[i].copy_struct_size = (is_immed(msg)? 0 :
+ size_object(msg));
}
else if (mq[i].msgp->data.attached) {
mq[i].copy_struct_size
@@ -1526,16 +1509,6 @@ process_info_aux(Process *BIF_P,
break;
}
-#ifdef HYBRID
- case am_message_binary: {
- Uint sz = 3;
- (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
- hp = HAlloc(BIF_P, sz);
- res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
- break;
- }
-#endif
-
case am_sequential_trace_token:
res = copy_object(rp->seq_trace_token, BIF_P);
hp = HAlloc(BIF_P, 3);
@@ -1724,7 +1697,7 @@ static int check_if_xml(void)
{
char buf[1];
size_t bufsz = sizeof(buf);
- return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
+ return erts_sys_getenv_raw("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
}
#else
#define check_if_xml() 0
@@ -2358,36 +2331,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#endif
} else if (BIF_ARG_1 == am_heap_sizes) {
return erts_heap_sizes(BIF_P);
- } else if (BIF_ARG_1 == am_global_heaps_size) {
-#ifdef HYBRID
- Uint hsz = 0;
- Uint sz = 0;
-
- sz += global_heap_sz;
-#ifdef INCREMENTAL
- /* The size of the old generation is a bit hard to define here...
- * The amount of live data in the last collection perhaps..? */
- sz = 0;
-#else
- if (global_old_hend && global_old_heap)
- sz += global_old_hend - global_old_heap;
-#endif
-
- sz *= sizeof(Eterm);
-
- (void) erts_bld_uint(NULL, &hsz, sz);
- hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
- res = erts_bld_uint(&hp, NULL, sz);
-#else
- res = make_small(0);
-#endif
- return res;
} else if (BIF_ARG_1 == am_heap_type) {
-#if defined(HYBRID)
- return am_hybrid;
-#else
return am_private;
-#endif
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
res = erts_get_cpu_topology_term(BIF_P, am_used);
BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
@@ -2770,6 +2715,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
** name String
** input Number of bytes input from port program
** output Number of bytes output to the port program
+** os_pid The child's process ID
*/
Eterm
@@ -2880,6 +2826,15 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
goto done;
}
}
+ else if (item == am_os_pid) {
+ res = (prt->os_pid < 0
+ ? am_undefined
+ : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ }
else if (item == am_registered_name) {
RegProc *reg = prt->common.u.alive.reg;
if (reg) {
@@ -4084,48 +4039,52 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
Eterm* tp = tuple_val(BIF_ARG_1);
switch (arityval(tp[0])) {
- case 2:
+ case 2: {
+ int opt = 0;
+ int val = 0;
if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- if (tp[2] == am_true) {
-
- res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
-
- } else if (tp[2] == am_false) {
-
- res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
-
- } else {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(BIF_P, BADARG);
- }
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
-
+ opt = ERTS_LCNT_OPT_COPYSAVE;
} else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- if (tp[2] == am_true) {
-
- res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
-
- } else if (tp[2] == am_false) {
-
- res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
+ opt = ERTS_LCNT_OPT_PROCLOCK;
+ } else if (ERTS_IS_ATOM_STR("port_locks", tp[1])) {
+ opt = ERTS_LCNT_OPT_PORTLOCK;
+ } else if (ERTS_IS_ATOM_STR("suspend", tp[1])) {
+ opt = ERTS_LCNT_OPT_SUSPEND;
+ } else if (ERTS_IS_ATOM_STR("location", tp[1])) {
+ opt = ERTS_LCNT_OPT_LOCATION;
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (tp[2] == am_true) {
+ val = 1;
+ } else if (tp[2] == am_false) {
+ val = 0;
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- } else {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(BIF_P, BADARG);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ if (val) {
+ res = erts_lcnt_set_rt_opt(opt) ? am_true : am_false;
+ } else {
+ res = erts_lcnt_clear_rt_opt(opt) ? am_true : am_false;
+ }
+#ifdef ERTS_SMP
+ if (res != tp[2]) {
+ if (opt == ERTS_LCNT_OPT_PORTLOCK) {
+ erts_lcnt_enable_io_lock_count(val);
+ } else if (opt == ERTS_LCNT_OPT_PROCLOCK) {
+ erts_lcnt_enable_proc_lock_count(val);
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
- }
- break;
+ }
+#endif
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(res);
+ break;
+ }
default:
break;
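
Two things are worth noting in the erl_bif_info.c changes above: the lock-counter BIF now parses the option and value first and blocks thread progress only once, and erts_bld_port_info() gains an os_pid item. The latter follows the usual two-pass term-building protocol of the erts_bld_* helpers, sketched below under the assumption that the caller drives both passes; the removed global_heaps_size code used the same idiom:

/* Sketch of the erts_bld_* two-pass idiom assumed above: pass 1 (szp != NULL)
 * only accumulates the number of heap words needed, pass 2 (hpp != NULL)
 * builds the term into the allocated space. "p" and "some_value" are
 * placeholders. */
Uint hsz = 0;
Eterm *hp;
Eterm res;

(void) erts_bld_uword(NULL, &hsz, (UWord) some_value);  /* pass 1: size  */
hp  = hsz ? HAlloc(p, hsz) : NULL;
res = erts_bld_uword(&hp, NULL, (UWord) some_value);    /* pass 2: build */

/* During the sizing pass erts_bld_port_info() therefore returns a dummy
 * am_true for the os_pid item, as the hunk above shows. */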
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index 13f8b1f63c..d4cd9c89b3 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -261,11 +261,6 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
if (exp->code[2] == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_tuple(arg1)) {
- Eterm* tp = tuple_val(arg1);
- if (tp[0] == make_arityval(2) && is_atom(tp[1]) && is_atom(tp[2])) {
- BIF_RET(am_true);
- }
}
BIF_RET(am_false);
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 58d48199fa..1062d4379b 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -58,7 +58,7 @@ BIF_RETTYPE os_getpid_0(BIF_ALIST_0)
char pid_string[21]; /* enough for a 64 bit number */
int n;
Eterm* hp;
- sys_get_pid(pid_string); /* In sys.c */
+ sys_get_pid(pid_string, sizeof(pid_string)); /* In sys.c */
n = sys_strlen(pid_string);
hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
@@ -71,15 +71,13 @@ BIF_RETTYPE os_getenv_0(BIF_ALIST_0)
Eterm* hp;
Eterm ret;
Eterm str;
- int len;
init_getenv_state(&state);
ret = NIL;
while ((cp = getenv_string(&state)) != NULL) {
- len = strlen(cp);
- hp = HAlloc(BIF_P, len*2+2);
- str = buf_to_intlist(&hp, cp, len, NIL);
+ str = erts_convert_native_to_filename(BIF_P,(byte *)cp);
+ hp = HAlloc(BIF_P, 2);
ret = CONS(hp, str, ret);
}
@@ -88,32 +86,30 @@ BIF_RETTYPE os_getenv_0(BIF_ALIST_0)
return ret;
}
-
+#define STATIC_BUF_SIZE 1024
BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
{
Process* p = BIF_P;
- Eterm key = BIF_ARG_1;
Eterm str;
- int len, res;
+ Sint len;
+ int res;
char *key_str, *val;
- char buf[1024];
+ char buf[STATIC_BUF_SIZE];
size_t val_size = sizeof(buf);
- len = is_string(key);
- if (!len) {
+ key_str = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
+ ERTS_ALC_T_TMP,1,0,&len);
+
+ if (!key_str) {
BIF_ERROR(p, BADARG);
}
- /* Leave at least one byte in buf for value */
- key_str = len < sizeof(buf)-2 ? &buf[0] : erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(key, key_str, len) != len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- key_str[len] = '\0';
if (key_str != &buf[0])
val = &buf[0];
else {
- val_size -= len + 1;
- val = &buf[len + 1];
+ /* len includes zero byte */
+ val_size -= len;
+ val = &buf[len];
}
res = erts_sys_getenv(key_str, val, &val_size);
@@ -121,7 +117,6 @@ BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
no_var:
str = am_false;
} else {
- Eterm* hp;
if (res > 0) {
val = erts_alloc(ERTS_ALC_T_TMP, val_size);
while (1) {
@@ -134,9 +129,7 @@ BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
val = erts_realloc(ERTS_ALC_T_TMP, val, val_size);
}
}
- if (val_size)
- hp = HAlloc(p, val_size*2);
- str = buf_to_intlist(&hp, val, val_size, NIL);
+ str = erts_convert_native_to_filename(p,(byte *)val);
}
if (key_str != &buf[0])
erts_free(ERTS_ALC_T_TMP, key_str);
@@ -147,46 +140,43 @@ BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
{
- Process* p = BIF_P;
- Eterm key = BIF_ARG_1;
- Eterm value = BIF_ARG_2;
- char def_buf[1024];
- char *buf = NULL;
- int sep_ix, i, key_len, value_len, tot_len;
- key_len = is_string(key);
- if (!key_len) {
- error:
- if (buf)
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- BIF_ERROR(p, BADARG);
+ char def_buf_key[STATIC_BUF_SIZE];
+ char def_buf_value[STATIC_BUF_SIZE];
+ char *key_buf, *value_buf;
+
+ key_buf = erts_convert_filename_to_native(BIF_ARG_1,def_buf_key,
+ STATIC_BUF_SIZE,
+ ERTS_ALC_T_TMP,0,0,NULL);
+ if (!key_buf) {
+ BIF_ERROR(BIF_P, BADARG);
}
- if (is_nil(value))
- value_len = 0;
- else {
- value_len = is_string(value);
- if (!value_len)
- goto error;
+ value_buf = erts_convert_filename_to_native(BIF_ARG_2,def_buf_value,
+ STATIC_BUF_SIZE,
+ ERTS_ALC_T_TMP,1,0,
+ NULL);
+ if (!value_buf) {
+ if (key_buf != def_buf_key) {
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+
+ if (erts_sys_putenv(key_buf, value_buf)) {
+ if (key_buf != def_buf_key) {
+ erts_free(ERTS_ALC_T_TMP, key_buf);
+ }
+ if (value_buf != def_buf_value) {
+ erts_free(ERTS_ALC_T_TMP, value_buf);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (key_buf != def_buf_key) {
+ erts_free(ERTS_ALC_T_TMP, key_buf);
}
- tot_len = key_len + 1 + value_len + 1;
- if (tot_len <= sizeof(def_buf))
- buf = &def_buf[0];
- else
- buf = erts_alloc(ERTS_ALC_T_TMP, tot_len);
- i = intlist_to_buf(key, buf, key_len);
- if (i != key_len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- sep_ix = i;
- buf[i++] = '=';
- if (is_not_nil(value))
- i += intlist_to_buf(value, &buf[i], value_len);
- if (i != key_len + 1 + value_len)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[i] = '\0';
- if (erts_sys_putenv(buf, sep_ix)) {
- goto error;
+ if (value_buf != def_buf_value) {
+ erts_free(ERTS_ALC_T_TMP, value_buf);
}
- if (buf != &def_buf[0])
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(am_true);
}
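
os:getenv/0,1 and os:putenv/2 now go through the filename-encoding converters instead of assuming plain Latin-1 byte lists. The converter's declaration is not part of this diff; the prototype below is inferred from the call sites above and in erl_bif_port.c, and the parameter names are guesses:

/* Inferred from the call sites; names are guesses, not the real prototype.
 * Returns a NUL-terminated string in the native filename encoding: either
 * statbuf, or a fresh allocation of type alloc_type that the caller must
 * free whenever the result differs from statbuf; NULL if the term cannot be
 * converted. *used (when non-NULL) receives the length including the
 * terminating NUL, which the "len includes zero byte" comment above relies on. */
char *erts_convert_filename_to_native(Eterm name,
                                      char *statbuf,        /* optional stack buffer */
                                      size_t statbuf_size,
                                      ErtsAlcType_t alloc_type,
                                      int allow_empty,      /* flag, per call sites  */
                                      int allow_atom,       /* flag, per call sites  */
                                      Sint *used);          /* out parameter         */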
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 97f147132e..81146e38d7 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -576,7 +576,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
} else if (option == am_arg0) {
char *a0;
- if ((a0 = erts_convert_filename_to_native(*tp, ERTS_ALC_T_TMP, 1)) == NULL) {
+ if ((a0 = erts_convert_filename_to_native(*tp, NULL, 0, ERTS_ALC_T_TMP, 1, 1, NULL)) == NULL) {
goto badarg;
}
if (opts.argv == NULL) {
@@ -710,7 +710,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
goto badarg;
}
name = tp[1];
- if ((name_buf = erts_convert_filename_to_native(name,ERTS_ALC_T_TMP,0)) == NULL) {
+ if ((name_buf = erts_convert_filename_to_native(name, NULL, 0, ERTS_ALC_T_TMP,0,1, NULL)) == NULL) {
goto badarg;
}
opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
@@ -774,7 +774,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
opts.wd = (char *) dir;
} else {
- if ((opts.wd = erts_convert_filename_to_native(edir,ERTS_ALC_T_TMP,0)) == NULL) {
+ if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) {
goto badarg;
}
}
@@ -863,7 +863,7 @@ static char **convert_args(Eterm l)
pp[i++] = erts_default_arg0;
while (is_list(l)) {
str = CAR(list_val(l));
- if ((b = erts_convert_filename_to_native(str,ERTS_ALC_T_TMP,1)) == NULL) {
+ if ((b = erts_convert_filename_to_native(str,NULL,0,ERTS_ALC_T_TMP,1,1,NULL)) == NULL) {
int j;
for (j = 1; j < i; ++j)
erts_free(ERTS_ALC_T_TMP, pp[j]);
@@ -898,8 +898,9 @@ static byte* convert_environment(Process* p, Eterm env)
Eterm* hp;
Uint heap_size;
int n;
- ErlDrvSizeT size;
+ Sint size;
byte* bytes;
+ int encoding = erts_get_native_filename_encoding();
if ((n = list_length(env)) < 0) {
return NULL;
@@ -942,7 +943,8 @@ static byte* convert_environment(Process* p, Eterm env)
if (is_not_nil(env)) {
goto done;
}
- if (erts_iolist_size(all, &size)) {
+
+ if ((size = erts_native_filename_need(all,encoding)) < 0) {
goto done;
}
@@ -950,7 +952,7 @@ static byte* convert_environment(Process* p, Eterm env)
* Put the result in a binary (no risk for a memory leak that way).
*/
(void) erts_new_heap_binary(p, NULL, size, &bytes);
- erts_iolist_to_buf(all, (char*)bytes, size);
+ erts_native_filename_put(all,encoding,bytes);
done:
erts_free(ERTS_ALC_T_TMP, temp_heap);
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index ace128f045..88f980d19f 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -71,14 +71,9 @@ void erts_init_bif_re(void)
erts_pcre_stack_free = &erts_erts_pcre_stack_free;
default_table = NULL; /* ISO8859-1 default, forced into pcre */
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
-
- sys_memset((void *) &re_exec_trap_export, 0, sizeof(Export));
- re_exec_trap_export.address = &re_exec_trap_export.code[3];
- re_exec_trap_export.code[0] = am_erlang;
- re_exec_trap_export.code[1] = am_re_run_trap;
- re_exec_trap_export.code[2] = 3;
- re_exec_trap_export.code[3] = (BeamInstr) em_apply_bif;
- re_exec_trap_export.code[4] = (BeamInstr) &re_exec_trap;
+
+ erts_init_trap_export(&re_exec_trap_export, am_erlang, am_re_run_trap, 3,
+ &re_exec_trap);
grun_trap_exportp = erts_export_put(am_re,am_grun,3);
urun_trap_exportp = erts_export_put(am_re,am_urun,3);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 60d5d039ed..99a4394666 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -42,14 +42,33 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
+
+/*
+ * The following variables are protected by code write permission.
+ */
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static struct { /* Protected by code write permission */
+ int current;
+ int install;
+ int local;
+ BpFunctions f; /* Local functions */
+ BpFunctions e; /* Export entries */
+#ifdef ERTS_SMP
+ Process* stager;
+ ErtsThrPrgrLaterOp lop;
+#endif
+} finish_bp;
+
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
+#ifdef ERTS_SMP
+static void smp_bp_finisher(void* arg);
+#endif
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
@@ -60,12 +79,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
-static int setup_func_trace(Export* ep, void* match_prog);
-static int reset_func_trace(Export* ep);
-static void reset_bif_trace(int bif_index);
-static void setup_bif_trace(int bif_index);
-static void set_trace_bif(int bif_index, void* match_prog);
-static void clear_trace_bif(int bif_index);
+static void reset_bif_trace(void);
+static void setup_bif_trace(void);
+static void install_exp_breakpoints(BpFunctions* f);
+static void uninstall_exp_breakpoints(BpFunctions* f);
+static void clean_export_entries(BpFunctions* f);
void
erts_bif_trace_init(void)
@@ -98,7 +116,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
- int matches = 0;
+ int matches = -1;
int specified = 0;
enum erts_break_op on;
Binary* match_prog_set;
@@ -108,8 +126,10 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Process *meta_tracer_proc = p;
Eterm meta_tracer_pid = p->common.id;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
+ }
+ finish_bp.current = -1;
UseTmpHeap(3,p);
/*
@@ -239,7 +259,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_default_meta_match_spec = NULL;
erts_default_meta_tracer_pid = NIL;
}
- MatchSetUnref(match_prog_set);
if (erts_default_trace_pattern_flags.breakpoint &&
flags.breakpoint) {
/* Breakpoint trace -> breakpoint trace */
@@ -295,8 +314,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_default_trace_pattern_is_on = !!flags.breakpoint;
}
}
-
- goto done;
+ matches = 0;
} else if (is_tuple(MFA)) {
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
@@ -320,36 +338,64 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (is_small(mfa[2])) {
mfa[2] = signed_val(mfa[2]);
}
- } else {
- goto error;
- }
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
+ if (meta_tracer_proc) {
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
+ }
+ matches = erts_set_trace_pattern(p, mfa, specified,
+ match_prog_set, match_prog_set,
+ on, flags, meta_tracer_pid, 0);
+ }
- matches = erts_set_trace_pattern(mfa, specified,
- match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid);
+ error:
MatchSetUnref(match_prog_set);
-
- done:
UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- return make_small(matches);
+#ifdef ERTS_SMP
+ if (finish_bp.current >= 0) {
+ ASSERT(matches >= 0);
+ ASSERT(finish_bp.stager == NULL);
+ finish_bp.stager = p;
+ erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
+ erts_smp_proc_inc_refc(p);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(p, make_small(matches));
+ }
+#endif
- error:
+ erts_release_code_write_permission();
- MatchSetUnref(match_prog_set);
+ if (matches >= 0) {
+ return make_small(matches);
+ }
+ else {
+ BIF_ERROR(p, BADARG);
+ }
+}
- UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(p, BADARG);
+#ifdef ERTS_SMP
+static void smp_bp_finisher(void* null)
+{
+ if (erts_finish_breakpointing()) { /* Not done */
+ /* Arrange for being called again */
+ erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
+ }
+ else { /* Done */
+ Process* p = finish_bp.stager;
+#ifdef DEBUG
+ finish_bp.stager = NULL;
+#endif
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+ erts_release_code_write_permission();
+ }
}
+#endif /* ERTS_SMP */
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
@@ -358,6 +404,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_smp_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -370,7 +418,12 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*meta_tracer_pid = erts_default_meta_tracer_pid;
}
-
+int erts_is_default_trace_enabled(void)
+{
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_smp_thr_progress_is_blocking());
+ return erts_default_trace_pattern_is_on;
+}
Uint
erts_trace_flag2bit(Eterm flag)
@@ -464,6 +517,10 @@ Eterm trace_3(BIF_ALIST_3)
BIF_ERROR(p, BADARG);
}
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+
if (is_nil(tracer) || is_internal_pid(tracer)) {
Process *tracer_proc = erts_pid2proc(p,
ERTS_PROC_LOCK_MAIN,
@@ -739,6 +796,7 @@ Eterm trace_3(BIF_ALIST_3)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
+ erts_release_code_write_permission();
BIF_RET(make_small(matches));
@@ -754,6 +812,7 @@ Eterm trace_3(BIF_ALIST_3)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
@@ -850,6 +909,11 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm What = BIF_ARG_1;
Eterm Key = BIF_ARG_2;
Eterm res;
+
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
+ }
+
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
} else if (is_atom(What) || is_pid(What)) {
@@ -857,8 +921,10 @@ Eterm trace_info_2(BIF_ALIST_2)
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
} else {
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
+ erts_release_code_write_permission();
BIF_RET(res);
}
@@ -985,64 +1051,54 @@ static int function_is_traced(Process *p,
Binary **ms, /* out */
Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count, /* out */
+ Uint *count, /* out */
Eterm *call_time) /* out */
{
Export e;
Export* ep;
- int i;
- BeamInstr *code;
+ BeamInstr* pc;
/* First look for an export entry */
e.code[0] = mfa[0];
e.code[1] = mfa[1];
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- if (ep->address == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_error_handler) {
- if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- *ms = ep->match_prog_set;
+ pc = ep->code+3;
+ if (ep->addressv[erts_active_code_ix()] == pc &&
+ *pc != (BeamInstr) em_call_error_handler) {
+
+ int r = 0;
+
+ ASSERT(*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+
+ if (erts_is_trace_break(pc, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
- for (i = 0; i < BIF_SIZE; ++i) {
- if (bif_export[i] == ep) {
- int r = 0;
-
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- *ms = ep->match_prog_set;
- return FUNC_TRACE_GLOBAL_TRACE;
- } else {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- r |= FUNC_TRACE_LOCAL_TRACE;
- *ms = ep->match_prog_set;
- }
- if (erts_is_mtrace_break(ep->code+3, ms_meta,
- tracer_pid_meta)) {
- r |= FUNC_TRACE_META_TRACE;
- }
- if (erts_is_time_break(p, ep->code+3, call_time)) {
- r |= FUNC_TRACE_TIME_TRACE;
- }
- }
- return r ? r : FUNC_TRACE_UNTRACED;
- }
- }
- erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+
+ if (erts_is_trace_break(pc, ms, 1)) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ }
+ if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
+ }
+ if (erts_is_time_break(p, pc, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
}
+ return r ? r : FUNC_TRACE_UNTRACED;
}
}
/* OK, now look for breakpoint tracing */
- if ((code = erts_find_local_func(mfa)) != NULL) {
+ if ((pc = erts_find_local_func(mfa)) != NULL) {
int r =
- (erts_is_trace_break(code, ms, NULL)
+ (erts_is_trace_break(pc, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(code, count)
+ | (erts_is_count_break(pc, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, code, call_time)
+ | (erts_is_time_break(p, pc, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1057,7 +1113,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm* hp;
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
Binary *ms = NULL, *ms_meta = NULL;
- Sint count = 0;
+ Uint count = 0;
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
@@ -1145,9 +1201,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
break;
case am_call_count:
if (r & FUNC_TRACE_COUNT_TRACE) {
- retval = count < 0 ?
- erts_make_integer(-count-1, p) :
- erts_make_integer(count, p);
+ retval = erts_make_integer(count, p);
}
break;
case am_call_time:
@@ -1336,38 +1390,53 @@ trace_info_on_load(Process* p, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid)
+ Eterm meta_tracer_pid, int is_blocking)
{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
int i;
+ int n;
+ BpFunction* fp;
/*
* First work on normal functions (not real BIFs).
*/
-
- for (i = 0; i < export_list_size(); i++) {
- Export* ep = export_list(i);
- int j;
-
- if (ExportIsBuiltIn(ep)) {
- continue;
- }
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- if (on) {
- if (! flags.breakpoint)
- matches += setup_func_trace(ep, match_prog_set);
- else
- reset_func_trace(ep);
- } else if (! flags.breakpoint) {
- matches += reset_func_trace(ep);
+ erts_bp_match_export(&finish_bp.e, mfa, specified);
+ fp = finish_bp.e.matching;
+ n = finish_bp.e.matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+
+ if (on && !flags.breakpoint) {
+ /* Turn on global call tracing */
+ if (ep->addressv[code_ix] != pc) {
+ fp[i].mod->curr.num_traced_exports++;
+#ifdef DEBUG
+ pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+#endif
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ pc[1] = (BeamInstr) ep->addressv[code_ix];
+ }
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
+ if (ep->addressv[code_ix] != pc) {
+ pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ }
+ } else if (!on && flags.breakpoint) {
+ /* Turn off breakpoint tracing -- nothing to do here. */
+ } else {
+ /*
+ * Turn off global tracing, either explicitly or implicitly
+ * before turning on breakpoint tracing.
+ */
+ erts_clear_call_trace_bif(pc, 0);
+ if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
}
}
}
@@ -1392,26 +1461,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/* Empty loop body */
}
if (j == specified) {
+ BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
+
if (! flags.breakpoint) { /* Export entry call trace */
if (on) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- ASSERT(ExportIsBuiltIn(bif_export[i]));
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
- setup_bif_trace(i);
+ erts_clear_call_trace_bif(pc, 1);
+ erts_clear_mtrace_bif(pc);
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
} else { /* off */
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
- }
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
+ erts_clear_call_trace_bif(pc, 0);
}
matches++;
} else { /* Breakpoint call trace */
@@ -1419,52 +1477,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (on) {
if (flags.local) {
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_clear_call_trace_bif(pc, 0);
+ erts_set_call_trace_bif(pc, match_prog_set, 1);
m = 1;
}
if (flags.meta) {
- erts_set_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3,
- meta_match_prog_set, meta_tracer_pid);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_set_mtrace_bif(pc, meta_match_prog_set,
+ meta_tracer_pid);
m = 1;
}
if (flags.call_time) {
- erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ erts_set_time_trace_bif(pc, on);
/* I don't want to remove any other tracers */
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
m = 1;
}
- if (erts_bif_trace_flags[i]) {
- setup_bif_trace(i);
- }
} else { /* off */
if (flags.local) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- }
+ erts_clear_call_trace_bif(pc, 1);
m = 1;
}
if (flags.meta) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
+ erts_clear_mtrace_bif(pc);
m = 1;
}
if (flags.call_time) {
- erts_clear_time_trace_bif(bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ erts_clear_time_trace_bif(pc);
m = 1;
}
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
}
matches += m;
}
@@ -1474,176 +1513,241 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/*
** So, now for breakpoint tracing
*/
+ erts_bp_match_functions(&finish_bp.f, mfa, specified);
if (on) {
if (! flags.breakpoint) {
- erts_clear_trace_break(mfa, specified);
- erts_clear_mtrace_break(mfa, specified);
- erts_clear_count_break(mfa, specified);
- erts_clear_time_break(mfa, specified);
+ erts_clear_all_breaks(&finish_bp.f);
} else {
- int m = 0;
if (flags.local) {
- m = erts_set_trace_break(mfa, specified, match_prog_set,
- am_true);
+ erts_set_trace_break(&finish_bp.f, match_prog_set);
}
if (flags.meta) {
- m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
- meta_tracer_pid);
+ erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
+ meta_tracer_pid);
}
if (flags.call_count) {
- m = erts_set_count_break(mfa, specified, on);
+ erts_set_count_break(&finish_bp.f, on);
}
if (flags.call_time) {
- m = erts_set_time_break(mfa, specified, on);
+ erts_set_time_break(&finish_bp.f, on);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
} else {
- int m = 0;
if (flags.local) {
- m = erts_clear_trace_break(mfa, specified);
+ erts_clear_trace_break(&finish_bp.f);
}
if (flags.meta) {
- m = erts_clear_mtrace_break(mfa, specified);
+ erts_clear_mtrace_break(&finish_bp.f);
}
if (flags.call_count) {
- m = erts_clear_count_break(mfa, specified);
+ erts_clear_count_break(&finish_bp.f);
}
if (flags.call_time) {
- m = erts_clear_time_break(mfa, specified);
+ erts_clear_time_break(&finish_bp.f);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
+ finish_bp.current = 0;
+ finish_bp.install = on;
+ finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+ if (is_blocking) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
+ }
+#ifdef ERTS_SMP
+ finish_bp.current = -1;
+ }
+#endif
+
+ if (flags.breakpoint) {
+ matches += finish_bp.f.matched;
+ } else {
+ matches += finish_bp.e.matched;
+ }
return matches;
}
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog)
+int
+erts_finish_breakpointing(void)
{
- if (ep->address == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
- }
- }
-
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+
/*
- * Currently no trace support for native code.
+ * Memory barriers will be issued for all processes *before*
+ * each of the stages below. (Unless the other schedulers
+ * are blocked, in which case memory barriers will be issued
+ * when they are awakened.)
*/
- if (erts_is_native_break(ep->address)) {
+ switch (finish_bp.current++) {
+ case 0:
+ /*
+ * At this point, in all functions that are to be breakpointed,
+ * a pointer to a GenericBp struct has already been added.
+ *
+ * Insert the new breakpoints (if any) into the
+ * code. Different schedulers may see the breakpoint instruction
+ * at different times, but it does not matter since the newly
+ * added breakpoints are disabled.
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ erts_install_breakpoints(&finish_bp.f);
+ } else {
+ install_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ setup_bif_trace();
+ return 1;
+ case 1:
+ /*
+ * Switch index for the breakpoint data, activating the staged
+ * data. (Depending on the changes in the breakpoint data,
+ * that could either activate breakpoints or disable
+ * breakpoints.)
+ */
+ erts_commit_staged_bp();
+ return 1;
+ case 2:
+ /*
+ * Remove breakpoint instructions for disabled breakpoints
+ * (if any).
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ } else {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ }
+ } else {
+ if (finish_bp.local) {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ } else {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ reset_bif_trace();
+ return 1;
+ case 3:
+ /*
+ * Now all breakpoints have either been inserted or removed.
+ * For all updated breakpoints, copy the active breakpoint
+ * data to the staged breakpoint data to make them equal
+ * (simplifying for the next time breakpoints are to be
+ * updated). If any breakpoints have been totally disabled,
+ * deallocate the GenericBp structs for them.
+ */
+ erts_consolidate_bif_bp_data();
+ clean_export_entries(&finish_bp.e);
+ erts_consolidate_bp_data(&finish_bp.e, 0);
+ erts_consolidate_bp_data(&finish_bp.f, 1);
+ erts_bp_free_matched_functions(&finish_bp.e);
+ erts_bp_free_matched_functions(&finish_bp.f);
return 0;
+ default:
+ ASSERT(0);
}
-
- ep->code[3] = (BeamInstr) em_call_traced_function;
- ep->code[4] = (BeamInstr) ep->address;
- ep->address = ep->code+3;
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
+ return 0;
}
-static void setup_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
-}
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
-static void set_trace_bif(int bif_index, void* match_prog) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
-}
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
+ ep->addressv[code_ix] = pc;
+ }
+}
-static int
-reset_func_trace(Export* ep)
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
{
- if (ep->address == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- ep->address = (Uint *) ep->code[4];
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
+
+ if (ep->addressv[code_ix] != pc) {
+ continue;
}
+ ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
}
-
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(ep->address)) {
- return 0;
- }
-
- /*
- * Nothing to do, but the export entry matches.
- */
+}
- return 1;
+static void
+clean_export_entries(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
+
+ if (ep->addressv[code_ix] == pc) {
+ continue;
+ }
+ if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->code[3] = (BeamInstr) 0;
+ ep->code[4] = (BeamInstr) 0;
+ }
+ }
}
-static void reset_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
- ep->code[4] = (BeamInstr) bif_table[bif_index].f;
+static void
+setup_bif_trace(void)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].traced;
+ }
+ }
+ }
}
-static void clear_trace_bif(int bif_index) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+static void
+reset_bif_trace(void)
+{
+ int i;
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ BeamInstr* pc = ep->code+3;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g && g->data[active].flags == 0) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ }
+ }
+ }
}
/*
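
The trace BIFs above (trace/3, trace_info/2, trace_pattern/3) all adopt the same serialization scheme for code modification: seize the global code write permission, yield and retry the BIF if it is already taken, and release the permission when done; in the SMP breakpoint-staging case the release happens from the thread-progress finisher rather than from the BIF itself. A condensed sketch of the basic pattern, with a made-up BIF name:

/* Condensed from trace_3()/trace_info_2() above; the BIF and helper names
 * are made up for illustration. */
BIF_RETTYPE example_code_mutating_bif_1(BIF_ALIST_1)
{
    Eterm res;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        /* Someone else holds the permission: reschedule this call with
         * the same argument and try again later. */
        ERTS_BIF_YIELD1(bif_export[BIF_example_code_mutating_bif_1],
                        BIF_P, BIF_ARG_1);
    }

    res = perform_code_change(BIF_ARG_1);   /* placeholder for the real work */

    erts_release_code_write_permission();
    BIF_RET(res);
}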
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 6f7309f493..3753b618e1 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1005,8 +1005,13 @@ erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags)
if (is_float(arg)) {
FloatDef *fdp = (FloatDef*)(float_val(arg) + 1);
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ a = fdp->fw[1];
+ b = fdp->fw[0];
+#else
a = fdp->fw[0];
b = fdp->fw[1];
+#endif
} else if (is_small(arg)) {
u.f64 = (double) signed_val(arg);
a = u.i32[0];
@@ -1015,8 +1020,13 @@ erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags)
if (big_to_double(arg, &u.f64) < 0) {
return 0;
}
+#ifdef DOUBLE_MIDDLE_ENDIAN
+ a = u.i32[1];
+ b = u.i32[0];
+#else
a = u.i32[0];
b = u.i32[1];
+#endif
} else {
return 0;
}
@@ -1237,6 +1247,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+ if (unit > 1) {
+ if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
+ (erts_bin_offset % unit) != 0) {
+ goto badarg;
+ }
+ }
used_size_in_bits = erts_bin_offset + build_size_in_bits;
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
@@ -1306,6 +1322,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
*/
ERTS_GET_BINARY_BYTES(bin, src_bytes, bitoffs, bitsize);
erts_bin_offset = 8*binary_size(bin) + bitsize;
+ if (unit > 1) {
+ if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
+ (erts_bin_offset % unit) != 0) {
+ goto badarg;
+ }
+ }
used_size_in_bits = erts_bin_offset + build_size_in_bits;
used_size_in_bytes = NBYTES(used_size_in_bits);
bin_size = 2*used_size_in_bytes;
@@ -1353,12 +1375,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
/*
* Now copy the data into the binary.
*/
- if (unit > 1) {
- if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
- (erts_bin_offset % unit) != 0) {
- return THE_NON_VALUE;
- }
- }
copy_binary_to_buffer(erts_current_bin, 0, src_bytes, bitoffs, erts_bin_offset);
return make_binary(sb);
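
Two independent fixes in erl_bits.c: the DOUBLE_MIDDLE_ENDIAN branches swap the two 32-bit halves of a double on platforms where they are stored in the opposite order from what the generic big/little-endian paths expect, and the unit-alignment check in erts_bs_append() now runs before the target binary is modified instead of just before the final copy. A small illustration of the swap; the FloatDef layout is assumed from the code above:

/* Illustration only; FloatDef is assumed to expose the double both as a
 * double (fd) and as two 32-bit words (fw[0], fw[1]), as the hunk above uses. */
FloatDef f;
Uint32 a, b;

f.fd = 1.5;
#ifdef DOUBLE_MIDDLE_ENDIAN
a = f.fw[1];            /* words emitted in swapped order */
b = f.fw[0];
#else
a = f.fw[0];
b = f.fw[1];
#endif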
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 41e71881a0..3f90f34736 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -486,7 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
erts_thr_set_main_status(1, (int) esdp->no);
/* Make sure we check if we should bind to a cpu or not... */
- ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ (void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
}
#endif
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index cb03f37ebf..48a95cdf32 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -251,7 +251,6 @@ free_dbtable(void *vtb)
#endif
ASSERT(is_immed(tb->common.heir_data));
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
}
static void schedule_free_dbtable(DbTable* tb)
@@ -1424,7 +1423,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&tb->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
}
@@ -2804,7 +2802,6 @@ void init_db(void)
{
DbTable init_tb;
int i;
- extern BeamInstr* em_apply_bif;
Eterm *hp;
unsigned bits;
size_t size;
@@ -2876,7 +2873,6 @@ void init_db(void)
meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
@@ -2908,7 +2904,6 @@ void init_db(void)
meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
@@ -2937,49 +2932,24 @@ void init_db(void)
}
/* Non visual BIF to trap to. */
- memset(&ets_select_delete_continue_exp, 0, sizeof(Export));
- ets_select_delete_continue_exp.address =
- &ets_select_delete_continue_exp.code[3];
- ets_select_delete_continue_exp.code[0] = am_ets;
- ets_select_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
- ets_select_delete_continue_exp.code[2] = 1;
- ets_select_delete_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_delete_continue_exp.code[4] =
- (BeamInstr) &ets_select_delete_1;
+ erts_init_trap_export(&ets_select_delete_continue_exp,
+ am_ets, am_atom_put("delete_trap",11), 1,
+ &ets_select_delete_1);
/* Non visual BIF to trap to. */
- memset(&ets_select_count_continue_exp, 0, sizeof(Export));
- ets_select_count_continue_exp.address =
- &ets_select_count_continue_exp.code[3];
- ets_select_count_continue_exp.code[0] = am_ets;
- ets_select_count_continue_exp.code[1] = am_atom_put("count_trap",11);
- ets_select_count_continue_exp.code[2] = 1;
- ets_select_count_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_count_continue_exp.code[4] =
- (BeamInstr) &ets_select_count_1;
+ erts_init_trap_export(&ets_select_count_continue_exp,
+ am_ets, am_atom_put("count_trap",11), 1,
+ &ets_select_count_1);
/* Non visual BIF to trap to. */
- memset(&ets_select_continue_exp, 0, sizeof(Export));
- ets_select_continue_exp.address =
- &ets_select_continue_exp.code[3];
- ets_select_continue_exp.code[0] = am_ets;
- ets_select_continue_exp.code[1] = am_atom_put("select_trap",11);
- ets_select_continue_exp.code[2] = 1;
- ets_select_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_continue_exp.code[4] =
- (BeamInstr) &ets_select_trap_1;
+ erts_init_trap_export(&ets_select_continue_exp,
+ am_ets, am_atom_put("select_trap",11), 1,
+ &ets_select_trap_1);
/* Non visual BIF to trap to. */
- memset(&ets_delete_continue_exp, 0, sizeof(Export));
- ets_delete_continue_exp.address = &ets_delete_continue_exp.code[3];
- ets_delete_continue_exp.code[0] = am_ets;
- ets_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
- ets_delete_continue_exp.code[2] = 1;
- ets_delete_continue_exp.code[3] = (BeamInstr) em_apply_bif;
- ets_delete_continue_exp.code[4] = (BeamInstr) &ets_delete_trap;
+ erts_init_trap_export(&ets_delete_continue_exp,
+ am_ets, am_atom_put("delete_trap",11), 1,
+ &ets_delete_trap);
hp = ms_delete_all_buff;
ms_delete_all = CONS(hp, am_true, NIL);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index c726be5fb4..2fea4671e1 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -105,7 +105,20 @@
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_ddrb(&(tb)->segtab))
+#ifdef ERTS_SMP
+# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED)
+#else
+# define DB_USING_FINE_LOCKING(TB) 0
+#endif
+
+#ifdef ETHR_ORDERED_READ_DEPEND
+#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
+#else
+#define SEGTAB(tb) \
+ (DB_USING_FINE_LOCKING(tb) \
+ ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \
+ : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)))
+#endif
#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
@@ -122,7 +135,9 @@
*/
static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
- Uint mask = erts_smp_atomic_read_acqb(&tb->szm);
+ Uint mask = (DB_USING_FINE_LOCKING(tb)
+ ? erts_smp_atomic_read_acqb(&tb->szm)
+ : erts_smp_atomic_read_nob(&tb->szm));
Uint ix = hval & mask;
if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) {
ix &= mask>>1;
@@ -319,7 +334,10 @@ struct ext_segment {
static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
struct segment** segtab)
{
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ else
+ erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
#ifdef VALGRIND
tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
#endif
@@ -2501,6 +2519,28 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
return list;
}
+static ERTS_INLINE int
+begin_resizing(DbTableHash* tb)
+{
+ if (DB_USING_FINE_LOCKING(tb))
+ return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else {
+ if (erts_smp_atomic_read_nob(&tb->is_resizing))
+ return 0;
+ erts_smp_atomic_set_nob(&tb->is_resizing, 1);
+ return 1;
+ }
+}
+
+static ERTS_INLINE void
+done_resizing(DbTableHash* tb)
+{
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_relb(&tb->is_resizing, 0);
+ else
+ erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+}
+
/* Grow table with one new bucket.
** Allocate new segment if needed.
*/
@@ -2513,9 +2553,8 @@ static void grow(DbTableHash* tb, int nactive)
int from_ix;
int szm;
- if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) {
+ if (!begin_resizing(tb))
return; /* already in progress */
- }
if (NACTIVE(tb) != nactive) {
goto abort; /* already done (race) */
}
@@ -2547,9 +2586,12 @@ static void grow(DbTableHash* tb, int nactive)
}
erts_smp_atomic_inc_nob(&tb->nactive);
if (from_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, szm);
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_relb(&tb->szm, szm);
+ else
+ erts_smp_atomic_set_nob(&tb->szm, szm);
}
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
+ done_resizing(tb);
/* Finally, let's split the bucket. We try to do it in a smart way
to keep link order and avoid unnecessary updates of next-pointers */
@@ -2581,7 +2623,7 @@ static void grow(DbTableHash* tb, int nactive)
return;
abort:
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
+ done_resizing(tb);
}
@@ -2590,9 +2632,8 @@ abort:
*/
static void shrink(DbTableHash* tb, int nactive)
{
- if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) {
+ if (!begin_resizing(tb))
return; /* already in progress */
- }
if (NACTIVE(tb) == nactive) {
erts_smp_rwmtx_t* lck;
int src_ix = nactive - 1;
@@ -2639,7 +2680,7 @@ static void shrink(DbTableHash* tb, int nactive)
}
/*else already done */
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
+ done_resizing(tb);
}
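
In erl_db_hash.c the strength of the atomic operations on segtab, szm and is_resizing now depends on whether the table uses fine-grained locking: only DB_FINE_LOCKED tables pay for the ddrb/acqb/relb barriers, since for ordinary tables every access is assumed to happen under the table's single rwlock, which already provides the ordering. The pairing for the fine-locked case, as interpreted from the hunks above:

/* Interpretation of the hunks above, fine-locked case: the writer publishes
 * a new segment table with a write barrier and readers consume it with a
 * data-dependency read barrier, so a reader that sees the new pointer also
 * sees the initialized segment contents. */
erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);            /* writer */
segtab = (struct segment**) erts_smp_atomic_read_ddrb(&tb->segtab);   /* reader */

/* Tables that are not fine-locked fall back to the _nob variants and rely on
 * tb->common.rwlock (assumed to be held by every caller in that mode) for
 * ordering instead. */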
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 312050b931..faa7f31d99 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -452,16 +452,8 @@ DbTableMethod db_tree =
void db_initialize_tree(void)
{
- memset(&ets_select_reverse_exp, 0, sizeof(Export));
- ets_select_reverse_exp.address =
- &ets_select_reverse_exp.code[3];
- ets_select_reverse_exp.code[0] = am_ets;
- ets_select_reverse_exp.code[1] = am_reverse;
- ets_select_reverse_exp.code[2] = 3;
- ets_select_reverse_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_reverse_exp.code[4] =
- (BeamInstr) &ets_select_reverse;
+ erts_init_trap_export(&ets_select_reverse_exp, am_ets, am_reverse, 3,
+ &ets_select_reverse);
return;
};
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 67c2d0eb7f..bcdb630140 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1238,7 +1238,7 @@ static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
Eterm ret;
ret = db_prog_match(p, mpsp, args, NULL, NULL, num_args,
- ERTS_PAM_CONTIGUOUS_TUPLE | ERTS_PAM_COPY_RESULT,
+ ERTS_PAM_COPY_RESULT,
return_flags);
#if defined(HARDDEBUG)
if (is_non_value(ret)) {
@@ -2485,7 +2485,7 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei)
vnum = tmp->variable;
}
if (vnum >= 0)
- sprintf(buff,tmp->error_string, vnum);
+ erts_snprintf(buff,sizeof(buff)+20,tmp->error_string, vnum);
else
strcpy(buff,tmp->error_string);
sl = strlen(buff);
@@ -3000,7 +3000,7 @@ Eterm db_copy_from_comp(DbTableCommon* tb, DbTerm* bp, Eterm** hpp,
}
ASSERT((*hpp - hp) <= bp->size);
#ifdef DEBUG_CLONE
- ASSERT(eq_rel(make_tuple(hp),make_tuple(bp->debug_clone),bp->debug_clone));
+ ASSERT(eq_rel(make_tuple(hp),NULL,make_tuple(bp->debug_clone),bp->debug_clone));
#endif
return make_tuple(hp);
}
@@ -3023,7 +3023,7 @@ Eterm db_copy_element_from_ets(DbTableCommon* tb, Process* p,
hp += extra;
HRelease(p, endp, hp);
#ifdef DEBUG_CLONE
- ASSERT(eq_rel(copy, obj->debug_clone[pos], obj->debug_clone));
+ ASSERT(eq_rel(copy, NULL, obj->debug_clone[pos], obj->debug_clone));
#endif
return copy;
}
@@ -4490,7 +4490,9 @@ static DMCRet dmc_fun(DMCContext *context,
if (context->err_info != NULL) {
/* Ugly, should define a better RETURN_TERM_ERROR interface... */
char buff[100];
- sprintf(buff, "Function %%T/%d does_not_exist.", (int)a - 1);
+ erts_snprintf(buff, sizeof(buff),
+ "Function %%T/%d does_not_exist.",
+ (int)a - 1);
RETURN_TERM_ERROR(buff, p[1], context, *constant);
} else {
return retFail;
@@ -4505,7 +4507,7 @@ static DMCRet dmc_fun(DMCContext *context,
if (context->err_info != NULL) {
/* Ugly, should define a better RETURN_TERM_ERROR interface... */
char buff[100];
- sprintf(buff,
+ erts_snprintf(buff, sizeof(buff),
"Function %%T/%d cannot be called in this context.",
(int)a - 1);
RETURN_TERM_ERROR(buff, p[1], context, *constant);
@@ -4769,7 +4771,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
for (j = 0; j < x && DMC_PEEK(heap,j) != n; ++j)
;
ASSERT(j < x);
- sprintf(buff+1,"%u", (unsigned) j);
+ erts_snprintf(buff+1, sizeof(buff) - 1, "%u", (unsigned) j);
/* Yes, writing directly into terms, they ARE off heap */
*p = am_atom_put(buff, strlen(buff));
}
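
All of the erl_db_util.c hunks above follow the same pattern: every unbounded sprintf() into a fixed-size buffer is replaced by a length-bounded erts_snprintf() call. A trivial illustration of the pattern using plain snprintf() and a placeholder arity value; the point is only that the buffer size is passed alongside the buffer so the formatter can never write past it:

    char buff[100];
    int arity = 3;                                   /* placeholder value */
    int n = snprintf(buff, sizeof(buff),
                     "Function %%T/%d does_not_exist.", arity - 1);
    if (n < 0 || n >= (int)sizeof(buff)) {
        /* formatting failed or the output was truncated */
    }
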
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 783555195f..b90d00f236 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,7 +25,6 @@
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
-#include "erl_nmgc.h"
#include "big.h"
#include "bif.h"
#include "beam_catches.h"
@@ -33,34 +32,9 @@
#define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y))
-#if defined(HYBRID)
-#if defined(INCREMENTAL)
-/* Hybrid + Incremental */
-#define IN_HEAP(p, ptr) \
- (WITHIN((ptr), p->heap, p->hend) || \
- (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \
- WITHIN((ptr), global_heap, global_hend) || \
- (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \
- WITHIN((ptr), global_old_heap, global_old_hend))
-
-#define IN_MA(ptr) \
- (WITHIN((ptr), global_heap, global_hend) || \
- (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \
- WITHIN((ptr), global_old_heap, global_old_hend))
-#else
-/* Hybrid */
-#define IN_HEAP(p, ptr) \
- (WITHIN((ptr), p->heap, p->hend) || \
- (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \
- WITHIN((ptr), global_heap, global_hend) || \
- (global_old_heap && WITHIN((ptr),global_old_heap,global_old_hend)))
-#endif
-#else
-/* Private */
#define IN_HEAP(p, ptr) \
(WITHIN((ptr), p->heap, p->hend) || \
(OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))))
-#endif
#ifdef __GNUC__
@@ -266,13 +240,6 @@ static int verify_eterm(Process *p,Eterm element)
}
}
}
-#ifdef INCREMENTAL
- else {
- if (IN_MA(ptr))
- return 1;
- }
-#endif
-
return 0;
}
@@ -447,51 +414,12 @@ void verify_process(Process *p)
VERIFY_ETERM("fvalue",p->fvalue);
VERIFY_ETERM("ftrace",p->ftrace);
-#ifdef HYBRID
- VERIFY_AREA("rrma",p->rrma,p->nrr);
-#endif
-
VERBOSE(DEBUG_MEMORY,("...done\n"));
#undef VERIFY_AREA
#undef VERIFY_ETERM
}
-void verify_everything()
-{
-#ifdef HYBRID
- Uint i;
- Uint n = erts_num_active_procs;
-
-#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY
- INC_Page *page = inc_used_mem;
-#endif
-
- for (i = 0; i < n; i++) {
- verify_process(erts_active_procs[i]);
- }
-
- erts_check_memory(NULL,global_heap,global_htop);
-
-#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY
- while (page)
- {
- Eterm *end = page + INC_PAGE_SIZE;
- Eterm *pos = page->start;
-
- while( pos < end) {
- Eterm val = *pos++;
- if(is_header(val))
- pos += thing_arityval(val);
- else
- verify_eterm(NULL,val);
- }
- page = page->next;
- }
-#endif
-#endif /* HYBRID */
-}
-
/*
* print_untagged_memory will print the contents of given memory area.
*/
@@ -582,83 +510,6 @@ void print_tagged_memory(Eterm *pos, Eterm *end)
erts_printf("+-%s-+-%s-+\n",dashes,dashes);
}
-#ifdef HYBRID
-void print_ma_info(void)
-{
- erts_printf("Message Area (start - top - end): "
- "0x%0*lx - 0x%0*lx - 0x%0*lx\n",
- PTR_SIZE, (unsigned long)global_heap,
- PTR_SIZE, (unsigned long)global_htop,
- PTR_SIZE, (unsigned long)global_hend);
-#ifndef INCREMENTAL
- erts_printf(" High water: 0x%0*lx "
- "Old gen: 0x%0*lx - 0x%0*lx - 0x%0*lx\n",
- PTR_SIZE, (unsigned long)global_high_water,
- PTR_SIZE, (unsigned long)global_old_heap,
- PTR_SIZE, (unsigned long)global_old_htop,
- PTR_SIZE, (unsigned long)global_old_hend);
-#endif
-}
-
-void print_message_area(void)
-{
- Eterm *pos = global_heap;
- Eterm *end = global_htop;
-
- erts_printf("From: 0x%0*lx to 0x%0*lx\n",
- PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)end);
- erts_printf("(Old generation: 0x%0*lx to 0x%0*lx\n",
- PTR_SIZE, (unsigned long)global_old_heap,
- PTR_SIZE, (unsigned long)global_old_hend);
- erts_printf("| %-*s | %-*s |\n",PTR_SIZE,"Address",PTR_SIZE,"Contents");
- erts_printf("|-%s-|-%s-|\n",dashes,dashes);
- while( pos < end ) {
- Eterm val = pos[0];
- erts_printf("| 0x%0*lx | 0x%0*lx | ",
- PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)val);
- ++pos;
- if( is_arity_value(val) ) {
- erts_printf("Arity(%lu)", arityval(val));
- } else if( is_thing(val) ) {
- unsigned int ari = thing_arityval(val);
- erts_printf("Thing Arity(%u) Tag(%lu)", ari, thing_subtag(val));
- while( ari ) {
- erts_printf("\n| 0x%0*lx | 0x%0*lx | THING",
- PTR_SIZE, (unsigned long)pos,
- PTR_SIZE, (unsigned long)*pos);
- ++pos;
- --ari;
- }
- } else
- erts_printf("%.30T", val);
- erts_printf("\n");
- }
- erts_printf("+-%s-+-%s-+\n",dashes,dashes);
-}
-
-void check_message_area()
-{
- Eterm *pos = global_heap;
- Eterm *end = global_htop;
-
- while( pos < end ) {
- Eterm val = *pos++;
- if(is_header(val))
- pos += thing_arityval(val);
- else if(!is_immed(val))
- if ((ptr_val(val) < global_heap || ptr_val(val) >= global_htop) &&
- (ptr_val(val) < global_old_heap ||
- ptr_val(val) >= global_old_hend))
- {
- erts_printf("check_message_area: Stray pointer found\n");
- print_message_area();
- erts_printf("Crashing to make it look real...\n");
- pos = 0;
- }
- }
-}
-#endif /* HYBRID */
-
static void print_process_memory(Process *p);
static void print_process_memory(Process *p)
{
@@ -703,19 +554,6 @@ static void print_process_memory(Process *p)
erts_printf(" Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue);
erts_printf(" Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace);
-#ifdef HYBRID
- if (p->nrr > 0) {
- int i;
- erts_printf(" Remembered Roots:\n");
- for (i = 0; i < p->nrr; i++)
- if (p->rrsrc[i] != NULL)
- erts_printf("0x%0*lx -> 0x%0*lx\n",
- PTR_SIZE, (unsigned long)p->rrsrc[i],
- PTR_SIZE, (unsigned long)p->rrma[i]);
- erts_printf("\n");
- }
-#endif
-
erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n",
PTR_SIZE, "Stack",
PTR_SIZE, (unsigned long)STACK_TOP(p),
@@ -757,92 +595,6 @@ void print_memory(Process *p)
if (p != NULL) {
print_process_memory(p);
}
-#ifdef HYBRID
- else {
- Uint i;
- Uint n = erts_num_active_procs;
-
- for (i = 0; i < n; i++) {
- Process *p = erts_active_procs[i];
- print_process_memory(p);
- }
-
- erts_printf("==================\n");
- erts_printf("|| Message area ||\n");
- erts_printf("==================\n");
- erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
- dashes,dashes,dashes,dashes,dashes);
- erts_printf("| %-*s | 0x%0*lx - 0x%0*lx - 0x%0*lx%*s|\n",
- PTR_SIZE, "Young",
- PTR_SIZE, (unsigned long)global_heap,
- PTR_SIZE, (unsigned long)global_htop,
- PTR_SIZE, (unsigned long)global_hend,
- PTR_SIZE, "");
- erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
- dashes,dashes,dashes,dashes,dashes);
-
- print_untagged_memory(global_heap,global_htop);
-
-
- erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
- dashes,dashes,dashes,dashes,dashes);
- erts_printf("| %-*s | 0x%0*lx - 0x%0*lx %*s |\n",
- PTR_SIZE, "Old",
- PTR_SIZE, (unsigned long)global_old_heap,
- PTR_SIZE, (unsigned long)global_old_hend,
- 2 * PTR_SIZE, "");
- erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
- dashes,dashes,dashes,dashes,dashes);
-
-#ifdef INCREMENTAL
- {
- INC_Page *page = inc_used_mem;
-	/* By walking the free list first we could mark the areas
-	   that are not allocated and print only those that are
-	   still live.
-	char markarea[INC_PAGESIZE];
-	*/
-
- while (page) {
- Eterm *ptr = (Eterm*)page->start;
- Eterm *end = (Eterm*)page->start + INC_PAGESIZE;
-
- erts_printf("| %*s | This: 0x%0*lx Next: 0x%0*lx %*s|\n",
- PTR_SIZE, "",
- PTR_SIZE, (unsigned long)page,
- PTR_SIZE, (unsigned long)page->next,
- 2 * PTR_SIZE - 8, "");
- print_untagged_memory(ptr,end);
- page = page->next;
- }
- }
-
- {
- INC_MemBlock *this = inc_free_list;
-
- erts_printf("-- %-*s --%s-%s-%s-%s-\n",PTR_SIZE+2,"Free list",
- dashes,dashes,dashes,dashes);
- while (this) {
- erts_printf("Block @ 0x%0*lx sz: %8d prev: 0x%0*lx next: 0x%0*lx\n",
- PTR_SIZE, (unsigned long)this,this->size,
- PTR_SIZE, (unsigned long)this->prev,
- PTR_SIZE, (unsigned long)this->next);
- this = this->next;
- }
- erts_printf("--%s---%s-%s-%s-%s--\n",
- dashes,dashes,dashes,dashes,dashes);
- }
-
- if (inc_fromspc != NULL) {
- erts_printf("-- fromspace - 0x%0*lx 0x%0*lx "
- "------------------------------\n",
- PTR_SIZE, (unsigned long)inc_fromspc,
- PTR_SIZE, (unsigned long)inc_fromend);
- print_untagged_memory(inc_fromspc,inc_fromend);
- }
-#endif /* INCREMENTAL */
- }
-#endif /* HYBRID */
}
void print_memory_info(Process *p)
@@ -869,26 +621,6 @@ void print_memory_info(Process *p)
erts_printf("|| Memory info ||\n");
erts_printf("=================\n");
}
-#ifdef HYBRID
- erts_printf("|- message area --%s-%s-%s-%s-|\n",
- dashes,dashes,dashes,dashes);
- erts_printf("| Young | 0x%0*lx - 0x%0*lx - 0x%0*lx %*s |\n",
- PTR_SIZE, (unsigned long)global_heap,
- PTR_SIZE, (unsigned long)global_htop,
- PTR_SIZE, (unsigned long)global_hend,
- PTR_SIZE, "");
- erts_printf("| Old | 0x%0*lx - 0x%0*lx %*s |\n",
- PTR_SIZE, (unsigned long)global_old_heap,
- PTR_SIZE, (unsigned long)global_old_hend,
- 2 * PTR_SIZE, "");
-#endif
-#ifdef INCREMENTAL
- if (inc_fromspc != NULL)
- erts_printf("| Frmsp | 0x%0*lx - 0x%0*lx %*s |\n",
- PTR_SIZE, (unsigned long)inc_fromspc,
- PTR_SIZE, (unsigned long)inc_fromend,
- 2 * PTR_SIZE, "");
-#endif
erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
}
#if !HEAP_ON_C_STACK && defined(DEBUG)
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
index c49354a2b3..af51212281 100644
--- a/erts/emulator/beam/erl_debug.h
+++ b/erts/emulator/beam/erl_debug.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,12 +42,11 @@
#define DEBUG_DEFAULT 0x0000 /* No flags are set per default */
#define DEBUG_SYSTEM 0x0001 /* Misc system info at startup and end */
#define DEBUG_PRIVATE_GC 0x0002 /* GC of private heaps */
-#define DEBUG_HYBRID_GC 0x0004 /* GC of the message area */
-#define DEBUG_ALLOCATION 0x0008 /* HAlloc. To find holes in the heap */
-#define DEBUG_MESSAGES 0x0010 /* Message passing */
-#define DEBUG_THREADS 0x0020 /* Thread-related stuff */
-#define DEBUG_PROCESSES 0x0040 /* Process creation and removal */
-#define DEBUG_MEMORY 0x0080 /* Display results of memory checks */
+#define DEBUG_ALLOCATION 0x0004 /* HAlloc. To find holes in the heap */
+#define DEBUG_MESSAGES 0x0008 /* Message passing */
+#define DEBUG_THREADS 0x0010 /* Thread-related stuff */
+#define DEBUG_PROCESSES 0x0020 /* Process creation and removal */
+#define DEBUG_MEMORY 0x0040 /* Display results of memory checks */
extern Uint32 verbose;
@@ -88,7 +87,6 @@ extern void erts_check_stack(Process *p);
extern void erts_check_heap(Process *p);
extern void erts_check_memory(Process *p, Eterm *start, Eterm *end);
extern void verify_process(Process *p);
-extern void verify_everything(void);
extern void print_tagged_memory(Eterm *start, Eterm *end);
extern void print_untagged_memory(Eterm *start, Eterm *end);
extern void print_memory(Process *p);
@@ -99,10 +97,4 @@ extern void erts_debug_use_tmp_heap(int, Process *);
extern void erts_debug_unuse_tmp_heap(int, Process *);
#endif
-#ifdef HYBRID
-extern void print_ma_info(void);
-extern void print_message_area(void);
-extern void check_message_area(void);
-#endif
-
#endif /* _ERL_DEBUG_H_ */
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index cbc7bd4ede..046b46513f 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -87,10 +87,7 @@
#include <stdlib.h>
#include <string.h> /* ssize_t on Mac OS X */
-#if defined(VXWORKS)
-# include <ioLib.h>
-typedef struct iovec SysIOVec;
-#elif defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
+#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
#ifndef STATIC_ERLANG_DRIVER
/* Windows dynamic drivers, everything is different... */
#define ERL_DRIVER_TYPES_ONLY
@@ -371,11 +368,7 @@ typedef struct erl_drv_entry {
/* For windows dynamic drivers */
#ifndef ERL_DRIVER_TYPES_ONLY
-#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) \
- ErlDrvEntry* DRIVER_NAME ## _init(void); \
- ErlDrvEntry* DRIVER_NAME ## _init(void)
-#elif defined(__WIN32__)
+#if defined(__WIN32__)
# define DRIVER_INIT(DRIVER_NAME) \
__declspec(dllexport) ErlDrvEntry* driver_init(void); \
__declspec(dllexport) ErlDrvEntry* driver_init(void)
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 2f165afa06..b673ef6b3c 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -54,9 +54,7 @@ typedef struct erl_fun_entry {
typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
ErlFunEntry* fe; /* Pointer to fun entry. */
-#ifndef HYBRID /* FIND ME! */
struct erl_off_heap_header* next;
-#endif
#ifdef HIPE
UWord* native_address; /* Native code for the fun. */
#endif
@@ -79,13 +77,9 @@ ErlFunEntry* erts_get_fun_entry(Eterm mod, int uniq, int index);
ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
byte* uniq, int index, int arity);
-ErlFunEntry* erts_get_fun_entry2(Eterm mod, int old_uniq, int old_index,
- byte* uniq, int index, int arity);
void erts_erase_fun_entry(ErlFunEntry* fe);
-#ifndef HYBRID /* FIND ME! */
void erts_cleanup_funs(ErlFunThing* funp);
-#endif
void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end);
void erts_dump_fun_entries(int, void *);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 8a0e314e71..d377ba8f31 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -336,6 +336,19 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
return result;
}
+static ERTS_INLINE void reset_active_writer(Process *p)
+{
+ struct erl_off_heap_header* ptr;
+ ptr = MSO(p).first;
+ while (ptr) {
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ ProcBin *pbp = (ProcBin*) ptr;
+ pbp->flags &= ~PB_ACTIVE_WRITER;
+ }
+ ptr = ptr->next;
+ }
+}
+
/*
* Garbage collect a process.
*
@@ -391,6 +404,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
+ reset_active_writer(p);
/*
* Finish.
@@ -2166,7 +2180,6 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
if (pbp->flags & PB_ACTIVE_WRITER) {
- pbp->flags &= ~PB_ACTIVE_WRITER;
shrink->no_of_active++;
}
else { /* inactive */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index af7c2b91a0..175f04b15b 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -34,7 +34,6 @@
#include "erl_binary.h"
#include "dist.h"
#include "erl_mseg.h"
-#include "erl_nmgc.h"
#include "erl_threads.h"
#include "erl_bif_timer.h"
#include "erl_instrument.h"
@@ -214,28 +213,6 @@ ErtsModifiedTimings erts_modified_timings[] = {
Export *erts_delay_trap = NULL;
-#ifdef HYBRID
-Eterm *global_heap;
-Eterm *global_hend;
-Eterm *global_htop;
-Eterm *global_saved_htop;
-Eterm *global_old_heap;
-Eterm *global_old_hend;
-ErlOffHeap erts_global_offheap;
-Uint global_heap_sz = SH_DEFAULT_SIZE;
-
-#ifndef INCREMENTAL
-Eterm *global_high_water;
-Eterm *global_old_htop;
-#endif
-
-Uint16 global_gen_gcs;
-Uint16 global_max_gen_gcs;
-Uint global_gc_flags;
-
-Uint global_heap_min_sz = SH_DEFAULT_SIZE;
-#endif
-
int ignore_break;
int replace_intr;
@@ -330,6 +307,7 @@ erl_init(int ncpu,
erts_init_trace();
erts_init_binary();
erts_init_bits();
+ erts_code_ix_init();
erts_init_fun_table();
init_atom_table();
init_export_table();
@@ -348,7 +326,6 @@ erl_init(int ncpu,
erl_drv_thr_init();
erts_init_async();
erts_init_io(port_tab_sz, port_tab_sz_ignore_files);
- init_copy();
init_load();
erts_init_bif();
erts_init_bif_chksum();
@@ -369,45 +346,6 @@ erl_init(int ncpu,
}
static void
-init_shared_memory(int argc, char **argv)
-{
-#ifdef HYBRID
- int arg_size = 0;
-
- global_heap_sz = erts_next_heap_size(global_heap_sz,0);
-
- /* Make sure arguments will fit on the heap, no one else will check! */
- while (argc--)
- arg_size += 2 + strlen(argv[argc]);
- if (global_heap_sz < arg_size)
- global_heap_sz = erts_next_heap_size(arg_size,1);
-
-#ifndef INCREMENTAL
- global_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
- sizeof(Eterm) * global_heap_sz);
- global_hend = global_heap + global_heap_sz;
- global_htop = global_heap;
- global_high_water = global_heap;
- global_old_hend = global_old_htop = global_old_heap = NULL;
-#endif
-
- global_gen_gcs = 0;
- global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
- global_gc_flags = erts_default_process_flags;
-
- erts_global_offheap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- erts_global_offheap.funs = NULL;
-#endif
- erts_global_offheap.overhead = 0;
-#endif
-
-#ifdef INCREMENTAL
- erts_init_incgc();
-#endif
-}
-
-static void
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
int i;
@@ -419,7 +357,8 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
Eterm env;
start_mod = am_atom_put(modname, sys_strlen(modname));
- if (erts_find_function(start_mod, am_start, 2) == NULL) {
+ if (erts_find_function(start_mod, am_start, 2,
+ erts_active_code_ix()) == NULL) {
erl_exit(5, "No function %s:start/2\n", modname);
}
@@ -518,7 +457,7 @@ load_preloaded(void)
if ((code = sys_preload_begin(&preload_p[i])) == 0)
erl_exit(1, "Failed to find preloaded code for module %s\n",
name);
- res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
+ res = erts_preload_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
if (res != NIL)
erl_exit(1,"Failed loading preloaded module %s (%T)\n",
@@ -583,10 +522,14 @@ void erts_usage(void)
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
+ erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n");
+ erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n");
+ erts_fprintf(stderr, " default|legacy|proposal.\n");
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
@@ -753,7 +696,7 @@ early_init(int *argc, char **argv) /*
envbufsz = sizeof(envbuf);
- /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
+ /* erts_sys_getenv(_raw)() not initialized yet; need erts_sys_getenv__() */
if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
erts_async_max_threads = atoi(envbuf);
else
@@ -861,6 +804,10 @@ early_init(int *argc, char **argv) /*
}
}
+#ifndef USE_THREADS
+ erts_async_max_threads = 0;
+#endif
+
#ifdef ERTS_SMP
no_schedulers = schdlrs;
no_schedulers_online = schdlrs_onln;
@@ -974,13 +921,13 @@ erl_start(int argc, char **argv)
int port_tab_sz_ignore_files = 0;
envbufsz = sizeof(envbuf);
- if (erts_sys_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
+ if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
user_requested_db_max_tabs = atoi(envbuf);
else
user_requested_db_max_tabs = 0;
envbufsz = sizeof(envbuf);
- if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
+ if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
(erts_aint32_t) max_gen_gcs);
@@ -1064,7 +1011,6 @@ erl_start(int argc, char **argv)
switch (*ch) {
case 's': verbose |= DEBUG_SYSTEM; break;
case 'g': verbose |= DEBUG_PRIVATE_GC; break;
- case 'h': verbose |= DEBUG_HYBRID_GC; break;
case 'M': verbose |= DEBUG_MEMORY; break;
case 'a': verbose |= DEBUG_ALLOCATION; break;
case 't': verbose |= DEBUG_THREADS; break;
@@ -1077,7 +1023,6 @@ erl_start(int argc, char **argv)
erts_printf("Verbose level: ");
if (verbose & DEBUG_SYSTEM) erts_printf("SYSTEM ");
if (verbose & DEBUG_PRIVATE_GC) erts_printf("PRIVATE_GC ");
- if (verbose & DEBUG_HYBRID_GC) erts_printf("HYBRID_GC ");
if (verbose & DEBUG_MEMORY) erts_printf("PARANOID_MEMORY ");
if (verbose & DEBUG_ALLOCATION) erts_printf("ALLOCATION ");
if (verbose & DEBUG_THREADS) erts_printf("THREADS ");
@@ -1105,12 +1050,6 @@ erl_start(int argc, char **argv)
#ifdef HIPE
strcat(tmp, ",HIPE");
#endif
-#ifdef INCREMENTAL
- strcat(tmp, ",INCREMENTAL_GC");
-#endif
-#ifdef HYBRID
- strcat(tmp, ",HYBRID");
-#endif
erts_fprintf(stderr, "Erlang ");
if (tmp[1]) {
erts_fprintf(stderr, "(%s) ", tmp+1);
@@ -1312,6 +1251,16 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("bwt", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if (erts_sched_set_busy_wait_threshold(arg) != 0) {
+ erts_fprintf(stderr, "bad scheduler busy wait threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+			    ("scheduler busy wait threshold: %s\n", arg));
+ }
else if (has_prefix("cl", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp("true", arg) == 0)
@@ -1385,13 +1334,23 @@ erl_start(int argc, char **argv)
erts_use_sender_punish = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
arg = get_arg(sub_param+2, argv[i+1], &i);
- if (erts_sched_set_wakeup_limit(arg) != 0) {
+ if (erts_sched_set_wakeup_other_thresold(arg) != 0) {
erts_fprintf(stderr, "scheduler wakeup threshold: %s\n",
arg);
erts_usage();
}
VERBOSE(DEBUG_SYSTEM,
- ("scheduler wakup threshold: %s\n", arg));
+ ("scheduler wakeup threshold: %s\n", arg));
+ }
+ else if (sys_strcmp("ws", sub_param) == 0) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (erts_sched_set_wakeup_other_type(arg) != 0) {
+ erts_fprintf(stderr, "scheduler wakeup strategy: %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+			    ("scheduler wakeup strategy: %s\n", arg));
}
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
@@ -1585,8 +1544,9 @@ erl_start(int argc, char **argv)
port_tab_sz,
port_tab_sz_ignore_files);
- init_shared_memory(boot_argc, boot_argv);
load_preloaded();
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
erts_initialized = 1;
@@ -1672,32 +1632,6 @@ system_cleanup(int flush_async)
#endif
#endif
-#ifdef HYBRID
- if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
- (void *)ma_src_stack);
- if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
- (void *)ma_dst_stack);
- if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
- (void *)ma_offset_stack);
- ma_src_stack = NULL;
- ma_dst_stack = NULL;
- ma_offset_stack = NULL;
- erts_cleanup_offheap(&erts_global_offheap);
-#endif
-
-#if defined(HYBRID) && !defined(INCREMENTAL)
- if (global_heap) {
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (void*) global_heap,
- sizeof(Eterm) * global_heap_sz);
- }
- global_heap = NULL;
-#endif
-
-#ifdef INCREMENTAL
- erts_cleanup_incgc();
-#endif
-
erts_exit_flush_async();
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 2505c094ab..69bb4be717 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -83,6 +83,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "bif_timers", NULL },
{ "reg_tab", NULL },
{ "proc_main", "pid" },
+ { "old_code", "address" },
#ifdef HIPE
{ "hipe_mfait_lock", NULL },
#endif
@@ -92,6 +93,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_msgq", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "code_write_permission", NULL },
{ "proc_status", "pid" },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
@@ -170,9 +172,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "sched_stat", NULL },
#endif
{ "async_init_mtx", NULL },
-#ifdef ERTS_SMP
- { "proc_lck_qs_alloc", NULL },
-#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index e76bcf556e..068340abe7 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -105,7 +105,7 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
- ((void) ((A) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
+ ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
#ifdef ERTS_SMP
#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
#else
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index a36c53560e..5f75b0ac0b 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -49,7 +49,7 @@ const char *str_undefined = "undefined";
static ethr_tsd_key lcnt_thr_data_key;
static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[1024];
+static erts_lcnt_thread_data_t *lcnt_thread_data[4096];
/* local functions */
@@ -240,7 +240,7 @@ void erts_lcnt_init() {
lcnt_lock();
- erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK;
+ erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK | ERTS_LCNT_OPT_LOCATION;
eltd = lcnt_thread_data_alloc();
@@ -312,7 +312,7 @@ void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
}
void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
-
+
if (lock->next) lock->next->prev = lock->prev;
if (lock->prev) lock->prev->next = lock->next;
if (list->head == lock) list->head = lock->next;
@@ -334,6 +334,10 @@ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
}
void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
+ if (!name) {
+ lock->flag = 0;
+ return;
+ }
lcnt_lock();
lock->next = NULL;
@@ -363,6 +367,8 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
erts_lcnt_lock_t *deleted_lock;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
+
lcnt_lock();
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
@@ -378,6 +384,7 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
}
/* delete original */
erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
+ lock->flag = 0;
lcnt_unlock();
}
@@ -389,6 +396,7 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
eltd = lcnt_get_thread_data();
@@ -422,6 +430,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
@@ -452,6 +461,7 @@ void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
/* should check if this thread was "waiting" */
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
ethr_atomic_dec( &lock->w_state);
}
@@ -475,6 +485,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
@@ -489,9 +500,13 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
ASSERT(eltd);
/* if lock was in conflict, time it */
-
- stats = lcnt_get_lock_stats(lock, file, line);
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
+ stats = lcnt_get_lock_stats(lock, file, line);
+ } else {
+ stats = &lock->stats[0];
+ }
+
if (eltd->timer_set) {
lcnt_time(&timer);
@@ -510,6 +525,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
}
@@ -520,6 +536,7 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
#ifdef DEBUG
/* flowstate */
flowstate = ethr_atomic_read(&lock->flowstate);
@@ -537,6 +554,7 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
/* Determine lock_state via res instead of state */
if (res != EBUSY) {
if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
@@ -555,6 +573,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
if (res != EBUSY) {
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 6306580ae4..a4fc91b510 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -89,6 +89,7 @@
#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 3)
+#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 4)
typedef struct {
unsigned long s;
@@ -201,5 +202,7 @@ void erts_lcnt_clear_counters(void);
char *erts_lcnt_lock_type(Uint16 type);
erts_lcnt_data_t *erts_lcnt_get_data(void);
+#define ERTS_LCNT_LOCK_TYPE(lockp) ((lockp)->flag & ERTS_LCNT_LT_ALL)
+
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
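
The new ERTS_LCNT_LOCK_TYPE(lockp) macro masks out the type bits of a lock's flag, and the erl_lock_count.c hunks above add an early `if (!ERTS_LCNT_LOCK_TYPE(lock)) return;` to every probe. A small sketch of the resulting guard pattern, assuming only what is visible in these hunks (a lock initialized with a NULL name gets flag = 0 and is therefore skipped by all counting code):

    /* Sketch of the guard added to each probe above. */
    void lcnt_probe_sketch(erts_lcnt_lock_t *lock)
    {
        if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND)
            return;                       /* counting globally suspended */
        if (!ERTS_LCNT_LOCK_TYPE(lock))
            return;                       /* untyped/uninitialized lock: ignore */
        /* ... normal lock-counting work would go here ... */
    }
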
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 92af3f2ac4..325d77e911 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -29,7 +29,6 @@
#include "global.h"
#include "erl_message.h"
#include "erl_process.h"
-#include "erl_nmgc.h"
#include "erl_binary.h"
#include "dtrace-wrapper.h"
@@ -602,9 +601,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
#ifdef HARD_DEBUG
- ProcBin *dbg_mso_start = off_heap->mso;
- ErlFunThing *dbg_fun_start = off_heap->funs;
- ExternalThing *dbg_external_start = off_heap->externals;
+ struct erl_off_heap_header* dbg_oh_start = off_heap->first;
Eterm dbg_term, dbg_token;
ErlHeapFragment *dbg_bp;
Uint *dbg_hp, *dbg_thp_start;
@@ -778,48 +775,16 @@ copy_done:
int i, j;
ErlHeapFragment* frag;
{
- ProcBin *mso = off_heap->mso;
+ struct erl_off_heap_header* dbg_oh = off_heap->first;
i = j = 0;
- while (mso != dbg_mso_start) {
- mso = mso->next;
+ while (dbg_oh != dbg_oh_start) {
+ dbg_oh = dbg_oh->next;
i++;
}
for (frag=bp; frag; frag=frag->next) {
- mso = frag->off_heap.mso;
- while (mso) {
- mso = mso->next;
- j++;
- }
- }
- ASSERT(i == j);
- }
- {
- ErlFunThing *fun = off_heap->funs;
- i = j = 0;
- while (fun != dbg_fun_start) {
- fun = fun->next;
- i++;
- }
- for (frag=bp; frag; frag=frag->next) {
- fun = frag->off_heap.funs;
- while (fun) {
- fun = fun->next;
- j++;
- }
- }
- ASSERT(i == j);
- }
- {
- ExternalThing *external = off_heap->externals;
- i = j = 0;
- while (external != dbg_external_start) {
- external = external->next;
- i++;
- }
- for (frag=bp; frag; frag=frag->next) {
- external = frag->off_heap.externals;
- while (external) {
- external = external->next;
+ dbg_oh = frag->off_heap.first;
+ while (dbg_oh) {
+ dbg_oh = dbg_oh->next;
j++;
}
}
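
The HARD_DEBUG hunk above replaces three separate per-type walks (mso, funs, externals) with a single walk over the unified off-heap list rooted at off_heap->first. A minimal sketch of such a traversal, assuming only the first/next fields used in the hunk:

    /* Sketch: count every off-heap header (ProcBins, funs, externals alike). */
    static int count_off_heap_sketch(ErlOffHeap* oh)
    {
        struct erl_off_heap_header* hdr;
        int n = 0;
        for (hdr = oh->first; hdr; hdr = hdr->next)
            n++;
        return n;
    }
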
@@ -929,8 +894,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send)) {
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
- erts_snprintf(receiver_name, sizeof(receiver_name), "%T", receiver->common.id);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id);
}
#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -1013,54 +978,6 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
-#ifdef HYBRID
- } else {
- ErlMessage* mp = message_alloc();
- BM_SWAP_TIMER(send,copy);
-#ifdef INCREMENTAL
- /* TODO: During GC activate processes if the message relies in
- * the fromspace and the sender is active. During major
- * collections add the message to the gray stack if it relies
- * in the old generation and the sender is active and the
- * receiver is inactive.
-
- if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
- (ptr_val(message) >= inc_fromspc &&
- ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender))
- INC_ACTIVATE(receiver);
- else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
- (ptr_val(message) >= global_old_heap &&
- ptr_val(message) < global_old_hend) &&
- INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver))
- Mark message in blackmap and add it to the gray stack
- */
-
- if (!IS_CONST(message))
- INC_ACTIVATE(receiver);
-#endif
- LAZY_COPY(sender,message);
- BM_SWAP_TIMER(copy,send);
- DTRACE6(message_send, sender_name, receiver_name,
- size_object(message)msize, tok_label, tok_lastcnt, tok_serial);
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = NIL;
-#endif
- mp->next = NULL;
- LINK_MESSAGE(receiver, mp);
- ACTIVATE(receiver);
-
- res = receiver->msg.len;
-
- erts_proc_notify_new_message(receiver);
-
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
- trace_receive(receiver, message);
- }
-
- BM_SWAP_TIMER(send,system);
-#else
} else if (sender == receiver) {
/* Drop message if receiver has a pending exit ... */
#ifdef ERTS_SMP
@@ -1179,7 +1096,6 @@ erts_send_message(Process* sender,
}
BM_SWAP_TIMER(send,system);
#endif /* #ifndef ERTS_SMP */
-#endif /* HYBRID */
}
return res;
}
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 358c67bf20..5a6fb8589f 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -611,7 +611,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
hostname[0] = '\0';
hostname[MAXHOSTNAMELEN-1] = '\0';
- sys_get_pid(pid);
+ sys_get_pid(pid, sizeof(pid));
write_trace_header(nodename ? nodename : "", pid, hostname);
erts_mtrace_update_heap_size();
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 04721cee7c..1bd2d933b2 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -33,6 +33,10 @@
#include "big.h"
#include "beam_bp.h"
#include "erl_thr_progress.h"
+#include "dtrace-wrapper.h"
+#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
+#define HAVE_USE_DTRACE 1
+#endif
#include <limits.h>
#include <stddef.h> /* offsetof */
@@ -1392,6 +1396,44 @@ size_t enif_sizeof_resource(void* obj)
return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
}
+
+void* enif_dlopen(const char* lib,
+ void (*err_handler)(void*,const char*), void* err_arg)
+{
+ ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
+ void* handle;
+ void* init_func;
+ if (erts_sys_ddll_open2(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
+ if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) {
+ erts_sys_ddll_call_nif_init(init_func);
+ }
+ }
+ else {
+ if (err_handler != NULL) {
+ (*err_handler)(err_arg, errdesc.str);
+ }
+ handle = NULL;
+ }
+ erts_sys_ddll_free_error(&errdesc);
+ return handle;
+}
+
+void* enif_dlsym(void* handle, const char* symbol,
+ void (*err_handler)(void*,const char*), void* err_arg)
+{
+ ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
+ void* ret;
+ if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) {
+ if (err_handler != NULL) {
+ (*err_handler)(err_arg, errdesc.str);
+ }
+ erts_sys_ddll_free_error(&errdesc);
+ return NULL;
+ }
+ return ret;
+}
+
+
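+
The enif_dlopen() and enif_dlsym() functions added above both take an optional error callback that receives a caller-supplied argument and the loader's error string. A hypothetical usage sketch from a NIF library (library name, symbol name and callback are made up for illustration):

    #include <stdio.h>

    static void dl_err(void* arg, const char* reason)
    {
        fprintf(stderr, "%s failed: %s\n", (const char*) arg, reason);
    }

    static int call_helper(void)
    {
        void* handle = enif_dlopen("libhelper.so", dl_err, (void*)"enif_dlopen");
        int (*helper)(int);
        if (handle == NULL)
            return -1;
        helper = (int (*)(int)) enif_dlsym(handle, "helper", dl_err,
                                           (void*)"enif_dlsym");
        if (helper == NULL)
            return -1;
        return helper(42);
    }
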
/***************************************************************************
** load_nif/2 **
***************************************************************************/
@@ -1524,6 +1566,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
if (len < 0) {
BIF_ERROR(BIF_P, BADARG);
}
+
lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1);
if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) {
@@ -1532,6 +1575,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
lib_name[len] = '\0';
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ erts_free(ERTS_ALC_T_TMP, lib_name);
+ ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
/* Block system (is this the right place to do it?) */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
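
The hunk above makes load_nif/2 take the global code write permission before it blocks the system, and yield (retrying the whole BIF later) if another process already holds it; the matching erts_release_code_write_permission() appears further down in this diff. A sketch of the general pattern, with a hypothetical BIF name:

    /* Sketch of the seize / yield / release pattern shown above. */
    BIF_RETTYPE hypothetical_code_bif_2(BIF_ALIST_2)
    {
        if (!erts_try_seize_code_write_permission(BIF_P)) {
            /* someone else is modifying code: reschedule this call */
            ERTS_BIF_YIELD2(bif_export[BIF_hypothetical_code_bif_2],
                            BIF_P, BIF_ARG_1, BIF_ARG_2);
        }
        /* ... modify loaded code here ... */
        erts_release_code_write_permission();
        BIF_RET(am_ok);
    }
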
@@ -1545,11 +1594,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ASSERT(caller != NULL);
mod_atom = caller[0];
ASSERT(is_atom(mod_atom));
- mod=erts_get_module(mod_atom);
+ mod=erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(mod != NULL);
- if (!in_area(caller, mod->code, mod->code_length)) {
- ASSERT(in_area(caller, mod->old_code, mod->old_code_length));
+ if (!in_area(caller, mod->curr.code, mod->curr.code_length)) {
+ ASSERT(in_area(caller, mod->old.code, mod->old.code_length));
ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
"module '%T' not allowed", mod_atom);
@@ -1595,7 +1644,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
BeamInstr** code_pp;
ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom)
- || (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) {
+ || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
@@ -1624,18 +1673,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_refc_init(&lib->rt_dtor_cnt, 0);
lib->mod = mod;
env.mod_nif = lib;
- if (mod->nif != NULL) { /* Reload */
+ if (mod->curr.nif != NULL) { /* Reload */
int k;
- lib->priv_data = mod->nif->priv_data;
+ lib->priv_data = mod->curr.nif->priv_data;
- ASSERT(mod->nif->entry != NULL);
+ ASSERT(mod->curr.nif->entry != NULL);
if (entry->reload == NULL) {
ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
goto error;
}
/* Check that no NIF is removed */
- for (k=0; k < mod->nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->nif->entry->funcs[k];
+ for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
+ ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
for (i=0; i < entry->num_of_funcs; i++) {
if (old_func->arity == entry->funcs[i].arity
&& sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
@@ -1656,24 +1705,24 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
}
else {
- mod->nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(mod->nif);
+ mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */
+ erts_unload_nif(mod->curr.nif);
reload_warning = 1;
}
}
else {
lib->priv_data = NULL;
- if (mod->old_nif != NULL) { /* Upgrade */
- void* prev_old_data = mod->old_nif->priv_data;
+ if (mod->old.nif != NULL) { /* Upgrade */
+ void* prev_old_data = mod->old.nif->priv_data;
if (entry->upgrade == NULL) {
ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
goto error;
}
erts_pre_nif(&env, BIF_P, lib);
- veto = entry->upgrade(&env, &lib->priv_data, &mod->old_nif->priv_data, BIF_ARG_2);
+ veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
- mod->old_nif->priv_data = prev_old_data;
+ mod->old.nif->priv_data = prev_old_data;
ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
}
/*else if (mod->old_nif->priv_data != prev_old_data) {
@@ -1693,20 +1742,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->nif = lib;
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom);
- code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
- bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ GenericBp* g = (GenericBp *) code_ptr[1];
+ ASSERT(code_ptr[5+0] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
code_ptr[5+2] = (BeamInstr) lib;
@@ -1726,6 +1776,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
if (reload_warning) {
@@ -1845,3 +1896,1079 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size)
#endif /* READONLY_CHECK */
+#ifdef HAVE_USE_DTRACE
+
+#define MESSAGE_BUFSIZ 1024
+
+static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term,
+ char **ptr, char *buf, int bufsiz)
+{
+ ErlNifBinary str_bin;
+
+ if (!enif_inspect_iolist_as_binary(env, term, &str_bin) ||
+ str_bin.size > bufsiz) {
+ *ptr = NULL;
+ } else {
+ memcpy(buf, (char *) str_bin.data, str_bin.size);
+ buf[str_bin.size] = '\0';
+ *ptr = buf;
+ }
+}
+
+ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc,
+ const ERL_NIF_TERM argv[])
+{
+ ErlNifBinary message_bin;
+ DTRACE_CHARBUF(messagebuf, MESSAGE_BUFSIZ + 1);
+
+ if (DTRACE_ENABLED(user_trace_s1)) {
+ if (!enif_inspect_iolist_as_binary(env, argv[0], &message_bin) ||
+ message_bin.size > MESSAGE_BUFSIZ) {
+ return am_badarg;
+ }
+ memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
+ messagebuf[message_bin.size] = '\0';
+ DTRACE1(user_trace_s1, messagebuf);
+ return am_true;
+ } else {
+ return am_false;
+ }
+}
+
+ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc,
+ const ERL_NIF_TERM argv[])
+{
+ DTRACE_CHARBUF(procbuf, 32 + 1);
+ DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1);
+ char *utbuf = NULL;
+ ErlNifSInt64 i1, i2, i3, i4;
+ DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
+ char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;
+
+ if (DTRACE_ENABLED(user_trace_i4s4)) {
+ dtrace_nifenv_str(env, procbuf);
+ get_string_maybe(env, argv[0], &utbuf, user_tagbuf, MESSAGE_BUFSIZ);
+ if (! enif_get_int64(env, argv[1], &i1))
+ i1 = 0;
+ if (! enif_get_int64(env, argv[2], &i2))
+ i2 = 0;
+ if (! enif_get_int64(env, argv[3], &i3))
+ i3 = 0;
+ if (! enif_get_int64(env, argv[4], &i4))
+ i4 = 0;
+ get_string_maybe(env, argv[5], &mbuf1, messagebuf1, MESSAGE_BUFSIZ);
+ get_string_maybe(env, argv[6], &mbuf2, messagebuf2, MESSAGE_BUFSIZ);
+ get_string_maybe(env, argv[7], &mbuf3, messagebuf3, MESSAGE_BUFSIZ);
+ get_string_maybe(env, argv[8], &mbuf4, messagebuf4, MESSAGE_BUFSIZ);
+ DTRACE10(user_trace_i4s4, procbuf, utbuf,
+ i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4);
+ return am_true;
+ } else {
+ return am_false;
+ }
+}
+
+#define DTRACE10_LABEL(name, label, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ erlang_##name##label((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9))
+#define N_STATEMENT(the_label) \
+ case the_label: \
+ if (DTRACE_ENABLED(user_trace_n##the_label)) { \
+ dtrace_nifenv_str(env, procbuf); \
+ get_string_maybe(env, argv[1], &utbuf, user_tagbuf, MESSAGE_BUFSIZ); \
+ if (! enif_get_int64(env, argv[2], &i1)) \
+ i1 = 0; \
+ if (! enif_get_int64(env, argv[3], &i2)) \
+ i2 = 0; \
+ if (! enif_get_int64(env, argv[4], &i3)) \
+ i3 = 0; \
+ if (! enif_get_int64(env, argv[5], &i4)) \
+ i4 = 0; \
+ get_string_maybe(env, argv[6], &mbuf1, messagebuf1, MESSAGE_BUFSIZ); \
+ get_string_maybe(env, argv[7], &mbuf2, messagebuf2, MESSAGE_BUFSIZ); \
+ get_string_maybe(env, argv[8], &mbuf3, messagebuf3, MESSAGE_BUFSIZ); \
+ get_string_maybe(env, argv[9], &mbuf4, messagebuf4, MESSAGE_BUFSIZ); \
+ DTRACE10_LABEL(user_trace_n, the_label, procbuf, utbuf, \
+ i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4); \
+ return am_true; \
+ } else { \
+ return am_false; \
+ } \
+ break
+
+ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc,
+ const ERL_NIF_TERM argv[])
+{
+ DTRACE_CHARBUF(procbuf, 32 + 1);
+ DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1);
+ char *utbuf = NULL;
+ ErlNifSInt64 i1, i2, i3, i4;
+ DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
+ DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
+ char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;
+ ErlNifSInt64 label = 0;
+
+ if (! enif_get_int64(env, argv[0], &label) || label < 0 || label > 1023) {
+ return am_badarg;
+ }
+ switch (label) {
+ N_STATEMENT(0);
+ N_STATEMENT(1);
+ N_STATEMENT(2);
+ N_STATEMENT(3);
+ N_STATEMENT(4);
+ N_STATEMENT(5);
+ N_STATEMENT(6);
+ N_STATEMENT(7);
+ N_STATEMENT(8);
+ N_STATEMENT(9);
+ N_STATEMENT(10);
+ N_STATEMENT(11);
+ N_STATEMENT(12);
+ N_STATEMENT(13);
+ N_STATEMENT(14);
+ N_STATEMENT(15);
+ N_STATEMENT(16);
+ N_STATEMENT(17);
+ N_STATEMENT(18);
+ N_STATEMENT(19);
+ N_STATEMENT(20);
+ N_STATEMENT(21);
+ N_STATEMENT(22);
+ N_STATEMENT(23);
+ N_STATEMENT(24);
+ N_STATEMENT(25);
+ N_STATEMENT(26);
+ N_STATEMENT(27);
+ N_STATEMENT(28);
+ N_STATEMENT(29);
+ N_STATEMENT(30);
+ N_STATEMENT(31);
+ N_STATEMENT(32);
+ N_STATEMENT(33);
+ N_STATEMENT(34);
+ N_STATEMENT(35);
+ N_STATEMENT(36);
+ N_STATEMENT(37);
+ N_STATEMENT(38);
+ N_STATEMENT(39);
+ N_STATEMENT(40);
+ N_STATEMENT(41);
+ N_STATEMENT(42);
+ N_STATEMENT(43);
+ N_STATEMENT(44);
+ N_STATEMENT(45);
+ N_STATEMENT(46);
+ N_STATEMENT(47);
+ N_STATEMENT(48);
+ N_STATEMENT(49);
+ N_STATEMENT(50);
+ N_STATEMENT(51);
+ N_STATEMENT(52);
+ N_STATEMENT(53);
+ N_STATEMENT(54);
+ N_STATEMENT(55);
+ N_STATEMENT(56);
+ N_STATEMENT(57);
+ N_STATEMENT(58);
+ N_STATEMENT(59);
+ N_STATEMENT(60);
+ N_STATEMENT(61);
+ N_STATEMENT(62);
+ N_STATEMENT(63);
+ N_STATEMENT(64);
+ N_STATEMENT(65);
+ N_STATEMENT(66);
+ N_STATEMENT(67);
+ N_STATEMENT(68);
+ N_STATEMENT(69);
+ N_STATEMENT(70);
+ N_STATEMENT(71);
+ N_STATEMENT(72);
+ N_STATEMENT(73);
+ N_STATEMENT(74);
+ N_STATEMENT(75);
+ N_STATEMENT(76);
+ N_STATEMENT(77);
+ N_STATEMENT(78);
+ N_STATEMENT(79);
+ N_STATEMENT(80);
+ N_STATEMENT(81);
+ N_STATEMENT(82);
+ N_STATEMENT(83);
+ N_STATEMENT(84);
+ N_STATEMENT(85);
+ N_STATEMENT(86);
+ N_STATEMENT(87);
+ N_STATEMENT(88);
+ N_STATEMENT(89);
+ N_STATEMENT(90);
+ N_STATEMENT(91);
+ N_STATEMENT(92);
+ N_STATEMENT(93);
+ N_STATEMENT(94);
+ N_STATEMENT(95);
+ N_STATEMENT(96);
+ N_STATEMENT(97);
+ N_STATEMENT(98);
+ N_STATEMENT(99);
+ N_STATEMENT(100);
+ N_STATEMENT(101);
+ N_STATEMENT(102);
+ N_STATEMENT(103);
+ N_STATEMENT(104);
+ N_STATEMENT(105);
+ N_STATEMENT(106);
+ N_STATEMENT(107);
+ N_STATEMENT(108);
+ N_STATEMENT(109);
+ N_STATEMENT(110);
+ N_STATEMENT(111);
+ N_STATEMENT(112);
+ N_STATEMENT(113);
+ N_STATEMENT(114);
+ N_STATEMENT(115);
+ N_STATEMENT(116);
+ N_STATEMENT(117);
+ N_STATEMENT(118);
+ N_STATEMENT(119);
+ N_STATEMENT(120);
+ N_STATEMENT(121);
+ N_STATEMENT(122);
+ N_STATEMENT(123);
+ N_STATEMENT(124);
+ N_STATEMENT(125);
+ N_STATEMENT(126);
+ N_STATEMENT(127);
+ N_STATEMENT(128);
+ N_STATEMENT(129);
+ N_STATEMENT(130);
+ N_STATEMENT(131);
+ N_STATEMENT(132);
+ N_STATEMENT(133);
+ N_STATEMENT(134);
+ N_STATEMENT(135);
+ N_STATEMENT(136);
+ N_STATEMENT(137);
+ N_STATEMENT(138);
+ N_STATEMENT(139);
+ N_STATEMENT(140);
+ N_STATEMENT(141);
+ N_STATEMENT(142);
+ N_STATEMENT(143);
+ N_STATEMENT(144);
+ N_STATEMENT(145);
+ N_STATEMENT(146);
+ N_STATEMENT(147);
+ N_STATEMENT(148);
+ N_STATEMENT(149);
+ N_STATEMENT(150);
+ N_STATEMENT(151);
+ N_STATEMENT(152);
+ N_STATEMENT(153);
+ N_STATEMENT(154);
+ N_STATEMENT(155);
+ N_STATEMENT(156);
+ N_STATEMENT(157);
+ N_STATEMENT(158);
+ N_STATEMENT(159);
+ N_STATEMENT(160);
+ N_STATEMENT(161);
+ N_STATEMENT(162);
+ N_STATEMENT(163);
+ N_STATEMENT(164);
+ N_STATEMENT(165);
+ N_STATEMENT(166);
+ N_STATEMENT(167);
+ N_STATEMENT(168);
+ N_STATEMENT(169);
+ N_STATEMENT(170);
+ N_STATEMENT(171);
+ N_STATEMENT(172);
+ N_STATEMENT(173);
+ N_STATEMENT(174);
+ N_STATEMENT(175);
+ N_STATEMENT(176);
+ N_STATEMENT(177);
+ N_STATEMENT(178);
+ N_STATEMENT(179);
+ N_STATEMENT(180);
+ N_STATEMENT(181);
+ N_STATEMENT(182);
+ N_STATEMENT(183);
+ N_STATEMENT(184);
+ N_STATEMENT(185);
+ N_STATEMENT(186);
+ N_STATEMENT(187);
+ N_STATEMENT(188);
+ N_STATEMENT(189);
+ N_STATEMENT(190);
+ N_STATEMENT(191);
+ N_STATEMENT(192);
+ N_STATEMENT(193);
+ N_STATEMENT(194);
+ N_STATEMENT(195);
+ N_STATEMENT(196);
+ N_STATEMENT(197);
+ N_STATEMENT(198);
+ N_STATEMENT(199);
+ N_STATEMENT(200);
+ N_STATEMENT(201);
+ N_STATEMENT(202);
+ N_STATEMENT(203);
+ N_STATEMENT(204);
+ N_STATEMENT(205);
+ N_STATEMENT(206);
+ N_STATEMENT(207);
+ N_STATEMENT(208);
+ N_STATEMENT(209);
+ N_STATEMENT(210);
+ N_STATEMENT(211);
+ N_STATEMENT(212);
+ N_STATEMENT(213);
+ N_STATEMENT(214);
+ N_STATEMENT(215);
+ N_STATEMENT(216);
+ N_STATEMENT(217);
+ N_STATEMENT(218);
+ N_STATEMENT(219);
+ N_STATEMENT(220);
+ N_STATEMENT(221);
+ N_STATEMENT(222);
+ N_STATEMENT(223);
+ N_STATEMENT(224);
+ N_STATEMENT(225);
+ N_STATEMENT(226);
+ N_STATEMENT(227);
+ N_STATEMENT(228);
+ N_STATEMENT(229);
+ N_STATEMENT(230);
+ N_STATEMENT(231);
+ N_STATEMENT(232);
+ N_STATEMENT(233);
+ N_STATEMENT(234);
+ N_STATEMENT(235);
+ N_STATEMENT(236);
+ N_STATEMENT(237);
+ N_STATEMENT(238);
+ N_STATEMENT(239);
+ N_STATEMENT(240);
+ N_STATEMENT(241);
+ N_STATEMENT(242);
+ N_STATEMENT(243);
+ N_STATEMENT(244);
+ N_STATEMENT(245);
+ N_STATEMENT(246);
+ N_STATEMENT(247);
+ N_STATEMENT(248);
+ N_STATEMENT(249);
+ N_STATEMENT(250);
+ N_STATEMENT(251);
+ N_STATEMENT(252);
+ N_STATEMENT(253);
+ N_STATEMENT(254);
+ N_STATEMENT(255);
+ N_STATEMENT(256);
+ N_STATEMENT(257);
+ N_STATEMENT(258);
+ N_STATEMENT(259);
+ N_STATEMENT(260);
+ N_STATEMENT(261);
+ N_STATEMENT(262);
+ N_STATEMENT(263);
+ N_STATEMENT(264);
+ N_STATEMENT(265);
+ N_STATEMENT(266);
+ N_STATEMENT(267);
+ N_STATEMENT(268);
+ N_STATEMENT(269);
+ N_STATEMENT(270);
+ N_STATEMENT(271);
+ N_STATEMENT(272);
+ N_STATEMENT(273);
+ N_STATEMENT(274);
+ N_STATEMENT(275);
+ N_STATEMENT(276);
+ N_STATEMENT(277);
+ N_STATEMENT(278);
+ N_STATEMENT(279);
+ N_STATEMENT(280);
+ N_STATEMENT(281);
+ N_STATEMENT(282);
+ N_STATEMENT(283);
+ N_STATEMENT(284);
+ N_STATEMENT(285);
+ N_STATEMENT(286);
+ N_STATEMENT(287);
+ N_STATEMENT(288);
+ N_STATEMENT(289);
+ N_STATEMENT(290);
+ N_STATEMENT(291);
+ N_STATEMENT(292);
+ N_STATEMENT(293);
+ N_STATEMENT(294);
+ N_STATEMENT(295);
+ N_STATEMENT(296);
+ N_STATEMENT(297);
+ N_STATEMENT(298);
+ N_STATEMENT(299);
+ N_STATEMENT(300);
+ N_STATEMENT(301);
+ N_STATEMENT(302);
+ N_STATEMENT(303);
+ N_STATEMENT(304);
+ N_STATEMENT(305);
+ N_STATEMENT(306);
+ N_STATEMENT(307);
+ N_STATEMENT(308);
+ N_STATEMENT(309);
+ N_STATEMENT(310);
+ N_STATEMENT(311);
+ N_STATEMENT(312);
+ N_STATEMENT(313);
+ N_STATEMENT(314);
+ N_STATEMENT(315);
+ N_STATEMENT(316);
+ N_STATEMENT(317);
+ N_STATEMENT(318);
+ N_STATEMENT(319);
+ N_STATEMENT(320);
+ N_STATEMENT(321);
+ N_STATEMENT(322);
+ N_STATEMENT(323);
+ N_STATEMENT(324);
+ N_STATEMENT(325);
+ N_STATEMENT(326);
+ N_STATEMENT(327);
+ N_STATEMENT(328);
+ N_STATEMENT(329);
+ N_STATEMENT(330);
+ N_STATEMENT(331);
+ N_STATEMENT(332);
+ N_STATEMENT(333);
+ N_STATEMENT(334);
+ N_STATEMENT(335);
+ N_STATEMENT(336);
+ N_STATEMENT(337);
+ N_STATEMENT(338);
+ N_STATEMENT(339);
+ N_STATEMENT(340);
+ N_STATEMENT(341);
+ N_STATEMENT(342);
+ N_STATEMENT(343);
+ N_STATEMENT(344);
+ N_STATEMENT(345);
+ N_STATEMENT(346);
+ N_STATEMENT(347);
+ N_STATEMENT(348);
+ N_STATEMENT(349);
+ N_STATEMENT(350);
+ N_STATEMENT(351);
+ N_STATEMENT(352);
+ N_STATEMENT(353);
+ N_STATEMENT(354);
+ N_STATEMENT(355);
+ N_STATEMENT(356);
+ N_STATEMENT(357);
+ N_STATEMENT(358);
+ N_STATEMENT(359);
+ N_STATEMENT(360);
+ N_STATEMENT(361);
+ N_STATEMENT(362);
+ N_STATEMENT(363);
+ N_STATEMENT(364);
+ N_STATEMENT(365);
+ N_STATEMENT(366);
+ N_STATEMENT(367);
+ N_STATEMENT(368);
+ N_STATEMENT(369);
+ N_STATEMENT(370);
+ N_STATEMENT(371);
+ N_STATEMENT(372);
+ N_STATEMENT(373);
+ N_STATEMENT(374);
+ N_STATEMENT(375);
+ N_STATEMENT(376);
+ N_STATEMENT(377);
+ N_STATEMENT(378);
+ N_STATEMENT(379);
+ N_STATEMENT(380);
+ N_STATEMENT(381);
+ N_STATEMENT(382);
+ N_STATEMENT(383);
+ N_STATEMENT(384);
+ N_STATEMENT(385);
+ N_STATEMENT(386);
+ N_STATEMENT(387);
+ N_STATEMENT(388);
+ N_STATEMENT(389);
+ N_STATEMENT(390);
+ N_STATEMENT(391);
+ N_STATEMENT(392);
+ N_STATEMENT(393);
+ N_STATEMENT(394);
+ N_STATEMENT(395);
+ N_STATEMENT(396);
+ N_STATEMENT(397);
+ N_STATEMENT(398);
+ N_STATEMENT(399);
+ N_STATEMENT(400);
+ N_STATEMENT(401);
+ N_STATEMENT(402);
+ N_STATEMENT(403);
+ N_STATEMENT(404);
+ N_STATEMENT(405);
+ N_STATEMENT(406);
+ N_STATEMENT(407);
+ N_STATEMENT(408);
+ N_STATEMENT(409);
+ N_STATEMENT(410);
+ N_STATEMENT(411);
+ N_STATEMENT(412);
+ N_STATEMENT(413);
+ N_STATEMENT(414);
+ N_STATEMENT(415);
+ N_STATEMENT(416);
+ N_STATEMENT(417);
+ N_STATEMENT(418);
+ N_STATEMENT(419);
+ N_STATEMENT(420);
+ N_STATEMENT(421);
+ N_STATEMENT(422);
+ N_STATEMENT(423);
+ N_STATEMENT(424);
+ N_STATEMENT(425);
+ N_STATEMENT(426);
+ N_STATEMENT(427);
+ N_STATEMENT(428);
+ N_STATEMENT(429);
+ N_STATEMENT(430);
+ N_STATEMENT(431);
+ N_STATEMENT(432);
+ N_STATEMENT(433);
+ N_STATEMENT(434);
+ N_STATEMENT(435);
+ N_STATEMENT(436);
+ N_STATEMENT(437);
+ N_STATEMENT(438);
+ N_STATEMENT(439);
+ N_STATEMENT(440);
+ N_STATEMENT(441);
+ N_STATEMENT(442);
+ N_STATEMENT(443);
+ N_STATEMENT(444);
+ N_STATEMENT(445);
+ N_STATEMENT(446);
+ N_STATEMENT(447);
+ N_STATEMENT(448);
+ N_STATEMENT(449);
+ N_STATEMENT(450);
+ N_STATEMENT(451);
+ N_STATEMENT(452);
+ N_STATEMENT(453);
+ N_STATEMENT(454);
+ N_STATEMENT(455);
+ N_STATEMENT(456);
+ N_STATEMENT(457);
+ N_STATEMENT(458);
+ N_STATEMENT(459);
+ N_STATEMENT(460);
+ N_STATEMENT(461);
+ N_STATEMENT(462);
+ N_STATEMENT(463);
+ N_STATEMENT(464);
+ N_STATEMENT(465);
+ N_STATEMENT(466);
+ N_STATEMENT(467);
+ N_STATEMENT(468);
+ N_STATEMENT(469);
+ N_STATEMENT(470);
+ N_STATEMENT(471);
+ N_STATEMENT(472);
+ N_STATEMENT(473);
+ N_STATEMENT(474);
+ N_STATEMENT(475);
+ N_STATEMENT(476);
+ N_STATEMENT(477);
+ N_STATEMENT(478);
+ N_STATEMENT(479);
+ N_STATEMENT(480);
+ N_STATEMENT(481);
+ N_STATEMENT(482);
+ N_STATEMENT(483);
+ N_STATEMENT(484);
+ N_STATEMENT(485);
+ N_STATEMENT(486);
+ N_STATEMENT(487);
+ N_STATEMENT(488);
+ N_STATEMENT(489);
+ N_STATEMENT(490);
+ N_STATEMENT(491);
+ N_STATEMENT(492);
+ N_STATEMENT(493);
+ N_STATEMENT(494);
+ N_STATEMENT(495);
+ N_STATEMENT(496);
+ N_STATEMENT(497);
+ N_STATEMENT(498);
+ N_STATEMENT(499);
+ N_STATEMENT(500);
+ N_STATEMENT(501);
+ N_STATEMENT(502);
+ N_STATEMENT(503);
+ N_STATEMENT(504);
+ N_STATEMENT(505);
+ N_STATEMENT(506);
+ N_STATEMENT(507);
+ N_STATEMENT(508);
+ N_STATEMENT(509);
+ N_STATEMENT(510);
+ N_STATEMENT(511);
+ N_STATEMENT(512);
+ N_STATEMENT(513);
+ N_STATEMENT(514);
+ N_STATEMENT(515);
+ N_STATEMENT(516);
+ N_STATEMENT(517);
+ N_STATEMENT(518);
+ N_STATEMENT(519);
+ N_STATEMENT(520);
+ N_STATEMENT(521);
+ N_STATEMENT(522);
+ N_STATEMENT(523);
+ N_STATEMENT(524);
+ N_STATEMENT(525);
+ N_STATEMENT(526);
+ N_STATEMENT(527);
+ N_STATEMENT(528);
+ N_STATEMENT(529);
+ N_STATEMENT(530);
+ N_STATEMENT(531);
+ N_STATEMENT(532);
+ N_STATEMENT(533);
+ N_STATEMENT(534);
+ N_STATEMENT(535);
+ N_STATEMENT(536);
+ N_STATEMENT(537);
+ N_STATEMENT(538);
+ N_STATEMENT(539);
+ N_STATEMENT(540);
+ N_STATEMENT(541);
+ N_STATEMENT(542);
+ N_STATEMENT(543);
+ N_STATEMENT(544);
+ N_STATEMENT(545);
+ N_STATEMENT(546);
+ N_STATEMENT(547);
+ N_STATEMENT(548);
+ N_STATEMENT(549);
+ N_STATEMENT(550);
+ N_STATEMENT(551);
+ N_STATEMENT(552);
+ N_STATEMENT(553);
+ N_STATEMENT(554);
+ N_STATEMENT(555);
+ N_STATEMENT(556);
+ N_STATEMENT(557);
+ N_STATEMENT(558);
+ N_STATEMENT(559);
+ N_STATEMENT(560);
+ N_STATEMENT(561);
+ N_STATEMENT(562);
+ N_STATEMENT(563);
+ N_STATEMENT(564);
+ N_STATEMENT(565);
+ N_STATEMENT(566);
+ N_STATEMENT(567);
+ N_STATEMENT(568);
+ N_STATEMENT(569);
+ N_STATEMENT(570);
+ N_STATEMENT(571);
+ N_STATEMENT(572);
+ N_STATEMENT(573);
+ N_STATEMENT(574);
+ N_STATEMENT(575);
+ N_STATEMENT(576);
+ N_STATEMENT(577);
+ N_STATEMENT(578);
+ N_STATEMENT(579);
+ N_STATEMENT(580);
+ N_STATEMENT(581);
+ N_STATEMENT(582);
+ N_STATEMENT(583);
+ N_STATEMENT(584);
+ N_STATEMENT(585);
+ N_STATEMENT(586);
+ N_STATEMENT(587);
+ N_STATEMENT(588);
+ N_STATEMENT(589);
+ N_STATEMENT(590);
+ N_STATEMENT(591);
+ N_STATEMENT(592);
+ N_STATEMENT(593);
+ N_STATEMENT(594);
+ N_STATEMENT(595);
+ N_STATEMENT(596);
+ N_STATEMENT(597);
+ N_STATEMENT(598);
+ N_STATEMENT(599);
+ N_STATEMENT(600);
+ N_STATEMENT(601);
+ N_STATEMENT(602);
+ N_STATEMENT(603);
+ N_STATEMENT(604);
+ N_STATEMENT(605);
+ N_STATEMENT(606);
+ N_STATEMENT(607);
+ N_STATEMENT(608);
+ N_STATEMENT(609);
+ N_STATEMENT(610);
+ N_STATEMENT(611);
+ N_STATEMENT(612);
+ N_STATEMENT(613);
+ N_STATEMENT(614);
+ N_STATEMENT(615);
+ N_STATEMENT(616);
+ N_STATEMENT(617);
+ N_STATEMENT(618);
+ N_STATEMENT(619);
+ N_STATEMENT(620);
+ N_STATEMENT(621);
+ N_STATEMENT(622);
+ N_STATEMENT(623);
+ N_STATEMENT(624);
+ N_STATEMENT(625);
+ N_STATEMENT(626);
+ N_STATEMENT(627);
+ N_STATEMENT(628);
+ N_STATEMENT(629);
+ N_STATEMENT(630);
+ N_STATEMENT(631);
+ N_STATEMENT(632);
+ N_STATEMENT(633);
+ N_STATEMENT(634);
+ N_STATEMENT(635);
+ N_STATEMENT(636);
+ N_STATEMENT(637);
+ N_STATEMENT(638);
+ N_STATEMENT(639);
+ N_STATEMENT(640);
+ N_STATEMENT(641);
+ N_STATEMENT(642);
+ N_STATEMENT(643);
+ N_STATEMENT(644);
+ N_STATEMENT(645);
+ N_STATEMENT(646);
+ N_STATEMENT(647);
+ N_STATEMENT(648);
+ N_STATEMENT(649);
+ N_STATEMENT(650);
+ N_STATEMENT(651);
+ N_STATEMENT(652);
+ N_STATEMENT(653);
+ N_STATEMENT(654);
+ N_STATEMENT(655);
+ N_STATEMENT(656);
+ N_STATEMENT(657);
+ N_STATEMENT(658);
+ N_STATEMENT(659);
+ N_STATEMENT(660);
+ N_STATEMENT(661);
+ N_STATEMENT(662);
+ N_STATEMENT(663);
+ N_STATEMENT(664);
+ N_STATEMENT(665);
+ N_STATEMENT(666);
+ N_STATEMENT(667);
+ N_STATEMENT(668);
+ N_STATEMENT(669);
+ N_STATEMENT(670);
+ N_STATEMENT(671);
+ N_STATEMENT(672);
+ N_STATEMENT(673);
+ N_STATEMENT(674);
+ N_STATEMENT(675);
+ N_STATEMENT(676);
+ N_STATEMENT(677);
+ N_STATEMENT(678);
+ N_STATEMENT(679);
+ N_STATEMENT(680);
+ N_STATEMENT(681);
+ N_STATEMENT(682);
+ N_STATEMENT(683);
+ N_STATEMENT(684);
+ N_STATEMENT(685);
+ N_STATEMENT(686);
+ N_STATEMENT(687);
+ N_STATEMENT(688);
+ N_STATEMENT(689);
+ N_STATEMENT(690);
+ N_STATEMENT(691);
+ N_STATEMENT(692);
+ N_STATEMENT(693);
+ N_STATEMENT(694);
+ N_STATEMENT(695);
+ N_STATEMENT(696);
+ N_STATEMENT(697);
+ N_STATEMENT(698);
+ N_STATEMENT(699);
+ N_STATEMENT(700);
+ N_STATEMENT(701);
+ N_STATEMENT(702);
+ N_STATEMENT(703);
+ N_STATEMENT(704);
+ N_STATEMENT(705);
+ N_STATEMENT(706);
+ N_STATEMENT(707);
+ N_STATEMENT(708);
+ N_STATEMENT(709);
+ N_STATEMENT(710);
+ N_STATEMENT(711);
+ N_STATEMENT(712);
+ N_STATEMENT(713);
+ N_STATEMENT(714);
+ N_STATEMENT(715);
+ N_STATEMENT(716);
+ N_STATEMENT(717);
+ N_STATEMENT(718);
+ N_STATEMENT(719);
+ N_STATEMENT(720);
+ N_STATEMENT(721);
+ N_STATEMENT(722);
+ N_STATEMENT(723);
+ N_STATEMENT(724);
+ N_STATEMENT(725);
+ N_STATEMENT(726);
+ N_STATEMENT(727);
+ N_STATEMENT(728);
+ N_STATEMENT(729);
+ N_STATEMENT(730);
+ N_STATEMENT(731);
+ N_STATEMENT(732);
+ N_STATEMENT(733);
+ N_STATEMENT(734);
+ N_STATEMENT(735);
+ N_STATEMENT(736);
+ N_STATEMENT(737);
+ N_STATEMENT(738);
+ N_STATEMENT(739);
+ N_STATEMENT(740);
+ N_STATEMENT(741);
+ N_STATEMENT(742);
+ N_STATEMENT(743);
+ N_STATEMENT(744);
+ N_STATEMENT(745);
+ N_STATEMENT(746);
+ N_STATEMENT(747);
+ N_STATEMENT(748);
+ N_STATEMENT(749);
+ N_STATEMENT(750);
+ N_STATEMENT(751);
+ N_STATEMENT(752);
+ N_STATEMENT(753);
+ N_STATEMENT(754);
+ N_STATEMENT(755);
+ N_STATEMENT(756);
+ N_STATEMENT(757);
+ N_STATEMENT(758);
+ N_STATEMENT(759);
+ N_STATEMENT(760);
+ N_STATEMENT(761);
+ N_STATEMENT(762);
+ N_STATEMENT(763);
+ N_STATEMENT(764);
+ N_STATEMENT(765);
+ N_STATEMENT(766);
+ N_STATEMENT(767);
+ N_STATEMENT(768);
+ N_STATEMENT(769);
+ N_STATEMENT(770);
+ N_STATEMENT(771);
+ N_STATEMENT(772);
+ N_STATEMENT(773);
+ N_STATEMENT(774);
+ N_STATEMENT(775);
+ N_STATEMENT(776);
+ N_STATEMENT(777);
+ N_STATEMENT(778);
+ N_STATEMENT(779);
+ N_STATEMENT(780);
+ N_STATEMENT(781);
+ N_STATEMENT(782);
+ N_STATEMENT(783);
+ N_STATEMENT(784);
+ N_STATEMENT(785);
+ N_STATEMENT(786);
+ N_STATEMENT(787);
+ N_STATEMENT(788);
+ N_STATEMENT(789);
+ N_STATEMENT(790);
+ N_STATEMENT(791);
+ N_STATEMENT(792);
+ N_STATEMENT(793);
+ N_STATEMENT(794);
+ N_STATEMENT(795);
+ N_STATEMENT(796);
+ N_STATEMENT(797);
+ N_STATEMENT(798);
+ N_STATEMENT(799);
+ N_STATEMENT(800);
+ N_STATEMENT(801);
+ N_STATEMENT(802);
+ N_STATEMENT(803);
+ N_STATEMENT(804);
+ N_STATEMENT(805);
+ N_STATEMENT(806);
+ N_STATEMENT(807);
+ N_STATEMENT(808);
+ N_STATEMENT(809);
+ N_STATEMENT(810);
+ N_STATEMENT(811);
+ N_STATEMENT(812);
+ N_STATEMENT(813);
+ N_STATEMENT(814);
+ N_STATEMENT(815);
+ N_STATEMENT(816);
+ N_STATEMENT(817);
+ N_STATEMENT(818);
+ N_STATEMENT(819);
+ N_STATEMENT(820);
+ N_STATEMENT(821);
+ N_STATEMENT(822);
+ N_STATEMENT(823);
+ N_STATEMENT(824);
+ N_STATEMENT(825);
+ N_STATEMENT(826);
+ N_STATEMENT(827);
+ N_STATEMENT(828);
+ N_STATEMENT(829);
+ N_STATEMENT(830);
+ N_STATEMENT(831);
+ N_STATEMENT(832);
+ N_STATEMENT(833);
+ N_STATEMENT(834);
+ N_STATEMENT(835);
+ N_STATEMENT(836);
+ N_STATEMENT(837);
+ N_STATEMENT(838);
+ N_STATEMENT(839);
+ N_STATEMENT(840);
+ N_STATEMENT(841);
+ N_STATEMENT(842);
+ N_STATEMENT(843);
+ N_STATEMENT(844);
+ N_STATEMENT(845);
+ N_STATEMENT(846);
+ N_STATEMENT(847);
+ N_STATEMENT(848);
+ N_STATEMENT(849);
+ N_STATEMENT(850);
+ N_STATEMENT(851);
+ N_STATEMENT(852);
+ N_STATEMENT(853);
+ N_STATEMENT(854);
+ N_STATEMENT(855);
+ N_STATEMENT(856);
+ N_STATEMENT(857);
+ N_STATEMENT(858);
+ N_STATEMENT(859);
+ N_STATEMENT(860);
+ N_STATEMENT(861);
+ N_STATEMENT(862);
+ N_STATEMENT(863);
+ N_STATEMENT(864);
+ N_STATEMENT(865);
+ N_STATEMENT(866);
+ N_STATEMENT(867);
+ N_STATEMENT(868);
+ N_STATEMENT(869);
+ N_STATEMENT(870);
+ N_STATEMENT(871);
+ N_STATEMENT(872);
+ N_STATEMENT(873);
+ N_STATEMENT(874);
+ N_STATEMENT(875);
+ N_STATEMENT(876);
+ N_STATEMENT(877);
+ N_STATEMENT(878);
+ N_STATEMENT(879);
+ N_STATEMENT(880);
+ N_STATEMENT(881);
+ N_STATEMENT(882);
+ N_STATEMENT(883);
+ N_STATEMENT(884);
+ N_STATEMENT(885);
+ N_STATEMENT(886);
+ N_STATEMENT(887);
+ N_STATEMENT(888);
+ N_STATEMENT(889);
+ N_STATEMENT(890);
+ N_STATEMENT(891);
+ N_STATEMENT(892);
+ N_STATEMENT(893);
+ N_STATEMENT(894);
+ N_STATEMENT(895);
+ N_STATEMENT(896);
+ N_STATEMENT(897);
+ N_STATEMENT(898);
+ N_STATEMENT(899);
+ N_STATEMENT(900);
+ N_STATEMENT(901);
+ N_STATEMENT(902);
+ N_STATEMENT(903);
+ N_STATEMENT(904);
+ N_STATEMENT(905);
+ N_STATEMENT(906);
+ N_STATEMENT(907);
+ N_STATEMENT(908);
+ N_STATEMENT(909);
+ N_STATEMENT(910);
+ N_STATEMENT(911);
+ N_STATEMENT(912);
+ N_STATEMENT(913);
+ N_STATEMENT(914);
+ N_STATEMENT(915);
+ N_STATEMENT(916);
+ N_STATEMENT(917);
+ N_STATEMENT(918);
+ N_STATEMENT(919);
+ N_STATEMENT(920);
+ N_STATEMENT(921);
+ N_STATEMENT(922);
+ N_STATEMENT(923);
+ N_STATEMENT(924);
+ N_STATEMENT(925);
+ N_STATEMENT(926);
+ N_STATEMENT(927);
+ N_STATEMENT(928);
+ N_STATEMENT(929);
+ N_STATEMENT(930);
+ N_STATEMENT(931);
+ N_STATEMENT(932);
+ N_STATEMENT(933);
+ N_STATEMENT(934);
+ N_STATEMENT(935);
+ N_STATEMENT(936);
+ N_STATEMENT(937);
+ N_STATEMENT(938);
+ N_STATEMENT(939);
+ N_STATEMENT(940);
+ N_STATEMENT(941);
+ N_STATEMENT(942);
+ N_STATEMENT(943);
+ N_STATEMENT(944);
+ N_STATEMENT(945);
+ N_STATEMENT(946);
+ N_STATEMENT(947);
+ N_STATEMENT(948);
+ N_STATEMENT(949);
+ N_STATEMENT(950);
+ }
+ return am_error; /* NOTREACHED, shut up the compiler */
+}
+
+#endif /* HAVE_USE_DTRACE */
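The long run of N_STATEMENT(1) ... N_STATEMENT(950) added above is macro-generated dispatch; the macro itself is not visible in this hunk, so the following is only a minimal sketch of the pattern, assuming N_STATEMENT(n) expands to a switch arm that fires probe number n. Both N_STATEMENT and fire_probe here are hypothetical stand-ins, not the emulator's definitions.

/* Sketch only: fire_probe and this N_STATEMENT are assumptions. */
#include <stdio.h>

#define fire_probe(n)   printf("user probe %d\n", (n))
#define N_STATEMENT(n)  case n: fire_probe(n); break

static void dispatch(int probe_id)
{
    switch (probe_id) {
        N_STATEMENT(1);
        N_STATEMENT(2);
        N_STATEMENT(3);
        /* ... generated up to N_STATEMENT(950) in the real file ... */
    default:
        break;
    }
}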
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index e5d99dc4f1..93e56332e1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -187,11 +187,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#else
# define ERL_NIF_INIT_GLOB
# define ERL_NIF_INIT_BODY
-# if defined(VXWORKS)
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void)
-# else
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
-# endif
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
#endif
@@ -226,6 +222,15 @@ ERL_NIF_INIT_DECL(NAME) \
} \
ERL_NIF_INIT_EPILOGUE
+#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
+#define HAVE_USE_DTRACE 1
+#endif
+
+#ifdef HAVE_USE_DTRACE
+ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+#endif
#endif /* __ERL_NIF_H__ */
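The new HAVE_USE_DTRACE guard above only exposes the user-trace prototypes when dynamic tracing was configured together with a DTrace or SystemTap backend. A minimal sketch of the same compile-time pattern follows; my_probe_hook is a made-up name, not part of the API.

/* Illustration of the guard pattern only. */
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif

#ifdef HAVE_USE_DTRACE
void my_probe_hook(const char *msg);       /* real declaration when tracing is built in */
#else
#define my_probe_hook(msg) ((void)0)       /* compiles to nothing otherwise */
#endif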
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 6396af09d0..51ff1eaa48 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -138,6 +138,8 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list));
ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
/*
** Add new entries here to keep compatibility on Windows!!!
@@ -260,6 +262,8 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
+# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
+# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
/*
** Add new entries here
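The two entries added to the NIF API table above give native libraries a wrapped dlopen/dlsym with an error callback and an opaque argument. A hedged usage sketch, using only the signatures declared in this hunk; the library path, symbol name and helper names are made-up examples:

#include <stdio.h>
#include "erl_nif.h"

/* err_handler receives the err_arg passed to the call plus an error string. */
static void dl_err(void *tag, const char *reason)
{
    fprintf(stderr, "dl error (%s): %s\n", (const char *)tag, reason);
}

static int load_helper(void)
{
    void (*helper_init)(void);
    void *handle = enif_dlopen("libhelper.so", dl_err, "dlopen");

    if (handle == NULL)
        return -1;
    *(void **)&helper_init = enif_dlsym(handle, "helper_init", dl_err, "dlsym");
    if (helper_init == NULL)
        return -1;
    helper_init();
    return 0;
}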
diff --git a/erts/emulator/beam/erl_nmgc.c b/erts/emulator/beam/erl_nmgc.c
deleted file mode 100644
index 2a8c819360..0000000000
--- a/erts/emulator/beam/erl_nmgc.c
+++ /dev/null
@@ -1,1401 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include "global.h"
-#include "erl_gc.h"
-#include "erl_binary.h"
-#include "erl_nmgc.h"
-#include "erl_debug.h"
-#if HIPE
-#include "hipe_stack.h"
-#endif
-
-
-#ifdef INCREMENTAL
-/***************************************************************************
- * *
- * Incremental Garbage Collector for the Message Area *
- * *
- ***************************************************************************/
-
-/*
- * The heap pointers are declared in erl_init.c
- * global_heap is the nursery
- * global_old_heap is the old generation
- */
-unsigned char *blackmap = NULL;
-INC_Page *inc_used_mem = NULL;
-INC_MemBlock *inc_free_list = NULL;
-Eterm *inc_fromspc;
-Eterm *inc_fromend;
-Eterm *inc_nursery_scn_ptr;
-Eterm **fwdptrs;
-Eterm *inc_alloc_limit;
-Process *inc_active_proc;
-Process *inc_active_last;
-int inc_words_to_go;
-
-static Eterm *inc_last_nursery;
-static int inc_pages = INC_NoPAGES;
-static INC_Page *inc_bibop = NULL;
-static int inc_used_pages;
-
-/* Used when growing the old generation */
-/*
-#define INC_ROOTSAVE 16384
-static Eterm *root_save[INC_ROOTSAVE];
-static int roots_saved = 0;
-*/
-
-INC_STORAGE_DECLARATION(,gray);
-
-static void inc_minor_gc(Process *p, int need, Eterm* objv, int nobj);
-static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj);
-
-#ifdef INC_TIME_BASED
-#if USE_PERFCTR
-
-/*
- * This uses the Linux perfctr extension to virtualise the
- * time-stamp counter.
- */
-#include "libperfctr.h"
-static struct vperfctr *vperfctr;
-static double cpu_khz;
-static double tsc_to_cpu_mult;
-
-static void inc_start_hrvtime(void)
-{
- struct perfctr_info info;
- struct vperfctr_control control;
-
- if( vperfctr != NULL )
- return;
- vperfctr = vperfctr_open();
- if( vperfctr == NULL )
- return;
- if( vperfctr_info(vperfctr, &info) >= 0 ) {
- cpu_khz = (double)info.cpu_khz;
- tsc_to_cpu_mult = (double)(info.tsc_to_cpu_mult ? : 1);
- if( info.cpu_features & PERFCTR_FEATURE_RDTSC ) {
- memset(&control, 0, sizeof control);
- control.cpu_control.tsc_on = 1;
- if( vperfctr_control(vperfctr, &control) >= 0 )
- return;
- }
- }
- vperfctr_close(vperfctr);
- vperfctr = NULL;
-}
-
-#define inc_get_hrvtime() (((double)vperfctr_read_tsc(vperfctr) * tsc_to_cpu_mult) / cpu_khz)
-
-#endif /* USE_PERFCTR */
-#endif /* INC_TIME_BASED */
-
-#ifdef INC_TIME_BASED
-# define timeslice 1 /* milliseconds */
-# define WORK_MORE (inc_get_hrvtime() < start_time + timeslice)
-#else
-//# define inc_min_work 100 /* words */
-# define inc_min_work global_heap_sz + inc_pages * INC_FULLPAGE /* words */
-# define WORK_MORE (inc_words_to_go > 0)
-#endif
-
-void erts_init_incgc(void)
-{
- int i;
- int size = inc_pages * INC_FULLPAGE;
-
- /* Young generation */
- global_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
- sizeof(Eterm) * global_heap_sz);
- global_hend = global_heap + global_heap_sz;
- global_htop = global_heap;
- inc_alloc_limit = global_hend;
-
- /* Fromspace */
- inc_last_nursery = (Eterm *) erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
- global_heap_sz * sizeof(Eterm));
- inc_fromspc = inc_fromend = NULL;
-
- /* Forward-pointers */
- fwdptrs = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
- global_heap_sz * sizeof(Eterm*));
- /* Old generation */
- global_old_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
- size * sizeof(Eterm));
- global_old_hend = global_old_heap + size;
-
-    /* Pages in BiBOP */
- for (i = 0; i < inc_pages; i++)
- {
- INC_Page *this = (INC_Page*)(global_old_heap + i * INC_FULLPAGE);
- this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE);
- }
-
- inc_bibop = (INC_Page*)global_old_heap;
- ((INC_Page*)(global_old_heap + (inc_pages - 1) * INC_FULLPAGE))->next =
- NULL;
-
- inc_used_mem = inc_bibop;
- inc_bibop = inc_bibop->next;
- inc_used_mem->next = NULL;
- inc_used_pages = 1;
-
- /* Free-list */
- inc_free_list = (INC_MemBlock*)inc_used_mem->start;
- inc_free_list->size = INC_PAGESIZE;
- inc_free_list->prev = NULL;
- inc_free_list->next = NULL;
-
- /* Blackmap */
- blackmap = (unsigned char*)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
- INC_FULLPAGE * inc_pages);
- /* Gray stack */
- INC_STORAGE_INIT(gray);
-
- inc_active_proc = NULL;
- inc_active_last = NULL;
-
-#ifdef INC_TIME_BASED
- inc_start_hrvtime();
-#endif
-}
-
-void erts_cleanup_incgc(void)
-{
- INC_STORAGE_ERASE(gray);
-
- if (inc_fromspc)
- inc_last_nursery = inc_fromspc;
-
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_heap);
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)inc_last_nursery);
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap);
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)blackmap);
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)fwdptrs);
-}
-
-void erts_incremental_gc(Process* p, int need, Eterm* objv, int nobj)
-{
- int repeat_minor;
-#ifdef INC_TIME_BASED
- double start_time = inc_get_hrvtime();
- int work_left_before = inc_words_to_go;
-#endif
- /* Used when growing the fromspace */
- static char inc_growing_nurs = 0;
-
- BM_STOP_TIMER(system);
- //BM_MMU_READ();
- BM_RESET_TIMER(gc);
- BM_START_TIMER(gc);
-
- VERBOSE(DEBUG_HYBRID_GC,
- ("INCGC: Incremental GC START Caused by: %T Need: %d\n",
- p->id,need));
-
- ma_gc_flags |= GC_GLOBAL;
- ma_gc_flags &= ~GC_CYCLE_START;
-
-#ifndef INC_TIME_BASED
-    /* Decide how much work to do this GC stage. The work is measured
-     * in the number of words copied from the young generation to the old
-     * generation plus the number of words marked in the old generation.
-     */
- if (ma_gc_flags & GC_MAJOR) {
- int wm = (need > inc_min_work) ? need : inc_min_work;
- inc_words_to_go = (int)((wm * (((inc_used_pages * INC_PAGESIZE) /
- (double)global_heap_sz) + 1)) + 0.5);
- }
- else
- inc_words_to_go = (need > inc_min_work) ? need : inc_min_work;
-#endif
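To make the budget formula above concrete (illustrative numbers only, not taken from the emulator): with need = inc_min_work = 1000 words, inc_used_pages = 4, INC_PAGESIZE = 16384 and global_heap_sz = 32768, a major stage gets

    inc_words_to_go = (int)(1000 * ((4 * 16384 / 32768.0) + 1) + 0.5) = 3000 words

so the major-cycle budget grows with the ratio of old-generation pages to nursery size, while a minor stage simply uses max(need, inc_min_work).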
-
- do {
- if (ma_gc_flags & GC_MAJOR) {
- /* This is a major collection cycle. */
- inc_major_gc(p,need,objv,nobj);
- } else if (ma_gc_flags & GC_CYCLE) {
- /* This is a minor collection cycle. */
- inc_minor_gc(p,need,objv,nobj);
- } else {
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Collection cycle START\n"));
- ma_gc_flags |= (GC_CYCLE | GC_CYCLE_START);
- inc_fromspc = global_heap;
- inc_fromend = global_htop;
- global_heap = global_htop = inc_last_nursery;
- global_hend = global_heap + global_heap_sz;
- inc_nursery_scn_ptr = global_heap;
-#ifdef INC_TIME_BASED
- work_left_before = inc_words_to_go = global_heap_sz;
-#endif
-#ifdef DEBUG
- inc_last_nursery = NULL;
-#endif
- memset(fwdptrs,0,global_heap_sz * sizeof(Eterm));
-
- {
-            /* TODO: All processes should probably not be activated here... */
- int i;
- for (i = 0; i < erts_num_active_procs; i++) {
- Process *cp = erts_active_procs[i];
- INC_ACTIVATE(cp);
- cp->scan_top = cp->high_water;
- }
- }
-
- if (ma_gc_flags & GC_NEED_MAJOR) {
- /* The previous collection cycle caused the old generation to
- * overflow. This collection cycle will therefore be a major
- * one.
- */
- BM_COUNT(major_gc_cycles);
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: MAJOR cycle\n"));
- inc_major_gc(p,need,objv,nobj);
- } else {
- BM_COUNT(minor_gc_cycles);
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: MINOR cycle\n"));
- inc_minor_gc(p,need,objv,nobj);
- }
- }
-
- repeat_minor = 0;
- if (!(ma_gc_flags & GC_CYCLE)) {
- inc_alloc_limit = global_hend;
- inc_last_nursery = inc_fromspc;
- inc_fromspc = inc_fromend = NULL;
- ASSERT(INC_STORAGE_EMPTY(gray));
-
- if (inc_growing_nurs) {
- /*
- * The previous collection cycle caused the nursery to
- * grow, now we have to grow the from-space as well.
- */
- inc_last_nursery =
- (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
- (void*)inc_last_nursery,
- sizeof(Eterm) * global_heap_sz);
- inc_growing_nurs = 0;
- }
-
- if (global_hend - global_htop <= need) {
- /*
- * Initiate a new GC cycle immediately and, if necessary,
- * enlarge the nursery.
- */
- if (global_heap_sz <= need) {
- VERBOSE(DEBUG_HYBRID_GC,
- ("INCGC: Allocating a larger nursery\n"));
- global_heap_sz = erts_next_heap_size(need * 1.5,0);
- inc_last_nursery =
- (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
- (void*)inc_last_nursery,
- sizeof(Eterm) * global_heap_sz);
- fwdptrs = erts_realloc(ERTS_ALC_T_MESSAGE_AREA,fwdptrs,
- global_heap_sz * sizeof(Eterm*));
- inc_growing_nurs = 1;
- }
- repeat_minor = 1;
- }
-
-#ifdef DEBUG
- /* Fill the from-space with bad things */
- memset(inc_last_nursery,DEBUG_BAD_BYTE,
- global_heap_sz * sizeof(Eterm));
-#endif
- }
- } while (repeat_minor);
-
-
- /* Clean up after garbage collection ********************************/
-
- if (inc_alloc_limit != global_hend) {
-
-#ifdef INC_TIME_BASED
- if ((work_left_before - inc_words_to_go) == 0) {
- inc_alloc_limit = global_htop + need;
- } else {
- inc_alloc_limit = (global_hend - global_htop) /
- (inc_words_to_go / (work_left_before - inc_words_to_go)) +
- global_htop;
- if (inc_alloc_limit > global_hend)
- inc_alloc_limit = global_hend;
- }
-#else
- inc_alloc_limit = (Eterm*)(global_htop + (need > inc_min_work) ?
- need : inc_min_work);
- if (inc_alloc_limit > global_hend)
- inc_alloc_limit = global_hend;
-#endif
- }
-
- ma_gc_flags &= ~GC_GLOBAL;
-
- /* INC_TIME_BASED: If this fails we have to increase the timeslice! */
- ASSERT(inc_alloc_limit - global_htop > need);
-
- BM_STOP_TIMER(gc);
-#ifdef BM_TIMERS
- minor_global_gc_time += gc_time;
- if (gc_time > max_global_minor_time)
- max_global_minor_time = gc_time;
-
- pause_times[(((gc_time * 1000) < MAX_PAUSE_TIME) ?
- (int)(gc_time * 1000) :
- MAX_PAUSE_TIME - 1)]++;
-#endif
- //BM_MMU_INIT();
- { static long long verif = 0;
-      //erts_printf("before verify: %d\n",++verif);
- if (verif==168) print_memory(NULL);
- verify_everything();
-      //erts_printf("after verify: %d\n",verif);
- }
- BM_START_TIMER(system);
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Incremental GC END\n"));
-}
-
-
-/***************************************************************************
- * *
- * Minor collection - Copy live data from young generation to old *
- * *
- ***************************************************************************/
-
-#define MINOR_SCAN(PTR,END) do { \
- ASSERT(PTR <= END); \
- while (WORK_MORE && PTR < END) { \
- Eterm val = *PTR; \
- Eterm *obj_ptr = ptr_val(val); \
- switch (primary_tag(val)) { \
- case TAG_PRIMARY_LIST: \
- if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \
- if (INC_IS_FORWARDED(obj_ptr)) { \
- *PTR = make_list(INC_FORWARD_VALUE(obj_ptr)); \
- } \
- else { \
- Eterm *hp = erts_inc_alloc(2); \
- INC_STORE(gray,hp,2); \
- INC_COPY_CONS(obj_ptr,hp,PTR); \
- } \
- } \
- break; \
- case TAG_PRIMARY_BOXED: \
- if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \
- if (INC_IS_FORWARDED(obj_ptr)) { \
- *PTR = make_boxed(INC_FORWARD_VALUE(obj_ptr)); \
- } \
- else { \
- Eterm *hp = erts_inc_alloc(BOXED_NEED(obj_ptr,*obj_ptr)); \
- INC_STORE(gray,hp,BOXED_NEED(obj_ptr,*obj_ptr)); \
- INC_COPY_BOXED(obj_ptr,hp,PTR); \
- } \
- } \
- break; \
- case TAG_PRIMARY_HEADER: \
- switch (val & _TAG_HEADER_MASK) { \
- case ARITYVAL_SUBTAG: break; \
- default: PTR += thing_arityval(val); break; \
- } \
- break; \
- } \
- PTR++; \
- } \
-} while(0)
-
-
-/* Returns: TRUE (1) if the need is greater than the available space
- * and the garbage collector needs to be restarted immediately. FALSE
- * (0) otherwise.
- */
-static void inc_minor_gc(Process* p, int need, Eterm* objv, int nobj)
-{
- BM_COUNT(minor_gc_stages);
-
- /* Start with looking at gray objects found in earlier collection
- * stages.
- */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
- {
- INC_Object *obj = NULL;
- Eterm *ptr;
-
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- MINOR_SCAN(ptr,obj->this + obj->size);
- }
-        /* TODO: See the previous update of gray objects */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan root-set\n"));
- while (WORK_MORE && inc_active_proc) {
- Rootset rootset;
- Process *cp = inc_active_proc;
-
- ASSERT(INC_IS_ACTIVE(cp));
-
-        /* TODO: How expensive is it to build a new rootset every time? */
-
-        /* TODO: Think about the ordering! Rootset, Heap, Old heap... */
-
-        /* TODO: Scan the stack from p->send to p->stop! [Brooks84] */
-        /* Note: We do not GC the youngest objects - those allocated
-           during the GC cycle. This gives them a chance to die before
-           the GC starts copying them. [StefanovicMcKinleyMoss@OOPSLA99] */
-
-        /* TODO: Once the rootset has been scanned the process should not
-           be active any more. It should be activated in schedule; only if
-           a process has run do we need to scan its rootset again. */
-
- /* MT: In a multithreaded system the process cp needs to be
- * locked here.
- */
-
- if (cp == p)
- rootset.n = setup_rootset(cp, objv, nobj, &rootset);
- else
- rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset);
-
- //MA_GENSWEEP_NSTACK(cp, old_htop, n_htop, objv, nobj);
-
- while (WORK_MORE && rootset.n--) {
- Eterm *g_ptr = rootset.v[rootset.n];
- Uint g_sz = rootset.sz[rootset.n];
-
- while (WORK_MORE && g_sz--) {
- Eterm gval = *g_ptr;
- switch (primary_tag(gval)) {
- case TAG_PRIMARY_LIST: {
- Eterm *ptr = list_val(gval);
- if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
- if (INC_IS_FORWARDED(ptr)) {
- *g_ptr++ = make_list(INC_FORWARD_VALUE(ptr));
- }
- else {
- Eterm *hp = erts_inc_alloc(2);
- INC_STORE(gray,hp,2);
- INC_COPY_CONS(ptr,hp,g_ptr++);
- }
- }
- else
- ++g_ptr;
- continue;
- }
-
- case TAG_PRIMARY_BOXED: {
- Eterm *ptr = boxed_val(gval);
- if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
- if (INC_IS_FORWARDED(ptr)) {
- *g_ptr++ = make_boxed(INC_FORWARD_VALUE(ptr));
- }
- else {
- Eterm *hp = erts_inc_alloc(BOXED_NEED(ptr,*ptr));
- INC_STORE(gray,hp,BOXED_NEED(ptr,*ptr));
- INC_COPY_BOXED(ptr,hp,g_ptr++);
- }
- }
- else
- ++g_ptr;
- continue;
- }
-
- default:
- g_ptr++;
- continue;
- }
- }
- }
-
- restore_one_rootset(cp, &rootset);
-
- /* MT: cp can be unlocked now. */
-
- /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan private nursery\n")); */
- if (cp->scan_top != HEAP_TOP(cp)) {
- Eterm *ptr = cp->scan_top;
- MINOR_SCAN(ptr,HEAP_TOP(cp));
-            /* TODO: To save scan_top here, all message-area pointers that
-             * are found must be added to cp->rrma.
-             */
- //cp->scan_top = ptr;
- }
-
- /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan heap fragments\n")); */
- {
- ErlHeapFragment* bp = MBUF(cp);
-
- while (WORK_MORE && bp) {
- Eterm *ptr = bp->mem;
- if ((ARITH_HEAP(cp) >= bp->mem) &&
- (ARITH_HEAP(cp) < bp->mem + bp->size)) {
- MINOR_SCAN(ptr,ARITH_HEAP(cp));
- } else {
- MINOR_SCAN(ptr,bp->mem + bp->size);
- }
- bp = bp->next;
- }
- }
-
- /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan gray\n")); */
- {
- INC_Object *obj = NULL;
- Eterm *ptr;
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- MINOR_SCAN(ptr,obj->this + obj->size);
- }
-        /* TODO: INC_STORE(gray,ptr,obj->size-(ptr-obj->this)); Something like that.. */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
- if (WORK_MORE) {
- //printf("Rootset after:\r\n");
- //print_one_rootset(&rootset);
- INC_DEACTIVATE(cp);
- }
- }
-
- /* Update new pointers in the nursery to new copies in old generation. */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update nursery\n"));
- {
- Eterm *ptr = inc_nursery_scn_ptr;
- MINOR_SCAN(ptr,global_htop);
- inc_nursery_scn_ptr = ptr;
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
- {
- INC_Object *obj = NULL;
- Eterm *ptr;
-
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- MINOR_SCAN(ptr,obj->this + obj->size);
- }
-        /* TODO: See the previous update of gray objects */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n"));
- {
- Uint i;
- for (i = 0; i < ma_dst_top; i++) {
- if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) {
- if (INC_IS_FORWARDED(ma_dst_stack[i]))
- ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]);
- }
- }
- }
-
- if (WORK_MORE) {
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update offheap-lists\n"));
- {
- ExternalThing **prev = &erts_global_offheap.externals;
- ExternalThing *ptr = erts_global_offheap.externals;
-
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc externals\n"));
- while (ptr) {
- Eterm *ppt = (Eterm*) ptr;
-
- if (ptr_within(ppt,global_old_heap,global_old_hend)) {
- prev = &ptr->next;
- ptr = ptr->next;
- } else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
- INC_IS_FORWARDED(ppt)) {
- ExternalThing *ro = (ExternalThing*)INC_FORWARD_VALUE(ppt);
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else {
- erts_deref_node_entry(ptr->node);
- *prev = ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
- }
-
- {
- ProcBin **prev = &erts_global_offheap.mso;
- ProcBin *ptr = erts_global_offheap.mso;
-
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc bins\n"));
- while (ptr) {
- Eterm *ppt = (Eterm*)ptr;
-
- if (ptr_within(ppt,global_old_heap,global_old_hend)) {
- prev = &ptr->next;
- ptr = ptr->next;
- } else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
- INC_IS_FORWARDED(ppt)) {
- ProcBin *ro = (ProcBin*)INC_FORWARD_VALUE(ppt);
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else {
- Binary *bptr;
- *prev = ptr->next;
- bptr = ptr->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free(bptr);
- ptr = *prev;
- }
- }
- ASSERT(*prev == NULL);
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Minor collection cycle END\n"));
- ma_gc_flags &= ~GC_CYCLE;
- }
-}
-
-
-
-
-/***************************************************************************
- * *
- * Major collection - CopyMark - Copy young to old, Mark-Sweep old *
- * *
- ***************************************************************************/
-
-#define COPYMARK(PTR,END) do { \
- ASSERT(PTR <= END); \
- while (WORK_MORE && PTR < END) { \
- Eterm val = *PTR; \
- Eterm *obj_ptr = ptr_val(val); \
- switch (primary_tag(val)) { \
- case TAG_PRIMARY_LIST: \
- COPYMARK_CONS(obj_ptr,aging_htop,PTR,aging_end); break; \
- case TAG_PRIMARY_BOXED: \
- COPYMARK_BOXED(obj_ptr,aging_htop,PTR,aging_end); break; \
- case TAG_PRIMARY_HEADER: \
- switch (val & _TAG_HEADER_MASK) { \
- case ARITYVAL_SUBTAG: break; \
- default: \
- PTR += thing_arityval(val); \
- break; \
- } \
- break; \
- default: break; \
- } \
- PTR++; \
- } \
-} while(0);
-/* TODO:
- if (aging_htop + 10 > aging + INC_FULLPAGE) {
- aging->next = inc_used_mem;
- inc_used_mem = aging;
- }
-*/
-
-static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj)
-{
- Eterm *free_start = NULL;
- Uint live = 0;
- Uint old_gen_sz = 0;
- static INC_Page *aging;
- static Eterm *aging_htop;
- static Eterm *aging_end;
- BM_NEW_TIMER(old_gc);
-
- BM_SWAP_TIMER(gc,old_gc);
- BM_COUNT(major_gc_stages);
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection START\n"));
-
- ma_gc_flags |= GC_INCLUDE_ALL;
-
- if (ma_gc_flags & GC_NEED_MAJOR)
- {
- INC_Page *page = inc_used_mem;
-
- ma_gc_flags |= GC_MAJOR;
- ma_gc_flags &= ~GC_NEED_MAJOR;
-
- while (page)
- {
- memset(blackmap +
- ((void*)page - (void*)global_old_heap) / sizeof(void*),
- 0, INC_FULLPAGE);
- page = page->next;
- }
-
- if (inc_bibop) {
- aging = inc_bibop;
- inc_bibop = inc_bibop->next;
- aging->next = NULL;
- memset(blackmap +
- ((void*)aging - (void*)global_old_heap) / sizeof(void*),
- 1, INC_FULLPAGE);
- aging_htop = aging->start;
- aging_end = aging->start + INC_PAGESIZE;
- }
- else {
- /* There are no free pages.. Either fragmentation is a
- * problem or we are simply out of memory. Allocation in
- * the old generation will be done through the free-list
- * this GC cycle.
- */
- aging = NULL;
- aging_htop = aging_end = NULL;
- }
- }
-
- /* Start with looking at gray objects found in earlier collection
- * stages.
- */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray\n"));
- {
- INC_Object *obj = NULL;
-
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- Eterm *ptr;
-
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- COPYMARK(ptr,obj->this + obj->size);
- }
-        /* TODO: See the corresponding handling in the minor collection. */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark roots\n"));
- while (WORK_MORE && inc_active_proc)
- {
- /* For each process: Scan all areas containing pointers to the
-         * message area. When a process is done here, all its
-         * message pointers should point into the old generation.
- */
- Rootset rootset;
- Process *cp = inc_active_proc;
-
- ASSERT(INC_IS_ACTIVE(cp));
-
- /* MT: In a multithreaded system the process cp needs to be
- * locked here.
- */
- if (cp == p)
- rootset.n = setup_rootset(cp, objv, nobj, &rootset);
- else
- rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset);
-
- while (WORK_MORE && rootset.n--)
- {
- Eterm *ptr = rootset.v[rootset.n];
- Eterm *end = ptr + rootset.sz[rootset.n];
-
- while (WORK_MORE && ptr < end) {
- Eterm val = *ptr;
- Eterm *obj_ptr = ptr_val(val);
-
- switch (primary_tag(val)) {
- case TAG_PRIMARY_LIST:
- {
- COPYMARK_CONS(obj_ptr,aging_htop,ptr,aging_end);
- break;
- }
-
- case TAG_PRIMARY_BOXED:
- {
- COPYMARK_BOXED(obj_ptr,aging_htop,ptr,aging_end);
- break;
- }
- }
- ptr++;
- }
- }
-
-#ifdef HIPE
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Native stack scan: %T\n",cp->id));
- aging_htop = ma_fullsweep_nstack(cp,aging_htop,aging_end);
-#endif
- restore_one_rootset(cp, &rootset);
-
- /* MT: cp can be unlocked now. But beware!! The message queue
- * might be updated with new pointers to the fromspace while
- * we work below. The send operation can not assume that all
- * active processes will look through their message queue
- * before deactivating as is the case in non-MT incremental
- * collection.
- */
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark process heap\n"));
- {
- Eterm *ptr = cp->scan_top;
- COPYMARK(ptr,cp->htop);
- //cp->scan_top = ptr;
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark heap fragments\n"));
- {
- ErlHeapFragment* bp = MBUF(cp);
-
- while (WORK_MORE && bp) {
- Eterm *ptr = bp->mem;
- Eterm *end;
-
- if ((ARITH_HEAP(cp) >= bp->mem) &&
- (ARITH_HEAP(cp) < bp->mem + bp->size)) {
- end = ARITH_HEAP(cp);
- } else {
- end = bp->mem + bp->size;
- }
-
- COPYMARK(ptr,end);
- bp = bp->next;
- }
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray stack\n"));
- {
- INC_Object *obj = NULL;
-
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- Eterm *ptr;
-
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- COPYMARK(ptr,obj->this + obj->size);
- }
-        /* TODO: See the corresponding handling in the minor collection. */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
- if (WORK_MORE) {
- INC_DEACTIVATE(cp);
- }
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark nursery\n"));
- {
- Eterm *ptr = inc_nursery_scn_ptr;
- COPYMARK(ptr,global_htop);
- inc_nursery_scn_ptr = ptr;
- }
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray found in nursery\n"));
- {
- INC_Object *obj = NULL;
-
- while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
- Eterm *ptr;
-
- obj = INC_STORAGE_GET(gray);
- if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
- ptr = obj->this + thing_arityval(*obj->this) + 1;
- } else {
- ptr = obj->this;
- }
- COPYMARK(ptr,obj->this + obj->size);
- }
-        /* TODO: See the corresponding handling in the minor collection. */
- if (!WORK_MORE && obj != NULL)
- INC_STORE(gray,obj->this,obj->size);
- }
-
-
- /**********************************************************************/
- if (WORK_MORE) {
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep phase\n"));
-
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep externals in old generation\n"));
- {
- ExternalThing** prev = &erts_global_offheap.externals;
- ExternalThing* ptr = erts_global_offheap.externals;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if ((ptr_within(ppt, global_old_heap, global_old_hend) &&
- blackmap[ppt - global_old_heap] == 0) ||
- (ptr_within(ppt, inc_fromspc, inc_fromend) &&
- !INC_IS_FORWARDED(ppt)))
- {
- erts_deref_node_entry(ptr->node);
- *prev = ptr = ptr->next;
- } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) {
- ExternalThing* ro = (ExternalThing*)INC_FORWARD_VALUE(ppt);
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else {
- prev = &ptr->next;
- ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
- }
-
- /* Atomic phase */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep refc bins in old generation\n"));
- {
- ProcBin** prev = &erts_global_offheap.mso;
- ProcBin* ptr = erts_global_offheap.mso;
-
- while (ptr) {
- Eterm *ppt = (Eterm*)ptr;
-
- if ((ptr_within(ppt, global_old_heap, global_old_hend) &&
- blackmap[ppt - global_old_heap] == 0) ||
- (ptr_within(ppt, inc_fromspc, inc_fromend) &&
- !INC_IS_FORWARDED(ppt)))
- {
- Binary* bptr;
- *prev = ptr->next;
- bptr = ptr->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free(bptr);
- ptr = *prev;
- } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) {
- ProcBin* ro = (ProcBin*)INC_FORWARD_VALUE(ppt);
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else {
- prev = &ptr->next;
- ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
- }
-
- /* TODO: Currently atomic phase - Can not be later of course. */
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep old generation\n"));
- {
- INC_Page *page = inc_used_mem;
- INC_Page *prev = NULL;
- inc_free_list = NULL;
-
- while (page) {
- int scavenging = 0;
- int n = page->start - global_old_heap;
- int stop = n + INC_PAGESIZE;
-
- old_gen_sz += INC_PAGESIZE;
- while (n < stop) {
- if (blackmap[n] != 0) {
- if (scavenging) {
- Eterm *ptr = global_old_heap + n;
- scavenging = 0;
- if ((ptr - free_start) * sizeof(Eterm) >=
- sizeof(INC_MemBlock))
- {
- INC_MemBlock *new = (INC_MemBlock*)free_start;
- new->size = ptr - free_start;
- new->prev = NULL;
- new->next = inc_free_list;
- if (inc_free_list)
- inc_free_list->prev = new;
- inc_free_list = new;
- }
- }
- if (blackmap[n] == 255) {
- unsigned int size =
- *(unsigned int*)(((long)&blackmap[n]+4) & ~3);
- live += size;
- n += size;
- }
- else {
- live += blackmap[n];
- n += blackmap[n];
- }
- }
- else if (!scavenging) {
- free_start = global_old_heap + n;
- scavenging = 1;
- n++;
- }
- else {
- n++;
- }
- }
-
- if (scavenging) {
- if ((global_old_heap + n - free_start) * sizeof(Eterm) >
- sizeof(INC_MemBlock))
- {
- INC_MemBlock *new = (INC_MemBlock*)free_start;
- new->size = global_old_heap + n - free_start;
- new->prev = NULL;
- new->next = inc_free_list;
- if (inc_free_list)
- inc_free_list->prev = new;
- inc_free_list = new;
- }
- else if (free_start == page->start) {
- INC_Page *next = page->next;
-
- if (prev)
- prev->next = page->next;
- else
- inc_used_mem = page->next;
-
- page->next = inc_bibop;
- inc_bibop = page;
- inc_used_pages--;
- page = next;
- continue;
- }
- }
- prev = page;
- page = page->next;
- }
- }
- }
-
- ASSERT(inc_bibop);
- /*
- This code is not expected to work right now.
- if (!inc_bibop) {
- int i;
- int new_pages = inc_pages * 2;
- int size = sizeof(Eterm) * new_pages * INC_FULLPAGE;
- Eterm *new_heap = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,size);
- Eterm *new_hend = new_heap + size;
- Eterm *new_htop;
- Eterm *last_page_end;
- INC_Page *new_used_mem;
- INC_Page *page;
-
- erts_printf("The last page has been allocated..\n");
- erts_printf("We need to copy things!\n");
-
- / * Create new, bigger bag of pages * /
- for (i = 0; i < new_pages; i++)
- {
- INC_Page *this =
- (INC_Page*)(new_heap + i * INC_FULLPAGE);
- this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE);
- }
- inc_bibop = (INC_Page*)new_heap;
- ((INC_Page*)(new_heap + (new_pages - 1) *
- INC_FULLPAGE))->next = NULL;
-
- new_used_mem = inc_bibop;
- inc_bibop = inc_bibop->next;
- new_used_mem->next = NULL;
-
- / * Move stuff from old bag to new * /
- inc_free_list = NULL;
- new_htop = new_used_mem->start;
- last_page_end = new_htop + INC_PAGESIZE;
- page = inc_used_mem;
- while (page)
- {
- Eterm *ptr = page->start;
- Eterm *page_end = ptr + INC_PAGESIZE;
- int n = offsetof(INC_Page,start) / sizeof(void*) +
- ((Eterm*)page - global_old_heap);
- while (ptr < page_end)
- {
- if (blackmap[n] > 0)
- {
- if (last_page_end - new_htop < blackmap[n])
- {
- INC_Page *new_page = inc_bibop;
- inc_bibop = inc_bibop->next;
- new_page->next = new_used_mem;
- new_used_mem = new_page;
- new_htop = new_page->start;
- last_page_end = new_htop + INC_PAGESIZE;
- }
-
- memcpy(new_htop,ptr,blackmap[n] * sizeof(Eterm));
- for (i = 0; i < blackmap[n]; i++)
- {
- *ptr++ = (Eterm)new_htop++;
- }
- //new_htop += blackmap[n];
- //ptr += blackmap[n];
- / *
- if (blackmap[n] == 255) Do the right thing...
- * /
- n += blackmap[n];
- }
- else
- {
- n++; ptr++;
- }
- }
- page = page->next;
- }
-
- page = inc_used_mem;
- while (page)
- {
- Eterm *ptr = page->start;
- Eterm *page_end = ptr + INC_PAGESIZE;
-
- / * TODO: If inc_used_mem is sorted in address order, this
- * pass can be done at the same time as copying. * /
- while (ptr < page_end)
- {
- if (ptr_within(ptr_val(*ptr),global_old_heap,global_old_hend))
- {
- *ptr = *((Eterm*)ptr_val(*ptr));
- }
- ptr++;
- }
- page = page->next;
- }
-
- printf("Restore rootset after heap move. Roots: %d\r\n",roots_saved);
- while (roots_saved--)
- {
- Eterm *ptr = root_save[roots_saved];
- *ptr = *((Eterm*)ptr_val(*ptr));
- }
-
- erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap);
-
- global_old_heap = new_heap;
- global_old_hend = new_hend;
- inc_used_mem = new_used_mem;
- inc_pages = new_pages;
-
- if ((last_page_end - new_htop) * sizeof(Eterm) >=
- sizeof(INC_MemBlock))
- {
- inc_free_list = (INC_MemBlock*)(new_htop);
- inc_free_list->size = last_page_end - new_htop;
- inc_free_list->prev = NULL;
- inc_free_list->next = NULL;
- }
- }
- */
-
-    /* In which situations might we want to throw in an extra page.. ( < 25% left?)
- if ()
- {
- INC_Page *new_page = inc_bibop;
- INC_MemBlock *new_free =
- (INC_MemBlock*)new_page->start;
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Fetching new page\n"));
- inc_bibop = inc_bibop->next;
-
- new_page->next = inc_used_mem;
- if (inc_used_mem)
- inc_used_mem->prev = new_page;
- inc_used_mem = new_page;
-
-        // check this with the normal page size! old_gen_sz += INC_PAGESIZE;
- //BM_SWAP_TIMER(gc,misc1);
- memset(blackmap +
- ((void*)new_page - (void*)global_old_heap) / sizeof(void*),
- 0, INC_FULLPAGE);
- //BM_SWAP_TIMER(misc1,gc);
-
- new_free->prev = NULL;
- new_free->next = inc_free_list;
- new_free->size = INC_PAGESIZE;
- if (inc_free_list)
- inc_free_list->prev = new_free;
- inc_free_list = new_free;
- //printf("Snatched a new page @ 0x%08x\r\n",(int)new_page);
- //print_free_list();
- found = new_free;
- }
- */
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n"));
- {
- Uint i;
- for (i = 0; i < ma_dst_top; i++) {
- if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) {
- if (INC_IS_FORWARDED(ma_dst_stack[i]))
- ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]);
- }
- }
- }
-
- if (WORK_MORE)
- {
- int size_left = INC_PAGESIZE - (aging_htop - aging->start);
-
- if (size_left > sizeof(INC_MemBlock))
- {
- ((INC_MemBlock*)aging_htop)->size = size_left;
- ((INC_MemBlock*)aging_htop)->prev = NULL;
- ((INC_MemBlock*)aging_htop)->next = inc_free_list;
- if (inc_free_list)
- inc_free_list->prev = (INC_MemBlock*)aging_htop;
- inc_free_list = (INC_MemBlock*)aging_htop;
- }
- aging->next = inc_used_mem;
- inc_used_mem = aging;
- inc_used_pages++;
-
- ma_gc_flags &= ~GC_MAJOR;
- ma_gc_flags &= ~GC_CYCLE;
-
- VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection cycle END\n"));
- }
-
- ma_gc_flags &= ~GC_INCLUDE_ALL;
-
- BM_STOP_TIMER(old_gc);
-#ifdef BM_TIMER
- major_global_gc_time += old_gc_time;
- if (old_gc_time > max_global_major_time)
- max_global_major_time = old_gc_time;
-
- if ((old_gc_time * 1000) < MAX_PAUSE_TIME)
- pause_times_old[(int)(old_gc_time * 1000)]++;
- else
- pause_times_old[MAX_PAUSE_TIME - 1]++;
-#endif
- BM_START_TIMER(gc);
-}
-
-
-
-/***************************************************************************
- * *
- * Allocation in the old generation. Used in minor collection and when    *
- * copying the rest of a message after a GC. *
- * *
- ***************************************************************************/
-
-
-Eterm *erts_inc_alloc(int need)
-{
- INC_MemBlock *this = inc_free_list;
-
- ASSERT(need < INC_PAGESIZE);
- while (this && (this->size) < need)
- {
- this = this->next;
- }
-
- if (!this)
- {
- /* If a free block large enough is not found, a new page is
- * allocated. GC_NEED_MAJOR is set so that the next garbage
- * collection cycle will be a major one, that is, both
- * generations will be garbage collected.
- */
- INC_Page *new_page = inc_bibop;
- INC_MemBlock *new_free = (INC_MemBlock*)new_page->start;
-
- if (new_page)
- {
- VERBOSE(DEBUG_HYBRID_GC,
- ("INCGC: Allocation grabs a new page\n"));
- inc_bibop = inc_bibop->next;
- new_page->next = inc_used_mem;
- inc_used_mem = new_page;
- inc_used_pages++;
-
- new_free->prev = NULL;
- new_free->next = inc_free_list;
- new_free->size = INC_PAGESIZE;
- if (inc_free_list)
- inc_free_list->prev = new_free;
- inc_free_list = new_free;
-
- this = new_free;
- if (!(ma_gc_flags & GC_MAJOR))
- ma_gc_flags |= GC_NEED_MAJOR;
- }
- else
- {
- erl_exit(-1, "inc_alloc ran out of pages!\n");
- }
- }
-
- if (((this->size) - need) * sizeof(Eterm) >= sizeof(INC_MemBlock))
- {
- INC_MemBlock *rest = (INC_MemBlock*)((Eterm*)this + need);
-
- /* The order here IS important! */
- rest->next = this->next;
-
- if (rest->next)
- rest->next->prev = rest;
-
- rest->prev = this->prev;
-
- if (rest->prev)
- rest->prev->next = rest;
- else
- inc_free_list = rest;
-
- rest->size = this->size - need;
- }
- else
- {
- if (this->prev)
- this->prev->next = this->next;
- else
- inc_free_list = this->next;
-
- if (this->next)
- this->next->prev = this->prev;
- }
-
- if (ma_gc_flags & GC_MAJOR) {
- if (need > 254) {
- blackmap[(Eterm*)this - global_old_heap] = 255;
- *(int*)((UWord)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) =
- need;
- } else
- blackmap[(Eterm*)this - global_old_heap] = need;
- }
- return (Eterm*)this;
-}
-#endif /* INCREMENTAL */
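erts_inc_alloc above is a first-fit allocator over a doubly linked free list: it takes the first block large enough, splits off the remainder when that remainder can still hold an INC_MemBlock header, and otherwise unlinks the whole block. A simplified stand-alone sketch of that split logic follows; the types and names here are illustrative, not the emulator's.

#include <stddef.h>

typedef struct block {
    size_t size;                  /* size in words */
    struct block *prev, *next;
} Block;

static Block *free_list;

static void *ff_alloc(size_t need)
{
    Block *b = free_list;

    while (b != NULL && b->size < need)          /* first fit */
        b = b->next;
    if (b == NULL)
        return NULL;                             /* caller would grab a new page */

    if ((b->size - need) * sizeof(void *) >= sizeof(Block)) {
        /* Split: the tail of the block stays on the free list. */
        Block *rest = (Block *)((void **)b + need);
        rest->size = b->size - need;
        rest->next = b->next;
        rest->prev = b->prev;
        if (rest->next != NULL)
            rest->next->prev = rest;
        if (rest->prev != NULL)
            rest->prev->next = rest;
        else
            free_list = rest;
    } else {
        /* Too small to split: unlink the whole block. */
        if (b->prev != NULL)
            b->prev->next = b->next;
        else
            free_list = b->next;
        if (b->next != NULL)
            b->next->prev = b->prev;
    }
    return b;
}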
diff --git a/erts/emulator/beam/erl_nmgc.h b/erts/emulator/beam/erl_nmgc.h
deleted file mode 100644
index b207dd37fa..0000000000
--- a/erts/emulator/beam/erl_nmgc.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef __ERL_NMGC_H__
-#define __ERL_NMGC_H__
-
-#ifdef INCREMENTAL
-#include <stddef.h> /* offsetof() */
-#include "erl_process.h"
-
-#define INC_FULLPAGE (INC_PAGESIZE + offsetof(INC_Page,start) / sizeof(void*))
-
-#define BOXED_NEED(PTR,HDR) \
- (((HDR) & _HEADER_SUBTAG_MASK) == SUB_BINARY_SUBTAG ? \
- header_arity(HDR) + 2 : \
- ((HDR) & _HEADER_SUBTAG_MASK) == FUN_SUBTAG ? \
- header_arity(HDR) + ((ErlFunThing*)(PTR))->num_free + 2 : \
- header_arity(HDR) + 1)
-
-
-#define INC_DECREASE_WORK(n) inc_words_to_go -= (n);
-
-#define INC_COPY_CONS(FROM,TO,PTR) \
-do { \
- TO[0] = FROM[0]; \
- TO[1] = FROM[1]; \
- INC_MARK_FORWARD(FROM,TO); \
- *(PTR) = make_list(TO); \
- INC_DECREASE_WORK(2); \
- (TO) += 2; \
-} while(0)
-
-#define INC_COPY_BOXED(FROM,TO,PTR) \
-do { \
- Sint nelts; \
- Eterm hdr = *(FROM); \
- \
- ASSERT(is_header(hdr)); \
- INC_MARK_FORWARD(FROM,TO); \
- *(PTR) = make_boxed(TO); \
- *(TO)++ = *(FROM)++; \
- nelts = header_arity(hdr); \
- switch ((hdr) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(FROM-1))->num_free+1; break;\
- } \
- INC_DECREASE_WORK(nelts + 1); \
- while (nelts--) \
- *(TO)++ = *(FROM)++; \
-} while(0)
-
-
-/* Things copied to the old generation are not marked in the blackmap.
- * This is ok since the page they are copied to (aging) is not part of
- * the sweep.
- */
-#define COPYMARK_CONS(FROM,TO,PTR,LIMIT) \
-do { \
- if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \
- if (INC_IS_FORWARDED(FROM)) { \
- *PTR = make_list(INC_FORWARD_VALUE(FROM)); \
- } else if (TO + 2 <= LIMIT) { \
- INC_STORE(gray,TO,2); \
- INC_COPY_CONS(FROM,TO,PTR); \
- } else { \
- Eterm *hp = erts_inc_alloc(2); \
- INC_STORE(gray,hp,2); \
- INC_COPY_CONS(FROM,hp,PTR); \
- } \
- } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \
- (blackmap[FROM - global_old_heap] == 0)) { \
- blackmap[FROM - global_old_heap] = 2; \
- INC_DECREASE_WORK(2); \
- INC_STORE(gray,FROM,2); \
- } \
-} while(0)
-
-#define COPYMARK_BOXED(FROM,TO,PTR,LIMIT) \
-do { \
- if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \
- int size = BOXED_NEED(FROM,*FROM); \
- if (INC_IS_FORWARDED(FROM)) { \
- *PTR = make_boxed(INC_FORWARD_VALUE(FROM)); \
- } else if (TO + size <= LIMIT) { \
- INC_STORE(gray,TO,size); \
- INC_COPY_BOXED(FROM,TO,PTR); \
- } else { \
- Eterm *hp = erts_inc_alloc(size); \
- INC_STORE(gray,hp,size); \
- INC_COPY_BOXED(FROM,hp,PTR); \
- } \
- } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \
- (blackmap[FROM - global_old_heap] == 0)) { \
- int size = BOXED_NEED(FROM,*FROM); \
- if (size > 254) { \
- blackmap[FROM - global_old_heap] = 255; \
- *(int*)((long)(&blackmap[FROM - \
- global_old_heap] + 4) & ~3) = size; \
- } else \
- blackmap[FROM - global_old_heap] = size; \
- INC_DECREASE_WORK(size); \
- INC_STORE(gray,FROM,size); \
- } \
-} while(0)
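/* Blackmap encoding used by COPYMARK_BOXED above and by erts_inc_alloc in
 * erl_nmgc.c: an object of at most 254 words stores its size directly in
 * the byte at its offset in blackmap; a larger object stores the sentinel
 * 255 there and puts the real size in the next 4-byte-aligned int inside
 * the map. (Descriptive summary of the removed code, not new behaviour.) */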
-
-#define INC_MARK_FORWARD(ptr,dst) fwdptrs[(ptr) - inc_fromspc] = (dst);
-#define INC_IS_FORWARDED(ptr) (fwdptrs[(ptr) - inc_fromspc] != 0)
-#define INC_FORWARD_VALUE(ptr) fwdptrs[(ptr) - inc_fromspc]
-
-/* Note for BM_TIMER: Active timer should always be 'system' when IncAlloc
- * is called!
- */
-#define IncAlloc(p, sz, objv, nobj) \
- (ASSERT_EXPR((sz) >= 0), \
- (((inc_alloc_limit - global_htop) <= (sz)) ? \
- erts_incremental_gc((p),(sz),(objv),(nobj)) : 0), \
- ASSERT_EXPR(global_hend - global_htop > (sz)), \
- global_htop += (sz), global_htop - (sz))
-
-
-/************************************************************************
- * INC_STORAGE, a dynamic circular storage for objects (INC_Object). *
- * Use INC_STORE to add objects to the storage. The storage can then *
- * be used either as a queue, using INC_STORAGE_GET to retrieve         *
- * values, or as a stack, using INC_STORAGE_POP. It is OK to mix calls *
- * to GET and POP if that is desired. *
- * An iterator can be declared to traverse the storage without removing *
- * any elements, and INC_STORAGE_STEP will then return each element in *
- * turn, oldest first. *
- ***********************************************************************/
-
-/* Declare a new storage; must be in the beginning of a block. Give
- * the storage a name that is used in all later calls to the storage.
- * If this is an external declaration of the storage, pass the keyword
- * external as the first argument, otherwise leave it empty.
- */
-#define INC_STORAGE_DECLARATION(ext,name) \
- ext INC_Storage *name##head; \
- ext INC_Storage *name##tail; \
- ext INC_Object *name##free; \
- ext INC_Object *name##last_free; \
- ext int name##size;
-
-
-/* Initialize the storage. Note that memory allocation is involved -
- * don't forget to erase the storage when you are done.
- */
-#define INC_STORAGE_INIT(name) do { \
- name##head = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
- sizeof(INC_Storage)); \
- name##head->next = name##head; \
- name##head->prev = name##head; \
- name##tail = name##head; \
- name##free = name##head->data; \
- name##last_free = name##free + INC_STORAGE_SIZE - 1; \
- name##size = 0; \
-} while(0)
-
-
-/*
-#define INC_STORAGE_SWAP(s1,s2) do { \
- INC_Storage *tmphead = s1##head; \
- INC_Storage *tmptail = s1##tail; \
- INC_Object *tmpfree = s1##free; \
- INC_Object *tmplast = s1##last_free; \
- int tmpsize = s1##size; \
- s1##head = s2##head; \
- s1##tail = s2##tail; \
- s1##free = s2##free; \
- s1##last_free = s2##last_free; \
- s1##size = s2##size; \
- s2##head = tmphead; \
- s2##tail = tmptail; \
- s2##free = tmpfree; \
- s2##last_free = tmplast; \
- s2##size = tmpsize; \
-} while(0)
-*/
-
-
-/* Return and remove the youngest element - treat the storage as a
- * stack. Always check that there are elements in the queue before
- * using INC_STORAGE_POP!
- */
-#define INC_STORAGE_POP(name) (ASSERT_EXPR(name##size != 0), \
- name##size--, \
- (--name##free != name##head->data - 1) ? \
- name##free : (name##head = name##head->prev, \
- name##free = name##head->data + INC_STORAGE_SIZE - 1))
-
-
-/* Return and remove the oldest element - treat the storage as a
- * queue. Always check that there are elements in the queue before
- * using INC_STORAGE_GET!
- */
-#define INC_STORAGE_GET(name) (ASSERT_EXPR(name##size != 0), \
- name##size--, \
- (++name##last_free != name##tail->data + INC_STORAGE_SIZE) ? \
- name##last_free : (name##tail = name##tail->next, \
- name##last_free = name##tail->data))
-
-
-/* Advance the head to the next free location. If the storage is full,
- * a new storage is allocated and linked into the list.
- */
-#define INC_STORAGE_NEXT(name) do { \
- if (name##free == name##last_free) { \
- name##tail = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
- sizeof(INC_Storage)); \
- memcpy(name##tail->data,name##head->data, \
- INC_STORAGE_SIZE * sizeof(INC_Object)); \
- name##tail->next = name##head->next; \
- name##head->next = name##tail; \
- name##tail->prev = name##tail->next->prev; \
- name##tail->next->prev = name##tail; \
- name##last_free = ((void*)name##tail + \
- ((void*)name##last_free - (void*)name##head)); \
- } \
- name##free++; \
- name##size++; \
- if (name##free == name##head->data + INC_STORAGE_SIZE) { \
- name##head = name##head->next; \
- name##free = name##head->data; \
- } \
-} while(0)
-
-
-/* The head of this storage is the next free location. This is where
- * the next element will be stored.
- */
-#define INC_STORAGE_HEAD(name) (name##free)
-
-
-/* Return the top - the youngest element in the storage. */
-/* #define INC_STORAGE_TOP(name) (name##free - 1 with some magic..) */
-
-
-/* True if the storage is empty, false otherwise */
-#define INC_STORAGE_EMPTY(name) (name##size == 0)
-
-
-/* Store a new element in the head of the storage and advance the head
- * to the next free location.
- */
-#define INC_STORE(name,ptr,sz) do { \
- INC_STORAGE_HEAD(name)->this = ptr; \
- INC_STORAGE_HEAD(name)->size = sz; \
- INC_STORAGE_NEXT(name); \
-} while(0)
-
-
-/* An iterator. Use it together with INC_STORAGE_STEP to browse through
- * the storage. Please note that it is not possible to remove an entry
- * in the middle of the storage, use GET or POP to remove entries.
- */
-#define INC_STORAGE_ITERATOR(name) \
- INC_Storage *name##iterator_head = name##tail; \
- INC_Object *name##iterator_current = name##last_free; \
- int name##iterator_left = name##size;
-
-
-/* Return the next element in the storage (sorted by age, oldest
- * first) or NULL if the storage is empty or the last element has been
- * returned already.
- */
-#define INC_STORAGE_STEP(name) (name##iterator_left == 0 ? NULL : \
- (name##iterator_left--, \
- (++name##iterator_current != name##iterator_head->data + \
- INC_STORAGE_SIZE) ? name##iterator_current : \
- (name##iterator_head = name##iterator_head->next, \
- name##iterator_current = name##iterator_head->data)))
-
-
-/* Erase the storage. */
-#define INC_STORAGE_ERASE(name)do { \
- name##head->prev->next = NULL; \
- while (name##head != NULL) { \
- name##tail = name##head; \
- name##head = name##head->next; \
- erts_free(ERTS_ALC_T_OBJECT_STACK,(void*)name##tail); \
- } \
- name##tail = NULL; \
- name##free = NULL; \
- name##last_free = NULL; \
- name##size = 0; \
-} while(0)
-
-/*
- * Structures used by the non-moving memory manager
- */
-
-typedef struct
-{
- Eterm *this;
- unsigned long size;
-} INC_Object;
-
-typedef struct inc_storage {
- struct inc_storage *next;
- struct inc_storage *prev;
- INC_Object data[INC_STORAGE_SIZE];
-} INC_Storage;
-
-typedef struct inc_mem_block
-{
- unsigned long size;
- struct inc_mem_block *prev;
- struct inc_mem_block *next;
-} INC_MemBlock;
-
-typedef struct inc_page
-{
- struct inc_page *next;
- Eterm start[1]; /* Has to be last in struct, this is where the data start */
-} INC_Page;
-
-
-/*
- * Heap pointers for the non-moving memory area.
- */
-extern INC_Page *inc_used_mem;
-extern INC_MemBlock *inc_free_list;
-extern unsigned char *blackmap;
-
-extern Eterm **fwdptrs;
-extern Eterm *inc_fromspc;
-extern Eterm *inc_fromend;
-extern Process *inc_active_proc;
-extern Process *inc_active_last;
-extern Eterm *inc_alloc_limit;
-extern int inc_words_to_go;
-
-INC_STORAGE_DECLARATION(extern,gray);
-INC_STORAGE_DECLARATION(extern,root);
-
-void erts_init_incgc(void);
-void erts_cleanup_incgc(void);
-void erts_incremental_gc(Process *p, int sz, Eterm* objv, int nobj);
-Eterm *erts_inc_alloc(int need);
-
-#else
-# define INC_STORE(lst,ptr,sz)
-# define INC_MARK_FORWARD(ptr)
-# define INC_IS_FORWARDED(ptr)
-# define INC_FORWARD_VALUE(ptr)
-#endif /* INCREMENTAL */
-
-#endif /* _ERL_NMGC_H_ */
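The INC_STORAGE_* macros removed above implemented a grow-on-demand ring of
INC_STORAGE_SIZE-element blocks that could be drained either as a stack
(INC_STORAGE_POP, youngest element first) or as a queue (INC_STORAGE_GET,
oldest element first). As a rough sketch of those semantics only (it does not
reproduce the linked-block layout, and all names are invented, this is not
ERTS code), a resizable circular buffer behaves the same way:

    #include <assert.h>
    #include <stdlib.h>

    /* Stand-in for INC_Object: a pointer plus a size. */
    typedef struct { void *ptr; unsigned long size; } Obj;

    typedef struct {
        Obj *buf;
        size_t cap;    /* allocated slots             */
        size_t len;    /* stored elements             */
        size_t head;   /* index of the oldest element */
    } ObjRing;

    static void ring_init(ObjRing *r) { r->buf = NULL; r->cap = r->len = r->head = 0; }

    /* Like INC_STORE: append at the young end, growing on demand. */
    static void ring_store(ObjRing *r, void *ptr, unsigned long size)
    {
        if (r->len == r->cap) {
            size_t ncap = r->cap ? 2 * r->cap : 16;
            Obj *nbuf = malloc(ncap * sizeof(Obj));
            size_t i;
            assert(nbuf != NULL);
            for (i = 0; i < r->len; i++)          /* repack oldest..youngest */
                nbuf[i] = r->buf[(r->head + i) % r->cap];
            free(r->buf);
            r->buf = nbuf;
            r->cap = ncap;
            r->head = 0;
        }
        r->buf[(r->head + r->len) % r->cap].ptr = ptr;
        r->buf[(r->head + r->len) % r->cap].size = size;
        r->len++;
    }

    /* Like INC_STORAGE_POP: remove and return the youngest element. */
    static Obj ring_pop(ObjRing *r)
    {
        assert(r->len != 0);
        r->len--;
        return r->buf[(r->head + r->len) % r->cap];
    }

    /* Like INC_STORAGE_GET: remove and return the oldest element. */
    static Obj ring_get(ObjRing *r)
    {
        Obj o;
        assert(r->len != 0);
        o = r->buf[r->head];
        r->head = (r->head + 1) % r->cap;
        r->len--;
        return o;
    }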
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 6bda07e9b0..ebfba065d1 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -847,9 +847,6 @@ static Eterm AM_dist_references;
static Eterm AM_node_references;
static Eterm AM_system;
static Eterm AM_timer;
-#ifdef HYBRID
-static Eterm AM_processes;
-#endif
static void setup_reference_table(void);
static Eterm reference_table_term(Uint **hpp, Uint *szp);
@@ -934,9 +931,6 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(node_references);
INIT_AM(timer);
INIT_AM(system);
-#ifdef HYBRID
- INIT_AM(processes);
-#endif
references_atoms_need_init = 0;
}
@@ -1299,12 +1293,6 @@ setup_reference_table(void)
SYSTEM_REF,
TUPLE2(&heap[0], AM_system, am_undefined));
-#ifdef HYBRID
- /* Insert Heap */
- insert_offheap(&erts_global_offheap,
- HEAP_REF,
- TUPLE2(&heap[0], AM_processes, am_undefined));
-#endif
UnUseTmpHeapNoproc(3);
max = erts_ptab_max(&erts_proc);
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 67c8c28f3f..f4d73e716a 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -80,7 +80,8 @@ typedef struct line_buf { /* Buffer used in line oriented I/O */
am_id, \
am_connected, \
am_input, \
- am_output }
+ am_output, \
+ am_os_pid }
/*
* Port Specific Data.
@@ -143,6 +144,7 @@ struct _erl_drv_port {
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
UWord drv_data;
+ SWord os_pid; /* Child process ID */
ErtsProcList *suspended; /* List of suspended processes. */
LineBuf *linebuf; /* Buffer to hold data not ready for
process to get (line oriented I/O)*/
@@ -913,6 +915,8 @@ int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
#define ERTS_PORT_SIG_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK
#define ERTS_PORT_SIG_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
#define ERTS_PORT_SIG_FLG_FORCE_SCHED ERTS_P2P_SIG_DATA_FLG_SCHED
+/* ERTS_PORT_SIG_FLG_FORCE_IMM_CALL only when crash dumping... */
+#define ERTS_PORT_SIG_FLG_FORCE_IMM_CALL ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
/*
* Port ! {Owner, {command, Data}}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 89f6a4e774..b661c26036 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1756,8 +1756,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
!= (erts_aint_t) 0);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ runq->scheduler->reductions += reds;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
return res;
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 9976604042..ae6cd69ae2 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -169,6 +169,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
ERTS_GLB_INLINE void
erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
+#ifdef ERTS_SMP
+ char *lock_str = "port_sched_lock";
+#endif
ptsp->next = NULL;
ptsp->taskq.local.busy.first = NULL;
ptsp->taskq.local.busy.last = NULL;
@@ -179,7 +182,11 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
- erts_mtx_init_x(&ptsp->mtx, "port_sched_lock", instr_id);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
#endif
}
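The erl_port_task.h hunk above passes a NULL lock name to erts_mtx_init_x when
port-lock counting is switched off, which the runtime appears to treat as a
request not to instrument that lock. A standalone sketch of the same
convention follows; it uses pthreads and invented names, not the erts_mtx or
erts_lcnt APIs:

    #include <pthread.h>
    #include <stddef.h>

    /* Invented runtime option bit, mirroring the role of ERTS_LCNT_OPT_PORTLOCK. */
    #define OPT_COUNT_PORT_LOCKS (1u << 0)
    static unsigned lock_count_options;

    typedef struct {
        pthread_mutex_t mtx;
        const char *lcnt_name;   /* NULL => not registered with the lock counter */
    } counted_mutex;

    static void counted_mutex_init(counted_mutex *m, const char *name)
    {
        pthread_mutex_init(&m->mtx, NULL);
        m->lcnt_name = name;
        /* A real implementation would register named locks with its
         * statistics machinery here and skip unnamed ones. */
    }

    static void port_sched_lock_init(counted_mutex *m)
    {
        const char *name = "port_sched_lock";
        if (!(lock_count_options & OPT_COUNT_PORT_LOCKS))
            name = NULL;             /* opt this lock out of counting, as in the hunk */
        counted_mutex_init(m, name);
    }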
diff --git a/erts/emulator/beam/erl_posix_str.c b/erts/emulator/beam/erl_posix_str.c
index 02db10905b..deb7e3e173 100644
--- a/erts/emulator/beam/erl_posix_str.c
+++ b/erts/emulator/beam/erl_posix_str.c
@@ -619,6 +619,7 @@ erl_errno_id(error)
case WSAEINVALIDPROVIDER: return "einvalidprovider";
#endif
#ifdef WSAEPROVIDERFAILEDINIT
+ /* You could get this if SYSTEMROOT env variable is set incorrectly */
case WSAEPROVIDERFAILEDINIT: return "eproviderfailedinit";
#endif
#ifdef WSASYSCALLFAILURE
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 34da9cab84..2320b64295 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -437,7 +437,10 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
break;
case BINARY_DEF:
- {
+ if (header_is_bin_matchstate(*boxed_val(wobj))) {
+ PRINT_STRING(res, fn, arg, "#MatchState");
+ }
+ else {
ProcBin* pb = (ProcBin *) binary_val(wobj);
if (pb->size == 1)
PRINT_STRING(res, fn, arg, "<<1 byte>>");
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 332cc81d48..aaca4b5f59 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -28,7 +28,6 @@
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
-#include "erl_nmgc.h"
#include "error.h"
#include "bif.h"
#include "erl_db.h"
@@ -45,6 +44,9 @@
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
+#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
+#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
+
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED/2)
@@ -53,21 +55,22 @@
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
-#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG 20
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG 1000
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM 10
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM 1000
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT 10
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT 0
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT 5
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT 0
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE 0
+#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE 0
+
#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
-#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
- (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
-#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS)
-#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS)
-#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS)
-#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS)
-#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10)
-
-#define ERTS_WAKEUP_OTHER_DEC 10
-#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10)
-
#if 0 || defined(DEBUG)
#define ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA
#endif
@@ -145,14 +148,18 @@ Uint erts_no_schedulers;
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-static int wakeup_other_limit;
-
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
+static struct {
+ int aux_work;
+ int tse;
+ int sys_schedule;
+} sched_busy_wait;
+
#ifdef ERTS_SMP
int erts_disable_proc_not_running_opt;
@@ -259,11 +266,6 @@ struct erts_system_monitor_flags_t erts_system_monitor_flags;
Eterm erts_system_profile;
struct erts_system_profile_flags_t erts_system_profile_flags;
-#ifdef HYBRID
-Uint erts_num_active_procs;
-Process** erts_active_procs;
-#endif
-
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
#endif
@@ -354,6 +356,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
#endif
#ifdef ERTS_SMP
+ valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -495,13 +498,6 @@ erts_init_process(int ncpu, int proc_tab_size)
proc_tab_size,
"process_table");
-#ifdef HYBRID
- erts_active_procs = (Process**)
- erts_alloc(ERTS_ALC_T_ACTIVE_PROCS,
- erts_proc.max * sizeof(Process*));
- erts_num_active_procs = 0;
-#endif
-
last_reductions = 0;
last_exact_reductions = 0;
erts_default_process_flags = 0;
@@ -944,6 +940,67 @@ haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
}
}
+static ERTS_INLINE erts_aint32_t
+handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ int jix, max_jix;
+
+ ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
+
+ if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions)
+ return aux_work;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+
+ ERTS_THR_MEMORY_BARRIER;
+
+ max_jix = awdp->delayed_wakeup.jix;
+ awdp->delayed_wakeup.jix = -1;
+ for (jix = 0; jix <= max_jix; jix++) {
+ int sched = awdp->delayed_wakeup.job[jix].sched;
+ erts_aint32_t aux_work = awdp->delayed_wakeup.job[jix].aux_work;
+
+ ASSERT(awdp->delayed_wakeup.sched2jix[sched] == jix);
+ awdp->delayed_wakeup.sched2jix[sched] = -1;
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(sched-1),
+ aux_work);
+ }
+ awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
+ return aux_work & ~ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
+}
+
+static ERTS_INLINE void
+schedule_aux_work_wakeup(ErtsAuxWorkData *awdp,
+ int sched,
+ erts_aint32_t aux_work)
+{
+ int jix = awdp->delayed_wakeup.sched2jix[sched];
+ if (jix >= 0) {
+ ASSERT(awdp->delayed_wakeup.job[jix].sched == sched);
+ awdp->delayed_wakeup.job[jix].aux_work |= aux_work;
+ }
+ else {
+ jix = ++awdp->delayed_wakeup.jix;
+ awdp->delayed_wakeup.sched2jix[sched] = jix;
+ awdp->delayed_wakeup.job[jix].sched = sched;
+ awdp->delayed_wakeup.job[jix].aux_work = aux_work;
+ }
+
+ if (awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY) {
+ ASSERT(erts_atomic32_read_nob(&awdp->ssi->aux_work)
+ & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+ }
+ else {
+ awdp->delayed_wakeup.next = (awdp->esdp->reductions
+ + ERTS_DELAYED_WAKEUP_REDUCTIONS);
+
+ ASSERT(!(erts_atomic32_read_nob(&awdp->ssi->aux_work)
+ & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP));
+ set_aux_work_flags_wakeup_nob(awdp->ssi,
+ ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+ }
+}
+
#endif
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
@@ -1020,7 +1077,8 @@ misc_aux_work_clean(ErtsThrQ_t *q,
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q;
@@ -1040,7 +1098,8 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp,
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
@@ -1119,7 +1178,8 @@ erts_notify_check_async_ready_queue(void *vno)
static ERTS_INLINE erts_aint32_t
handle_async_ready(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
@@ -1141,7 +1201,8 @@ handle_async_ready(ErtsAuxWorkData *awdp,
static ERTS_INLINE erts_aint32_t
handle_async_ready_clean(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
void *thr_prgr_p;
@@ -1174,10 +1235,11 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
}
}
-#endif
+#endif /* ERTS_USE_ASYNC_READY_Q */
+
static ERTS_INLINE erts_aint32_t
-handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
@@ -1200,12 +1262,18 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
void
erts_alloc_notify_delayed_dealloc(int ix)
{
- set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
- ERTS_SSI_AUX_WORK_DD);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ schedule_aux_work_wakeup(&esdp->aux_work_data,
+ ix,
+ ERTS_SSI_AUX_WORK_DD);
+ else
+ set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
+ ERTS_SSI_AUX_WORK_DD);
}
static ERTS_INLINE erts_aint32_t
-handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
int need_thr_progress = 0;
@@ -1243,7 +1311,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
static ERTS_INLINE erts_aint32_t
-handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi;
int need_thr_progress;
@@ -1293,7 +1361,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
#define ERTS_MAX_THR_PRGR_LATER_OPS 50
static ERTS_INLINE erts_aint32_t
-handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
@@ -1303,6 +1371,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
+ if (!awdp->later_op.first) {
+ awdp->later_op.last = NULL;
+ }
lop->func(lop->data);
if (!awdp->later_op.first) {
awdp->later_op.last = NULL;
@@ -1439,7 +1510,7 @@ erts_smp_notify_check_children_needed(void)
}
static ERTS_INLINE erts_aint32_t
-handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
erts_check_children();
@@ -1462,7 +1533,7 @@ erts_smp_atomic32_t erts_halt_progress;
int erts_halt_code;
static ERTS_INLINE erts_aint32_t
-handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
awdp->esdp->run_queue->halt_in_progress = 1;
@@ -1505,7 +1576,7 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
#if HAVE_ERTS_MSEG
static ERTS_INLINE erts_aint32_t
-handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
erts_mseg_cache_check();
@@ -1515,7 +1586,7 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
#endif
static ERTS_INLINE erts_aint32_t
-handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
setup_aux_work_timer();
@@ -1529,7 +1600,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
#define HANDLE_AUX_WORK(FLG, HNDLR) \
ignore |= FLG; \
if (aux_work & FLG) { \
- aux_work = HNDLR(awdp, aux_work); \
+ aux_work = HNDLR(awdp, aux_work, waiting); \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
if (!(aux_work & ~ignore)) { \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
@@ -1562,6 +1633,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
 * each other. Most frequent first.
*/
#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP,
+ handle_delayed_aux_work_wakeup);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
handle_delayed_dealloc);
/* DD must be before DD_THR_PRGR */
@@ -1768,8 +1841,8 @@ sched_waiting_sys(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
ASSERT(rq->waiting >= 0);
- ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+ (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
rq->waiting++;
rq->waiting *= -1;
rq->woken = 0;
@@ -1845,8 +1918,8 @@ static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+ (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
if (rq->waiting < 0)
rq->waiting--;
else
@@ -2043,7 +2116,7 @@ thr_prgr_fin_wait(void *vssi)
| ERTS_SSI_FLG_TSE_SLEEPING));
}
-static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp);
+static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
static void *
aux_thread(void *unused)
@@ -2063,7 +2136,7 @@ aux_thread(void *unused)
callbacks.finalize_wait = thr_prgr_fin_wait;
erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
- init_aux_work_data(awdp, NULL);
+ init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
sched_prep_spin_wait(ssi);
@@ -2139,7 +2212,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
- spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+ spincount = sched_busy_wait.tse;
tse_wait:
@@ -2190,7 +2263,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
flgs = sched_prep_cont_spin_wait(ssi);
- spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+ spincount = sched_busy_wait.aux_work;
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
@@ -2227,7 +2300,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(working);
sched_wall_time_change(esdp, working = 0);
- spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+ spincount = sched_busy_wait.sys_schedule;
+ if (spincount == 0)
+ goto sys_aux_work;
while (spincount-- > 0) {
@@ -2518,7 +2593,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
if (activate) {
if (try_inc_no_active_runqs(ix+1))
- ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
+ (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
}
wake_scheduler(wrq, 0);
return 1;
@@ -2585,7 +2660,7 @@ erts_sched_notify_check_cpu_bind(void)
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
wake_scheduler(rq, 0);
}
#else
@@ -2851,7 +2926,7 @@ suspend_run_queue(ErtsRunQueue *rq)
{
erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
ERTS_SSI_FLG_SUSPENDED);
- ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
wake_scheduler(rq, 0);
}
@@ -2918,7 +2993,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
mps = erts_get_migration_paths_managed();
mp = &mps->mpath[rq->ix];
@@ -3423,9 +3498,9 @@ check_balance(ErtsRunQueue *c_rq)
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
- ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
else
- ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
});
@@ -3994,33 +4069,287 @@ erts_debug_nbalance(void)
#endif
}
+/* Wakeup other schedulers */
+
+typedef enum {
+ ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH,
+ ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH,
+ ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM,
+ ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW,
+ ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW
+} ErtsSchedWakeupOtherThreshold;
+
+typedef enum {
+ ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL,
+ ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY
+} ErtsSchedWakeupOtherType;
+
+/* First proposal */
+
+#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10)
+
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH 3
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH 1
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM 0
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW -2
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW -5
+
+#define ERTS_WAKEUP_OTHER_DEC_SHIFT 2
+#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10)
+
+/* To be legacy */
+
+#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY (200*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY (50*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY (10*CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY (CONTEXT_REDS)
+#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY (CONTEXT_REDS/10)
+
+#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10
+#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10)
+
+#ifdef ERTS_SMP
+
+static struct {
+ ErtsSchedWakeupOtherThreshold threshold;
+ ErtsSchedWakeupOtherType type;
+ int limit;
+ int dec_shift;
+ int dec_mask;
+ void (*check)(ErtsRunQueue *rq, Uint32 flags);
+} wakeup_other;
+
+static void
+wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
+{
+ int wo_reds = rq->wakeup_other_reds;
+ if (wo_reds) {
+ int left_len = rq->len - 1;
+ if (left_len < 1) {
+ int wo_reduce = wo_reds << wakeup_other.dec_shift;
+ wo_reduce &= wakeup_other.dec_mask;
+ rq->wakeup_other -= wo_reduce;
+ if (rq->wakeup_other < 0)
+ rq->wakeup_other = 0;
+ }
+ else {
+ rq->wakeup_other += (left_len*wo_reds
+ + ERTS_WAKEUP_OTHER_FIXED_INC);
+ if (rq->wakeup_other > wakeup_other.limit) {
+ int empty_rqs =
+ erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (empty_rqs != 0)
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
+ }
+ rq->wakeup_other_reds = 0;
+ }
+}
+
+static void
+wakeup_other_set_limit(void)
+{
+ switch (wakeup_other.threshold) {
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH;
+ wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH;
+ wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
+ wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW;
+ wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW;
+ wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW;
+ break;
+ }
+ if (wakeup_other.dec_shift < 0)
+ wakeup_other.dec_mask = (1 << (sizeof(wakeup_other.dec_mask)*8
+ + wakeup_other.dec_shift)) - 1;
+ else {
+ wakeup_other.dec_mask = 0;
+ wakeup_other.dec_mask = ~wakeup_other.dec_mask;
+ }
+}
+
+static void
+wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
+{
+ int wo_reds = rq->wakeup_other_reds;
+ if (wo_reds) {
+ erts_aint32_t len = rq->len;
+ if (len < 2) {
+ rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
+ if (rq->wakeup_other < 0)
+ rq->wakeup_other = 0;
+ }
+ else if (rq->wakeup_other < wakeup_other.limit)
+ rq->wakeup_other += len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY;
+ else {
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
+ rq->wakeup_other = 0;
+ }
+ }
+ rq->wakeup_other_reds = 0;
+}
+
+static void
+wakeup_other_set_limit_legacy(void)
+{
+ switch (wakeup_other.threshold) {
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY;
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW:
+ wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY;
+ break;
+ }
+}
+
+static void
+set_wakeup_other_data(void)
+{
+ switch (wakeup_other.type) {
+ case ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL:
+ wakeup_other.check = wakeup_other_check;
+ wakeup_other_set_limit();
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY:
+ wakeup_other.check = wakeup_other_check_legacy;
+ wakeup_other_set_limit_legacy();
+ break;
+ }
+}
+
+#endif
+
void
erts_early_init_scheduling(int no_schedulers)
{
aux_work_timeout_early_init(no_schedulers);
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
+#ifdef ERTS_SMP
+ wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
+ wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY;
+#endif
+ sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
+ sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
+ * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
+ sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
+ * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
}
int
-erts_sched_set_wakeup_limit(char *str)
+erts_sched_set_wakeup_other_thresold(char *str)
{
+ ErtsSchedWakeupOtherThreshold threshold;
if (sys_strcmp(str, "very_high") == 0)
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH;
+ threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH;
else if (sys_strcmp(str, "high") == 0)
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH;
+ threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH;
else if (sys_strcmp(str, "medium") == 0)
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
+ threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
else if (sys_strcmp(str, "low") == 0)
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_LOW;
+ threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW;
else if (sys_strcmp(str, "very_low") == 0)
- wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW;
+ threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW;
else
return EINVAL;
+#ifdef ERTS_SMP
+ wakeup_other.threshold = threshold;
+ set_wakeup_other_data();
+#endif
return 0;
}
+int
+erts_sched_set_wakeup_other_type(char *str)
+{
+ ErtsSchedWakeupOtherType type;
+ if (sys_strcmp(str, "proposal") == 0)
+ type = ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL;
+ else if (sys_strcmp(str, "default") == 0)
+ type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY;
+ else if (sys_strcmp(str, "legacy") == 0)
+ type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY;
+ else
+ return EINVAL;
+#ifdef ERTS_SMP
+ wakeup_other.type = type;
+#endif
+ return 0;
+}
+
+int
+erts_sched_set_busy_wait_threshold(char *str)
+{
+ int sys_sched;
+ int aux_work_fact;
+
+ if (sys_strcmp(str, "very_long") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG;
+ }
+ else if (sys_strcmp(str, "long") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG;
+ }
+ else if (sys_strcmp(str, "medium") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM;
+ }
+ else if (sys_strcmp(str, "short") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT;
+ }
+ else if (sys_strcmp(str, "very_short") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT;
+ }
+ else if (sys_strcmp(str, "none") == 0) {
+ sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE;
+ }
+ else {
+ return EINVAL;
+ }
+
+ sched_busy_wait.sys_schedule = sys_sched;
+ sched_busy_wait.tse = sys_sched*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ sched_busy_wait.aux_work = sys_sched*aux_work_fact;
+
+ return 0;
+}
static void
-init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
+init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
awdp->sched_id = esdp ? (int) esdp->no : 0;
awdp->esdp = esdp;
@@ -4040,15 +4369,40 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
#endif
awdp->async_ready.queue = NULL;
#endif
+#ifdef ERTS_SMP
+ awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
+ if (!dawwp) {
+ awdp->delayed_wakeup.job = NULL;
+ awdp->delayed_wakeup.sched2jix = NULL;
+ awdp->delayed_wakeup.jix = -1;
+ }
+ else {
+ int i;
+ awdp->delayed_wakeup.job = (ErtsDelayedAuxWorkWakeupJob *) dawwp;
+ dawwp += sizeof(ErtsDelayedAuxWorkWakeupJob)*(erts_no_schedulers+1);
+ awdp->delayed_wakeup.sched2jix = (int *) dawwp;
+ awdp->delayed_wakeup.jix = -1;
+ for (i = 0; i <= erts_no_schedulers; i++)
+ awdp->delayed_wakeup.sched2jix[i] = -1;
+ }
+#endif
}
void
erts_init_scheduling(int no_schedulers, int no_schedulers_online)
{
int ix, n, no_ssi;
+ char *daww_ptr;
+#ifdef ERTS_SMP
+ size_t daww_sz;
+#endif
init_misc_op_list_alloc();
+#ifdef ERTS_SMP
+ set_wakeup_other_data();
+#endif
+
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
@@ -4164,6 +4518,15 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
/* Create and initialize scheduler specific data */
+#ifdef ERTS_SMP
+ daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
+ + sizeof(int))*(n+1));
+ daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ daww_sz*n);
+#else
+ daww_ptr = NULL;
+#endif
+
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
n*sizeof(ErtsAlignedSchedulerData));
@@ -4198,7 +4561,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
esdp->run_queue = ERTS_RUNQ_IX(ix);
esdp->run_queue->scheduler = esdp;
- init_aux_work_data(&esdp->aux_work_data, esdp);
+ init_aux_work_data(&esdp->aux_work_data, esdp, daww_ptr);
+#ifdef ERTS_SMP
+ daww_ptr += daww_sz;
+#endif
+
+ esdp->reductions = 0;
+
init_sched_wall_time(&esdp->sched_wall_time);
}
@@ -6292,6 +6661,8 @@ Process *schedule(Process *p, int calls)
| ERTS_PROC_LOCK_STATUS));
#endif
+ esdp->reductions += reds;
+
schedule_out_process(rq, state, p); /* Returns with rq locked! */
ERTS_PROC_REDUCTIONS_EXECUTED(rq,
@@ -6433,7 +6804,7 @@ Process *schedule(Process *p, int calls)
goto continue_check_activities_to_run;
}
- ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
/*
* Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
@@ -6487,29 +6858,7 @@ Process *schedule(Process *p, int calls)
exec_misc_ops(rq);
#ifdef ERTS_SMP
- {
- int wo_reds = rq->wakeup_other_reds;
- if (wo_reds) {
- erts_aint32_t len = rq->len;
- if (len < 2) {
- rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC*wo_reds;
- if (rq->wakeup_other < 0)
- rq->wakeup_other = 0;
- }
- else if (rq->wakeup_other < wakeup_other_limit)
- rq->wakeup_other += len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC;
- else {
- if (flags & ERTS_RUNQ_FLG_PROTECTED)
- ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
- wake_scheduler_on_empty_runq(rq);
- rq->wakeup_other = 0;
- }
- rq->wakeup_other = 0;
- }
- }
- rq->wakeup_other_reds = 0;
- }
+ wakeup_other.check(rq, flags);
#endif
/*
@@ -6604,7 +6953,7 @@ Process *schedule(Process *p, int calls)
erts_smp_runq_unlock(rq);
if (flags & ERTS_RUNQ_FLG_PROTECTED)
- ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -6641,7 +6990,6 @@ Process *schedule(Process *p, int calls)
/* Never run a suspended process */
ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
- ACTIVATE(p);
reds = context_reds;
if (IS_TRACED(p)) {
@@ -6677,7 +7025,6 @@ Process *schedule(Process *p, int calls)
}
p->fcalls = reds;
- ASSERT(IS_ACTIVE(p));
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
return p;
}
@@ -6948,9 +7295,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
ErtsRunQueue *rq = NULL;
Process *p;
Sint arity; /* Number of arguments. */
-#ifndef HYBRID
Uint arg_size; /* Size of arguments. */
-#endif
Uint sz; /* Needed words on heap. */
Uint heap_need; /* Size needed on heap. */
Eterm res = THE_NON_VALUE;
@@ -6961,17 +7306,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
-#ifdef HYBRID
- /*
- * Copy the arguments to the global heap
- * Since global GC might occur we want to do this before adding the
- * new process to the erts_proc.r.o.tab.
- */
- BM_SWAP_TIMER(system,copy);
- LAZY_COPY(parent,args);
- BM_SWAP_TIMER(copy,system);
- heap_need = 0;
-#endif /* HYBRID */
/*
* Check for errors.
*/
@@ -7010,12 +7344,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
BM_COUNT(processes_spawned);
-#ifndef HYBRID
BM_SWAP_TIMER(system,size);
arg_size = size_object(args);
BM_SWAP_TIMER(size,system);
heap_need = arg_size;
-#endif
p->flags = erts_default_process_flags;
@@ -7060,9 +7392,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
p->high_water = p->heap;
-#ifdef INCREMENTAL
- p->scan_top = p->high_water;
-#endif
p->gen_gcs = 0;
p->stop = p->hend = p->heap + sz;
p->htop = p->heap;
@@ -7088,19 +7417,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
BM_STOP_TIMER(system);
BM_MESSAGE(args,p,parent);
BM_START_TIMER(system);
-#ifdef HYBRID
- p->arg_reg[2] = args;
-#ifdef INCREMENTAL
- p->active = 0;
- if (ptr_val(args) >= inc_fromspc && ptr_val(args) < inc_fromend)
- INC_ACTIVATE(p);
-#endif
-#else
BM_SWAP_TIMER(system,copy);
p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
BM_MESSAGE_COPIED(arg_size);
BM_SWAP_TIMER(copy,system);
-#endif
p->arity = 3;
p->fvalue = NIL;
@@ -7159,13 +7479,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
? NIL
: parent->common.id);
-#ifdef HYBRID
- p->rrma = NULL;
- p->rrsrc = NULL;
- p->nrr = 0;
- p->rrsz = 0;
-#endif
-
INIT_HOLE_CHECK(p);
#ifdef DEBUG
p->last_old_htop = NULL;
@@ -7235,15 +7548,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
so->mref = mref;
}
-#ifdef HYBRID
- /*
- * Add process to the array of active processes.
- */
- ACTIVATE(p);
- p->active_index = erts_num_active_procs++;
- erts_active_procs[p->active_index] = p;
-#endif
-
#ifdef ERTS_SMP
p->scheduler_data = NULL;
p->suspendee = NIL;
@@ -7327,9 +7631,6 @@ void erts_init_empty_process(Process *p)
p->common.u.alive.reg = NULL;
p->heap_sz = 0;
p->high_water = NULL;
-#ifdef INCREMENTAL
- p->scan_top = NULL;
-#endif
p->old_hend = NULL;
p->old_htop = NULL;
p->old_heap = NULL;
@@ -7381,14 +7682,6 @@ void erts_init_empty_process(Process *p)
#endif
#endif
- ACTIVATE(p);
-
-#ifdef HYBRID
- p->rrma = NULL;
- p->rrsrc = NULL;
- p->nrr = 0;
- p->rrsz = 0;
-#endif
INIT_HOLE_CHECK(p);
#ifdef DEBUG
p->last_old_htop = NULL;
@@ -7434,9 +7727,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->common.u.alive.reg == NULL);
ASSERT(p->heap_sz == 0);
ASSERT(p->high_water == NULL);
-#ifdef INCREMENTAL
- ASSERT(p->scan_top == NULL);
-#endif
ASSERT(p->old_hend == NULL);
ASSERT(p->old_htop == NULL);
ASSERT(p->old_heap == NULL);
@@ -7582,22 +7872,6 @@ delete_process(Process* p)
ASSERT(!p->suspend_monitors);
p->fvalue = NIL;
-
-#ifdef HYBRID
- erts_active_procs[p->active_index] =
- erts_active_procs[--erts_num_active_procs];
- erts_active_procs[p->active_index]->active_index = p->active_index;
-#ifdef INCREMENTAL
- if (INC_IS_ACTIVE(p))
- INC_DEACTIVATE(p);
-#endif
-
- if (p->rrma != NULL) {
- erts_free(ERTS_ALC_T_ROOTSET,p->rrma);
- erts_free(ERTS_ALC_T_ROOTSET,p->rrsrc);
- }
-#endif
-
}
static ERTS_INLINE erts_aint32_t
@@ -7987,7 +8261,6 @@ send_exit_signal(Process *c_p, /* current process if and only
? rsn
: copy_object(rsn, rp)),
NULL);
- ACTIVATE(rp);
}
#endif
return -1; /* Receiver will exit */
@@ -8724,7 +8997,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
erts_print(to, to_arg, "\n%p ", sp);
} else {
char sbuf[16];
- sprintf(sbuf, "y(%d)", yreg);
+ erts_snprintf(sbuf, sizeof(sbuf), "y(%d)", yreg);
erts_print(to, to_arg, "%-8s ", sbuf);
yreg++;
}
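A large part of the erl_process.c changes above introduce delayed aux-work
wakeups: instead of immediately poking another scheduler for every
delayed-dealloc notification, the calling scheduler records the target and the
work bits in a small per-scheduler job table (sched2jix maps target scheduler
to job slot) and flushes the whole batch once it has executed roughly
CONTEXT_REDS/2 further reductions, or when it is about to go to sleep. The
sketch below shows just that batching pattern; the flag ORing, the index map
and the reduction-based deadline follow the diff, while the names and the
wakeup primitive are invented for the example:

    #include <stdint.h>
    #include <string.h>

    #define NSCHED 8                      /* invented scheduler count           */
    #define FLUSH_REDS 1000               /* stand-in for CONTEXT_REDS/2        */
    #define DEADLINE_INF UINT64_MAX

    typedef struct {
        int target;                       /* scheduler to wake                  */
        uint32_t work;                    /* accumulated aux-work flag bits     */
    } Job;

    typedef struct {
        uint64_t reductions;              /* work executed so far by this sched */
        uint64_t deadline;                /* flush once reductions passes this  */
        int sched2jix[NSCHED + 1];        /* target scheduler -> job slot, -1 = none */
        Job job[NSCHED + 1];
        int njobs;
    } SchedWakeupBatch;

    /* Hypothetical immediate wakeup primitive (atomic flag set + wake in ERTS). */
    void wake_scheduler_with_work(int sched, uint32_t work);

    static void batch_init(SchedWakeupBatch *b)
    {
        int i;
        memset(b, 0, sizeof(*b));
        b->deadline = DEADLINE_INF;
        for (i = 0; i <= NSCHED; i++)
            b->sched2jix[i] = -1;
    }

    /* Record work for another scheduler without waking it yet. */
    static void batch_add(SchedWakeupBatch *b, int sched, uint32_t work)
    {
        int jix = b->sched2jix[sched];
        if (jix >= 0) {
            b->job[jix].work |= work;            /* merge into existing job      */
        } else {
            jix = b->njobs++;
            b->sched2jix[sched] = jix;
            b->job[jix].target = sched;
            b->job[jix].work = work;
        }
        if (b->deadline == DEADLINE_INF)         /* first pending job: arm timer */
            b->deadline = b->reductions + FLUSH_REDS;
    }

    /* Called as the scheduler runs; flush when the deadline passes or it idles. */
    static void batch_maybe_flush(SchedWakeupBatch *b, int about_to_wait)
    {
        int jix;
        if (!about_to_wait && b->reductions < b->deadline)
            return;
        for (jix = 0; jix < b->njobs; jix++) {
            b->sched2jix[b->job[jix].target] = -1;
            wake_scheduler_with_work(b->job[jix].target, b->job[jix].work);
        }
        b->njobs = 0;
        b->deadline = DEADLINE_INF;
    }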
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index d613321421..6d1032c292 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -250,19 +250,20 @@ typedef enum {
 * each other. Most frequent - lowest bit number.
*/
-#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 0)
-#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 1)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 2)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 3)
-#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 4)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 6)
-#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 7)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 8)
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 9)
-#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 11)
-#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 8)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 9)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 10)
+#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 11)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -419,6 +420,11 @@ typedef struct {
} ErtsSchedWallTime;
typedef struct {
+ int sched;
+ erts_aint32_t aux_work;
+} ErtsDelayedAuxWorkWakeupJob;
+
+typedef struct {
int sched_id;
ErtsSchedulerData *esdp;
ErtsSchedulerSleepInfo *ssi;
@@ -451,6 +457,14 @@ typedef struct {
void *queue;
} async_ready;
#endif
+#ifdef ERTS_SMP
+ struct {
+ Uint64 next;
+ int *sched2jix;
+ int jix;
+ ErtsDelayedAuxWorkWakeupJob *job;
+ } delayed_wakeup;
+#endif
} ErtsAuxWorkData;
struct ErtsSchedulerData_ {
@@ -484,11 +498,11 @@ struct ErtsSchedulerData_ {
int virtual_reds;
int cpu_id; /* >= 0 when bound */
ErtsAuxWorkData aux_work_data;
-
ErtsAtomCacheMap atom_cache_map;
ErtsSchedAllocData alloc_data;
+ Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
@@ -832,24 +846,6 @@ struct process {
#endif
#endif
-#ifdef HYBRID
- Eterm *rrma; /* Remembered roots to Message Area */
- Eterm **rrsrc; /* The source of the root */
- Uint nrr; /* Number of remembered roots */
- Uint rrsz; /* Size of root array */
-#endif
-
-#ifdef HYBRID
- Uint active; /* Active since last major collection? */
- Uint active_index; /* Index in the active process array */
-#endif
-
-#ifdef INCREMENTAL
- Process *active_next; /* Active processes to scan for roots */
- Process *active_prev; /* in collection of the message area */
- Eterm *scan_top;
-#endif
-
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
ErlHeapFragment* last_mbuf; /* No need to scan beyond this mbuf. */
@@ -995,10 +991,6 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-#ifdef HYBRID
-extern Uint erts_num_active_procs;
-extern Process** erts_active_procs;
-#endif
extern Uint erts_default_process_flags;
extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
@@ -1303,12 +1295,14 @@ ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list,
#endif
+int erts_sched_set_wakeup_other_thresold(char *str);
+int erts_sched_set_wakeup_other_type(char *str);
+int erts_sched_set_busy_wait_threshold(char *str);
+
void erts_schedule_thr_prgr_later_op(void (*)(void *),
void *,
ErtsThrPrgrLaterOp *);
-int erts_sched_set_wakeup_limit(char *str);
-
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
#endif
@@ -1335,6 +1329,10 @@ void erts_smp_notify_check_children_needed(void);
#if ERTS_USE_ASYNC_READY_Q
void erts_notify_check_async_ready_queue(void *);
#endif
+#ifdef ERTS_SMP
+void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
+void erts_notify_finish_breakpointing(Process* p);
+#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
void *arg);
@@ -1689,10 +1687,14 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
ERTS_GLB_INLINE Uint erts_get_scheduler_id(void);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp);
+#ifndef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq);
+#endif
ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq);
ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq);
+#ifndef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+#endif
ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
@@ -1757,6 +1759,12 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
#endif
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
+
+#else
+
ERTS_GLB_INLINE void
erts_smp_runq_lock(ErtsRunQueue *rq)
{
@@ -1765,6 +1773,8 @@ erts_smp_runq_lock(ErtsRunQueue *rq)
#endif
}
+#endif
+
ERTS_GLB_INLINE int
erts_smp_runq_trylock(ErtsRunQueue *rq)
{
@@ -1783,6 +1793,31 @@ erts_smp_runq_unlock(ErtsRunQueue *rq)
#endif
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#define erts_smp_xrunq_lock(rq, xrq) erts_smp_xrunq_lock_x((rq), (xrq), __FILE__, __LINE__)
+
+ERTS_GLB_INLINE void
+erts_smp_xrunq_lock_x(ErtsRunQueue *rq, ErtsRunQueue *xrq, char* file, int line)
+{
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
+ if (xrq != rq) {
+ if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
+ if (rq < xrq)
+ erts_smp_mtx_lock_x(&xrq->mtx, file, line);
+ else {
+ erts_smp_mtx_unlock(&rq->mtx);
+ erts_smp_mtx_lock_x(&xrq->mtx, file, line);
+ erts_smp_mtx_lock_x(&rq->mtx, file, line);
+ }
+ }
+ }
+#endif
+}
+
+#else
+
ERTS_GLB_INLINE void
erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
@@ -1802,6 +1837,8 @@ erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
#endif
}
+#endif
+
ERTS_GLB_INLINE void
erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
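The renumbered ERTS_SSI_AUX_WORK_* bits above keep the convention stated in
the surrounding comment: the most frequent kind of work gets the lowest bit,
and the aux-work loop services set bits from low to high, each handler
clearing its own flag. A minimal sketch of that dispatch shape, with invented
handlers rather than the real ERTS handler list:

    #include <stdint.h>

    typedef uint32_t aux_work_t;

    /* Invented handlers: each clears the bit it just serviced. */
    static aux_work_t do_delayed_wakeup(aux_work_t w)  { return w & ~(aux_work_t)(1u << 0); }
    static aux_work_t do_delayed_dealloc(aux_work_t w) { return w & ~(aux_work_t)(1u << 1); }
    static aux_work_t do_fix_alloc(aux_work_t w)       { return w & ~(aux_work_t)(1u << 2); }

    static aux_work_t (*const handlers[])(aux_work_t) = {
        do_delayed_wakeup,      /* bit 0: most frequent work first */
        do_delayed_dealloc,     /* bit 1 */
        do_fix_alloc,           /* bit 2 */
    };

    static aux_work_t handle_aux_work_sketch(aux_work_t work)
    {
        unsigned bit;
        for (bit = 0; work && bit < sizeof(handlers)/sizeof(handlers[0]); bit++) {
            aux_work_t flg = (aux_work_t)1 << bit;
            if (work & flg)
                work = handlers[bit](work);   /* handler may also clear or set other bits */
        }
        return work;              /* any bits still set were not recognized */
    }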
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index de2ebf73f0..ba74dfd6a1 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -323,7 +323,7 @@ heap_dump(int to, void *to_arg, Eterm x)
int i;
GET_DOUBLE_DATA((ptr+1), f);
- i = sys_double_to_chars(f.fd, (char*) sbuf);
+ i = sys_double_to_chars(f.fd, (char*) sbuf, sizeof(sbuf));
sys_memset(sbuf+i, 0, 31-i);
erts_print(to, to_arg, "F%X:%s\n", i, sbuf);
*ptr = OUR_NIL;
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 2984665cbd..2db5df06b4 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -91,16 +91,6 @@ static void check_queue(erts_proc_lock_t *lck);
#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
#endif
-struct erts_proc_lock_queues_t_ {
- erts_proc_lock_queues_t *next;
- erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-static erts_proc_lock_queues_t zeroqs = {0};
-
-static erts_smp_spinlock_t qs_lock;
-static erts_proc_lock_queues_t *queue_free_list;
-
static int proc_lock_spin_count;
static int aux_thr_proc_lock_spin_count;
@@ -133,8 +123,6 @@ erts_init_proc_lock(int cpus)
#endif
}
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
- queue_free_list = NULL;
erts_thr_install_exit_handler(cleanup_tse);
if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
@@ -164,16 +152,7 @@ erts_init_proc_lock(int cpus)
#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_CHECK
-static void
-check_unused_tse(erts_tse_t *wtr)
-{
- int i;
- erts_proc_lock_queues_t *queues = wtr->udata;
- ERTS_LC_ASSERT(wtr->uflgs == 0);
- for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!queues->queue[i]);
-}
-#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
+#define CHECK_UNUSED_TSE(W) ERTS_LC_ASSERT((W)->uflgs == 0)
#else
#define CHECK_UNUSED_TSE(W)
#endif
@@ -182,49 +161,14 @@ static ERTS_INLINE erts_tse_t *
tse_fetch(erts_pix_lock_t *pix_lock)
{
erts_tse_t *tse = erts_tse_fetch();
- if (!tse->udata) {
- erts_proc_lock_queues_t *qs;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- if (pix_lock)
- erts_pix_unlock(pix_lock);
-#endif
- erts_smp_spin_lock(&qs_lock);
- qs = queue_free_list;
- if (qs) {
- queue_free_list = queue_free_list->next;
- erts_smp_spin_unlock(&qs_lock);
- }
- else {
- erts_smp_spin_unlock(&qs_lock);
- qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) qs,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
- }
- tse->udata = qs;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- if (pix_lock)
- erts_pix_lock(pix_lock);
-#endif
- }
tse->uflgs = 0;
return tse;
}
static ERTS_INLINE void
-tse_return(erts_tse_t *tse, int force_free_q)
+tse_return(erts_tse_t *tse)
{
CHECK_UNUSED_TSE(tse);
- if (force_free_q || erts_tse_is_tmp(tse)) {
- erts_proc_lock_queues_t *qs = tse->udata;
- ASSERT(qs);
- erts_smp_spin_lock(&qs_lock);
- qs->next = queue_free_list;
- queue_free_list = qs;
- erts_smp_spin_unlock(&qs_lock);
- tse->udata = NULL;
- }
erts_tse_return(tse);
}
@@ -232,55 +176,49 @@ static void
cleanup_tse(void)
{
erts_tse_t *tse = erts_tse_fetch();
- if (tse) {
- if (tse->udata)
- tse_return(tse, 1);
- else
- erts_tse_return(tse);
- }
+ if (tse)
+ erts_tse_return(tse);
}
/*
* Waiters are queued in a circular double linked list;
- * where qs->queue[lock_ix] is the first waiter in queue, and
- * qs->queue[lock_ix]->prev is the last waiter in queue.
+ * where lck->queue[lock_ix] is the first waiter in queue, and
+ * lck->queue[lock_ix]->prev is the last waiter in queue.
*/
static ERTS_INLINE void
-enqueue_waiter(erts_proc_lock_queues_t *qs,
- int ix,
- erts_tse_t *wtr)
+enqueue_waiter(erts_proc_lock_t *lck, int ix, erts_tse_t *wtr)
{
- if (!qs->queue[ix]) {
- qs->queue[ix] = wtr;
+ if (!lck->queue[ix]) {
+ lck->queue[ix] = wtr;
wtr->next = wtr;
wtr->prev = wtr;
}
else {
- ERTS_LC_ASSERT(qs->queue[ix]->next && qs->queue[ix]->prev);
- wtr->next = qs->queue[ix];
- wtr->prev = qs->queue[ix]->prev;
+ ERTS_LC_ASSERT(lck->queue[ix]->next && lck->queue[ix]->prev);
+ wtr->next = lck->queue[ix];
+ wtr->prev = lck->queue[ix]->prev;
wtr->prev->next = wtr;
- qs->queue[ix]->prev = wtr;
+ lck->queue[ix]->prev = wtr;
}
}
static erts_tse_t *
-dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
+dequeue_waiter(erts_proc_lock_t *lck, int ix)
{
- erts_tse_t *wtr = qs->queue[ix];
- ERTS_LC_ASSERT(qs->queue[ix]);
+ erts_tse_t *wtr = lck->queue[ix];
+ ERTS_LC_ASSERT(lck->queue[ix]);
if (wtr->next == wtr) {
- ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
- qs->queue[ix] = NULL;
+ ERTS_LC_ASSERT(lck->queue[ix]->prev == wtr);
+ lck->queue[ix] = NULL;
}
else {
ERTS_LC_ASSERT(wtr->next != wtr);
ERTS_LC_ASSERT(wtr->prev != wtr);
wtr->next->prev = wtr->prev;
wtr->prev->next = wtr->next;
- qs->queue[ix] = wtr->next;
+ lck->queue[ix] = wtr->next;
}
return wtr;
}
@@ -301,19 +239,18 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
ErtsProcLocks locks = wtr->uflgs;
int lock_no;
- ERTS_LC_ASSERT(lck->queues);
ERTS_LC_ASSERT(got_locks != locks);
for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
if (locks & lock) {
ErtsProcLocks wflg, old_lflgs;
- if (lck->queues->queue[lock_no]) {
+ if (lck->queue[lock_no]) {
/* Others already waiting */
enqueue:
ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(lck)
& (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
- enqueue_waiter(lck->queues, lock_no, wtr);
+ enqueue_waiter(lck, lock_no, wtr);
break;
}
wflg = lock << ERTS_PROC_LOCK_WAITER_SHIFT;
@@ -365,7 +302,6 @@ transfer_locks(Process *p,
for (lock_no = 0; tlocks && lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
if (tlocks & lock) {
- erts_proc_lock_queues_t *qs = p->lock.queues;
/* Transfer lock */
#ifdef ERTS_ENABLE_LOCK_CHECK
tlocks &= ~lock;
@@ -373,9 +309,9 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
& (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
transferred++;
- wtr = dequeue_waiter(qs, lock_no);
+ wtr = dequeue_waiter(&p->lock, lock_no);
ERTS_LC_ASSERT(wtr);
- if (!qs->queue[lock_no])
+ if (!p->lock.queue[lock_no])
unset_waiter |= lock;
ERTS_LC_ASSERT(wtr->uflgs & lock);
wtr->uflgs &= ~lock;
@@ -464,7 +400,6 @@ wait_for_locks(Process *p,
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
erts_tse_t *wtr;
- erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
wtr = tse_fetch(pix_lock);
@@ -480,18 +415,6 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
- qs = wtr->udata;
- ASSERT(qs);
- /* Provide the process with waiter queues, if it doesn't have one. */
- if (!p->lock.queues) {
- qs->next = NULL;
- p->lock.queues = qs;
- }
- else {
- qs->next = p->lock.queues->next;
- p->lock.queues->next = qs;
- }
-
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
@@ -505,7 +428,9 @@ wait_for_locks(Process *p,
check_queue(&p->lock);
#endif
- if (wtr->uflgs) {
+ if (wtr->uflgs == 0)
+ erts_pix_unlock(pix_lock);
+ else {
/* We didn't get them all; need to wait... */
ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
@@ -530,28 +455,12 @@ wait_for_locks(Process *p,
} while (res != 0);
}
- erts_pix_lock(pix_lock);
-
ASSERT(wtr->uflgs == 0);
}
- /* Recover some queues to store in the waiter. */
- ERTS_LC_ASSERT(p->lock.queues);
- if (p->lock.queues->next) {
- qs = p->lock.queues->next;
- p->lock.queues->next = qs->next;
- }
- else {
- qs = p->lock.queues;
- p->lock.queues = NULL;
- }
- wtr->udata = qs;
-
- erts_pix_unlock(pix_lock);
-
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- tse_return(wtr, 0);
+ tse_return(wtr);
}
/*
@@ -658,7 +567,7 @@ void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
#if ERTS_PROC_LOCK_OWN_IMPL
- tse_return(tse_fetch(NULL), 0);
+ tse_return(tse_fetch(NULL));
#endif
}
@@ -1093,6 +1002,7 @@ void
erts_proc_lock_init(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
+ int i;
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_smp_atomic32_init_nob(&p->lock.flags,
@@ -1100,7 +1010,8 @@ erts_proc_lock_init(Process *p)
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
- p->lock.queues = NULL;
+ for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
+ p->lock.queue[i] = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
@@ -1128,11 +1039,8 @@ erts_proc_lock_init(Process *p)
#endif
erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
- {
- int i;
- for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
- }
+ for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
+ erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_init(p);
@@ -1159,7 +1067,7 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p) {
-
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
if (p->common.id != ERTS_INVALID_PID) {
erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
@@ -1171,6 +1079,12 @@ void erts_lcnt_proc_lock_init(Process *p) {
erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK);
}
+ } else {
+ sys_memzero(&(p->lock.lcnt_main), sizeof(p->lock.lcnt_main));
+ sys_memzero(&(p->lock.lcnt_msgq), sizeof(p->lock.lcnt_msgq));
+ sys_memzero(&(p->lock.lcnt_link), sizeof(p->lock.lcnt_link));
+ sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status));
+ }
}
@@ -1265,6 +1179,27 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res
}
}
+
+void erts_lcnt_enable_proc_lock_count(int enable)
+{
+ int i, max = erts_ptab_max(&erts_proc);
+
+ for (i = 0; i < max; ++i) {
+ Process* p = erts_pix2proc(i);
+ if (p) {
+ if (enable) {
+ if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) {
+ erts_lcnt_proc_lock_init(p);
+ }
+ } else {
+ if (ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) {
+ erts_lcnt_proc_lock_destroy(p);
+ }
+ }
+ }
+ }
+}
+
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
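
Editor's note: the new erts_lcnt_enable_proc_lock_count() above walks the whole process table and either initialises or destroys the per-process lock-count structures, so process-lock counting can be toggled at runtime for processes that already exist. A minimal caller sketch follows; ERTS_LCNT_OPT_PROCLOCK appears in the hunk above, but the set/clear helpers and the caller itself are assumptions, not part of this patch.

    /* Hedged sketch: flip the runtime option first so newly spawned
     * processes pick the right behaviour, then bring existing processes
     * in sync with it. */
    static void sketch_toggle_proc_lock_count(int enable)
    {
        if (enable)
            erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK);    /* assumed helper */
        else
            erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_PROCLOCK);  /* assumed helper */

        erts_lcnt_enable_proc_lock_count(enable);
    }
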
@@ -1657,7 +1592,7 @@ erts_proc_lc_chk_no_proc_locks(char *file, int line)
lc_id.proc_lock_msgq,
lc_id.proc_lock_status};
erts_lc_have_lock_ids(resv, ids, 4);
- if (resv[0] || resv[1] || resv[2] || resv[3]) {
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
@@ -1679,21 +1614,21 @@ check_queue(erts_proc_lock_t *lck)
if (lflgs & wtr) {
int n;
erts_tse_t *wtr;
- ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
- wtr = lck->queues->queue[lock_no];
+ ERTS_LC_ASSERT(lck->queue[lock_no]);
+ wtr = lck->queue[lock_no];
n = 0;
do {
wtr = wtr->next;
n++;
- } while (wtr != lck->queues->queue[lock_no]);
+ } while (wtr != lck->queue[lock_no]);
do {
wtr = wtr->prev;
n--;
- } while (wtr != lck->queues->queue[lock_no]);
+ } while (wtr != lck->queue[lock_no]);
ERTS_LC_ASSERT(n == 0);
}
else {
- ERTS_LC_ASSERT(!lck->queues || !lck->queues->queue[lock_no]);
+ ERTS_LC_ASSERT(!lck->queue[lock_no]);
}
}
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 537cd3f24f..9dd503f3cb 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -69,8 +69,6 @@
typedef erts_aint32_t ErtsProcLocks;
-typedef struct erts_proc_lock_queues_t_ erts_proc_lock_queues_t;
-
typedef struct erts_proc_lock_t_ {
#if ERTS_PROC_LOCK_OWN_IMPL
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -78,7 +76,7 @@ typedef struct erts_proc_lock_t_ {
#else
ErtsProcLocks flags;
#endif
- erts_proc_lock_queues_t *queues;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_t lcnt_main;
erts_lcnt_lock_t lcnt_link;
@@ -232,6 +230,8 @@ void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
+void erts_lcnt_enable_proc_lock_count(int enable);
+
#endif /* ERTS_ENABLE_LOCK_COUNT*/
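
Editor's note: the header change above is the heart of the process-lock rework in this patch. The dynamically attached erts_proc_lock_queues_t object is gone; each lock bit now owns a pointer to its own circular, doubly linked list of waiting thread events directly inside erts_proc_lock_t, which is what the new dequeue_waiter(&p->lock, lock_no) call in transfer_locks() operates on. A minimal sketch of that dequeue, with illustrative types and names rather than the VM's:

    /* Hedged sketch of a per-lock waiter dequeue on a circular, doubly
     * linked list whose head is stored in queue[lock_no]. */
    typedef struct waiter {
        struct waiter *next;   /* circular list, as in erts_tse_t */
        struct waiter *prev;
    } waiter_t;

    static waiter_t *sketch_dequeue(waiter_t *queue[], int lock_no)
    {
        waiter_t *w = queue[lock_no];        /* head of the per-lock list */
        if (!w)
            return NULL;
        if (w->next == w)                    /* last waiter for this lock */
            queue[lock_no] = NULL;
        else {                               /* unlink the head */
            queue[lock_no] = w->next;
            w->next->prev = w->prev;
            w->prev->next = w->next;
        }
        return w;
    }
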
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 8dcdf94c80..87beeafa1a 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -1257,13 +1257,10 @@ void
erts_ptab_init(void)
{
/* ptab_list_continue/2 is a hidden BIF that the original BIF traps to. */
- sys_memset((void *) &ptab_list_continue_export, 0, sizeof(Export));
- ptab_list_continue_export.address = &ptab_list_continue_export.code[3];
- ptab_list_continue_export.code[0] = am_erlang;
- ptab_list_continue_export.code[1] = am_ptab_list_continue;
- ptab_list_continue_export.code[2] = 2;
- ptab_list_continue_export.code[3] = (BeamInstr) em_apply_bif;
- ptab_list_continue_export.code[4] = (BeamInstr) &ptab_list_continue;
+ erts_init_trap_export(&ptab_list_continue_export,
+ am_erlang, am_ptab_list_continue, 2,
+ &ptab_list_continue);
+
}
/*
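
Editor's note: the hunk above replaces the manual Export initialisation with a call to erts_init_trap_export(). The helper itself is defined elsewhere in the patch, so the sketch below is reconstructed from the removed lines here (and from the matching replacements in erl_unicode.c further down); treat the exact signature as an assumption.

    /* Hedged reconstruction of what erts_init_trap_export() presumably does,
     * based on the code it replaces in this hunk. */
    void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint arity,
                               BifFunction bif)
    {
        sys_memset((void *) ep, 0, sizeof(Export));
        ep->address = &ep->code[3];
        ep->code[0] = m;                       /* module atom        */
        ep->code[1] = f;                       /* function atom      */
        ep->code[2] = arity;                   /* arity              */
        ep->code[3] = (BeamInstr) em_apply_bif;
        ep->code[4] = (BeamInstr) bif;         /* C BIF to trap into */
    }
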
diff --git a/erts/emulator/beam/erl_resolv_dns.c b/erts/emulator/beam/erl_resolv_dns.c
deleted file mode 100644
index 9d76fa89f8..0000000000
--- a/erts/emulator/beam/erl_resolv_dns.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Set this to non-zero value if DNS should be used.
- */
-int erl_use_resolver = 1;
diff --git a/erts/emulator/beam/erl_resolv_nodns.c b/erts/emulator/beam/erl_resolv_nodns.c
deleted file mode 100644
index f14ab68e27..0000000000
--- a/erts/emulator/beam/erl_resolv_nodns.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Set this to non-zero value if DNS should be used.
- */
-int erl_use_resolver = 0;
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index 37b186abd9..a490aec734 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -116,54 +116,84 @@ erts_sspa_create(size_t blk_sz, int pa_size)
return data;
}
-static ERTS_INLINE erts_aint_t
+static ERTS_INLINE void
enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
erts_sspa_blk_t *this,
- int want_last)
+ int cinit)
{
- erts_aint_t ilast, itmp;
+ erts_aint_t itmp;
+ erts_sspa_blk_t *enq;
erts_atomic_init_nob(&this->next_atmc, ERTS_AINT_NULL);
-
/* Enqueue at end of list... */
- ilast = erts_atomic_read_nob(&chdr->tail.data.last);
- while (1) {
- erts_sspa_blk_t *last = (erts_sspa_blk_t *) ilast;
- itmp = erts_atomic_cmpxchg_mb(&last->next_atmc,
- (erts_aint_t) this,
- ERTS_AINT_NULL);
- if (itmp == ERTS_AINT_NULL)
- break;
- ilast = itmp;
+ enq = (erts_sspa_blk_t *) erts_atomic_read_nob(&chdr->tail.data.last);
+ itmp = erts_atomic_cmpxchg_relb(&enq->next_atmc,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ /* We are required to move last pointer */
+#ifdef DEBUG
+ ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->next_atmc));
+ ASSERT(((erts_aint_t) enq)
+ == erts_atomic_xchg_relb(&chdr->tail.data.last,
+ (erts_aint_t) this));
+#else
+ erts_atomic_set_relb(&chdr->tail.data.last, (erts_aint_t) this);
+#endif
}
+ else {
+ /*
+ * We *need* to insert the element somewhere in between the
+ * last element we read earlier and the actual last element.
+ */
+ int i = cinit;
- /* Move last pointer forward... */
- while (1) {
- erts_aint_t itmp;
- if (want_last) {
- if (erts_atomic_read_rb(&this->next_atmc) != ERTS_AINT_NULL) {
- /* Someone else will move it forward */
- return erts_atomic_read_nob(&chdr->tail.data.last);
+ while (1) {
+ erts_aint_t itmp2;
+ erts_atomic_set_nob(&this->next_atmc, itmp);
+ itmp2 = erts_atomic_cmpxchg_relb(&enq->next_atmc,
+ (erts_aint_t) this,
+ itmp);
+ if (itmp == itmp2)
+ break; /* inserted this */
+ if ((i & 1) == 0)
+ itmp = itmp2;
+ else {
+ enq = (erts_sspa_blk_t *) itmp;
+ itmp = erts_atomic_read_acqb(&enq->next_atmc);
+ ASSERT(itmp != ERTS_AINT_NULL);
}
+ i++;
}
- else {
- if (erts_atomic_read_nob(&this->next_atmc) != ERTS_AINT_NULL) {
- /* Someone else will move it forward */
- return ERTS_AINT_NULL;
- }
+ }
+}
+
+static ERTS_INLINE erts_aint_t
+check_insert_marker(erts_sspa_chunk_header_t *chdr, erts_aint_t ilast)
+{
+ if (!chdr->head.used_marker
+ && chdr->head.unref_end == (erts_sspa_blk_t *) ilast) {
+ erts_aint_t itmp;
+ erts_sspa_blk_t *last = (erts_sspa_blk_t *) ilast;
+
+ erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
+ itmp = erts_atomic_cmpxchg_relb(&last->next_atmc,
+ (erts_aint_t) &chdr->tail.data.marker,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ ilast = (erts_aint_t) &chdr->tail.data.marker;
+ chdr->head.used_marker = !0;
+ erts_atomic_set_relb(&chdr->tail.data.last, ilast);
}
- itmp = erts_atomic_cmpxchg_mb(&chdr->tail.data.last,
- (erts_aint_t) this,
- ilast);
- if (ilast == itmp)
- return want_last ? (erts_aint_t) this : ERTS_AINT_NULL;
- ilast = itmp;
}
+ return ilast;
}
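
Editor's note: the rewritten enqueue_remote_managed_thread() no longer loops chasing the tail pointer. It attempts a single cmpxchg on the stale tail's next pointer; only on contention does it link the block in somewhere between that stale tail and the true end, with cinit (set to chnk_cix - cix by the caller, see the header hunk below) staggering how far each producer walks before retrying. The happy path boils down to the following hedged, simplified sketch in portable C11 atomics rather than the erts_atomic API:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct blk { _Atomic(struct blk *) next; } blk_t;

    /* Returns 1 if blk became the new tail, 0 if another producer won the
     * race (in which case 'expected' holds the element that beat us and the
     * real code falls back to inserting in between). */
    static int sketch_try_append(_Atomic(blk_t *) *lastp, blk_t *blk)
    {
        blk_t *last = atomic_load_explicit(lastp, memory_order_relaxed);
        blk_t *expected = NULL;

        atomic_store_explicit(&blk->next, NULL, memory_order_relaxed);
        if (atomic_compare_exchange_strong_explicit(&last->next, &expected, blk,
                                                    memory_order_release,
                                                    memory_order_relaxed)) {
            /* We own the end of the list; publish the new tail. */
            atomic_store_explicit(lastp, blk, memory_order_release);
            return 1;
        }
        return 0;
    }
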
void
-erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr, erts_sspa_blk_t *blk)
+erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *blk,
+ int cinit)
{
int um_refc_ix = 0;
int managed_thread = erts_thr_progress_is_managed_thread();
@@ -180,7 +210,7 @@ erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr, erts_sspa_blk_t *blk)
}
}
- (void) enqueue_remote_managed_thread(chdr, blk, 0);
+ enqueue_remote_managed_thread(chdr, blk, cinit);
if (!managed_thread)
erts_atomic_dec_relb(&chdr->tail.data.um_refc[um_refc_ix]);
@@ -208,24 +238,17 @@ fetch_remote(erts_sspa_chunk_header_t *chdr, int max)
int um_refc_ix;
chdr->head.next.thr_progress_reached = 1;
um_refc_ix = chdr->head.next.um_refc_ix;
- if (erts_atomic_read_acqb(&chdr->tail.data.um_refc[um_refc_ix]) == 0) {
+ if (erts_atomic_read_nob(&chdr->tail.data.um_refc[um_refc_ix]) == 0) {
+
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
/* Move unreferenced end pointer forward... */
chdr->head.unref_end = chdr->head.next.unref_end;
- if (!chdr->head.used_marker
- && chdr->head.unref_end == (erts_sspa_blk_t *) ilast) {
- /* Need to equeue marker */
- chdr->head.used_marker = 1;
- ilast = enqueue_remote_managed_thread(chdr,
- &chdr->tail.data.marker,
- 1);
- }
+ ilast = check_insert_marker(chdr, ilast);
- if (chdr->head.unref_end == (erts_sspa_blk_t *) ilast)
- ERTS_THR_MEMORY_BARRIER;
- else {
+ if (chdr->head.unref_end != (erts_sspa_blk_t *) ilast) {
chdr->head.next.unref_end = (erts_sspa_blk_t *) ilast;
chdr->head.next.thr_progress = erts_thr_progress_later(NULL);
erts_atomic32_set_relb(&chdr->tail.data.um_refc_ix,
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index d36066c399..9144c73acd 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -142,7 +142,8 @@ check_local_list(erts_sspa_chunk_header_t *chdr)
erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
int pa_size);
void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
- erts_sspa_blk_t *blk);
+ erts_sspa_blk_t *blk,
+ int cinit);
erts_sspa_blk_t *erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
erts_sspa_blk_t *old_res);
@@ -216,7 +217,7 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
chdr = &chnk->aligned.header;
if (chnk_cix != cix) {
/* Remote chunk */
- erts_sspa_remote_free(chdr, blk);
+ erts_sspa_remote_free(chdr, blk, chnk_cix - cix);
}
else {
/* Local chunk */
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index b86e3e16ca..6a04c6fc0b 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -105,7 +105,7 @@ unsigned tag_val_def(Wterm x)
break;
}
}
- sprintf(msg, "tag_val_def: %#lx", (unsigned long) x);
+ erts_snprintf(msg, sizeof(msg), "tag_val_def: %#lx", (unsigned long) x);
et_abort(msg, file, line);
#undef file
#undef line
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index c1b4408c06..3272a5326d 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -386,7 +386,7 @@ static int clock_resolution;
/*
** The clock resolution should really be the resolution of the
** time function in use, which on most platforms
-** is 1. On VxWorks the resolution shold be
+** is 1. On VxWorks the resolution should be
** the number of ticks per second (or 1, which would work nicely to).
**
** Setting lower resolutions is mostly interesting when timers are used
@@ -754,6 +754,11 @@ int univ_to_seconds(Sint year, Sint month, Sint day, Sint hour, Sint minute, Sin
return 1;
}
+#if defined(HAVE_TIME2POSIX) && defined(HAVE_DECL_TIME2POSIX) && \
+ !HAVE_DECL_TIME2POSIX
+extern time_t time2posix(time_t);
+#endif
+
int
local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst)
@@ -794,7 +799,7 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
refuses to give us a DST time, we simulate the Linux/Solaris
behaviour of giving the same data as if is_dst was not set. */
t.tm_isdst = 0;
- if (erl_mktime(&the_clock, &t)) {
+ if (erl_mktime(&the_clock, &t) < 0) {
/* Failed anyway, something else is bad - will be a badarg */
return 0;
}
@@ -803,6 +808,11 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
return 0;
}
}
+
+#ifdef HAVE_TIME2POSIX
+ the_clock = time2posix(the_clock);
+#endif
+
#ifdef HAVE_GMTIME_R
tm = gmtime_r(&the_clock, &tmbuf);
#else
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 991e573c14..41f33efa79 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2140,187 +2140,6 @@ void save_calls(Process *p, Export *e)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
- /* Warning! This is an Optimization.
- *
- * If neither meta trace is active nor process trace flags then
- * no tracing will occur. Doing the whole else branch will
- * also do nothing, only slower.
- */
- Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, args, I);
- } else {
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
- int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
- int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr *cp = p->cp;
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->cp = I;
- }
- if (global || local) {
- flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &ERTS_TRACER_PROC(p));
- }
- if (meta) {
- flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
- &meta_tracer_pid);
- }
- if (time) {
- BpDataTime *bdt = NULL;
- BeamInstr *pc = (BeamInstr *)ep->code+3;
-
- bdt = (BpDataTime *) erts_get_time_break(p, pc);
- ASSERT(bdt);
-
- if (!bdt->pause) {
- erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
- }
- }
- /* Restore original continuation pointer (if changed). */
- p->cp = cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
- /* Maybe advance cp to skip trace stack frames */
- for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else break;
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &ERTS_TRACER_PROC(p));
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- UnUseTmpHeapNoproc(3);
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
/* Sends trace message:
* {trace_ts, Pid, What, Msg, Timestamp}
* or {trace, Pid, What, Msg}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 0d6df699ff..50fb27aab0 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -123,16 +123,19 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
- struct binary* match_prog_set, struct binary *meta_match_prog_set,
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+ struct binary* match_prog_set,
+ struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid);
+ Eterm meta_tracer_pid, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct binary **match_spec,
struct binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid);
+int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
#endif /* ERL_TRACE_H__ */
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 6d5eae73b0..51559aea1c 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -77,66 +77,25 @@ void erts_init_unicode(void)
{
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
/* Non visual BIFs to trap to. */
- memset(&characters_to_utf8_trap_exp, 0, sizeof(Export));
- characters_to_utf8_trap_exp.address =
- &characters_to_utf8_trap_exp.code[3];
- characters_to_utf8_trap_exp.code[0] = am_erlang;
- characters_to_utf8_trap_exp.code[1] =
- am_atom_put("characters_to_utf8_trap",23);
- characters_to_utf8_trap_exp.code[2] = 3;
- characters_to_utf8_trap_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_utf8_trap_exp.code[4] =
- (BeamInstr) &characters_to_utf8_trap;
-
- memset(&characters_to_list_trap_1_exp, 0, sizeof(Export));
- characters_to_list_trap_1_exp.address =
- &characters_to_list_trap_1_exp.code[3];
- characters_to_list_trap_1_exp.code[0] = am_erlang;
- characters_to_list_trap_1_exp.code[1] =
- am_atom_put("characters_to_list_trap_1",25);
- characters_to_list_trap_1_exp.code[2] = 3;
- characters_to_list_trap_1_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_1_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_1;
-
- memset(&characters_to_list_trap_2_exp, 0, sizeof(Export));
- characters_to_list_trap_2_exp.address =
- &characters_to_list_trap_2_exp.code[3];
- characters_to_list_trap_2_exp.code[0] = am_erlang;
- characters_to_list_trap_2_exp.code[1] =
- am_atom_put("characters_to_list_trap_2",25);
- characters_to_list_trap_2_exp.code[2] = 3;
- characters_to_list_trap_2_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_2_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_2;
-
-
- memset(&characters_to_list_trap_3_exp, 0, sizeof(Export));
- characters_to_list_trap_3_exp.address =
- &characters_to_list_trap_3_exp.code[3];
- characters_to_list_trap_3_exp.code[0] = am_erlang;
- characters_to_list_trap_3_exp.code[1] =
- am_atom_put("characters_to_list_trap_3",25);
- characters_to_list_trap_3_exp.code[2] = 3;
- characters_to_list_trap_3_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_3_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_3;
-
- memset(&characters_to_list_trap_4_exp, 0, sizeof(Export));
- characters_to_list_trap_4_exp.address =
- &characters_to_list_trap_4_exp.code[3];
- characters_to_list_trap_4_exp.code[0] = am_erlang;
- characters_to_list_trap_4_exp.code[1] =
- am_atom_put("characters_to_list_trap_4",25);
- characters_to_list_trap_4_exp.code[2] = 1;
- characters_to_list_trap_4_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_4_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_4;
+ erts_init_trap_export(&characters_to_utf8_trap_exp,
+ am_erlang, am_atom_put("characters_to_utf8_trap",23), 3,
+ &characters_to_utf8_trap);
+
+ erts_init_trap_export(&characters_to_list_trap_1_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_1",25), 3,
+ &characters_to_list_trap_1);
+
+ erts_init_trap_export(&characters_to_list_trap_2_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_2",25), 3,
+ &characters_to_list_trap_2);
+
+ erts_init_trap_export(&characters_to_list_trap_3_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_3",25), 3,
+ &characters_to_list_trap_3);
+
+ erts_init_trap_export(&characters_to_list_trap_4_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_4",25), 1,
+ &characters_to_list_trap_4);
c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
c_to_l_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_list_int,2);
@@ -2027,12 +1986,14 @@ BIF_RETTYPE binary_to_existing_atom_2(BIF_ALIST_2)
* string routines, that will certainly fail on some OS.
*/
-char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int allow_empty)
+char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_size, ErtsAlcType_t alloc_type, int allow_empty, int allow_atom, Sint *used)
{
int encoding = erts_get_native_filename_encoding();
char* name_buf = NULL;
- if (is_atom(name) || is_list(name) || (allow_empty && is_nil(name))) {
+ if ((allow_atom && is_atom(name)) ||
+ is_list(name) ||
+ (allow_empty && is_nil(name))) {
Sint need;
if ((need = erts_native_filename_need(name,encoding)) < 0) {
return NULL;
@@ -2042,7 +2003,13 @@ char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int
} else {
++need;
}
- name_buf = (char *) erts_alloc(alloc_type, need);
+ if (used)
+ *used = (Sint) need;
+ if (need > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, need);
+ } else {
+ name_buf = statbuf;
+ }
erts_native_filename_put(name,encoding,(byte *)name_buf);
name_buf[need-1] = 0;
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -2058,14 +2025,26 @@ char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int
bytes = erts_get_aligned_binary_bytes(name, &temp_alloc);
if (encoding != ERL_FILENAME_WIN_WCHAR) {
/*Add 0 termination only*/
- name_buf = (char *) erts_alloc(alloc_type, size+1);
+ if (used)
+ *used = (Sint) size+1;
+ if (size+1 > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, size+1);
+ } else {
+ name_buf = statbuf;
+ }
memcpy(name_buf,bytes,size);
name_buf[size]=0;
} else if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK ||
erts_get_user_requested_filename_encoding() == ERL_FILENAME_LATIN1) {
byte *p;
/* What to do now? Maybe latin1, so just take byte for byte instead */
- name_buf = (char *) erts_alloc(alloc_type, (size+1)*2);
+ if (used)
+ *used = (Sint) (size+1)*2;
+ if ((size+1)*2 > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, (size+1)*2);
+ } else {
+ name_buf = statbuf;
+ }
p = (byte *) name_buf;
while (size--) {
*p++ = *bytes++;
@@ -2074,7 +2053,13 @@ char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int
*p++ = 0;
*p++ = 0;
} else { /* WIN_WCHAR and valid UTF8 */
- name_buf = (char *) erts_alloc(alloc_type, (num_chars+1)*2);
+ if (used)
+ *used = (Sint) (num_chars+1)*2;
+ if ((num_chars+1)*2 > statbuf_size) {
+ name_buf = (char *) erts_alloc(alloc_type, (num_chars+1)*2);
+ } else {
+ name_buf = statbuf;
+ }
erts_copy_utf8_to_utf16_little((byte *) name_buf, bytes, num_chars);
name_buf[num_chars*2] = 0;
name_buf[num_chars*2+1] = 0;
@@ -2086,6 +2071,71 @@ char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int
return name_buf;
}
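
Editor's note: the new signature lets the caller pass a small stack buffer; the converted name is only heap-allocated when it does not fit in statbuf_size, and *used reports how many bytes were written. A hypothetical caller sketch (the variable names, buffer size and the choice of ERTS_ALC_T_TMP are assumptions, not taken from the patch):

    /* Hedged usage sketch: short names stay on the stack and need no
     * erts_free(); only long names fall back to the allocator. */
    char stackbuf[256];
    Sint used;
    char *native = erts_convert_filename_to_native(name_term,
                                                   stackbuf, sizeof(stackbuf),
                                                   ERTS_ALC_T_TMP,
                                                   0,  /* allow_empty */
                                                   1,  /* allow_atom  */
                                                   &used);
    if (!native)
        goto badarg;            /* term could not be encoded */

    /* ... use 'native' (used bytes, NUL/WCHAR terminated) ... */

    if (native != stackbuf)
        erts_free(ERTS_ALC_T_TMP, native);
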
+static int filename_len_16bit(byte *str)
+{
+ byte *p = str;
+ while(*p != '\0' || p[1] != '\0') {
+ p += 2;
+ }
+ return (p - str);
+}
+Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
+{
+ Uint size,num_chars;
+ Eterm *hp;
+ byte *err_pos;
+ Uint num_built; /* characters */
+ Uint num_eaten; /* bytes */
+ Eterm ret;
+ int mac = 0;
+
+ switch (erts_get_native_filename_encoding()) {
+ case ERL_FILENAME_LATIN1:
+ goto noconvert;
+ case ERL_FILENAME_UTF8_MAC:
+ mac = 1;
+ case ERL_FILENAME_UTF8:
+ size = strlen((char *) bytes);
+ if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
+ goto noconvert;
+ }
+ num_built = 0;
+ num_eaten = 0;
+ if (mac) {
+ ret = do_utf8_to_list_normalize(p, num_chars, bytes, size);
+ } else {
+ ret = do_utf8_to_list(p, num_chars, bytes, size, num_chars, &num_built, &num_eaten, NIL);
+ }
+ return ret;
+ case ERL_FILENAME_WIN_WCHAR:
+ size=filename_len_16bit(bytes);
+ if ((size % 2) != 0) { /* Panic fixup to avoid crashing the emulator */
+ size--;
+ hp = HAlloc(p, size+2);
+ ret = CONS(hp,make_small((Uint) bytes[size]),NIL);
+ hp += 2;
+ } else {
+ hp = HAlloc(p, size);
+ ret = NIL;
+ }
+ bytes += size-1;
+ while (size > 0) {
+ Uint x = ((Uint) *bytes--) << 8;
+ x |= ((Uint) *bytes--);
+ size -= 2;
+ ret = CONS(hp,make_small(x),ret);
+ hp += 2;
+ }
+ return ret;
+ default:
+ goto noconvert;
+ }
+ noconvert:
+ size = strlen((char *) bytes);
+ hp = HAlloc(p, 2 * size);
+ return erts_bin_bytes_to_list(NIL, hp, bytes, size, 0);
+}
+
Sint erts_native_filename_need(Eterm ioterm, int encoding)
{
@@ -2619,3 +2669,4 @@ BIF_RETTYPE file_native_name_encoding_0(BIF_ALIST_0)
BIF_RET(am_undefined);
}
}
+
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 8425534df5..a2064bd8a3 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -145,6 +145,7 @@ erts_smp_current_interval_acqb(erts_interval_t *icp)
*/
void erts_silence_warn_unused_result(long unused);
+
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
int list_length(Eterm);
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 5dc307e383..c962955de9 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -29,19 +29,10 @@
/* #define FORCE_HEAP_FRAGS */
-#if defined(HYBRID)
-/* # define CHECK_FOR_HOLES */
-#endif
-
#if defined(DEBUG) && !defined(CHECK_FOR_HOLES) && !defined(__WIN32__)
# define CHECK_FOR_HOLES
#endif
-#if defined(HYBRID)
-/* # define INCREMENTAL 1 */ /* Incremental garbage collection */
-/* # define INC_TIME_BASED 1 */ /* Time-based incremental GC (vs Work-based) */
-#endif
-
#define BEAM 1
#define EMULATOR "BEAM"
#define SEQ_TRACE 1
@@ -70,16 +61,6 @@
#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
#define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */
-#ifdef HYBRID
-# define SH_DEFAULT_SIZE 2629425 /* default message area min size */
-#endif
-
-#ifdef INCREMENTAL
-# define INC_NoPAGES 256 /* Number of pages in the old generation */
-# define INC_PAGESIZE 32768 /* The size of each page */
-# define INC_STORAGE_SIZE 1024 /* The size of gray stack and similar */
-#endif
-
#define CP_SIZE 1
#define ErtsHAllocLockCheck(P) \
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index c1024dafc4..e3ebbb84f4 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -639,9 +639,9 @@ provider erlang {
* Entry into the efile_drv.c file I/O driver
*
* For a list of command numbers used by this driver, see the section
- * "Guide to probe arguments" in ../../../README.md. That section
- * also contains explanation of the various integer and string
- * arguments that may be present when any particular probe fires.
+ * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md.
+ * That section also contains explanation of the various integer and
+ * string arguments that may be present when any particular probe fires.
*
* NOTE: Not all Linux platforms (using SystemTap) can support
* arguments beyond arg9.
@@ -717,6 +717,2916 @@ provider erlang {
* inside the same I/O worker pool thread.
* For R14B03's source, see erl_async.c lines 302-317.
*/
+
+/*
+ * The set of probes for use by Erlang code ... moved to here from
+ * lib/runtime_tools/c_src/dtrace_user.d until a more portable solution
+ * is found. This move pollutes the Erlang VM with functions that are
+ * only used by the NIF shared library code in
+ * lib/runtime_tools/c_src/dyntrace.c. This is necessary in order
+ * to work around an issue on several platforms, including
+ * SystemTap 1.3 and Solaris. The Solaris issue is discussed in the
+ * `dtrace-discuss` mailing list thread on 01 Dec 2011 17:58:15.
+ */
+ /**
+ * If you use only a single probe, but embed that probe in many
+ * different places in your code, and that probe fires 100K or more
+ * times per second, then it *will* hurt when you have to enable
+ * that probe.
+ *
+ * However, if you have many different probes, then you can ensure
+ * that any probe on a hot code path uses separate probe(s) from
+ * everyone else ... and you can then enable many non-hot probes in
+ * production without worrying about creating too much measurement
+ * overhead.
+ *
+ * In an ideal world, we would use the libusdt library to be able
+ * to create arbitrary DTrace probes with more convenient and
+ * meaningful names than "user_trace-n7". But libusdt doesn't
+ * (yet) support all of the platforms that DTrace does, and there
+ * is no known (yet) equivalent for SystemTap.
+ */
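
Editor's note: the comment above is the operational guidance for this block of numbered probes: give hot call sites their own user_trace__nN probe so enabling the rest stays cheap. In C the probes would be fired through the macros that dtrace -h generates from this file; the sketch below is an illustration only, and the generated header name and macro names (ERLANG_USER_TRACE_N7 / _ENABLED) are assumptions based on the usual USDT naming convention.

    #include "erlang_dtrace.h"   /* generated by dtrace -h (assumed name) */

    static void sketch_fire_probe(char *proc, char *tag)
    {
        /* Only marshal the ten arguments when this specific probe is
         * enabled; all other probes stay at essentially zero cost. */
        if (ERLANG_USER_TRACE_N7_ENABLED()) {
            ERLANG_USER_TRACE_N7(proc, tag,
                                 1, 2, 3, 4,            /* i1..i4 */
                                 "a", "b", "c", "d");   /* s1..s4 */
        }
    }
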
+ probe user_trace__n0(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n1(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n2(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n3(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n4(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n5(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n6(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n7(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n8(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n9(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n10(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n11(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n12(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n13(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n14(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n15(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n16(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n17(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n18(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n19(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n20(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n21(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n22(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n23(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n24(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n25(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n26(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n27(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n28(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n29(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n30(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n31(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n32(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n33(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n34(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n35(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n36(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n37(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n38(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n39(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n40(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n41(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n42(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n43(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n44(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n45(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n46(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n47(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n48(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n49(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n50(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n51(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n52(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n53(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n54(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n55(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n56(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n57(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n58(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n59(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n60(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n61(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n62(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n63(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n64(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n65(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n66(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n67(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n68(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n69(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n70(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n71(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n72(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n73(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n74(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n75(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n76(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n77(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n78(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n79(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n80(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n81(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n82(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n83(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n84(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n85(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n86(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n87(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n88(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n89(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n90(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n91(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n92(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n93(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n94(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n95(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n96(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n97(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n98(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n99(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n100(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n101(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n102(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n103(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n104(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n105(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n106(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n107(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n108(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n109(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n110(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n111(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n112(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n113(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n114(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n115(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n116(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n117(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n118(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n119(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n120(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n121(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n122(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n123(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n124(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n125(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n126(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n127(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n128(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n129(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n130(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n131(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n132(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n133(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n134(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n135(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n136(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n137(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n138(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n139(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n140(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n141(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n142(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n143(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n144(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n145(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n146(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n147(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n148(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n149(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n150(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n151(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n152(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n153(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n154(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n155(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n156(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n157(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n158(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n159(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n160(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n161(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n162(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n163(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n164(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n165(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n166(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n167(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n168(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n169(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n170(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n171(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n172(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n173(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n174(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n175(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n176(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n177(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n178(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n179(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n180(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n181(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n182(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n183(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n184(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n185(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n186(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n187(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n188(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n189(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n190(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n191(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n192(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n193(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n194(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n195(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n196(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n197(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n198(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n199(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n200(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n201(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n202(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n203(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n204(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n205(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n206(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n207(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n208(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n209(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n210(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n211(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n212(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n213(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n214(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n215(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n216(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n217(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n218(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n219(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n220(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n221(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n222(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n223(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n224(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n225(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n226(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n227(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n228(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n229(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n230(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n231(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n232(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n233(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n234(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n235(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n236(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n237(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n238(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n239(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n240(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n241(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n242(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n243(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n244(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n245(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n246(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n247(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n248(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n249(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n250(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n251(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n252(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n253(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n254(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n255(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n256(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n257(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n258(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n259(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n260(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n261(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n262(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n263(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n264(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n265(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n266(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n267(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n268(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n269(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n270(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n271(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n272(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n273(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n274(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n275(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n276(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n277(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n278(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n279(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n280(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n281(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n282(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n283(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n284(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n285(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n286(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n287(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n288(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n289(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n290(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n291(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n292(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n293(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n294(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n295(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n296(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n297(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n298(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n299(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n300(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n301(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n302(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n303(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n304(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n305(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n306(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n307(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n308(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n309(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n310(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n311(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n312(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n313(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n314(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n315(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n316(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n317(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n318(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n319(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n320(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n321(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n322(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n323(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n324(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n325(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n326(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n327(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n328(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n329(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n330(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n331(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n332(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n333(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n334(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n335(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n336(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n337(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n338(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n339(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n340(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n341(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n342(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n343(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n344(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n345(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n346(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n347(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n348(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n349(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n350(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n351(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n352(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n353(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n354(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n355(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n356(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n357(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n358(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n359(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n360(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n361(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n362(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n363(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n364(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n365(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n366(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n367(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n368(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n369(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n370(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n371(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n372(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n373(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n374(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n375(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n376(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n377(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n378(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n379(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n380(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n381(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n382(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n383(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n384(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n385(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n386(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n387(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n388(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n389(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n390(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n391(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n392(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n393(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n394(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n395(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n396(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n397(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n398(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n399(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n400(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n401(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n402(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n403(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n404(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n405(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n406(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n407(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n408(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n409(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n410(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n411(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n412(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n413(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n414(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n415(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n416(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n417(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n418(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n419(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n420(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n421(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n422(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n423(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n424(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n425(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n426(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n427(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n428(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n429(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n430(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n431(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n432(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n433(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n434(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n435(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n436(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n437(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n438(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n439(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n440(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n441(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n442(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n443(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n444(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n445(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n446(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n447(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n448(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n449(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n450(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n451(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n452(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n453(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n454(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n455(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n456(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n457(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n458(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n459(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n460(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n461(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n462(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n463(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n464(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n465(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n466(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n467(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n468(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n469(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n470(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n471(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n472(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n473(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n474(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n475(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n476(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n477(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n478(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n479(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n480(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n481(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n482(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n483(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n484(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n485(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n486(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n487(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n488(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n489(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n490(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n491(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n492(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n493(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n494(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n495(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n496(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n497(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n498(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n499(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n500(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n501(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n502(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n503(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n504(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n505(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n506(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n507(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n508(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n509(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n510(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n511(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n512(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n513(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n514(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n515(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n516(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n517(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n518(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n519(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n520(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n521(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n522(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n523(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n524(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n525(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n526(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n527(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n528(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n529(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n530(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n531(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n532(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n533(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n534(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n535(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n536(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n537(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n538(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n539(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n540(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n541(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n542(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n543(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n544(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n545(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n546(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n547(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n548(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n549(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n550(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n551(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n552(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n553(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n554(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n555(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n556(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n557(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n558(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n559(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n560(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n561(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n562(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n563(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n564(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n565(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n566(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n567(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n568(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n569(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n570(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n571(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n572(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n573(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n574(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n575(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n576(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n577(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n578(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n579(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n580(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n581(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n582(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n583(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n584(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n585(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n586(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n587(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n588(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n589(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n590(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n591(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n592(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n593(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n594(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n595(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n596(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n597(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n598(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n599(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n600(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n601(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n602(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n603(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n604(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n605(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n606(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n607(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n608(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n609(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n610(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n611(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n612(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n613(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n614(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n615(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n616(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n617(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n618(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n619(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n620(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n621(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n622(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n623(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n624(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n625(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n626(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n627(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n628(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n629(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n630(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n631(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n632(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n633(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n634(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n635(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n636(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n637(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n638(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n639(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n640(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n641(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n642(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n643(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n644(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n645(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n646(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n647(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n648(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n649(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n650(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n651(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n652(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n653(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n654(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n655(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n656(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n657(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n658(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n659(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n660(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n661(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n662(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n663(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n664(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n665(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n666(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n667(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n668(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n669(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n670(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n671(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n672(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n673(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n674(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n675(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n676(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n677(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n678(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n679(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n680(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n681(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n682(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n683(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n684(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n685(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n686(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n687(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n688(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n689(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n690(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n691(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n692(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n693(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n694(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n695(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n696(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n697(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n698(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n699(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n700(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n701(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n702(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n703(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n704(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n705(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n706(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n707(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n708(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n709(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n710(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n711(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n712(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n713(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n714(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n715(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n716(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n717(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n718(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n719(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n720(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n721(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n722(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n723(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n724(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n725(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n726(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n727(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n728(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n729(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n730(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n731(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n732(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n733(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n734(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n735(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n736(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n737(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n738(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n739(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n740(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n741(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n742(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n743(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n744(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n745(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n746(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n747(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n748(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n749(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n750(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n751(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n752(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n753(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n754(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n755(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n756(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n757(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n758(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n759(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n760(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n761(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n762(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n763(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n764(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n765(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n766(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n767(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n768(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n769(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n770(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n771(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n772(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n773(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n774(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n775(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n776(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n777(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n778(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n779(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n780(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n781(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n782(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n783(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n784(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n785(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n786(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n787(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n788(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n789(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n790(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n791(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n792(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n793(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n794(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n795(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n796(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n797(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n798(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n799(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n800(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n801(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n802(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n803(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n804(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n805(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n806(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n807(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n808(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n809(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n810(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n811(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n812(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n813(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n814(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n815(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n816(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n817(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n818(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n819(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n820(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n821(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n822(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n823(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n824(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n825(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n826(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n827(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n828(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n829(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n830(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n831(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n832(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n833(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n834(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n835(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n836(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n837(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n838(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n839(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n840(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n841(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n842(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n843(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n844(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n845(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n846(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n847(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n848(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n849(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n850(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n851(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n852(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n853(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n854(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n855(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n856(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n857(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n858(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n859(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n860(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n861(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n862(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n863(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n864(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n865(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n866(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n867(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n868(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n869(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n870(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n871(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n872(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n873(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n874(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n875(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n876(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n877(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n878(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n879(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n880(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n881(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n882(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n883(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n884(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n885(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n886(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n887(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n888(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n889(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n890(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n891(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n892(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n893(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n894(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n895(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n896(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n897(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n898(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n899(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n900(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n901(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n902(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n903(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n904(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n905(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n906(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n907(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n908(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n909(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n910(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n911(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n912(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n913(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n914(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n915(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n916(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n917(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n918(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n919(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n920(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n921(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n922(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n923(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n924(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n925(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n926(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n927(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n928(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n929(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n930(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n931(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n932(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n933(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n934(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n935(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n936(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n937(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n938(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n939(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n940(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n941(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n942(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n943(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n944(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n945(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n946(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n947(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n948(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n949(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ probe user_trace__n950(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+ /**
+ * Send a single string to a probe.
+ * This probe is deprecated.
+ *
+ * @param message, the NUL-terminated string to send
+ */
+ probe user_trace__s1(char* message);
+
+ /**
+ * Multi-purpose probe: up to 4 NUL-terminated strings and 4
+ * 64-bit integer arguments.
+ * This probe is deprecated.
+ *
+ * @param proc, the PID (string form) of the sending process
+ * @param user_tag, the user tag of the sender
+ * @param i1, integer
+ * @param i2, integer
+ * @param i3, integer
+ * @param i4, integer
+ * @param s1, string/iolist. D's arg6 is NULL if not given by Erlang
+ * @param s2, string/iolist. D's arg7 is NULL if not given by Erlang
+ * @param s3, string/iolist. D's arg8 is NULL if not given by Erlang
+ * @param s4, string/iolist. D's arg9 is NULL if not given by Erlang
+ */
+ probe user_trace__i4s4(char *proc, char *user_tag,
+ int i1, int i2, int i3, int i4,
+ char *s1, char *s2, char *s3, char *s4);
+
};
#pragma D attributes Evolving/Evolving/Common provider erlang provider
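For orientation only: on a DTrace-enabled build this provider file is fed through dtrace -h, and emulator C code fires the probes via the generated macros. The sketch below is not taken from this change; the macro and header names merely assume the conventional dtrace(1) -h naming for provider erlang, and the PID string is a made-up example.

    /* Hypothetical call site, guarded by the is-enabled check so argument
     * marshalling is skipped when no consumer is attached. */
    #include "erlang_dtrace.h"   /* assumed name of the dtrace -h output */

    static void fire_user_trace_example(void)
    {
        char proc[] = "<0.42.0>";      /* sending process, string form */
        char tag[]  = "example-tag";   /* user tag */

        if (ERLANG_USER_TRACE_I4S4_ENABLED()) {
            ERLANG_USER_TRACE_I4S4(proc, tag,
                                   1, 2, 3, 4,              /* i1..i4 */
                                   "s1", NULL, NULL, NULL); /* s2..s4 unset */
        }
    }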
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index fb0ee99119..6b5121f917 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -32,98 +32,162 @@
#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
-static IndexTable export_table; /* Not locked. */
-static Hash secondary_export_table; /* Locked. */
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
-#include "erl_smp.h"
+static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */
-static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table. */
+static erts_smp_atomic_t total_entries_bytes;
-#define export_read_lock() erts_smp_rwmtx_rlock(&export_table_lock)
-#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
-#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
-#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
+#include "erl_smp.h"
+
+/* This lock protects the staging export table from concurrent access
+ * AND it protects the staging table from becoming active.
+ */
+erts_smp_mtx_t export_staging_lock;
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
+struct export_entry
+{
+ IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+ Export* ep;
+};
+
+/* Helper struct that brings things together in one allocation
+*/
+struct export_blob
+{
+ Export exp;
+ struct export_entry entryv[ERTS_NUM_CODE_IX];
+ /* Note that entryv is not indexed by "code_ix".
+ */
+};
+
+/* Helper struct only used as template
+*/
+struct export_templ
+{
+ struct export_entry entry;
+ Export exp;
+};
+
+static struct export_blob* entry_to_blob(struct export_entry* ee)
+{
+ return (struct export_blob*)
+ ((char*)ee->ep - offsetof(struct export_blob,exp));
+}
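The blob layout above is the core trick: one allocation holds the Export plus ERTS_NUM_CODE_IX index-table entries, and any entry can be mapped back to its enclosing blob because every entry's ep points at the embedded exp member. A minimal, standalone illustration of the same offsetof() recovery pattern (generic names, not emulator code):

    #include <stddef.h>
    #include <stdio.h>

    struct item { int payload; };

    struct container {
        int         header;   /* something before the member, so the offset is non-zero */
        struct item exp;
    };

    /* Recover the enclosing container from a pointer to its 'exp' member. */
    static struct container* item_to_container(struct item* p)
    {
        return (struct container*)((char*)p - offsetof(struct container, exp));
    }

    int main(void)
    {
        struct container c = { 7, { 42 } };
        printf("%d\n", item_to_container(&c.exp)->header);  /* prints 7 */
        return 0;
    }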
+
void
export_info(int to, void *to_arg)
{
#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- export_read_lock();
+ export_staging_lock();
#endif
- index_info(to, to_arg, &export_table);
- hash_info(to, to_arg, &secondary_export_table);
+ index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
+ hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
#ifdef ERTS_SMP
if (lock)
- export_read_unlock();
+ export_staging_unlock();
#endif
}
static HashValue
-export_hash(Export* x)
+export_hash(struct export_entry* ee)
{
+ Export* x = ee->ep;
return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
}
static int
-export_cmp(Export* tmpl, Export* obj)
+export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
+ Export* tmpl = tmpl_e->ep;
+ Export* obj = obj_e->ep;
return !(tmpl->code[0] == obj->code[0] &&
tmpl->code[1] == obj->code[1] &&
tmpl->code[2] == obj->code[2]);
}
-static Export*
-export_alloc(Export* tmpl)
+static struct export_entry*
+export_alloc(struct export_entry* tmpl_e)
{
- Export* obj = (Export*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(Export));
-
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->slot.index = -1;
- obj->address = obj->code+3;
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
- obj->match_prog_set = NULL;
- return obj;
+ struct export_blob* blob;
+ unsigned ix;
+
+ if (tmpl_e->slot.index == -1) { /* Template, allocate blob */
+ Export* tmpl = tmpl_e->ep;
+ Export* obj;
+
+ blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
+ erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
+ obj = &blob->exp;
+ obj->fake_op_func_info_for_hipe[0] = 0;
+ obj->fake_op_func_info_for_hipe[1] = 0;
+ obj->code[0] = tmpl->code[0];
+ obj->code[1] = tmpl->code[1];
+ obj->code[2] = tmpl->code[2];
+ obj->code[3] = (BeamInstr) em_call_error_handler;
+ obj->code[4] = 0;
+
+ for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
+ obj->addressv[ix] = obj->code+3;
+
+ blob->entryv[ix].slot.index = -1;
+ blob->entryv[ix].ep = &blob->exp;
+ }
+ ix = 0;
+ }
+ else { /* Existing entry in another table, use free entry in blob */
+ blob = entry_to_blob(tmpl_e);
+ for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
+ ASSERT(ix < ERTS_NUM_CODE_IX);
+ }
+ }
+ return &blob->entryv[ix];
}
-
-static void
-export_free(Export* obj)
+static void
+export_free(struct export_entry* obj)
{
- erts_free(ERTS_ALC_T_EXPORT, (void*) obj);
+ struct export_blob* blob = entry_to_blob(obj);
+ int i;
+ obj->slot.index = -1;
+ for (i=0; i < ERTS_NUM_CODE_IX; i++) {
+ if (blob->entryv[i].slot.index >= 0) {
+ return;
+ }
+ }
+ erts_free(ERTS_ALC_T_EXPORT, blob);
+ erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
-
void
init_export_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ int i;
- erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
+ erts_smp_mtx_init(&export_staging_lock, "export_tab");
+ erts_smp_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
f.free = (HFREE_FUN) export_free;
- erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_table, "export_list",
- EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
- hash_init(ERTS_ALC_T_EXPORT_TABLE, &secondary_export_table,
- "secondary_export_table", 50, f);
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_tables[i], "export_list",
+ EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
+ }
}
/*
@@ -138,29 +202,42 @@ init_export_table(void)
* called through such an export entry.
* 3) This function is suitable for the implementation of erlang:apply/3.
*/
+extern Export* /* inline-helper */
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a,ErtsCodeIndex code_ix);
Export*
-erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
{
HashValue hval = EXPORT_HASH((BeamInstr) m, (BeamInstr) f, (BeamInstr) a);
int ix;
HashBucket* b;
- ix = hval % export_table.htable.size;
- b = export_table.htable.bucket[ix];
+ ix = hval % export_tables[code_ix].htable.size;
+ b = export_tables[code_ix].htable.bucket[ix];
/*
* Note: We have inlined the code from hash.c for speed.
*/
while (b != (HashBucket*) 0) {
- Export* ep = (Export *) b;
+ Export* ep = ((struct export_entry*) b)->ep;
if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
- break;
+ return ep;
}
b = b->next;
}
- return (Export*)b;
+ return NULL;
+}
+
+static struct export_entry* init_template(struct export_templ* templ,
+ Eterm m, Eterm f, unsigned a)
+{
+ templ->entry.ep = &templ->exp;
+ templ->entry.slot.index = -1;
+ templ->exp.code[0] = m;
+ templ->exp.code[1] = f;
+ templ->exp.code[2] = a;
+ return &templ->entry;
}
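With the extra ErtsCodeIndex argument, every lookup is pinned to one generation of the export table. A hedged sketch of how a caller inside the emulator might resolve an MFA against the active index (the atom constants and the arity are placeholders, not taken from this diff):

    /* Sketch only: assumes am_lists/am_reverse atom constants and that the
     * surrounding translation unit already includes export.h and code_ix.h. */
    static void* resolve_lists_reverse(void)
    {
        ErtsCodeIndex code_ix = erts_active_code_ix();
        Export* ep = erts_find_export_entry(am_lists, am_reverse, 1, code_ix);

        /* addressv[] is per code index; NULL here means the MFA is unknown
         * in the generation this caller is executing against. */
        return ep ? ep->addressv[code_ix] : NULL;
    }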
@@ -176,47 +253,42 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
*/
Export*
-erts_find_function(Eterm m, Eterm f, unsigned int a)
+erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
{
- Export e;
- Export* ep;
-
- e.code[0] = m;
- e.code[1] = f;
- e.code[2] = a;
-
- ep = hash_get(&export_table.htable, (void*) &e);
- if (ep != NULL && ep->address == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_traced_function) {
- ep = NULL;
+ struct export_templ templ;
+ struct export_entry* ee;
+
+ ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
+ if (ee == NULL ||
+ (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+ ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ return NULL;
}
- return ep;
+ return ee->ep;
}
/*
* Returns a pointer to an existing export entry for a MFA,
* or creates a new one and returns the pointer.
*
- * This function provides unlocked write access to the main export
- * table. It should only be used during start up or when
- * all other threads are blocked.
+ * This function acts on the staging export table. It should only be used
+ * to load new code.
*/
Export*
erts_export_put(Eterm mod, Eterm func, unsigned int arity)
{
- Export e;
- int ix;
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ struct export_templ templ;
+ struct export_entry* ee;
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
- e.code[0] = mod;
- e.code[1] = func;
- e.code[2] = arity;
- ix = index_put(&export_table, (void*) &e);
- return (Export*) erts_index_lookup(&export_table, ix);
+ export_staging_lock();
+ ee = (struct export_entry*) index_put_entry(&export_tables[code_ix],
+ init_template(&templ, mod, func, arity));
+ export_staging_unlock();
+ return ee->ep;
}
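Since erts_export_put() now only touches the staging table, a loader can create or reuse an entry and then install the code address for the staging index without disturbing running code. A rough sketch of that pattern (the helper name and the patching step are illustrative, not part of this diff):

    static void put_and_patch(Eterm mod, Eterm func, unsigned int arity,
                              BeamInstr* loaded_code)
    {
        Export* ep = erts_export_put(mod, func, arity);  /* staging table only */

        /* Publish the address for the staging generation; callers keep using
         * the active generation until the staging index is committed. */
        ep->addressv[erts_staging_code_ix()] = loaded_code;
    }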
/*
@@ -224,77 +296,117 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
* export entry (making a call through it will cause the error_handler to
* be called).
*
- * Stub export entries will be placed in the secondary export table.
- * erts_export_consolidate() will move all stub export entries into the
- * main export table (will be done the next time code is loaded).
+ * Stub export entries will be placed in the loader export table.
*/
Export*
erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity)
{
- Export e;
+ ErtsCodeIndex code_ix;
Export* ep;
+ IF_DEBUG(int retrying = 0;)
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
-
- e.code[0] = mod;
- e.code[1] = func;
- e.code[2] = arity;
- ep = erts_find_export_entry(mod, func, arity);
- if (ep == 0) {
- /*
- * The code is not loaded (yet). Put the export in the secondary
- * export table, to avoid having to lock the main export table.
- */
- export_write_lock();
- ep = (Export *) hash_put(&secondary_export_table, (void*) &e);
- export_write_unlock();
- }
+
+ do {
+ code_ix = erts_active_code_ix();
+ ep = erts_find_export_entry(mod, func, arity, code_ix);
+ if (ep == 0) {
+ /*
+ * The code is not loaded (yet). Put the export in the staging
+ * export table, to avoid having to lock the active export table.
+ */
+ export_staging_lock();
+ if (erts_active_code_ix() == code_ix) {
+ struct export_templ templ;
+ struct export_entry* entry;
+
+ IndexTable* tab = &export_tables[erts_staging_code_ix()];
+ init_template(&templ, mod, func, arity);
+ entry = (struct export_entry *) index_put_entry(tab, &templ.entry);
+ ep = entry->ep;
+ ASSERT(ep);
+ }
+ else { /* race */
+ ASSERT(!retrying);
+ IF_DEBUG(retrying = 1);
+ }
+ export_staging_unlock();
+ }
+ } while (!ep);
return ep;
}
-/*
- * To be called before loading code (with other threads blocked).
- * This function will move all export entries from the secondary
- * export table into the primary.
- */
-void
-erts_export_consolidate(void)
+Export *export_list(int i, ErtsCodeIndex code_ix)
{
-#ifdef DEBUG
- HashInfo hi;
-#endif
-
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
-
- export_write_lock();
- erts_index_merge(&secondary_export_table, &export_table);
- erts_hash_merge(&secondary_export_table, &export_table.htable);
- export_write_unlock();
-#ifdef DEBUG
- hash_get_info(&hi, &export_table.htable);
- ASSERT(export_table.entries == hi.objs);
-#endif
+ return ((struct export_entry*) erts_index_lookup(&export_tables[code_ix], i))->ep;
}
-Export *export_list(int i)
+int export_list_size(ErtsCodeIndex code_ix)
{
- return (Export*) erts_index_lookup(&export_table, i);
+ return export_tables[code_ix].entries;
}
-int export_list_size(void)
+int export_table_sz(void)
+{
+ int i, bytes = 0;
+
+ export_staging_lock();
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ bytes += index_table_sz(&export_tables[i]);
+ }
+ export_staging_unlock();
+ return bytes;
+}
+int export_entries_sz(void)
{
- return export_table.entries;
+ return erts_smp_atomic_read_nob(&total_entries_bytes);
}
+Export *export_get(Export *e)
+{
+ struct export_entry ee;
+ struct export_entry* entry;
-int export_table_sz(void)
+ ee.ep = e;
+ entry = (struct export_entry*)hash_get(&export_tables[erts_active_code_ix()].htable, &ee);
+ return entry ? entry->ep : NULL;
+}
+
+IF_DEBUG(static ErtsCodeIndex debug_start_load_ix = 0;)
+
+
+void export_start_staging(void)
{
- return index_table_sz(&export_table);
+ ErtsCodeIndex dst_ix = erts_staging_code_ix();
+ ErtsCodeIndex src_ix = erts_active_code_ix();
+ IndexTable* dst = &export_tables[dst_ix];
+ IndexTable* src = &export_tables[src_ix];
+ struct export_entry* src_entry;
+ struct export_entry* dst_entry;
+ int i;
+
+ ASSERT(dst_ix != src_ix);
+ ASSERT(debug_start_load_ix == -1);
+
+ export_staging_lock();
+ /*
+ * Insert all entries in src into dst table
+ */
+ for (i = 0; i < src->entries; i++) {
+ src_entry = (struct export_entry*) erts_index_lookup(src, i);
+ src_entry->ep->addressv[dst_ix] = src_entry->ep->addressv[src_ix];
+ dst_entry = (struct export_entry*) index_put_entry(dst, src_entry);
+ ASSERT(entry_to_blob(src_entry) == entry_to_blob(dst_entry));
+ }
+ export_staging_unlock();
+
+ IF_DEBUG(debug_start_load_ix = dst_ix);
}
-Export *export_get(Export *e)
+void export_end_staging(int commit)
{
- return hash_get(&export_table.htable, e);
+ ASSERT(debug_start_load_ix == erts_staging_code_ix());
+ IF_DEBUG(debug_start_load_ix = -1);
}
+
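export_start_staging()/export_end_staging() above take part in the general staging scheme: loaders mutate a staging copy of the tables, and a commit publishes it to readers with a single switch of the code index, so readers never take a lock. A minimal sketch of that idea (NUM_IX stands in for ERTS_NUM_CODE_IX; everything else below is invented for the example):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_IX   3
    #define TAB_SIZE 4

    typedef struct { int addr[TAB_SIZE]; } Table;

    static Table tables[NUM_IX];
    static _Atomic int active_ix = 0;

    static int staging_ix(void) { return (atomic_load(&active_ix) + 1) % NUM_IX; }

    /* start staging: seed the staging table from the active one */
    static void start_staging(void)
    {
        tables[staging_ix()] = tables[atomic_load(&active_ix)];
    }

    /* the loader mutates only the staging table ... */
    static void stage_function(int slot, int new_addr)
    {
        tables[staging_ix()].addr[slot] = new_addr;
    }

    /* ... and a commit publishes it to all readers at once */
    static void end_staging_commit(void)
    {
        atomic_store(&active_ix, staging_ix());
    }

    int main(void)
    {
        start_staging();
        stage_function(2, 42);
        end_staging_commit();
        printf("active addr[2] = %d\n", tables[atomic_load(&active_ix)].addr[2]);
        return 0;
    }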
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 86d85e05ee..ee06e69aff 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -28,14 +28,15 @@
#include "index.h"
#endif
+#include "code_ix.h"
+
/*
** Export entry
*/
+
typedef struct export
{
- IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
- void* address; /* Pointer to code for function. */
- struct binary* match_prog_set; /* Match program for tracing. */
+ void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
@@ -44,12 +45,12 @@ typedef struct export
* code[2]: Arity (untagged integer).
* code[3]: This entry is 0 unless the 'address' field points to it.
* Threaded code instruction to load function
- * (em_call_error_handler), execute BIF (em_apply_bif,
- * em_apply_apply), or call a traced function
- * (em_call_traced_function).
- * code[4]: Function pointer to BIF function (for BIFs only)
+ * (em_call_error_handler), execute BIF (em_apply_bif),
+ * or a breakpoint instruction (op_i_generic_breakpoint).
+ * code[4]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
- * on_load function that has not been run yet.
+ * on_load function that has not been run yet, or pointer
+ * to code for the function if code[3] is a breakpoint instruction.
* Otherwise: 0.
*/
BeamInstr code[5];
@@ -59,22 +60,38 @@ typedef struct export
void init_export_table(void);
void export_info(int, void *);
-Export* erts_find_export_entry(Eterm m, Eterm f, unsigned int a);
+ERTS_GLB_INLINE Export* erts_active_export_entry(Eterm m, Eterm f, unsigned a);
Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity);
-
Export* erts_export_get_or_make_stub(Eterm, Eterm, unsigned);
-void erts_export_consolidate(void);
-Export *export_list(int);
-int export_list_size(void);
+Export *export_list(int,ErtsCodeIndex);
+int export_list_size(ErtsCodeIndex);
int export_table_sz(void);
+int export_entries_sz(void);
Export *export_get(Export*);
-Export* erts_find_function(Eterm, Eterm, unsigned int);
+void export_start_staging(void);
+void export_end_staging(int commit);
+
+extern erts_smp_mtx_t export_staging_lock;
+#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock)
+#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock)
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->address == (EntryPtr)->code + 3) && \
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
-#endif
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Export*
+erts_active_export_entry(Eterm m, Eterm f, unsigned int a)
+{
+ extern Export* erts_find_export_entry(Eterm m, Eterm f, unsigned a, ErtsCodeIndex);
+ return erts_find_export_entry(m, f, a, erts_active_code_ix());
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* __EXPORT_H__ */
+
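The addressv[ERTS_NUM_CODE_IX] field above keeps one code address per code index, so a call site dispatches through the address selected by the currently active index and a commit retargets every export at once. A self-contained sketch of that idea (illustrative only, not the ERTS dispatch code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_IX 3

    typedef int (*code_ptr)(void);

    static int old_body(void) { return 1; }
    static int new_body(void) { return 2; }

    typedef struct { code_ptr addressv[NUM_IX]; } ExportLike;

    static _Atomic int active_ix = 0;

    int main(void)
    {
        ExportLike e = { { old_body, old_body, old_body } };

        /* loading stages a new address under the staging index ... */
        int staging = (atomic_load(&active_ix) + 1) % NUM_IX;
        e.addressv[staging] = new_body;

        printf("before commit: %d\n", e.addressv[atomic_load(&active_ix)]());

        /* ... and committing the code index switches the call target */
        atomic_store(&active_ix, staging);
        printf("after commit:  %d\n", e.addressv[atomic_load(&active_ix)]());
        return 0;
    }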
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 44abc83d6d..ab1065aaa1 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1850,8 +1850,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
} else {
*ep++ = FLOAT_EXT;
- /* now the sprintf which does the work */
- i = sys_double_to_chars(f.fd, (char*) ep);
+ /* now the erts_snprintf which does the work */
+ i = sys_double_to_chars(f.fd, (char*) ep, (size_t)31);
/* Don't leave garbage after the float! (Bad practice in general,
* and Purify complains.)
@@ -1889,7 +1889,9 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
*ep++ = BINARY_INTERNAL_REF;
}
if (pb->flags) {
+ char* before_realloc = pb->val->orig_bytes;
erts_emasculate_writable_binary(pb);
+ bytes += (pb->val->orig_bytes - before_realloc);
}
erts_refc_inc(&pb->val->refc, 2);
@@ -2562,7 +2564,7 @@ dec_term_atom_common:
goto error;
}
if (edep && (edep->flags & ERTS_DIST_EXT_BTT_SAFE)) {
- if (!erts_find_export_entry(mod, name, arity))
+ if (!erts_active_export_entry(mod, name, arity))
goto error;
}
*objp = make_export(hp);
@@ -2626,14 +2628,12 @@ dec_term_atom_common:
}
old_uniq = unsigned_val(temp);
-#ifndef HYBRID /* FIND ME! */
/*
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)funp;
-#endif
funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
uniq, index, arity);
@@ -2704,14 +2704,12 @@ dec_term_atom_common:
goto error;
}
-#ifndef HYBRID /* FIND ME! */
/*
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)funp;
-#endif
old_uniq = unsigned_val(temp);
funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 06b90b38ab..298241618f 100644..100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -28,6 +28,7 @@
#include "hash.h"
#include "index.h"
#include "atom.h"
+#include "code_ix.h"
#include "export.h"
#include "module.h"
#include "register.h"
@@ -352,92 +353,6 @@ extern erts_smp_atomic32_t erts_max_gen_gcs;
extern int erts_disable_tolerant_timeofday;
-#ifdef HYBRID
-
-/* Message Area heap pointers */
-extern Eterm *global_heap; /* Heap start */
-extern Eterm *global_hend; /* Heap end */
-extern Eterm *global_htop; /* Heap top (heap pointer) */
-extern Eterm *global_saved_htop; /* Saved heap top (heap pointer) */
-extern Uint global_heap_sz; /* Heap size, in words */
-extern Eterm *global_old_heap; /* Old generation */
-extern Eterm *global_old_hend;
-extern ErlOffHeap erts_global_offheap; /* Global MSO (OffHeap) list */
-
-extern Uint16 global_gen_gcs;
-extern Uint16 global_max_gen_gcs;
-extern Uint global_gc_flags;
-
-#ifdef INCREMENTAL
-#define ACTIVATE(p)
-#define DEACTIVATE(p)
-#define IS_ACTIVE(p) 1
-
-#define INC_ACTIVATE(p) do { \
- if ((p)->active) { \
- if ((p)->active_next != NULL) { \
- (p)->active_next->active_prev = (p)->active_prev; \
- if ((p)->active_prev) { \
- (p)->active_prev->active_next = (p)->active_next; \
- } else { \
- inc_active_proc = (p)->active_next; \
- } \
- inc_active_last->active_next = (p); \
- (p)->active_next = NULL; \
- (p)->active_prev = inc_active_last; \
- inc_active_last = (p); \
- } \
- } else { \
- (p)->active_next = NULL; \
- (p)->active_prev = inc_active_last; \
- if (inc_active_last) { \
- inc_active_last->active_next = (p); \
- } else { \
- inc_active_proc = (p); \
- } \
- inc_active_last = (p); \
- (p)->active = 1; \
- } \
-} while(0);
-
-#define INC_DEACTIVATE(p) do { \
- ASSERT((p)->active == 1); \
- if ((p)->active_next == NULL) { \
- inc_active_last = (p)->active_prev; \
- } else { \
- (p)->active_next->active_prev = (p)->active_prev; \
- } \
- if ((p)->active_prev == NULL) { \
- inc_active_proc = (p)->active_next; \
- } else { \
- (p)->active_prev->active_next = (p)->active_next; \
- } \
- (p)->active = 0; \
-} while(0);
-
-#define INC_IS_ACTIVE(p) ((p)->active != 0)
-
-#else
-extern Eterm *global_old_htop;
-extern Eterm *global_high_water;
-#define ACTIVATE(p) (p)->active = 1;
-#define DEACTIVATE(p) (p)->active = 0;
-#define IS_ACTIVE(p) ((p)->active != 0)
-#define INC_ACTIVATE(p)
-#define INC_IS_ACTIVE(p) 1
-#endif /* INCREMENTAL */
-
-#else
-# define ACTIVATE(p)
-# define DEACTIVATE(p)
-# define IS_ACTIVE(p) 1
-# define INC_ACTIVATE(p)
-#endif /* HYBRID */
-
-#ifdef HYBRID
-extern Uint global_heap_min_sz;
-#endif
-
extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */
extern int stackdump_on_exit;
@@ -613,6 +528,8 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm,
Eterm);
+void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
+ Eterm (*bif)(Process*,Eterm*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
@@ -628,24 +545,33 @@ typedef struct {
Eterm* fname_ptr; /* Pointer to fname table */
} FunctionInfo;
-struct LoaderState* erts_alloc_loader_state(void);
-Eterm erts_prepare_loading(struct LoaderState*, Process *c_p,
+Binary* erts_alloc_loader_state(void);
+Eterm erts_module_for_prepared_code(Binary* magic);
+Eterm erts_prepare_loading(Binary* loader_state, Process *c_p,
Eterm group_leader, Eterm* modp,
byte* code, Uint size);
-Eterm erts_finish_loading(struct LoaderState* stp, Process* c_p,
+Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp);
-Eterm erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* mod, byte* code, Uint size);
+Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
BeamInstr* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+/* beam_ranges.c */
+void erts_init_ranges(void);
+void erts_start_staging_ranges(void);
+void erts_end_staging_ranges(int commit);
+void erts_update_ranges(BeamInstr* code, Uint size);
+void erts_remove_from_ranges(BeamInstr* code);
+UWord erts_ranges_sz(void);
+void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+
/* break.c */
void init_break_handler(void);
void erts_set_ignore_break(void);
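The beam_ranges.c interface declared above keeps the loaded modules as sorted address ranges so that a program counter can be mapped back to function/module information. A self-contained sketch of that kind of range lookup (made-up data and names, not the ERTS structures):

    #include <stdio.h>

    typedef struct { unsigned long start, end; const char *mod; } RangeLike;

    static const RangeLike ranges[] = {      /* kept sorted by start address */
        { 0x1000, 0x1fff, "lists" },
        { 0x4000, 0x4fff, "maps"  },
        { 0x8000, 0x8fff, "user"  },
    };

    static const RangeLike *find_range(unsigned long pc)
    {
        int lo = 0, hi = (int)(sizeof(ranges)/sizeof(ranges[0])) - 1;
        while (lo <= hi) {
            int mid = (lo + hi) / 2;
            if (pc < ranges[mid].start)      hi = mid - 1;
            else if (pc > ranges[mid].end)   lo = mid + 1;
            else                             return &ranges[mid];
        }
        return NULL;
    }

    int main(void)
    {
        const RangeLike *r = find_range(0x4123);
        printf("pc 0x4123 is in %s\n", r ? r->mod : "(no module)");
        return 0;
    }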
@@ -662,7 +588,6 @@ __decl_noreturn void __noreturn erl_exit_flush_async(int n, char*, ...);
void erl_error(char*, va_list);
/* copy.c */
-void init_copy(void);
Eterm copy_object(Eterm, Process*);
#if HALFWORD_HEAP
@@ -692,116 +617,6 @@ Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
Eterm* refs, unsigned nrefs);
-#ifdef HYBRID
-#define RRMA_DEFAULT_SIZE 256
-#define RRMA_STORE(p,ptr,src) do { \
- ASSERT((p)->rrma != NULL); \
- ASSERT((p)->rrsrc != NULL); \
- (p)->rrma[(p)->nrr] = (ptr); \
- (p)->rrsrc[(p)->nrr++] = (src); \
- if ((p)->nrr == (p)->rrsz) \
- { \
- (p)->rrsz *= 2; \
- (p)->rrma = (Eterm *) erts_realloc(ERTS_ALC_T_ROOTSET, \
- (void*)(p)->rrma, \
- sizeof(Eterm) * (p)->rrsz); \
- (p)->rrsrc = (Eterm **) erts_realloc(ERTS_ALC_T_ROOTSET, \
- (void*)(p)->rrsrc, \
- sizeof(Eterm) * (p)->rrsz); \
- } \
-} while(0)
-
-/* Note that RRMA_REMOVE decreases the given index after deletion.
- * This is done so that a loop with an increasing index can call
- * remove without having to decrease the index to see the element
- * placed in the hole after the deleted element.
- */
-#define RRMA_REMOVE(p,index) do { \
- p->rrsrc[index] = p->rrsrc[--p->nrr]; \
- p->rrma[index--] = p->rrma[p->nrr]; \
- } while(0);
-
-
-/* The MessageArea STACKs are used while copying messages to the
- * message area.
- */
-#define MA_STACK_EXTERNAL_DECLARE(type,_s_) \
- typedef type ma_##_s_##_type; \
- extern ma_##_s_##_type *ma_##_s_##_stack; \
- extern Uint ma_##_s_##_top; \
- extern Uint ma_##_s_##_size;
-
-#define MA_STACK_DECLARE(_s_) \
- ma_##_s_##_type *ma_##_s_##_stack; Uint ma_##_s_##_top; Uint ma_##_s_##_size;
-
-#define MA_STACK_ALLOC(_s_) do { \
- ma_##_s_##_top = 0; \
- ma_##_s_##_size = 512; \
- ma_##_s_##_stack = (ma_##_s_##_type*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
- sizeof(ma_##_s_##_type) * ma_##_s_##_size); \
-} while(0)
-
-
-#define MA_STACK_PUSH(_s_,val) do { \
- ma_##_s_##_stack[ma_##_s_##_top++] = (val); \
- if (ma_##_s_##_top == ma_##_s_##_size) \
- { \
- ma_##_s_##_size *= 2; \
- ma_##_s_##_stack = \
- (ma_##_s_##_type*) erts_realloc(ERTS_ALC_T_OBJECT_STACK, \
- (void*)ma_##_s_##_stack, \
- sizeof(ma_##_s_##_type) * ma_##_s_##_size); \
- } \
-} while(0)
-
-#define MA_STACK_POP(_s_) (ma_##_s_##_top != 0 ? ma_##_s_##_stack[--ma_##_s_##_top] : 0)
-#define MA_STACK_TOP(_s_) (ma_##_s_##_stack[ma_##_s_##_top - 1])
-#define MA_STACK_UPDATE(_s_,offset,value) \
- *(ma_##_s_##_stack[ma_##_s_##_top - 1] + (offset)) = (value)
-#define MA_STACK_SIZE(_s_) (ma_##_s_##_top)
-#define MA_STACK_ELM(_s_,i) ma_##_s_##_stack[i]
-
-MA_STACK_EXTERNAL_DECLARE(Eterm,src);
-MA_STACK_EXTERNAL_DECLARE(Eterm*,dst);
-MA_STACK_EXTERNAL_DECLARE(Uint,offset);
-
-
-#ifdef INCREMENTAL
-extern Eterm *ma_pending_stack;
-extern Uint ma_pending_top;
-extern Uint ma_pending_size;
-
-#define NO_COPY(obj) (IS_CONST(obj) || \
- (((ptr_val(obj) >= global_heap) && \
- (ptr_val(obj) < global_htop)) || \
- ((ptr_val(obj) >= inc_fromspc) && \
- (ptr_val(obj) < inc_fromend)) || \
- ((ptr_val(obj) >= global_old_heap) && \
- (ptr_val(obj) < global_old_hend))))
-
-#else
-
-#define NO_COPY(obj) (IS_CONST(obj) || \
- (((ptr_val(obj) >= global_heap) && \
- (ptr_val(obj) < global_htop)) || \
- ((ptr_val(obj) >= global_old_heap) && \
- (ptr_val(obj) < global_old_hend))))
-
-#endif /* INCREMENTAL */
-
-#define LAZY_COPY(from,obj) do { \
- if (!NO_COPY(obj)) { \
- BM_LAZY_COPY_START; \
- BM_COUNT(messages_copied); \
- obj = copy_struct_lazy(from,obj,0); \
- BM_LAZY_COPY_STOP; \
- } \
-} while(0)
-
-Eterm copy_struct_lazy(Process*, Eterm, Uint);
-
-#endif /* HYBRID */
-
/* Utilities */
extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks);
extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm);
@@ -895,10 +710,6 @@ void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(Process* p);
-#ifdef HYBRID
-int erts_global_garbage_collect(Process*, int, Eterm*, int);
-#endif
-
/* io.c */
typedef struct {
@@ -923,10 +734,19 @@ void erts_free_port_names(ErtsPortNames *);
Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int);
+Port *erts_get_heart_port(void);
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+void erts_lcnt_enable_io_lock_count(int enable);
+#endif
+
/* erl_drv_thread.c */
void erl_drv_thr_init(void);
/* utils.c */
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+
+Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -966,8 +786,12 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding);
void erts_copy_utf8_to_utf16_little(byte *target, byte *bytes, int num_chars);
int erts_analyze_utf8(byte *source, Uint size,
byte **err_pos, Uint *num_chars, int *left);
-char *erts_convert_filename_to_native(Eterm name, ErtsAlcType_t alloc_type, int allow_empty);
-
+char *erts_convert_filename_to_native(Eterm name, char *statbuf,
+ size_t statbuf_size,
+ ErtsAlcType_t alloc_type,
+ int allow_empty, int allow_atom,
+ Sint *used /* out */);
+Eterm erts_convert_native_to_filename(Process *p, byte *bytes);
#define ERTS_UTF8_OK 0
#define ERTS_UTF8_INCOMPLETE 1
#define ERTS_UTF8_ERROR 2
@@ -1089,14 +913,6 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
breakpoint functions */
#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL (0x1)
-#define BIF_TRACE_AS_GLOBAL (0x2)
-#define BIF_TRACE_AS_META (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index a4a3007f93..79c3ecf1b3 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -68,21 +68,22 @@ erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name,
return t;
}
-int
-index_put(IndexTable* t, void* tmpl)
+IndexSlot*
+index_put_entry(IndexTable* t, void* tmpl)
{
int ix;
IndexSlot* p = (IndexSlot*) hash_put(&t->htable, tmpl);
if (p->index >= 0) {
- return p->index;
+ return p;
}
ix = t->entries;
if (ix >= t->size) {
Uint sz;
if (ix >= t->limit) {
- erl_exit(1, "no more index entries in %s (max=%d)\n",
+ /* A core dump is unnecessary */
+ erl_exit(ERTS_DUMP_EXIT, "no more index entries in %s (max=%d)\n",
t->htable.name, t->limit);
}
sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*);
@@ -92,7 +93,7 @@ index_put(IndexTable* t, void* tmpl)
t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
- return ix;
+ return p;
}
int index_get(IndexTable* t, void* tmpl)
@@ -135,3 +136,18 @@ void erts_index_merge(Hash* src, IndexTable* dst)
}
}
}
+
+void index_erase_latest_from(IndexTable* t, Uint from_ix)
+{
+ if(from_ix < (Uint)t->entries) {
+ int ix;
+ for (ix = from_ix; ix < t->entries; ix++) {
+ IndexSlot* obj = t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
+ hash_erase(&t->htable, obj);
+ }
+ t->entries = from_ix;
+ }
+ else {
+ ASSERT(from_ix == t->entries);
+ }
+}
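index_erase_latest_from() above is what lets an aborted load roll the staging index table back to the entry count recorded when staging started. A minimal sketch of that truncation idea (a plain array stands in for the IndexTable):

    #include <stdio.h>

    #define MAX_ENTRIES 16

    typedef struct { int items[MAX_ENTRIES]; int entries; } IndexLike;

    static void erase_latest_from(IndexLike *t, int from_ix)
    {
        if (from_ix < t->entries)
            t->entries = from_ix;     /* drop everything staged after the mark */
    }

    int main(void)
    {
        IndexLike t = { {0}, 0 };
        t.items[t.entries++] = 10;    /* pre-existing entry */

        int mark = t.entries;         /* taken when staging starts */
        t.items[t.entries++] = 20;    /* entries added by a failing load */
        t.items[t.entries++] = 30;

        erase_latest_from(&t, mark);  /* abort: roll the table back */
        printf("entries after abort: %d\n", t.entries);
        return 0;
    }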
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index 4eb9b1f992..3afe48d007 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -55,12 +55,24 @@ void index_info(int, void *, IndexTable*);
int index_table_sz(IndexTable *);
int index_get(IndexTable*, void*);
-int index_put(IndexTable*, void*);
+
+IndexSlot* index_put_entry(IndexTable*, void*);
void erts_index_merge(Hash*, IndexTable*);
+/* Erase all entries with index 'ix' and higher
+*/
+void index_erase_latest_from(IndexTable*, Uint ix);
+
+ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int index_put(IndexTable* t, void* tmpl)
+{
+ return index_put_entry(t, tmpl)->index;
+}
+
ERTS_GLB_INLINE IndexSlot*
erts_index_lookup(IndexTable* t, Uint ix)
{
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 32f3a675fa..3fc124589b 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -226,8 +226,14 @@ static ERTS_INLINE void port_init_instr(Port *prt
*/
#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
- if (!prt->drv_ptr->lock)
- erts_mtx_init_locked_x(prt->lock, "port_lock", id);
+ if (!prt->drv_ptr->lock) {
+ char *lock_str = "port_lock";
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_locked_x(prt->lock, lock_str, id);
+ }
#endif
erts_port_task_init_sched(&prt->sched, id);
}
@@ -360,6 +366,7 @@ static Port *create_port(char *name,
erts_port_task_handle_init(&prt->timeout_task);
prt->psd = NULL;
prt->drv_data = (SWord) 0;
+ prt->os_pid = -1;
/* Set default tracing */
erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
@@ -1276,6 +1283,35 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
}
}
+/*
+ * force_imm_drv_call()/finalize_force_imm_drv_call() should *only*
+ * be used while crash dumping...
+ */
+static ErtsTryImmDrvCallResult
+force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_aint32_t invalid_state;
+ Port *prt = sp->port;
+
+ ASSERT(ERTS_IS_CRASH_DUMPING);
+ ASSERT(is_atom(sp->port_op));
+
+ invalid_state = sp->state;
+ sp->state = erts_atomic32_read_nob(&prt->state);
+ if (sp->state & invalid_state)
+ return ERTS_TRY_IMM_DRV_CALL_INVALID_PORT;
+
+ sp->fpe_was_unmasked = erts_block_fpe();
+
+ return ERTS_TRY_IMM_DRV_CALL_OK;
+}
+
+static void
+finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_unblock_fpe(sp->fpe_was_unmasked);
+}
+
#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
static ERTS_INLINE void
@@ -1581,7 +1617,8 @@ call_driver_outputv(int bang_op,
else {
ErlDrvSizeT size = evp->size;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_outputv)) {
@@ -1676,6 +1713,9 @@ call_driver_output(int bang_op,
send_badsig(prt);
else {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_output)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
@@ -1757,10 +1797,12 @@ erts_port_output(Process *c_p,
ErlDrvBinary *cbin = NULL;
ErlIOVec *evp = NULL;
char *buf = NULL;
+ int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
| ERTS_PORT_SIG_FLG_NOSUSPEND
- | ERTS_PORT_SIG_FLG_FORCE)) == 0);
+ | ERTS_PORT_SIG_FLG_FORCE
+ | ERTS_PORT_SIG_FLG_FORCE_IMM_CALL)) == 0);
busy_flgs = ((flags & ERTS_PORT_SIG_FLG_FORCE)
? ((erts_aint32_t) 0)
@@ -1779,11 +1821,13 @@ erts_port_output(Process *c_p,
? ERTS_PORT_OP_DROPPED
: ERTS_PORT_OP_BUSY);
- try_call = !(sched_flags & (invalid_flags|ERTS_PTS_FLGS_FORCE_SCHEDULE_OP));
+ try_call = (force_immediate_call /* crash dumping */
+ || !(sched_flags & (invalid_flags
+ | ERTS_PTS_FLGS_FORCE_SCHEDULE_OP)));
#ifdef USE_VM_PROBES
if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(c_p->common.id, prt);
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(c_p ? c_p->common.id : ERTS_INVALID_PID, prt);
DTRACE4(port_command, process_str, port_str, prt->name, "command");
}
#endif
@@ -1881,17 +1925,24 @@ erts_port_output(Process *c_p,
invalid_flags,
!refp,
am_command);
+
try_call_state.pre_chk_sched_flags = 0; /* already checked */
- try_call_res = try_imm_drv_call(&try_call_state);
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
switch (try_call_res) {
case ERTS_TRY_IMM_DRV_CALL_OK:
call_driver_outputv(flags & ERTS_PORT_SIG_FLG_BANG_OP,
- c_p->common.id,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
from,
prt,
drv,
evp);
- finalize_imm_drv_call(&try_call_state);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
/* Fall through... */
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
driver_free_binary(cbin);
@@ -2021,18 +2072,25 @@ erts_port_output(Process *c_p,
r = erts_iolist_to_buf(list, buf, size);
ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
}
+
try_call_state.pre_chk_sched_flags = 0; /* already checked */
- try_call_res = try_imm_drv_call(&try_call_state);
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
switch (try_call_res) {
case ERTS_TRY_IMM_DRV_CALL_OK:
call_driver_output(flags & ERTS_PORT_SIG_FLG_BANG_OP,
- c_p->common.id,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
from,
prt,
drv,
buf,
size);
- finalize_imm_drv_call(&try_call_state);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
/* Fall through... */
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
erts_free(ERTS_ALC_T_TMP, buf);
@@ -2073,7 +2131,7 @@ erts_port_output(Process *c_p,
res = erts_schedule_proc2port_signal(c_p,
prt,
- c_p->common.id,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
refp,
sigdp,
task_flags,
@@ -2596,6 +2654,63 @@ void erts_init_io(int port_tab_size,
erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
+#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+
+static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+{
+ if (dp->lock) {
+ if (enable)
+ erts_lcnt_init_lock_x(&dp->lock->lcnt,
+ "driver_lock",
+ ERTS_LCNT_LT_MUTEX,
+ am_atom_put(dp->name,
+ sys_strlen(dp->name)));
+ else
+ erts_lcnt_destroy_lock(&dp->lock->lcnt);
+
+ }
+}
+
+static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (!enable) {
+ erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_destroy_lock(&prt->lock->lcnt);
+ }
+ else {
+ erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
+ "port_sched_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_init_lock_x(&prt->lock->lcnt,
+ "port_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
+ }
+}
+
+void erts_lcnt_enable_io_lock_count(int enable)
+{
+ erts_driver_t *dp;
+ int i, max = erts_ptab_max(&erts_port);
+
+ for (i = 0; i < max; i++) {
+ Port *prt = erts_pix2port(i);
+ if (prt)
+ lcnt_enable_port_lock_count(prt, enable);
+ }
+
+ lcnt_enable_drv_lock_count(&vanilla_driver, enable);
+ lcnt_enable_drv_lock_count(&spawn_driver, enable);
+ lcnt_enable_drv_lock_count(&fd_driver, enable);
+ for (dp = driver_list; dp; dp = dp->next)
+ lcnt_enable_drv_lock_count(dp, enable);
+}
+#endif
+
/*
* Buffering of data when using line oriented I/O on ports
*/
@@ -5353,6 +5468,8 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
Uint size = ptr[1];
Uint offset = ptr[2];
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size);
+
if (size <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hbp = (ErlHeapBin *) hp;
hp += heap_bin_size(size);
@@ -5384,6 +5501,9 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
case ERL_DRV_BUF2BINARY: { /* char*, size */
byte *bufp = (byte *) ptr[0];
Uint size = (Uint) ptr[1];
+
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size);
+
if (size <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hbp = (ErlHeapBin *) hp;
hp += heap_bin_size(size);
@@ -5420,6 +5540,7 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
}
case ERL_DRV_STRING: /* char*, length */
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) ptr[1]);
mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL);
ptr += 2;
break;
@@ -7394,11 +7515,39 @@ int null_func(void)
int
erl_drv_putenv(char *key, char *value)
{
- return erts_write_env(key, value);
+ return erts_sys_putenv_raw(key, value);
}
int
erl_drv_getenv(char *key, char *value, size_t *value_size)
{
- return erts_sys_getenv(key, value, value_size);
+ return erts_sys_getenv_raw(key, value, value_size);
+}
+
+/* get heart_port
+ * used by erl_crash_dump
+ * - uses the fact that heart_port is registered when starting heart
+ */
+
+Port *erts_get_heart_port(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ struct reg_proc *reg;
+ Port *port = erts_pix2port(ix);
+
+ if (!port)
+ continue;
+ /* only examine undead or alive ports */
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
+ continue;
+ /* immediate atom compare */
+ reg = port->common.u.alive.reg;
+ if (reg && reg->name == am_heart_port) {
+ return port;
+ }
+ }
+
+ return NULL;
}
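The force_imm_drv_call() path above lets erts_port_output() bypass the usual scheduling checks while crash dumping (ERTS_PORT_SIG_FLG_FORCE_IMM_CALL), so the heart port can still be driven. A tiny sketch of that decision with stand-in flag names (not the ERTS flag values):

    #include <stdio.h>

    #define FLG_FORCE_IMM_CALL (1 << 0)  /* stand-in for ERTS_PORT_SIG_FLG_FORCE_IMM_CALL */
    #define SCHED_BUSY         (1 << 1)  /* stand-in for "must be scheduled" conditions */

    static int try_call_now(int out_flags, int sched_flags)
    {
        int force = out_flags & FLG_FORCE_IMM_CALL;   /* crash dumping */
        return force || !(sched_flags & SCHED_BUSY);
    }

    int main(void)
    {
        printf("busy, normal output : %d\n", try_call_now(0, SCHED_BUSY));
        printf("busy, crash dumping : %d\n", try_call_now(FLG_FORCE_IMM_CALL, SCHED_BUSY));
        return 0;
    }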
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index b93b1ad09a..01856007ee 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -26,21 +26,32 @@
#include "global.h"
#include "module.h"
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
+
#define MODULE_SIZE 50
#define MODULE_LIMIT (64*1024)
-static IndexTable module_table;
+static IndexTable module_tables[ERTS_NUM_CODE_IX];
-/*
- * SMP note: We don't need to look accesses to the module table because
- * there is one only scheduler thread when we update it.
+erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+
+static erts_smp_atomic_t tot_module_bytes;
+
+/* SMP note: Active module table lookup and current module instance can be
+ * read without any locks. Old module instances are protected by
+ * "the_old_code_rwlocks" as purging is done on active module table.
+ * Staging table is protected by the "code_ix lock".
*/
#include "erl_smp.h"
void module_info(int to, void *to_arg)
{
- index_info(to, to_arg, &module_table);
+ index_info(to, to_arg, &module_tables[erts_active_code_ix()]);
}
@@ -59,45 +70,67 @@ static int module_cmp(Module* tmpl, Module* obj)
static Module* module_alloc(Module* tmpl)
{
Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module));
+ erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->code = 0;
- obj->old_code = 0;
- obj->code_length = 0;
- obj->old_code_length = 0;
+ obj->curr.code = 0;
+ obj->old.code = 0;
+ obj->curr.code_length = 0;
+ obj->old.code_length = 0;
obj->slot.index = -1;
- obj->nif = NULL;
- obj->old_nif = NULL;
+ obj->curr.nif = NULL;
+ obj->old.nif = NULL;
+ obj->curr.num_breakpoints = 0;
+ obj->old.num_breakpoints = 0;
+ obj->curr.num_traced_exports = 0;
+ obj->old.num_traced_exports = 0;
return obj;
}
+static void module_free(Module* mod)
+{
+ erts_free(ERTS_ALC_T_MODULE, mod);
+ erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
+}
void init_module_table(void)
{
HashFunctions f;
+ int i;
f.hash = (H_FUN) module_hash;
f.cmp = (HCMP_FUN) module_cmp;
f.alloc = (HALLOC_FUN) module_alloc;
- f.free = 0;
+ f.free = (HFREE_FUN) module_free;
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
+ erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_tables[i], "module_code",
+ MODULE_SIZE, MODULE_LIMIT, f);
+ }
- erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_table, "module_code",
- MODULE_SIZE, MODULE_LIMIT, f);
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ }
+ erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
Module*
-erts_get_module(Eterm mod)
+erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
Module e;
int index;
+ IndexTable* mod_tab;
ASSERT(is_atom(mod));
+
+ mod_tab = &module_tables[code_ix];
+
e.module = atom_val(mod);
- index = index_get(&module_table, (void*) &e);
+ index = index_get(mod_tab, (void*) &e);
if (index == -1) {
return NULL;
} else {
- return (Module*) erts_index_lookup(&module_table, index);
+ return (Module*) erts_index_lookup(mod_tab, index);
}
}
@@ -105,27 +138,101 @@ Module*
erts_put_module(Eterm mod)
{
Module e;
- int index;
+ IndexTable* mod_tab;
+ int oldsz, newsz;
+ Module* res;
ASSERT(is_atom(mod));
ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
+ || erts_has_code_write_permission());
+
+ mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
- index = index_put(&module_table, (void*) &e);
- return (Module*) erts_index_lookup(&module_table, index);
+ oldsz = index_table_sz(mod_tab);
+ res = (Module*) index_put_entry(mod_tab, (void*) &e);
+ newsz = index_table_sz(mod_tab);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ return res;
}
-Module *module_code(int i)
+Module *module_code(int i, ErtsCodeIndex code_ix)
{
- return (Module*) erts_index_lookup(&module_table, i);
+ return (Module*) erts_index_lookup(&module_tables[code_ix], i);
}
-int module_code_size(void)
+int module_code_size(ErtsCodeIndex code_ix)
{
- return module_table.entries;
+ return module_tables[code_ix].entries;
}
int module_table_sz(void)
{
- return index_table_sz(&module_table);
+ return erts_smp_atomic_read_nob(&tot_module_bytes);
+}
+
+#ifdef DEBUG
+static ErtsCodeIndex dbg_load_code_ix = 0;
+#endif
+
+static int entries_at_start_staging = 0;
+
+void module_start_staging(void)
+{
+ IndexTable* src = &module_tables[erts_active_code_ix()];
+ IndexTable* dst = &module_tables[erts_staging_code_ix()];
+ Module* src_mod;
+ Module* dst_mod;
+ int i, oldsz, newsz;
+
+ ASSERT(dbg_load_code_ix == -1);
+ ASSERT(dst->entries <= src->entries);
+
+ /*
+ * Make sure our existing modules are up-to-date
+ */
+ for (i = 0; i < dst->entries; i++) {
+ src_mod = (Module*) erts_index_lookup(src, i);
+ dst_mod = (Module*) erts_index_lookup(dst, i);
+ ASSERT(src_mod->module == dst_mod->module);
+
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ }
+
+ /*
+ * Copy all new modules from active table
+ */
+ oldsz = index_table_sz(dst);
+ for (i = dst->entries; i < src->entries; i++) {
+ src_mod = (Module*) erts_index_lookup(src, i);
+ dst_mod = (Module*) index_put_entry(dst, src_mod);
+ ASSERT(dst_mod != src_mod);
+
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ }
+ newsz = index_table_sz(dst);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+
+ entries_at_start_staging = dst->entries;
+ IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix());
+}
+
+void module_end_staging(int commit)
+{
+ ASSERT(dbg_load_code_ix == erts_staging_code_ix());
+
+ if (!commit) { /* abort */
+ IndexTable* tab = &module_tables[erts_staging_code_ix()];
+ int oldsz, newsz;
+
+ ASSERT(entries_at_start_staging <= tab->entries);
+ oldsz = index_table_sz(tab);
+ index_erase_latest_from(tab, entries_at_start_staging);
+ newsz = index_table_sz(tab);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ }
+
+ IF_DEBUG(dbg_load_code_ix = -1);
}
+
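The new struct erl_module_instance above splits each module into a current and an old generation: loading shifts curr to old, and old must be purged before another load can succeed. A self-contained sketch of that two-generation bookkeeping (illustrative, not the ERTS loader):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { const char *code; int code_length; } InstanceLike;
    typedef struct { InstanceLike curr, old; } ModuleLike;

    static int load_new_code(ModuleLike *m, const char *code, int len)
    {
        if (m->old.code != NULL)
            return -1;                  /* old generation still present: purge first */
        m->old = m->curr;               /* current code becomes the old generation */
        m->curr.code = code;
        m->curr.code_length = len;
        return 0;
    }

    static void purge(ModuleLike *m)
    {
        m->old.code = NULL;             /* in ERTS this also frees the code area */
        m->old.code_length = 0;
    }

    int main(void)
    {
        ModuleLike m = { { "v1", 2 }, { NULL, 0 } };
        load_new_code(&m, "v2", 2);
        printf("curr=%s old=%s\n", m.curr.code, m.old.code);
        purge(&m);
        printf("second load ok: %d\n", load_new_code(&m, "v3", 2) == 0);
        return 0;
    }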
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index 694e4ab72f..6e123a0ffe 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -24,28 +24,72 @@
#include "index.h"
#endif
+struct erl_module_instance {
+ BeamInstr* code;
+ int code_length; /* Length of loaded code in bytes. */
+ unsigned catches;
+ struct erl_module_nif* nif;
+ int num_breakpoints;
+ int num_traced_exports;
+};
typedef struct erl_module {
IndexSlot slot; /* Must be located at top of struct! */
int module; /* Atom index for module (not tagged). */
- BeamInstr* code;
- BeamInstr* old_code;
- int code_length; /* Length of loaded code in bytes. */
- int old_code_length; /* Length of old loaded code in bytes */
- unsigned catches, old_catches;
- struct erl_module_nif* nif;
- struct erl_module_nif* old_nif;
+ struct erl_module_instance curr;
+ struct erl_module_instance old; /* protected by "old_code" rwlock */
} Module;
-Module* erts_get_module(Eterm mod);
+Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
void init_module_table(void);
+void module_start_staging(void);
+void module_end_staging(int commit);
void module_info(int, void *);
-Module *module_code(int);
-int module_code_size(void);
+Module *module_code(int, ErtsCodeIndex);
+int module_code_size(ErtsCodeIndex);
int module_table_sz(void);
+ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_is_old_code_rlocked(ErtsCodeIndex);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+
+ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
+}
#endif
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#endif /* !__MODULE_H__ */
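The old-code rwlocks declared above protect the old module generation: code that inspects old code takes them in read mode, while purging takes them in write mode. A sketch of that usage modeled with a plain pthread rwlock (not the erts_smp_* API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t old_code_lock = PTHREAD_RWLOCK_INITIALIZER;
    static const char *old_code = "old code";

    static int old_code_present(void)      /* reader, e.g. a check-process-code style scan */
    {
        int present;
        pthread_rwlock_rdlock(&old_code_lock);
        present = (old_code != NULL);
        pthread_rwlock_unlock(&old_code_lock);
        return present;
    }

    static void purge_old_code(void)       /* writer: removes the old generation */
    {
        pthread_rwlock_wrlock(&old_code_lock);
        old_code = NULL;
        pthread_rwlock_unlock(&old_code_lock);
    }

    int main(void)
    {
        printf("before purge: %d\n", old_code_present());
        purge_old_code();
        printf("after purge:  %d\n", old_code_present());
        return 0;
    }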
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 9b168889dd..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -62,11 +62,8 @@ label L
i_func_info I a a I
int_code_end
-i_trace_breakpoint
-i_mtrace_breakpoint
+i_generic_breakpoint
i_debug_breakpoint
-i_count_breakpoint
-i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
@@ -522,7 +519,6 @@ apply_bif
call_nif
call_error_handler
error_action_code
-call_traced_function
return_trace
#
@@ -829,16 +825,20 @@ call_ext_only Ar=u==2 Bif=u$bif:erlang:load_nif/2 => allocate u Ar | i_call_ext
#
-# The apply/2 and apply/3 BIFs are instructions.
+# apply/2 is an instruction, not a BIF.
#
call_ext u==2 u$func:erlang:apply/2 => i_apply_fun
call_ext_last u==2 u$func:erlang:apply/2 D => i_apply_fun_last D
call_ext_only u==2 u$func:erlang:apply/2 => i_apply_fun_only
-call_ext u==3 u$func:erlang:apply/3 => i_apply
-call_ext_last u==3 u$func:erlang:apply/3 D => i_apply_last D
-call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only
+#
+# The apply/3 BIF is an instruction.
+#
+
+call_ext u==3 u$bif:erlang:apply/3 => i_apply
+call_ext_last u==3 u$bif:erlang:apply/3 D => i_apply_last D
+call_ext_only u==3 u$bif:erlang:apply/3 => i_apply_only
#
# The exit/1 and throw/1 BIFs never execute the instruction following them;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 9c3f9551c7..898a30b010 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -25,11 +25,6 @@
# define NO_FPE_SIGNALS
#endif
-/* xxxP __VXWORKS__ */
-#ifdef VXWORKS
-#include <vxWorks.h>
-#endif
-
#ifdef DISABLE_CHILD_WAITER_THREAD
#undef ENABLE_CHILD_WAITER_THREAD
#endif
@@ -43,8 +38,6 @@
#if defined (__WIN32__)
# include "erl_win_sys.h"
-#elif defined (VXWORKS)
-# include "erl_vxworks_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
@@ -192,12 +185,6 @@ void erl_assert_error(char* expr, char* file, int line);
# define const
#endif
-#ifdef VXWORKS
-/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-int real_printf(const char *fmt, ...);
-# define printf real_printf
-#endif
-
#undef __deprecated
#if ERTS_AT_LEAST_GCC_VSN__(3, 0, 0)
# define __deprecated __attribute__((deprecated))
@@ -521,38 +508,28 @@ static unsigned long zero_value = 0, one_value = 1;
# define SET_NONBLOCKING(fd) ioctlsocket((fd), FIONBIO, &one_value)
# else
-# ifdef VXWORKS
-# include <fcntl.h> /* xxxP added for O_WRONLY etc ... macro:s ... */
-# include <ioLib.h>
-static const int zero_value = 0, one_value = 1;
-# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (int)&zero_value)
-# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (int)&one_value)
-# define ERRNO_BLOCK EWOULDBLOCK
-
-# else
-# ifdef NB_FIONBIO /* Old BSD */
-# include <sys/ioctl.h>
+# ifdef NB_FIONBIO /* Old BSD */
+# include <sys/ioctl.h>
static const int zero_value = 0, one_value = 1;
-# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value)
-# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
-# define ERRNO_BLOCK EWOULDBLOCK
-# else /* !NB_FIONBIO */
-# include <fcntl.h>
-# ifdef NB_O_NDELAY /* Nothing needs this? */
-# define NB_FLAG O_NDELAY
-# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
-# define ERRNO_BLOCK EWOULDBLOCK
-# endif
-# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
-# define NB_FLAG O_NONBLOCK
-# define ERRNO_BLOCK EAGAIN
-# endif /* !NB_O_NDELAY */
-# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \
- fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
-# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
- fcntl((fd), F_GETFL, 0) | NB_FLAG)
-# endif /* !NB_FIONBIO */
-# endif /* _WXWORKS_ */
+# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value)
+# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
+# define ERRNO_BLOCK EWOULDBLOCK
+# else /* !NB_FIONBIO */
+# include <fcntl.h>
+# ifdef NB_O_NDELAY /* Nothing needs this? */
+# define NB_FLAG O_NDELAY
+# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
+# define ERRNO_BLOCK EWOULDBLOCK
+# endif
+# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
+# define NB_FLAG O_NONBLOCK
+# define ERRNO_BLOCK EAGAIN
+# endif /* !NB_O_NDELAY */
+# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
+# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) | NB_FLAG)
+# endif /* !NB_FIONBIO */
# endif /* !__WIN32__ */
#endif /* WANT_NONBLOCKING */
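For reference, the POSIX branch kept above amounts to toggling O_NONBLOCK through fcntl(F_GETFL)/fcntl(F_SETFL); a self-contained illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_nonblocking(int fd, int on)
    {
        int flags = fcntl(fd, F_GETFL, 0);
        if (flags < 0)
            return -1;
        flags = on ? (flags | O_NONBLOCK) : (flags & ~O_NONBLOCK);
        return fcntl(fd, F_SETFL, flags);
    }

    int main(void)
    {
        int fds[2];
        if (pipe(fds) != 0)
            return 1;
        set_nonblocking(fds[0], 1);    /* reads now fail with EAGAIN when empty */
        printf("O_NONBLOCK set: %d\n", (fcntl(fds[0], F_GETFL, 0) & O_NONBLOCK) != 0);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }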
@@ -681,7 +658,7 @@ typedef struct {
#define ERTS_SYS_DDLL_ERROR_INIT {NULL}
extern void erts_sys_ddll_free_error(ErtsSysDdllError*);
extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */
-extern int erts_sys_ddll_open2(char *path, void **handle, ErtsSysDdllError*);
+extern int erts_sys_ddll_open2(const char *path, void **handle, ErtsSysDdllError*);
#define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL)
extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*);
extern int erts_sys_ddll_load_driver_init(void *handle, void **function);
@@ -690,7 +667,7 @@ extern int erts_sys_ddll_close2(void *handle, ErtsSysDdllError*);
#define erts_sys_ddll_close(H) erts_sys_ddll_close2(H,NULL)
extern void *erts_sys_ddll_call_init(void *function);
extern void *erts_sys_ddll_call_nif_init(void *function);
-extern int erts_sys_ddll_sym2(void *handle, char *name, void **function, ErtsSysDdllError*);
+extern int erts_sys_ddll_sym2(void *handle, const char *name, void **function, ErtsSysDdllError*);
#define erts_sys_ddll_sym(H,N,F) erts_sys_ddll_sym2(H,N,F,NULL)
extern char *erts_sys_ddll_error(int code);
@@ -707,7 +684,7 @@ void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
void erts_sys_main_thread(void);
#endif
-extern void erts_sys_prepare_crash_dump(void);
+extern int erts_sys_prepare_crash_dump(int secs);
extern void erts_sys_pre_init(void);
extern void erl_sys_init(void);
extern void erl_sys_args(int *argc, char **argv);
@@ -754,22 +731,25 @@ void fini_getenv_state(GETENV_STATE *);
/* xxxP */
void init_sys_float(void);
int sys_chars_to_double(char*, double*);
-int sys_double_to_chars(double, char*);
-void sys_get_pid(char *);
+int sys_double_to_chars(double, char*, size_t);
+void sys_get_pid(char *, size_t);
/* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */
-int erts_sys_putenv(char *key_value, int sep_ix);
+int erts_sys_putenv(char *key, char *value);
+/* Simple variant used from drivers, raw eightbit interface */
+int erts_sys_putenv_raw(char *key, char *value);
/* erts_sys_getenv() returns 0 on success (length of value string in
*size), a value > 0 if value buffer is too small (*size is set to needed
size), and a value < 0 on failure. */
int erts_sys_getenv(char *key, char *value, size_t *size);
+/* Simple variant used from drivers, raw eightbit interface */
+int erts_sys_getenv_raw(char *key, char *value, size_t *size);
/* erts_sys_getenv__() is only allowed to be used in early init phase */
int erts_sys_getenv__(char *key, char *value, size_t *size);
/* Easier to use, but not as efficient, environment functions */
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
-int erts_write_env(char *key, char *value);
/* utils.c */
@@ -910,13 +890,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
extern int erts_use_kernel_poll;
#endif
-#if defined(VXWORKS)
-/* NOTE! sys_calloc2 does not exist on other
- platforms than VxWorks and OSE */
-void* sys_calloc2(Uint, Uint);
-#endif /* VXWORKS || OSE */
-
-
#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)
@@ -1023,43 +996,6 @@ void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x)
#endif
-
-#ifdef VXWORKS
-/* This includes redefines of malloc etc
- this should be done after sys_alloc, etc, above */
-# include "reclaim.h"
-/*********************Malloc and friends************************
- * There is a problem with the naming of malloc and friends,
- * malloc is used throughout sys.c and the resolver to mean save_alloc,
- * but it should actually mean either sys_alloc or sys_alloc2,
- * so the definitions from reclaim_master.h are not any
- * good, i redefine the malloc family here, although it's quite
- * ugly, actually it would be preferrable to use the
- * names sys_alloc and so on throughout the offending code, but
- * that will be saved as an later exercise...
- * I also add an own calloc, to make the BSD resolver source happy.
- ***************************************************************/
-/* Undefine malloc and friends */
-# ifdef malloc
-# undef malloc
-# endif
-# ifdef calloc
-# undef calloc
-# endif
-# ifdef realloc
-# undef realloc
-# endif
-# ifdef free
-# undef free
-# endif
-/* Redefine malloc and friends */
-# define malloc sys_alloc
-# define calloc sys_calloc
-# define realloc sys_realloc
-# define free sys_free
-
-#endif
-
#ifdef __WIN32__
#ifdef ARCH_64
#define ERTS_ALLOC_ALIGN_BYTES 16
@@ -1075,23 +1011,20 @@ void erl_bin_write(unsigned char *, int, int);
#ifdef __WIN32__
-
void call_break_handler(void);
char* last_error(void);
char* win32_errorstr(int);
-
-
#endif
/************************************************************************
* Find out the native filename encoding of the process (look at locale of
* Unix processes and just do UTF16 on windows
************************************************************************/
-#define ERL_FILENAME_UNKNOWN 0
-#define ERL_FILENAME_LATIN1 1
-#define ERL_FILENAME_UTF8 2
-#define ERL_FILENAME_UTF8_MAC 3
-#define ERL_FILENAME_WIN_WCHAR 4
+#define ERL_FILENAME_UNKNOWN (0)
+#define ERL_FILENAME_LATIN1 (1)
+#define ERL_FILENAME_UTF8 (2)
+#define ERL_FILENAME_UTF8_MAC (3)
+#define ERL_FILENAME_WIN_WCHAR (4)
int erts_get_native_filename_encoding(void);
/* The set function is only to be used by erl_init! */
@@ -1101,4 +1034,3 @@ int erts_get_user_requested_filename_encoding(void);
void erts_init_sys_common_misc(void);
#endif
-
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 164bed51d1..5261effef9 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3445,7 +3445,7 @@ erts_read_env(char *key)
char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
int res;
while (1) {
- res = erts_sys_getenv(key, value, &value_len);
+ res = erts_sys_getenv_raw(key, value, &value_len);
if (res <= 0)
break;
value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
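erts_read_env() above keeps calling the size-reporting getenv and growing its buffer until the value fits. A self-contained sketch of that loop; getenv_sized() below is a stand-in with the contract sys.h documents for erts_sys_getenv() (returns > 0 with *size updated when the buffer is too small, 0 on success, < 0 if the variable is unset):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int getenv_sized(const char *key, char *value, size_t *size)
    {
        const char *v = getenv(key);
        size_t need;
        if (!v)
            return -1;
        need = strlen(v) + 1;
        if (need > *size) {
            *size = need;
            return 1;                  /* caller's buffer is too small */
        }
        memcpy(value, v, need);
        return 0;
    }

    static char *read_env(const char *key)
    {
        size_t len = 10;
        char *value = malloc(len);
        int res;
        while ((res = getenv_sized(key, value, &len)) > 0)
            value = realloc(value, len);   /* grow to the size just reported */
        if (res < 0) {
            free(value);
            return NULL;
        }
        return value;
    }

    int main(void)
    {
        char *home = read_env("HOME");
        printf("HOME=%s\n", home ? home : "(unset)");
        free(home);
        return 0;
    }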
@@ -3464,28 +3464,6 @@ erts_free_read_env(void *value)
erts_free(ERTS_ALC_T_TMP, value);
}
-int
-erts_write_env(char *key, char *value)
-{
- int ix, res;
- size_t key_len = sys_strlen(key), value_len = sys_strlen(value);
- char *key_value = erts_alloc_fnf(ERTS_ALC_T_TMP,
- key_len + 1 + value_len + 1);
- if (!key_value) {
- errno = ENOMEM;
- return -1;
- }
- sys_memcpy((void *) key_value, (void *) key, key_len);
- ix = key_len;
- key_value[ix++] = '=';
- sys_memcpy((void *) key_value, (void *) value, value_len);
- ix += value_len;
- key_value[ix] = '\0';
- res = erts_sys_putenv(key_value, key_len);
- erts_free(ERTS_ALC_T_TMP, key_value);
- return res;
-}
-
/*
* To be used to silence unused result warnings, but do not abuse it.
*/