Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names              |   1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c         |   1
-rw-r--r--  erts/emulator/beam/beam_bp.c               | 798
-rw-r--r--  erts/emulator/beam/beam_bp.h               | 171
-rw-r--r--  erts/emulator/beam/beam_emu.c              | 137
-rw-r--r--  erts/emulator/beam/bif.c                   |  48
-rw-r--r--  erts/emulator/beam/bif.tab                 |   2
-rw-r--r--  erts/emulator/beam/break.c                 |   2
-rw-r--r--  erts/emulator/beam/copy.c                  | 130
-rw-r--r--  erts/emulator/beam/erl_alloc.c             |   7
-rw-r--r--  erts/emulator/beam/erl_bif_info.c          |  46
-rw-r--r--  erts/emulator/beam/erl_bif_port.c          |   4
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c         |   2
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c         | 111
-rw-r--r--  erts/emulator/beam/erl_db_util.c           |  17
-rw-r--r--  erts/emulator/beam/erl_debug.c             |   6
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c        |  14
-rw-r--r--  erts/emulator/beam/erl_gc.c                | 101
-rw-r--r--  erts/emulator/beam/erl_gc.h                |  39
-rw-r--r--  erts/emulator/beam/erl_init.c              |  57
-rw-r--r--  erts/emulator/beam/erl_lock_check.c        |   1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c        |  40
-rw-r--r--  erts/emulator/beam/erl_lock_count.h        |  15
-rw-r--r--  erts/emulator/beam/erl_message.c           | 133
-rw-r--r--  erts/emulator/beam/erl_message.h           |  19
-rw-r--r--  erts/emulator/beam/erl_nif.c               | 335
-rw-r--r--  erts/emulator/beam/erl_nif.h               |  38
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h     |  58
-rw-r--r--  erts/emulator/beam/erl_port_task.c         |   8
-rw-r--r--  erts/emulator/beam/erl_process.c           | 111
-rw-r--r--  erts/emulator/beam/erl_process.h           |  14
-rw-r--r--  erts/emulator/beam/erl_threads.h           |  18
-rw-r--r--  erts/emulator/beam/erl_trace.c             |  30
-rw-r--r--  erts/emulator/beam/erl_vm.h                |   1
-rw-r--r--  erts/emulator/beam/export.h                |   6
-rw-r--r--  erts/emulator/beam/global.h                |  27
-rw-r--r--  erts/emulator/beam/io.c                    |  26
-rw-r--r--  erts/emulator/beam/ops.tab                 |   2
-rw-r--r--  erts/emulator/beam/register.c              |  37
-rw-r--r--  erts/emulator/beam/sys.h                   |   2
-rw-r--r--  erts/emulator/beam/utils.c                 |   7
41 files changed, 1917 insertions(+), 705 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 1138c0c871..28f69b9460 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -123,6 +123,7 @@ atom busy_dist_port
atom busy_port
atom call
atom call_count
+atom call_time
atom caller
atom capture
atom case_clause
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 8462f1c7fd..596ad9a010 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -368,7 +368,6 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
BIF_RET(am_true);
}
-
static void
set_default_trace_pattern(Eterm module)
{
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 6278ff6bad..682f31b83f 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -30,6 +30,7 @@
#include "error.h"
#include "erl_binary.h"
#include "beam_bp.h"
+#include "erl_term.h"
/* *************************************************************************
** Macros
@@ -100,6 +101,11 @@ do { \
(b)->prev = (a); \
} while (0)
+
+#define BREAK_IS_BIF (1)
+#define BREAK_IS_ERL (0)
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -114,7 +120,7 @@ static int set_break(Eterm mfa[3], int specified,
static int set_module_break(Module *modp, Eterm mfa[3], int specified,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
@@ -122,11 +128,29 @@ static int clear_break(Eterm mfa[3], int specified,
BeamInstr break_op);
static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc,
+static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
BeamInstr break_op);
static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+
+/* bp_hash */
+#define BP_TIME_ADD(pi0, pi1) \
+ do { \
+ Uint r; \
+ (pi0)->count += (pi1)->count; \
+ (pi0)->s_time += (pi1)->s_time; \
+ (pi0)->us_time += (pi1)->us_time; \
+ r = (pi0)->us_time / 1000000; \
+ (pi0)->s_time += r; \
+ (pi0)->us_time = (pi0)->us_time % 1000000; \
+ } while(0)
+
+static void bp_hash_init(bp_time_hash_t *hash, Uint n);
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static void bp_hash_delete(bp_time_hash_t *hash);
/* *************************************************************************
@@ -156,25 +180,20 @@ erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
+/* set breakpoint data on exported bif entry */
+
void
erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- BpDataTrace *bdt;
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+}
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- MatchSetUnref(bdt->match_spec);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else {
- bdt = Alloc(sizeof(BpDataTrace));
- BpInit((BpData *) bdt, 0);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- pc[-4] = (BeamInstr) bdt;
- }
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
+ set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
+
+void erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
@@ -191,7 +210,12 @@ erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
(BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
}
-
+int
+erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, NULL,
+ (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
int
erts_clear_trace_break(Eterm mfa[3], int specified) {
@@ -209,17 +233,7 @@ erts_clear_mtrace_break(Eterm mfa[3], int specified) {
void
erts_clear_mtrace_bif(BeamInstr *pc) {
- BpDataTrace *bdt;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
-
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- if (bdt->match_spec) {
- MatchSetUnref(bdt->match_spec);
- }
- Free(bdt);
- }
- pc[-4] = (BeamInstr) NULL;
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
int
@@ -237,6 +251,13 @@ erts_clear_count_break(Eterm mfa[3], int specified) {
}
int
+erts_clear_time_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (BeamInstr) BeamOp(op_i_time_breakpoint));
+}
+
+int
erts_clear_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified, 0);
@@ -253,7 +274,7 @@ int
erts_clear_function_break(Module *modp, BeamInstr *pc) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
ASSERT(modp);
- return clear_function_break(modp, pc, 0);
+ return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
}
@@ -265,9 +286,12 @@ BeamInstr
erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid) {
Eterm tpid1, tpid2;
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+ ASSERT(bds);
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
ASSERT(bdt);
bdt = (BpDataTrace *) bdt->next;
ASSERT(bdt);
@@ -286,7 +310,7 @@ erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
bdt->tracer_pid = tpid2;
ErtsSmpBPUnlock(bdt);
}
- pc[-4] = (BeamInstr) bdt;
+ bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
return bdt->orig_instr;
}
@@ -298,12 +322,15 @@ erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32
erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
Eterm *tracer_pid) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+
ASSERT(tracer_pid);
- if (bdt) {
+ if (bds) {
Eterm tpid1, tpid2;
Uint32 flags;
+ bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
ErtsSmpBPLock(bdt);
tpid1 = tpid2 = bdt->tracer_pid;
@@ -364,24 +391,6 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_r
}
int
-erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
-
- if (bdt) {
- if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
- }
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
- }
- return !0;
- }
- return 0;
-}
-
-int
erts_is_native_break(BeamInstr *pc) {
#ifdef HIPE
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
@@ -399,15 +408,69 @@ erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
if (bdc) {
if (count_ret) {
- ErtsSmpBPLock(bdc);
- *count_ret = bdc->count;
- ErtsSmpBPUnlock(bdc);
+ *count_ret = (Sint) erts_smp_atomic_read(&bdc->acount);
}
return !0;
}
return 0;
}
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+ Uint i, ix;
+ bp_time_hash_t hash;
+ Uint size;
+ Eterm *hp, t;
+ bp_data_time_item_t *item = NULL;
+ BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ if (bdt) {
+ if (retval) {
+ /* collect all hashes to one hash */
+ bp_hash_init(&hash, 64);
+ /* foreach threadspecific hash */
+ for (i = 0; i < bdt->n; i++) {
+ bp_data_time_item_t *sitem;
+
+ /* foreach hash bucket not NIL */
+ for(ix = 0; ix < bdt->hash[i].n; ix++) {
+ item = &(bdt->hash[i].item[ix]);
+ if (item->pid != NIL) {
+ sitem = bp_hash_get(&hash, item);
+ if (sitem) {
+ BP_TIME_ADD(sitem, item);
+ } else {
+ bp_hash_put(&hash, item);
+ }
+ }
+ }
+ }
+ /* *retval should be NIL or term from previous bif in export entry */
+
+ if (hash.used > 0) {
+ size = (5 + 2)*hash.used;
+ hp = HAlloc(p, size);
+
+ for(ix = 0; ix < hash.n; ix++) {
+ item = &(hash.item[ix]);
+ if (item->pid != NIL) {
+ t = TUPLE4(hp, item->pid,
+ make_small(item->count),
+ make_small(item->s_time),
+ make_small(item->us_time));
+ hp += 5;
+ *retval = CONS(hp, t, *retval); hp += 2;
+ }
+ }
+ }
+ bp_hash_delete(&hash);
+ }
+ return !0;
+ }
+
+ return 0;
+}
+
+
BeamInstr *
erts_find_local_func(Eterm mfa[3]) {
Module *modp;
@@ -432,6 +495,351 @@ erts_find_local_func(Eterm mfa[3]) {
return NULL;
}
+/* bp_hash */
+ERTS_INLINE Uint bp_sched2ix() {
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp;
+ esdp = erts_get_scheduler_data();
+ return esdp->no - 1;
+#else
+ return 0;
+#endif
+}
+static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint i;
+
+ hash->n = n;
+ hash->used = 0;
+
+ hash->item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(hash->item, size);
+
+ for(i = 0; i < n; ++i) {
+ hash->item[i].pid = NIL;
+ }
+}
+
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
+ bp_data_time_item_t *item = NULL;
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint ix;
+ Uint hval;
+
+ item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(item, size);
+
+ for( ix = 0; ix < n; ++ix) {
+ item[ix].pid = NIL;
+ }
+
+ /* rehash, old hash -> new hash */
+
+ for( ix = 0; ix < hash->n; ix++) {
+ if (hash->item[ix].pid != NIL) {
+
+ hval = ((hash->item[ix].pid) >> 4) % n; /* new n */
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % n;
+ }
+ item[hval].pid = hash->item[ix].pid;
+ item[hval].count = hash->item[ix].count;
+ item[hval].s_time = hash->item[ix].s_time;
+ item[hval].us_time = hash->item[ix].us_time;
+ }
+ }
+
+ Free(hash->item);
+ hash->n = n;
+ hash->item = item;
+}
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem) {
+ Eterm pid = sitem->pid;
+ Uint hval = (pid >> 4) % hash->n;
+ bp_data_time_item_t *item = NULL;
+
+ item = hash->item;
+
+ while (item[hval].pid != pid) {
+ if (item[hval].pid == NIL) return NULL;
+ hval = (hval + 1) % hash->n;
+ }
+
+ return &(item[hval]);
+}
+
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t* sitem) {
+ Uint hval;
+ float r = 0.0;
+ bp_data_time_item_t *item;
+
+ /* make sure that the hash is not saturated */
+ /* if saturated, rehash it */
+
+ r = hash->used / (float) hash->n;
+
+ if (r > 0.7f) {
+ bp_hash_rehash(hash, hash->n * 2);
+ }
+ /* Do hval after rehash */
+ hval = (sitem->pid >> 4) % hash->n;
+
+ /* find free slot */
+ item = hash->item;
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % hash->n;
+ }
+ item = &(hash->item[hval]);
+
+ item->pid = sitem->pid;
+ item->s_time = sitem->s_time;
+ item->us_time = sitem->us_time;
+ item->count = sitem->count;
+ hash->used++;
+
+ return item;
+}
+
+static void bp_hash_delete(bp_time_hash_t *hash) {
+ hash->n = 0;
+ hash->used = 0;
+ Free(hash->item);
+ hash->item = NULL;
+}
+
+static void bp_time_diff(bp_data_time_item_t *item, /* out */
+ process_breakpoint_time_t *pbt, /* in */
+ Uint ms, Uint s, Uint us) {
+ int dms,ds,dus;
+
+ dms = ms - pbt->ms;
+ ds = s - pbt->s;
+ dus = us - pbt->us;
+
+ /* get_sys_now may return zero difftime,
+ * this is ok.
+ */
+
+ ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
+
+ if (dus < 0) {
+ dus += 1000000;
+ ds -= 1;
+ }
+ if (ds < 0) {
+ ds += 1000000;
+ }
+
+ item->s_time = ds;
+ item->us_time = dus;
+}
+
+void erts_schedule_time_break(Process *p, Uint schedule) {
+ Uint ms, s, us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+
+ if (pbt) {
+
+ switch(schedule) {
+ case ERTS_BP_CALL_TIME_SCHEDULE_EXITING :
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_OUT :
+ /* When a process is scheduled _out_,
+ * timestamp it and add its delta to
+ * the previous breakpoint.
+ */
+
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ if (pbdt) {
+ get_sys_now(&ms,&s,&us);
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_IN :
+ /* When a process is scheduled _in_,
+ * timestamp it and remove the previous
+ * timestamp in the psd.
+ */
+ get_sys_now(&ms,&s,&us);
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+ } /* pbt */
+}
+
+/* call_time breakpoint
+ * Accumulated times are added to the previous bp,
+ * not the current one. The current one is saved
+ * for future reference.
+ * The previous breakpoint is stored in the process itself, the psd.
+ * We do not need to store in a stack frame.
+ * There is no need for locking, each thread has its own
+ * area in each bp to save data.
+ * Since we need to differentiate between processes for each bp,
+ * every bp has a hash (per thread) to process-bp statistics.
+ * - egil
+ */
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(p->status == P_RUNNING);
+
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ switch(type) {
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ case ERTS_BP_CALL_TIME_CALL:
+ case ERTS_BP_CALL_TIME_TAIL_CALL:
+
+ if (pbt) {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ } else {
+ /* first call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
+ }
+ /* add count to this code */
+ sitem.pid = p->id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+
+ case ERTS_BP_CALL_TIME_RETURN:
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+ * add diff (t1, t0) to item
+ */
+
+ if(pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+}
/* *************************************************************************
@@ -489,26 +897,35 @@ static int set_module_break(Module *modp, Eterm mfa[3], int specified,
ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
(specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
+ BeamInstr *pc = code_ptr+5;
num_processed +=
- set_function_break(modp, pc, match_spec,
+ set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
break_op, count_op, tracer_pid);
}
}
return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid) {
- BpData *bd, **r;
+
+ BeamInstr **code_base = NULL;
+ BpData *bd, **r, ***rs;
size_t size;
- BeamInstr **code_base = (BeamInstr **)modp->code;
+ Uint ix = 0;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)modp->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(modp == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
@@ -519,6 +936,7 @@ static int set_function_break(Module *modp, BeamInstr *pc,
if ( (bd = is_break(pc, break_op))) {
if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
|| break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
+
BpDataTrace *bdt = (BpDataTrace *) bd;
Binary *old_match_spec;
@@ -531,20 +949,44 @@ static int set_function_break(Module *modp, BeamInstr *pc,
ErtsSmpBPUnlock(bdt);
MatchSetUnref(old_match_spec);
} else {
+ BpDataCount *bdc = (BpDataCount *) bd;
+ long count = 0;
+ long res = 0;
+
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- ErtsSmpBPLock(bdc);
+ if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
if (count_op == erts_break_stop) {
- if (bdc->count >= 0) {
- bdc->count = -bdc->count-1; /* Stop call counter */
+ count = erts_smp_atomic_read(&bdc->acount);
+ if (count >= 0) {
+ while(1) {
+ res = erts_smp_atomic_cmpxchg(&bdc->acount, -count - 1, count);
+ if ((res == count) || count < 0) break;
+ count = res;
+ }
}
} else {
- bdc->count = 0; /* Reset call counter */
+ /* Reset call counter */
+ erts_smp_atomic_set(&bdc->acount, 0);
}
- ErtsSmpBPUnlock(bdc);
+
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ if (count_op == erts_break_stop) {
+ bdt->pause = 1;
+ } else {
+ bdt->pause = 0;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
+ }
+
} else {
ASSERT (! count_op);
}
@@ -558,43 +1000,63 @@ static int set_function_break(Module *modp, BeamInstr *pc,
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset
- || count_op == erts_break_stop) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
/* Do not insert a new breakpoint */
return 1;
}
size = sizeof(BpDataCount);
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return 1;
+ }
+ size = sizeof(BpDataTime);
} else {
ASSERT(! count_op);
ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
size = sizeof(BpDataDebug);
}
}
- r = (BpData **) (pc-4);
+ rs = (BpData ***) (pc-4);
+ if (! *rs) {
+ size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
+ *rs = (BpData **) Alloc(ssize);
+ sys_memzero(*rs, ssize);
+ }
+
+ r = &((*rs)[0]);
+
if (! *r) {
ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
/* First breakpoint; create singleton ring */
bd = Alloc(size);
BpInit(bd, *pc);
- *pc = break_op;
*r = bd;
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
} else {
ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
*pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint));
+ *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
+ *pc == (BeamInstr) em_apply_bif);
if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp must be last, so if it is also first;
* it must be singleton. */
- ASSERT(BpSingleton(*r));
+ ASSERT(BpSingleton(*r));
/* Insert new bp first in the ring, i.e second to last. */
bd = Alloc(size);
BpInitAndSpliceNext(bd, *pc, *r);
- *pc = break_op;
- } else if ((*r)->prev->orig_instr
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
+ } else if ((*r)->prev->orig_instr
== (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp last in the ring; insert new second to last. */
bd = Alloc(size);
@@ -608,6 +1070,11 @@ static int set_function_break(Module *modp, BeamInstr *pc,
*r = bd;
}
}
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+
+ bd->this_instr = break_op;
/* Init the bp type specific data */
if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
@@ -617,12 +1084,25 @@ static int set_function_break(Module *modp, BeamInstr *pc,
MatchSetRef(match_spec);
bdt->match_spec = match_spec;
bdt->tracer_pid = tracer_pid;
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ bdt->pause = 0;
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
} else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
BpDataCount *bdc = (BpDataCount *) bd;
+ erts_smp_atomic_init(&bdc->acount, 0);
+ }
- bdc->count = 0;
+ if (bif == BREAK_IS_ERL) {
+ ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
}
- ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
return 1;
}
@@ -672,33 +1152,51 @@ static int clear_module_break(Module *m, Eterm mfa[3], int specified,
BeamInstr *pc = code_ptr + 5;
num_processed +=
- clear_function_break(m, pc, break_op);
+ clear_function_break(m, pc, BREAK_IS_ERL, break_op);
}
}
return num_processed;
}
-static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
+static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
BpData *bd;
- BeamInstr **code_base = (BeamInstr **)m->code;
-
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
+ Uint ix = 0;
+ BeamInstr **code_base = NULL;
+
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)m->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(m == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
if (erts_is_native_break(pc)) {
return 0;
}
+
while ( (bd = is_break(pc, break_op))) {
/* Remove all breakpoints of this type.
* There should be only one of each type,
* but break_op may be 0 which matches any type.
*/
BeamInstr op;
- BpData **r = (BpData **) (pc-4);
+ BpData ***rs = (BpData ***) (pc - 4);
+ BpData **r = NULL;
+
+#ifdef DEBUG
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ ASSERT((*rs)[ix] == (*rs)[0]);
+ }
+#endif
+ r = &((*rs)[0]);
+
ASSERT(*r);
/* Find opcode for this breakpoint */
if (break_op) {
@@ -714,8 +1212,11 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
if (BpSingleton(bd)) {
ASSERT(*r == bd);
/* Only one breakpoint to remove */
- *r = NULL;
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
+ Free(*rs);
+ *rs = NULL;
} else {
BpData *bd_prev = bd->prev;
@@ -727,7 +1228,9 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
bd_prev->orig_instr = bd->orig_instr;
} else if (bd_prev == *r) {
/* We removed the first breakpoint in the ring */
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
} else {
bd_prev->orig_instr = bd->orig_instr;
}
@@ -736,13 +1239,53 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
BpDataTrace *bdt = (BpDataTrace *) bd;
-
MatchSetUnref(bdt->match_spec);
}
+ if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t *item = NULL;
+ process_breakpoint_time_t *pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
+ */
+
+ for( i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = process_tab[internal_pid_index(item->pid)];
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
+ if (pbt) {
+ Free(pbt);
+ }
+ }
+ }
+ }
+ }
+ bp_hash_delete(&(bdt->hash[i]));
+ }
+ Free(bdt->hash);
+ bdt->hash = NULL;
+ bdt->n = 0;
+ }
Free(bd);
- ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
- --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
- }
+ if (bif == BREAK_IS_ERL) {
+ ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
+ --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
+ }
+ if (*rs) {
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+ }
+ } /* while bd != NULL */
return 1;
}
@@ -755,32 +1298,63 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
** returned. The program counter must point to the first executable
** (breakpoint) instruction of the function.
*/
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
+
+BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
+ return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+}
+
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if (! erts_is_native_break(pc)) {
- BpData *bd = (BpData *) pc[-4];
-
- if (break_op == 0) {
- return bd;
- }
- if (*pc == break_op) {
- ASSERT(bd);
- return bd->next;
- }
- if (! bd){
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+
+ if (! rs) {
return NULL;
}
+
+ bd = ebd = rs[bp_sched2ix_proc(p)];
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ return bd;
+ }
+
bd = bd->next;
- while (bd != (BpData *) pc[-4]) {
+ while (bd != ebd) {
ASSERT(bd);
- if (bd->orig_instr == break_op) {
- bd = bd->next;
+ if (bd->this_instr == break_op) {
ASSERT(bd);
return bd;
- } else {
- bd = bd->next;
}
+ bd = bd->next;
}
}
return NULL;
}
+
+static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+
+ if (! rs) {
+ return NULL;
+ }
+
+ bd = ebd = rs[bp_sched2ix()];
+ ASSERT(bd);
+ if ( (break_op == 0) || (bd->this_instr == break_op)) {
+ return bd;
+ }
+
+ bd = bd->next;
+ while (bd != ebd) {
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ ASSERT(bd);
+ return bd;
+ }
+ bd = bd->next;
+ }
+ return NULL;
+}
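
The bp_hash_* helpers added above implement a small open-addressing table keyed on the pid value: slots are probed linearly, NIL marks a free slot, and the table is rehashed to twice its size once the load factor passes 0.7 (see bp_hash_put/bp_hash_rehash). A minimal standalone sketch of the same scheme follows; slot_t, hash_t and the EMPTY sentinel are simplified stand-ins for illustration, not the ERTS types.

    #include <stdlib.h>

    #define EMPTY 0UL                        /* plays the role of NIL */

    typedef struct { unsigned long pid; unsigned long count; } slot_t;
    typedef struct { slot_t *item; size_t n, used; } hash_t;

    static size_t probe(const hash_t *h, unsigned long pid) {
        size_t ix = (pid >> 4) % h->n;       /* same hash as bp_hash_get */
        while (h->item[ix].pid != EMPTY && h->item[ix].pid != pid)
            ix = (ix + 1) % h->n;            /* linear probing */
        return ix;
    }

    static void grow(hash_t *h) {
        hash_t nh = { calloc(h->n * 2, sizeof(slot_t)), h->n * 2, 0 };
        for (size_t i = 0; i < h->n; i++)
            if (h->item[i].pid != EMPTY) {   /* re-insert every live slot */
                nh.item[probe(&nh, h->item[i].pid)] = h->item[i];
                nh.used++;
            }
        free(h->item);
        *h = nh;
    }

    static slot_t *put(hash_t *h, unsigned long pid) {
        if ((double) h->used / h->n > 0.7)   /* keep the table sparse enough */
            grow(h);
        size_t ix = probe(h, pid);
        if (h->item[ix].pid == EMPTY) {      /* new entry for this pid */
            h->item[ix].pid = pid;
            h->used++;
        }
        return &h->item[ix];                 /* caller bumps count / adds time */
    }

    int main(void) {
        hash_t h = { calloc(8, sizeof(slot_t)), 8, 0 };
        put(&h, 0x1234)->count++;            /* accumulate stats for one "pid" */
        put(&h, 0x1234)->count++;
        free(h.item);
        return 0;
    }

The real items additionally carry the accumulated seconds/microseconds (s_time, us_time), and there is one table per scheduler, which is why the hot path needs no locking.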
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 786cbbe9d9..b5d5b3c203 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -27,28 +27,46 @@
-/*
-** Common struct to all bp_data_*
-**
-** Two gotchas:
-**
-** 1) The type of bp_data structure in the ring is deduced from the
-** orig_instr field of the structure _before_ in the ring, except for
-** the first structure in the ring that has its instruction in
-** pc[0] of the code to execute.
-**
-** 2) pc[-4] points to the _last_ structure in the ring before the
-** breakpoints are being executed.
-**
-** So, as an example, when a breakpointed function starts to execute,
-** the first instruction that is a breakpoint instruction at pc[0] finds
-** its data at ((BpData *) pc[-4])->next and has to cast that pointer
-** to the correct bp_data type.
+/* A couple of gotchas:
+ *
+ * The breakpoint structure as seen from BeamInstr code:
+ * in beam_emu the instruction counter pointer, I (or pc),
+ * points to the *current* instruction. At that time, if the instruction
+ * is a breakpoint instruction, the pc looks like the following,
+ *
+ * I[-5] | op_i_func_info_IaaI  | scheduler specific entries
+ * I[-4] | BpData** bpa         | --> | BpData * bdas1 | ... | BpData * bdasN |
+ * I[-3] | Tagged Module        |        |                        |
+ * I[-2] | Tagged Function      |        V                        V
+ * I[-1] | Arity                |        BpData -> BpData -> BpData -> BpData
+ * I[0]  | The bp instruction   |          ^      * the bp wheel *          |
+ *                                         |----------------------------------|
+ *
+ * Common struct to all bp_data_*
+ *
+ * 1) The type of bp_data structure in the ring is deduced from the
+ * orig_instr field of the structure _before_ in the ring, except for
+ * the first structure in the ring that has its instruction in
+ * pc[0] of the code to execute.
+ * This is valid as long as you don't search for the function while it is
+ * being executed by something else, or while it is in the middle of its
+ * rotation for any other reason.
+ * A key, the bp beam instruction, is included for this reason.
+ *
+ * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
+ * breakpoints are being executed.
+ *
+ * So, as an example, when a breakpointed function starts to execute,
+ * the first instruction that is a breakpoint instruction at pc[0] finds
+ * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
+ * to the correct bp_data type.
*/
+
typedef struct bp_data {
struct bp_data *next; /* Doubly linked ring pointers */
struct bp_data *prev; /* -"- */
BeamInstr orig_instr; /* The original instruction to execute */
+ BeamInstr this_instr; /* key */
} BpData;
/*
** All the following bp_data_.. structs must begin the same way
@@ -57,26 +75,67 @@ typedef struct bp_data {
typedef struct bp_data_trace {
struct bp_data *next;
struct bp_data *prev;
- BeamInstr orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
Binary *match_spec;
- Eterm tracer_pid;
+ Eterm tracer_pid;
} BpDataTrace;
typedef struct bp_data_debug {
struct bp_data *next;
struct bp_data *prev;
- BeamInstr orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
} BpDataDebug;
-typedef struct bp_data_count { /* Call count */
+typedef struct bp_data_count { /* Call count */
struct bp_data *next;
struct bp_data *prev;
BeamInstr orig_instr;
- Sint count;
+ BeamInstr this_instr; /* key */
+ erts_smp_atomic_t acount;
} BpDataCount;
+typedef struct {
+ Eterm pid;
+ Sint count;
+ Uint s_time;
+ Uint us_time;
+} bp_data_time_item_t;
+
+typedef struct {
+ Uint n;
+ Uint used;
+ bp_data_time_item_t *item;
+} bp_time_hash_t;
+
+typedef struct bp_data_time { /* Call time */
+ struct bp_data *next;
+ struct bp_data *prev;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
+ Uint pause;
+ Uint n;
+ bp_time_hash_t *hash;
+} BpDataTime;
+
+typedef struct {
+ Uint ms;
+ Uint s;
+ Uint us;
+ Uint *pc;
+} process_breakpoint_time_t; /* used within psd */
+
extern erts_smp_spinlock_t erts_bp_lock;
+#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
+#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
+#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
+
+#define ERTS_BP_CALL_TIME_CALL (0)
+#define ERTS_BP_CALL_TIME_RETURN (1)
+#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
+
#ifdef ERTS_SMP
#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
@@ -85,31 +144,46 @@ extern erts_smp_spinlock_t erts_bp_lock;
#define ErtsSmpBPUnlock(BDC)
#endif
-#define ErtsCountBreak(pc,instr_result) \
-do { \
- BpDataCount *bdc = (BpDataCount *) (pc)[-4]; \
- \
+ERTS_INLINE Uint bp_sched2ix(void);
+
+#ifdef ERTS_SMP
+#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
+#else
+#define bp_sched2ix_proc(p) (0)
+#endif
+
+#define ErtsCountBreak(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpDataCount *bdc = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ long count = 0; \
+ \
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bdc); \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- (pc)[-4] = (BeamInstr) bdc; \
- ErtsSmpBPLock(bdc); \
- if (bdc->count >= 0) bdc->count++; \
- ErtsSmpBPUnlock(bdc); \
- *(instr_result) = bdc->orig_instr; \
+ ASSERT(bds); \
+ bdc = (BpDataCount *) bds[ix]; \
+ bdc = (BpDataCount *) bdc->next; \
+ ASSERT(bdc); \
+ bds[ix] = (BpData *) bdc; \
+ count = erts_smp_atomic_read(&bdc->acount); \
+ if (count >= 0) erts_smp_atomic_inc(&bdc->acount); \
+ *(instr_result) = bdc->orig_instr; \
} while (0)
-#define ErtsBreakSkip(pc,instr_result) \
-do { \
- BpData *bd = (BpData *) (pc)[-4]; \
- \
+#define ErtsBreakSkip(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpData *bd = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ \
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- (pc)[-4] = (BeamInstr) bd; \
- *(instr_result) = bd->orig_instr; \
+ ASSERT(bds); \
+ bd = bds[ix]; \
+ ASSERT(bd); \
+ bd = bd->next; \
+ ASSERT(bd); \
+ bds[ix] = bd; \
+ *(instr_result) = bd->orig_instr; \
} while (0)
enum erts_break_op{
@@ -159,6 +233,17 @@ int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_schedule_time_break(Process *p, Uint out);
+int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
+int erts_clear_time_break(Eterm mfa[3], int specified);
+
+int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
+void erts_clear_time_trace_bif(BeamInstr *pc);
+BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
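
The header comment above describes the new layout: pc[-4] no longer holds a single BpData pointer but a pointer to an array with one entry per scheduler, each entry pointing into the shared breakpoint ring, and this_instr serves as the key when searching that ring. The lookup is roughly as in the sketch below; the types are simplified and find_bp is illustrative only (the real code is get_break()/is_break() in beam_bp.c, with the index coming from bp_sched2ix_proc()).

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Instr;

    typedef struct bp {
        struct bp *next, *prev;              /* doubly linked ring, as in BpData */
        Instr orig_instr;
        Instr this_instr;                    /* key identifying the breakpoint type */
    } Bp;

    /* In the real code 'per_sched' is fetched as (BpData **) pc[-4] and
     * 'sched_ix' is the scheduler number minus one. */
    static Bp *find_bp(Bp **per_sched, unsigned sched_ix, Instr break_op)
    {
        Bp *start, *bd;

        if (!per_sched)
            return NULL;
        start = bd = per_sched[sched_ix];    /* this scheduler's ring position */
        do {
            if (bd->this_instr == break_op)
                return bd;
            bd = bd->next;                   /* walk the ring */
        } while (bd != start);
        return NULL;
    }

    int main(void)
    {
        Bp a = { &a, &a, 0, 42 };            /* singleton ring, key 42 */
        Bp *scheds[2] = { &a, &a };
        return find_bp(scheds, 0, 42) == &a ? 0 : 1;
    }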
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index d42e74ccc9..c0680086aa 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -228,9 +228,10 @@ BeamInstr* em_call_traced_function;
** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
-BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
-BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
* All Beam instructions in numerical order.
@@ -4403,35 +4404,109 @@ apply_bif_or_nif_epilogue:
OpCase(i_count_breakpoint): {
BeamInstr real_I;
- ErtsCountBreak((BeamInstr *) I, &real_I);
+ ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
}
+ /* need to send mfa instead of bdt pointer
+ * the pointer might be deallocated.
+ */
+
+ OpCase(i_time_breakpoint): {
+ BeamInstr real_I;
+ BpData **bds = (BpData **) (I)[-4];
+ BpDataTime *bdt = NULL;
+ Uint ix = 0;
+#ifdef ERTS_SMP
+ ix = c_p->scheduler_data->no - 1;
+#else
+ ix = 0;
+#endif
+ bdt = (BpDataTime *)bds[ix];
+
+ ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ ASSERT(bdt);
+ bdt = (BpDataTime *) bdt->next;
+ ASSERT(bdt);
+ bds[ix] = (BpData *) bdt;
+ real_I = bdt->orig_instr;
+ ASSERT(VALID_INSTR(real_I));
+
+ if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
+ if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
+ /* This _IS_ a tail recursive call */
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
+ SWAPIN;
+ } else {
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
+
+ /* r register needs to be copied to the array
+ * for the garbage collector
+ */
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < HTOP) {
+ reg[0] = r(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ }
+ SWAPIN;
+
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = (BeamInstr *) make_cp(beam_return_time_trace);
+ }
+ }
+
+ Goto(real_I);
+ }
+
+ OpCase(i_return_time_trace): {
+ BeamInstr *pc = (BeamInstr *) (UWord) E[0];
+ SWAPOUT;
+ erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[1]));
+ E += 2;
+ Goto(*I);
+ }
+
OpCase(i_trace_breakpoint):
if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
BeamInstr real_I;
- ErtsBreakSkip((BeamInstr *) I, &real_I);
+ ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
Goto(real_I);
}
/* Fall through to next case */
OpCase(i_mtrace_breakpoint): {
- Uint real_I;
+ BeamInstr real_I;
Uint32 flags;
Eterm tracer_pid;
- Uint *cpp;
+ BeamInstr *cpp;
int return_to_trace = 0, need = 0;
flags = 0;
SWAPOUT;
reg[0] = r(0);
if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = (Uint*)&E[2];
- } else if (*(c_p->cp)
- == (BeamInstr) OpCode(i_return_to_trace)) {
+ cpp = (BeamInstr*)&E[2];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
- cpp = (Uint*)&E[0];
+ cpp = (BeamInstr*)&E[0];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ return_to_trace = !0;
+ cpp = (BeamInstr*)&E[0];
} else {
cpp = NULL;
}
@@ -4448,6 +4523,8 @@ apply_bif_or_nif_epilogue:
} else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
cpp += 1;
+ } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ cpp += 2;
} else
break;
}
@@ -4957,13 +5034,15 @@ apply_bif_or_nif_epilogue:
em_call_error_handler = OpCode(call_error_handler);
em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
- beam_apply[0] = (BeamInstr) OpCode(i_apply);
- beam_apply[1] = (BeamInstr) OpCode(normal_exit);
- beam_exit[0] = (BeamInstr) OpCode(error_action_code);
- beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
- beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
- beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
- beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
+
+ beam_apply[0] = (BeamInstr) OpCode(i_apply);
+ beam_apply[1] = (BeamInstr) OpCode(normal_exit);
+ beam_exit[0] = (BeamInstr) OpCode(error_action_code);
+ beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
+ beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
+ beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
+ beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
+ beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
/*
* Enter all BIFs into the export table.
@@ -4975,6 +5054,8 @@ apply_bif_or_nif_epilogue:
bif_export[i] = ep;
ep->code[3] = (BeamInstr) OpCode(apply_bif);
ep->code[4] = (BeamInstr) bif_table[i].f;
+ /* XXX: set func info for bifs */
+ ((BeamInstr*)ep->code + 3)[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
}
return;
@@ -5167,14 +5248,18 @@ next_catch(Process* c_p, Eterm *reg) {
int active_catches = c_p->catches > 0;
int have_return_to_trace = 0;
Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
+
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+
ptr = prev = c_p->stop;
ASSERT(is_CP(*ptr));
ASSERT(ptr <= STACK_START(c_p));
if (ptr == STACK_START(c_p)) return NULL;
if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
- *cp_val(*ptr) != i_return_to_trace))
+ *cp_val(*ptr) != i_return_to_trace &&
+ *cp_val(*ptr) != i_return_time_trace ))
&& c_p->cp) {
/* Can not follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
@@ -5186,6 +5271,9 @@ next_catch(Process* c_p, Eterm *reg) {
} else if (cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ } else if (cpp == beam_return_time_trace) {
+ /* Skip return_trace parameters */
+ ptr += 1;
} else if (cpp == beam_return_to_trace) {
have_return_to_trace = !0; /* Record next cp */
}
@@ -5215,6 +5303,13 @@ next_catch(Process* c_p, Eterm *reg) {
}
have_return_to_trace = !0; /* Record next cp */
return_to_trace_ptr = NULL;
+ } else if (*cp_val(*prev) == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
+ if (is_catch(*ptr) && active_catches) goto found_catch;
+ }
+ /* Skip return_trace parameters */
+ ptr += 1;
} else {
if (have_return_to_trace) {
/* Record this cp as possible return_to trace cp */
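
The i_time_breakpoint case above handles call_time accounting across returns: for a non-tail call it pushes the breakpointed pc and the original return address and swaps c_p->cp for beam_return_time_trace, so the eventual return lands in i_return_time_trace, which charges the elapsed time to that pc and continues at the saved address. A stripped-down sketch of that pairing, with the emulator state reduced to a plain array; on_call/on_return are made-up names for illustration.

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Instr;

    static Instr stack[64];
    static Instr *E = stack + 64;            /* grows downward, like BEAM's E */
    static Instr *cp;                        /* continuation (return) pointer */
    static Instr return_time_trace[1];       /* stands in for beam_return_time_trace */

    /* What i_time_breakpoint does on a non-tail call. */
    static void on_call(Instr *breakpoint_pc)
    {
        E -= 2;
        E[0] = (Instr) breakpoint_pc;        /* pc whose time bucket gets charged */
        E[1] = (Instr) cp;                   /* original return address */
        cp = return_time_trace;              /* route the return via the trampoline */
    }

    /* What i_return_time_trace does when that return executes. */
    static Instr *on_return(void)
    {
        Instr *bp_pc = (Instr *) E[0];
        Instr *ret   = (Instr *) E[1];
        E += 2;
        cp = NULL;
        /* the real code calls erts_trace_time_break(c_p, bp_pc, NULL,
         * ERTS_BP_CALL_TIME_RETURN) here to add the elapsed time */
        (void) bp_pc;
        return ret;                          /* continue at the real caller */
    }

    int main(void)
    {
        Instr caller_code[1];
        cp = caller_code;
        on_call(caller_code);                /* hypothetical breakpointed call */
        return on_return() == caller_code ? 0 : 1;
    }

Tail calls skip the push (the previous call's frame is still on the stack), which is why the opcode first checks whether c_p->cp already points at one of the trace return instructions.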
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 7a69dc6eff..506bf383ca 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -4171,54 +4171,6 @@ void erts_init_bif(void)
await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
}
-BIF_RETTYPE blocking_read_file_1(BIF_ALIST_1)
-{
- Eterm bin;
- Eterm* hp;
- byte *buff;
- int i, buff_size;
- FILE *file;
- struct stat file_info;
- char *filename = NULL;
- size_t size;
-
- i = list_length(BIF_ARG_1);
- if (i < 0) {
- BIF_ERROR(BIF_P, BADARG);
- }
- filename = erts_alloc(ERTS_ALC_T_TMP, i + 1);
- if (intlist_to_buf(BIF_ARG_1, filename, i) != i)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- filename[i] = '\0';
-
- hp = HAlloc(BIF_P, 3);
-
- file = fopen(filename, "r");
- if(file == NULL){
- erts_free(ERTS_ALC_T_TMP, (void *) filename);
- BIF_RET(TUPLE2(hp, am_error, am_nofile));
- }
-
- stat(filename, &file_info);
- erts_free(ERTS_ALC_T_TMP, (void *) filename);
-
- buff_size = file_info.st_size;
- buff = (byte *) erts_alloc_fnf(ERTS_ALC_T_TMP, buff_size);
- if (!buff) {
- fclose(file);
- BIF_RET(TUPLE2(hp, am_error, am_allocator));
- }
- size = fread(buff, 1, buff_size, file);
- fclose(file);
- if (size < 0)
- size = 0;
- else if (size > buff_size)
- size = (size_t) buff_size;
- bin = new_binary(BIF_P, buff, (int) size);
- erts_free(ERTS_ALC_T_TMP, (void *) buff);
-
- BIF_RET(TUPLE2(hp, am_ok, bin));
-}
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index e4713567de..0674aae77f 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -686,8 +686,6 @@ bif 'erl.system.code':make_stub_module/3 ebif_code_make_stub_module_3
bif code:is_module_native/1
bif 'erl.system.code':is_native/1 ebif_code_is_native_1 code_is_module_native_1
-bif erlang:blocking_read_file/1
-
#
# New Bifs in R9C.
#
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 5cb1481a3a..857cb177c8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -258,12 +258,10 @@ print_process_info(int to, void *to_arg, Process *p)
}
{
- long s = 0;
int frags = 0;
ErlHeapFragment *m = p->mbuf;
while (m != NULL) {
frags++;
- s += m->size;
m = m->next;
}
erts_print(to, to_arg, "Number of heap fragments: %d\n", frags);
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 0a5050b1fe..521a1b1788 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -37,6 +37,8 @@ MA_STACK_DECLARE(dst);
MA_STACK_DECLARE(offset);
#endif
+static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*);
+
void
init_copy(void)
{
@@ -86,7 +88,7 @@ size_object(Eterm obj)
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
@@ -99,7 +101,7 @@ size_object(Eterm obj)
arity = header_arity(hdr);
sum += arity + 1;
if (arity == 0) { /* Empty tuple -- unusual. */
- goto size_common;
+ goto pop_next;
}
while (arity-- > 1) {
obj = *++ptr;
@@ -115,7 +117,6 @@ size_object(Eterm obj)
ErlFunThing* funp = (ErlFunThing *) bptr;
unsigned eterms = 1 /* creator */ + funp->num_free;
unsigned sz = thing_arityval(hdr);
-
sum += 1 /* header */ + sz + eterms;
bptr += 1 /* header */ + sz;
while (eterms-- > 1) {
@@ -151,7 +152,7 @@ size_object(Eterm obj)
} else {
sum += heap_bin_size(binary_size(obj)+extra_bytes);
}
- goto size_common;
+ goto pop_next;
}
break;
case BIN_MATCHSTATE_SUBTAG:
@@ -159,18 +160,12 @@ size_object(Eterm obj)
"size_object: matchstate term not allowed");
default:
sum += thing_arityval(hdr) + 1;
- /* Fall through */
- size_common:
- if (ESTACK_ISEMPTY(s)) {
- DESTROY_ESTACK(s);
- return sum;
- }
- obj = ESTACK_POP(s);
- break;
+ goto pop_next;
}
}
break;
case TAG_PRIMARY_IMMED1:
+ pop_next:
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
return sum;
@@ -979,3 +974,104 @@ copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*hpp = hp;
return make_tuple(ptr + offs);
}
+
+/* Move all terms in heap fragments into heap. The terms must be guaranteed to
+ * be contained within the fragments. The source terms are destructed with
+ * move markers.
+ * Typically used to copy a multi-fragmented message (from NIF).
+ */
+void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs)
+{
+ ErlHeapFragment* bp;
+ Eterm* hp_start = *hpp;
+ Eterm* hp_end;
+ Eterm* hp;
+ unsigned i;
+
+ for (bp=first; bp!=NULL; bp=bp->next) {
+ move_one_frag(hpp, bp->mem, bp->used_size, off_heap);
+ off_heap->overhead += bp->off_heap.overhead;
+ }
+ hp_end = *hpp;
+ for (hp=hp_start; hp<hp_end; ++hp) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *hp;
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED_BOXED(val)) {
+ ASSERT(is_boxed(val));
+ *hp = val;
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(gval);
+ val = *ptr;
+ if (IS_MOVED_CONS(val)) {
+ *hp = ptr[1];
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(gval)) {
+ hp += thing_arityval(gval);
+ }
+ break;
+ }
+ }
+ for (i=0; i<nrefs; ++i) {
+ refs[i] = follow_moved(refs[i]);
+ }
+}
+
+static void
+move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
+{
+ union {
+ Uint *up;
+ ProcBin *pbp;
+ ErlFunThing *efp;
+ ExternalThing *etp;
+ } ohe;
+ Eterm* ptr = src;
+ Eterm* end = ptr + src_sz;
+ Eterm dummy_ref;
+ Eterm* hp = *hpp;
+
+ while (ptr != end) {
+ Eterm val;
+ ASSERT(ptr < end);
+ val = *ptr;
+ ASSERT(val != ERTS_HOLE_MARKER);
+ if (is_header(val)) {
+ ASSERT(ptr + header_arity(val) < end);
+ ohe.up = hp;
+ MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case REFC_BINARY_SUBTAG:
+ ohe.pbp->next = off_heap->mso;
+ off_heap->mso = ohe.pbp;
+ break;
+ case FUN_SUBTAG:
+ ohe.efp->next = off_heap->funs;
+ off_heap->funs = ohe.efp;
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ ohe.etp->next = off_heap->externals;
+ off_heap->externals = ohe.etp;
+ break;
+ }
+ }
+ else { /* must be a cons cell */
+ ASSERT(ptr+1 < end);
+ MOVE_CONS(ptr, val, hp, &dummy_ref);
+ ptr += 2;
+ }
+ }
+ *hpp = hp;
+}
+
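
move_multi_frags()/move_one_frag() above use a two-pass forwarding scheme: first every fragment is copied onto the heap while MOVE_BOXED/MOVE_CONS leave move markers in the source cells, then the freshly copied region (and the caller's refs array) is swept and any value that still points at a moved cell is replaced by its forwarded address. A toy version of the same idea, with cells reduced to a small struct instead of tagged Eterms:

    #include <stddef.h>

    typedef struct cell {
        struct cell *fwd;                    /* NULL until moved, then the new location */
        struct cell *ref;                    /* an intra-fragment pointer to fix up */
        int value;
    } Cell;

    /* Pass 1: copy every cell and leave a forwarding marker in the source.
     * Pass 2: sweep the copy and chase forwarding pointers, much like the
     * Eterm sweep in move_multi_frags() and follow_moved() on the refs. */
    static void move_cells(Cell *src, size_t n, Cell *dst)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            dst[i] = src[i];
            src[i].fwd = &dst[i];            /* "move marker" */
        }
        for (i = 0; i < n; i++) {
            if (dst[i].ref && dst[i].ref->fwd)
                dst[i].ref = dst[i].ref->fwd;
        }
    }

    int main(void)
    {
        Cell frag[2] = { { NULL, NULL, 1 }, { NULL, NULL, 2 } };
        Cell heap[2];
        frag[0].ref = &frag[1];              /* pointer within the fragment */
        move_cells(frag, 2, heap);
        return heap[0].ref == &heap[1] ? 0 : 1;
    }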
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index f8823b85fe..16ae643ed9 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -2897,17 +2897,10 @@ unsigned long erts_alc_test(unsigned long op,
}
case 0xf10: {
ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid));
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (erts_lcnt_thr_create(tid,
- (void * (*)(void *)) a1,
- (void *) a2,
- NULL) != 0)
-#else
if (ethr_thr_create(tid,
(void * (*)(void *)) a1,
(void *) a2,
NULL) != 0)
-#endif
ERTS_ALC_TEST_ABORT;
return (unsigned long) tid;
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index de60ca49fa..48cda52612 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1140,9 +1140,9 @@ process_info_aux(Process *BIF_P,
}
else {
/* Make our copy of the message */
- ASSERT(size_object(msg) == hfp->size);
+ ASSERT(size_object(msg) == hfp->used_size);
msg = copy_struct(msg,
- hfp->size,
+ hfp->used_size,
&hp,
&MSO(BIF_P));
}
@@ -1892,6 +1892,37 @@ c_compiler_used(Eterm **hpp, Uint *szp)
}
+static int is_snif_term(Eterm module_atom) {
+ int i;
+ Atom *a = atom_tab(atom_val(module_atom));
+ char *aname = (char *) a->name;
+
+ /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
+ for (i = 0; i < a->len; i++) {
+ if (aname[i] == '.')
+ return 0;
+ }
+
+ return 1;
+}
+
+static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
+ Eterm tup;
+ tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
+ res = erts_bld_cons( hpp, szp, tup, res);
+ return res;
+}
+
+static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
+ int i;
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (is_snif_term(bif_table[i].module)) {
+ res = build_snif_term(hpp, szp, i, res);
+ }
+ }
+ return res;
+}
+
BIF_RETTYPE system_info_1(BIF_ALIST_1)
{
Eterm res;
@@ -1940,6 +1971,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(db_get_trace_control_word_0(BIF_P));
} else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
+ } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
+ Uint size = 0;
+ Uint *szp;
+
+ szp = &size;
+ build_snifs_term(NULL, szp, NIL);
+ hp = HAlloc(BIF_P, size);
+ res = build_snifs_term(&hp, NULL, NIL);
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
val = erts_get_system_seq_tracer();
ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
@@ -2715,7 +2755,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_links(prt->nlinks, &one_link_size, &size);
for (bp = prt->bp; bp; bp = bp->next)
- size += sizeof(ErlHeapFragment) + (bp->size - 1)*sizeof(Eterm);
+ size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
if (prt->linebuf)
size += sizeof(LineBuf) + prt->linebuf->ovsiz;
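
build_snifs_term() above relies on the common ERTS size-then-build idiom: the same builder runs twice, first with a NULL heap pointer and a size accumulator to learn how many heap words are needed, then again after HAlloc with a real heap pointer (and no size pointer) to actually write the term. A self-contained illustration of that calling convention, using plain integers rather than Eterms; build_pairs is a made-up example, not an ERTS function.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long Word;

    /* Either counts 2 words per element (size pass) or writes them (build pass). */
    static void build_pairs(Word **hpp, size_t *szp, const int *src, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            if (szp)
                *szp += 2;                   /* size pass: just count */
            if (hpp) {
                (*hpp)[0] = (Word) src[i];   /* build pass: write in place */
                (*hpp)[1] = (Word) (src[i] * src[i]);
                *hpp += 2;
            }
        }
    }

    int main(void)
    {
        const int src[] = { 1, 2, 3 };
        size_t need = 0;
        Word *heap, *hp;

        build_pairs(NULL, &need, src, 3);    /* pass 1: how much heap? */
        heap = hp = malloc(need * sizeof(Word));
        build_pairs(&hp, NULL, src, 3);      /* pass 2: build into it */
        printf("used %zu of %zu words\n", (size_t) (hp - heap), need);
        free(heap);
        return 0;
    }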
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 9b56ddd4f8..378c5e73fd 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -579,8 +579,8 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
if (prt->bp == NULL) { /* MUST be CONST! */
res = prt->data;
} else {
- Eterm* hp = HAlloc(BIF_P, prt->bp->size);
- res = copy_struct(prt->data, prt->bp->size, &hp, &MSO(BIF_P));
+ Eterm* hp = HAlloc(BIF_P, prt->bp->used_size);
+ res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P));
}
erts_smp_port_unlock(prt);
BIF_RET(res);
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index eb40c75110..4ae2f6ebf4 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -357,7 +357,7 @@ bif_timer_timeout(ErtsBifTimer* btm)
rp,
&rp_locks);
} else {
- Eterm old_size = bp->size;
+ Eterm old_size = bp->used_size;
bp = erts_resize_message_buffer(bp, old_size + wrap_size,
&message, 1);
hp = &bp->mem[0] + old_size;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 7fe4410e0d..443cac9033 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -40,8 +40,7 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
-static erts_smp_mtx_t trace_pattern_mutex;
-const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0};
+const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
@@ -65,7 +64,6 @@ static void clear_trace_bif(int bif_index);
void
erts_bif_trace_init(void)
{
- erts_smp_mtx_init(&trace_pattern_mutex, "trace_pattern");
erts_default_trace_pattern_is_on = 0;
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
@@ -186,6 +184,14 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
flags.breakpoint = 1;
flags.call_count = 1;
break;
+ case am_call_time:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.call_time = 1;
+ break;
+
default:
goto error;
}
@@ -195,8 +201,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
goto error;
}
- if (match_prog_set && !flags.local && !flags.meta && flags.call_count) {
- /* A match prog is not allowed with just call_count */
+ if (match_prog_set && !flags.local && !flags.meta && (flags.call_count || flags.call_time)) {
+ /* A match prog is not allowed with just call_count or call_time */
goto error;
}
@@ -235,6 +241,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
|= flags.meta;
erts_default_trace_pattern_flags.call_count
|= (on == 1) ? flags.call_count : 0;
+ erts_default_trace_pattern_flags.call_time
+ |= (on == 1) ? flags.call_time : 0;
} else {
erts_default_trace_pattern_flags.local
&= ~flags.local;
@@ -242,10 +250,13 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
&= ~flags.meta;
erts_default_trace_pattern_flags.call_count
&= ~flags.call_count;
+ erts_default_trace_pattern_flags.call_time
+ &= ~flags.call_time;
if (! (erts_default_trace_pattern_flags.breakpoint =
erts_default_trace_pattern_flags.local |
erts_default_trace_pattern_flags.meta |
- erts_default_trace_pattern_flags.call_count)) {
+ erts_default_trace_pattern_flags.call_count |
+ erts_default_trace_pattern_flags.call_time)) {
erts_default_trace_pattern_is_on = !!on; /* i.e off */
}
}
@@ -267,8 +278,9 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (on) {
if (on != 1) {
flags.call_count = 0;
+ flags.call_time = 0;
}
- flags.breakpoint = flags.local | flags.meta | flags.call_count;
+ flags.breakpoint = flags.local | flags.meta | flags.call_count | flags.call_time;
erts_default_trace_pattern_flags = flags; /* Struct copy */
erts_default_trace_pattern_is_on = !!flags.breakpoint;
}
@@ -336,7 +348,6 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
- erts_smp_mtx_lock(&trace_pattern_mutex);
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -347,12 +358,10 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*trace_pattern_flags = erts_default_trace_pattern_flags;
if (meta_tracer_pid)
*meta_tracer_pid = erts_default_meta_tracer_pid;
- erts_smp_mtx_unlock(&trace_pattern_mutex);
}
-
Uint
erts_trace_flag2bit(Eterm flag)
{
@@ -380,7 +389,7 @@ erts_trace_flag2bit(Eterm flag)
default: return 0;
}
}
-
+
/* Scan the argument list and sort out the trace flags.
**
** Returns !0 on success, 0 on failure.
@@ -931,6 +940,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
#define FUNC_TRACE_LOCAL_TRACE (1<<2)
#define FUNC_TRACE_META_TRACE (1<<3)
#define FUNC_TRACE_COUNT_TRACE (1<<4)
+#define FUNC_TRACE_TIME_TRACE (1<<5)
/*
* Returns either FUNC_TRACE_NOEXIST, FUNC_TRACE_UNTRACED,
* FUNC_TRACE_GLOBAL_TRACE, or,
@@ -945,11 +955,13 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*
* If the return value contains FUNC_TRACE_COUNT_TRACE, *count is set.
*/
-static int function_is_traced(Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
+static int function_is_traced(Process *p,
+ Eterm mfa[3],
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count) /* out */
+ Sint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
@@ -980,10 +992,13 @@ static int function_is_traced(Eterm mfa[3],
r |= FUNC_TRACE_LOCAL_TRACE;
*ms = ep->match_prog_set;
}
- if (erts_is_mtrace_bif(ep->code+3, ms_meta,
+ if (erts_is_mtrace_break(ep->code+3, ms_meta,
tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, ep->code+3, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
}
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -1001,7 +1016,9 @@ static int function_is_traced(Eterm mfa[3],
| (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
| (erts_is_count_break(code, count)
- ? FUNC_TRACE_COUNT_TRACE : 0);
+ ? FUNC_TRACE_COUNT_TRACE : 0)
+ | (erts_is_time_break(p, code, call_time)
+ ? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -1020,6 +1037,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm match_spec = am_false;
Eterm retval = am_false;
Eterm meta = am_false;
+ Eterm call_time = NIL;
int r;
@@ -1039,7 +1057,22 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
- r = function_is_traced(mfa, &ms, &ms_meta, &meta, &count);
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+#endif
+
+ r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
switch (r) {
case FUNC_TRACE_NOEXIST:
UnUseTmpHeap(3,p);
@@ -1092,8 +1125,13 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(count, p);
}
break;
+ case am_call_time:
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ retval = call_time;
+ }
+ break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1111,10 +1149,15 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(-count-1, p) :
erts_make_integer(count, p);
}
- hp = HAlloc(p, (3+2)*5);
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ ct = call_time;
+ }
+ hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_call_time, ct); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta, meta); hp += 3;
@@ -1210,6 +1253,13 @@ trace_info_on_load(Process* p, Eterm key)
} else {
return TUPLE2(hp, key, am_false);
}
+ case am_call_time:
+ hp = HAlloc(p, 3);
+ if (erts_default_trace_pattern_flags.call_time) {
+ return TUPLE2(hp, key, am_true);
+ } else {
+ return TUPLE2(hp, key, am_false);
+ }
case am_all:
{
Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
@@ -1284,6 +1334,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
/* Empty loop body */
}
+
if (j == specified) {
if (on) {
if (! flags.breakpoint)
@@ -1356,6 +1407,12 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
m = 1;
}
+ if (flags.call_time) {
+ erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ /* I don't want to remove any other tracers */
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (erts_bif_trace_flags[i]) {
setup_bif_trace(i);
}
@@ -1375,6 +1432,11 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
}
m = 1;
}
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(bif_export[i]->code + 3);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (! erts_bif_trace_flags[i]) {
reset_bif_trace(i);
}
@@ -1392,6 +1454,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
erts_clear_trace_break(mfa, specified);
erts_clear_mtrace_break(mfa, specified);
erts_clear_count_break(mfa, specified);
+ erts_clear_time_break(mfa, specified);
} else {
int m = 0;
if (flags.local) {
@@ -1405,6 +1468,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_set_count_break(mfa, specified, on);
}
+ if (flags.call_time) {
+ m = erts_set_time_break(mfa, specified, on);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1420,6 +1486,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_clear_count_break(mfa, specified);
}
+ if (flags.call_time) {
+ m = erts_clear_time_break(mfa, specified);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1536,7 +1605,7 @@ static void reset_bif_trace(int bif_index) {
ASSERT(ExportIsBuiltIn(ep));
ASSERT(ep->code[4]);
ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_bif((BeamInstr *)ep->code+3, NULL, NULL));
+ ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
ep->code[4] = (BeamInstr) bif_table[bif_index].f;
}
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index fd7de98ac9..93a47eb76f 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1547,10 +1547,9 @@ restart:
*/
context.save = NULL;
error: /* Here is where we land when compilation failed. */
- while (context.save != NULL) {
- ErlHeapFragment *ll = context.save->next;
+ if (context.save != NULL) {
free_message_buffer(context.save);
- context.save = ll;
+ context.save = NULL;
}
DMC_FREE(stack);
DMC_FREE(text);
@@ -1567,15 +1566,11 @@ error: /* Here is were we land when compilation failed. */
void erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
- ErlHeapFragment *tmp, *ll;
if (bprog == NULL)
return;
prog = Binary2MatchProg(bprog);
- tmp = prog->term_save;
- while (tmp != NULL) {
- ll = tmp->next;
- free_message_buffer(tmp);
- tmp = ll;
+ if (prog->term_save != NULL) {
+ free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
@@ -4125,7 +4120,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
DMC_INIT_STACK(heap);
p = expr->mem;
- i = expr->size;
+ i = expr->used_size;
while (i--) {
if (is_thing(*p)) {
a = thing_arityval(*p);
@@ -4154,7 +4149,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
}
p = expr->mem;
- i = expr->size;
+ i = expr->used_size;
while (i--) {
if (is_thing(*p)) {
a = thing_arityval(*p);
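The first two erl_db_util.c hunks above drop their manual chain-walking loops because free_message_buffer() (see erl_message.c further down in this compare) now follows bp->next itself and releases every fragment in the chain. A sketch of the calling-convention change; release_frags() is an illustrative name only:

/* Before this change a caller had to walk the chain itself:
 *
 *     while (frag != NULL) {
 *         ErlHeapFragment *next = frag->next;
 *         free_message_buffer(frag);
 *         frag = next;
 *     }
 *
 * After it, one call releases the whole chain: */
static void release_frags(ErlHeapFragment *frag)
{
    if (frag != NULL)
        free_message_buffer(frag);  /* frees frag and every fragment reachable via frag->next */
}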
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 58d3f92f56..d7d6fcf0a2 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -261,7 +261,7 @@ static int verify_eterm(Process *p,Eterm element)
return 1;
for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next) {
- if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->used_size)) {
return 1;
}
}
@@ -308,7 +308,7 @@ void erts_check_stack(Process *p)
if (IN_HEAP(p, ptr))
continue;
for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next)
- if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->used_size)) {
in_mbuf = 1;
break;
}
@@ -746,7 +746,7 @@ static void print_process_memory(Process *p)
PTR_SIZE, "heap fragments",
dashes, dashes, dashes, dashes);
while (bp) {
- print_untagged_memory(bp->mem,bp->mem + bp->size);
+ print_untagged_memory(bp->mem,bp->mem + bp->used_size);
bp = bp->next;
}
}
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 50d8c25c46..aa37edafd1 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -603,11 +603,7 @@ erl_drv_thread_create(char *name,
dtid->name = ((char *) dtid) + sizeof(struct ErlDrvTid_);
sys_strcpy(dtid->name, name);
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
- res = erts_lcnt_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
-#else
res = ethr_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
-#endif
if (res != 0) {
erts_free(ERTS_ALC_T_DRV_TID, dtid);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 9ed566e66e..a19e090f1e 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -667,7 +667,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
case TAG_PRIMARY_BOXED:
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, area, area_size)) {
@@ -679,7 +679,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
case TAG_PRIMARY_LIST:
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (in_area(ptr, area, area_size)) {
MOVE_CONS(ptr,val,old_htop,g_ptr++);
@@ -913,7 +913,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, heap, mature_size)) {
@@ -929,7 +929,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (in_area(ptr, heap, mature_size)) {
MOVE_CONS(ptr,val,old_htop,g_ptr++);
@@ -972,7 +972,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, heap, mature_size)) {
@@ -987,7 +987,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, heap, mature_size)) {
MOVE_CONS(ptr,val,old_htop,n_hp++);
@@ -1008,7 +1008,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
Eterm* origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(val);
} else if (in_area(ptr, heap, mature_size)) {
@@ -1161,7 +1161,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
@@ -1175,7 +1175,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,g_ptr++);
@@ -1216,7 +1216,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
@@ -1229,7 +1229,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,n_hp++);
@@ -1249,7 +1249,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (in_area(ptr, src, src_size) ||
@@ -1392,17 +1392,12 @@ combined_message_size(Process* p)
static void
remove_message_buffers(Process* p)
{
- ErlHeapFragment* bp = MBUF(p);
-
- MBUF(p) = NULL;
- MBUF_SIZE(p) = 0;
- while (bp != NULL) {
- ErlHeapFragment* next_bp = bp->next;
- free_message_buffer(bp);
- bp = next_bp;
- }
+ if (MBUF(p) != NULL) {
+ free_message_buffer(MBUF(p));
+ MBUF(p) = NULL;
+ }
+ MBUF_SIZE(p) = 0;
}
-
#ifdef HARDDEBUG
/*
@@ -1433,12 +1428,12 @@ disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = _unchecked_boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
objv++;
} else {
for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1450,11 +1445,11 @@ disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = _unchecked_list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
objv++;
} else {
for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1499,7 +1494,7 @@ disallow_heap_frag_ref_in_heap(Process* p)
ptr = _unchecked_boxed_val(val);
if (!in_area(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1509,7 +1504,7 @@ disallow_heap_frag_ref_in_heap(Process* p)
ptr = _unchecked_list_val(val);
if (!in_area(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1557,7 +1552,7 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1570,7 +1565,7 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1610,7 +1605,7 @@ sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1623,7 +1618,7 @@ sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,htop,g_ptr++);
@@ -1657,7 +1652,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1670,7 +1665,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,n_htop,n_hp++);
@@ -1690,7 +1685,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (in_area(ptr, src, src_size)) {
@@ -1722,7 +1717,7 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*heap_ptr++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1735,7 +1730,7 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,htop,heap_ptr++);
@@ -1830,28 +1825,6 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
return n_htop;
}
-#ifdef DEBUG
-static Eterm follow_moved(Eterm term)
-{
- Eterm* ptr;
- switch (primary_tag(term)) {
- case TAG_PRIMARY_IMMED1:
- break;
- case TAG_PRIMARY_BOXED:
- ptr = boxed_val(term);
- if (IS_MOVED(*ptr)) term = *ptr;
- break;
- case TAG_PRIMARY_LIST:
- ptr = list_val(term);
- if (is_non_value(ptr[0])) term = ptr[1];
- break;
- default:
- abort();
- }
- return term;
-}
-#endif
-
static Uint
setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
{
@@ -2090,7 +2063,7 @@ sweep_proc_externals(Process *p, int fullsweep)
while (ptr) {
Eterm* ppt = (Eterm *) ptr;
- if (IS_MOVED(*ppt)) { /* Object is alive */
+ if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
ExternalThing* ro = external_thing_ptr(*ppt);
*prev = ro; /* Patch to moved pos */
@@ -2130,7 +2103,7 @@ sweep_proc_funs(Process *p, int fullsweep)
while (ptr) {
Eterm* ppt = (Eterm *) ptr;
- if (IS_MOVED(*ppt)) { /* Object is alive */
+ if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
ErlFunThing* ro = (ErlFunThing *) fun_val(*ppt);
*prev = ro; /* Patch to moved pos */
@@ -2244,7 +2217,7 @@ sweep_proc_bins(Process *p, int fullsweep)
while (ptr) {
Eterm* ppt = (Eterm *) ptr;
- if (IS_MOVED(*ppt)) { /* Object is alive */
+ if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
bin_vheap += ptr->size / sizeof(Eterm);
ptr = (ProcBin*) binary_val(*ppt);
link_live_proc_bin(&shrink,
@@ -2542,7 +2515,7 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
return 1;
}
while (bp != NULL) {
- if (bp->mem <= ptr && ptr < bp->mem + bp->size) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
return 1;
}
bp = bp->next;
@@ -2556,7 +2529,7 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
hfp = erts_dist_ext_trailer(mp->data.dist_ext);
else
hfp = NULL;
- if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->size)
+ if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
return 1;
}
mp = mp->next;
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index af55b6363f..807ef8ae8d 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -22,11 +22,12 @@
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
-#ifdef DEBUG
+#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
# define HARDDEBUG 1
#endif
-#define IS_MOVED(x) (!is_header((x)))
+#define IS_MOVED_BOXED(x) (!is_header((x)))
+#define IS_MOVED_CONS(x) (is_non_value((x)))
#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
do { \
@@ -69,4 +70,28 @@ extern Uint erts_test_long_gc_sleep;
int within(Eterm *ptr, Process *p);
#endif
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term)
+{
+ Eterm* ptr;
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_IMMED1:
+ break;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(term);
+ if (IS_MOVED_BOXED(*ptr)) term = *ptr;
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(term);
+ if (IS_MOVED_CONS(ptr[0])) term = ptr[1];
+ break;
+ default:
+ ASSERT(!"strange tag in follow_moved");
+ }
+ return term;
+}
+#endif
+
#endif /* __ERL_GC_H__ */
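The rename from IS_MOVED to IS_MOVED_BOXED, together with the new IS_MOVED_CONS, makes explicit that the two heap cell kinds encode forwarding differently: a moved boxed cell has a non-header term in its first word, while a moved cons cell has THE_NON_VALUE in its car and the forwarding term in its cdr, which is exactly what follow_moved() above decodes. A simplified sweep-slot sketch of that idiom (the real sweeps in erl_gc.c additionally test in_area() and move cells that are not yet forwarded):

static void resolve_forwarded(Eterm *slot)
{
    Eterm gval = *slot;

    if (is_boxed(gval)) {
        Eterm *ptr = boxed_val(gval);
        if (IS_MOVED_BOXED(*ptr))
            *slot = *ptr;        /* first word holds the forwarding term */
    } else if (is_list(gval)) {
        Eterm *ptr = list_val(gval);
        if (IS_MOVED_CONS(*ptr))
            *slot = ptr[1];      /* cdr holds the forwarding term */
    }
    /* immediates need no action */
}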
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index f2e71ae98d..4a4507b212 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -336,59 +336,6 @@ init_shared_memory(int argc, char **argv)
#endif
}
-
-/*
- * Create the very first process.
- */
-
-void
-erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv)
-{
- int i;
- Eterm args;
- Eterm pid;
- Eterm* hp;
- Process parent;
- Process* p;
- ErlSpawnOpts so;
-
- if (erts_find_function(modname, am_start, 1) == NULL) {
- char sbuf[256];
- Atom* ap;
-
- ap = atom_tab(atom_val(modname));
- memcpy(sbuf, ap->name, ap->len);
- sbuf[ap->len] = '\0';
- erl_exit(5, "No function %s:start/1\n", sbuf);
- }
-
- /*
- * We need a dummy parent process to be able to call erl_create_process().
- */
- erts_init_empty_process(&parent);
- hp = HAlloc(&parent, argc*2 + 4);
- args = NIL;
- for (i = argc-1; i >= 0; i--) {
- int len = sys_strlen(argv[i]);
- args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
- hp += 2;
- }
- args = CONS(hp, new_binary(&parent, code, size), args);
- hp += 2;
- args = CONS(hp, args, NIL);
-
- so.flags = 0;
- pid = erl_create_process(&parent, modname, am_start, args, &so);
- p = process_tab[internal_pid_index(pid)];
- p->group_leader = pid;
-
- erts_cleanup_empty_process(&parent);
-}
-
-/*
- * XXX Old way of starting. Hopefully soon obsolete.
- */
-
static void
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
@@ -755,6 +702,10 @@ early_init(int *argc, char **argv) /*
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_late_init();
+#endif
#if defined(HIPE)
hipe_signal_init(); /* must be done very early */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 6ff5c1b9da..cee470ae37 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -105,7 +105,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "node_table", NULL },
{ "dist_table", NULL },
{ "sys_tracers", NULL },
- { "trace_pattern", NULL },
{ "module_tab", NULL },
{ "export_tab", NULL },
{ "fun_tab", NULL },
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 0d7e1335c1..26028aeefc 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -257,6 +257,10 @@ void erts_lcnt_init() {
erts_lcnt_clear_counters();
}
+void erts_lcnt_late_init() {
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+}
+
/* list operations */
/* BEGIN ASSUMPTION: lcnt_data_lock taken */
@@ -570,36 +574,26 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
/* thread operations */
-static void *lcnt_thr_init(erts_lcnt_thread_data_t *eltd) {
- void *(*function)(void *);
- void *argument;
- void *res;
- function = eltd->function;
- argument = eltd->argument;
-
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-
- res = (void *)function(argument);
- free(eltd);
- return (void *)res;
-}
-
-
-
-int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts) {
+void erts_lcnt_thread_setup(void) {
erts_lcnt_thread_data_t *eltd;
-
+
lcnt_lock();
/* lock for thread id global update */
eltd = lcnt_thread_data_alloc();
lcnt_unlock();
-
- eltd->function = function;
- eltd->argument = arg;
-
- return ethr_thr_create(tid, (void *)lcnt_thr_init, (void *)eltd, opts);
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key, eltd);
}
+void erts_lcnt_thread_exit_handler() {
+ erts_lcnt_thread_data_t *eltd;
+
+ eltd = ethr_tsd_get(lcnt_thr_data_key);
+
+ if (eltd) {
+ free(eltd);
+ }
+}
/* bindings for bifs */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index e3044c371f..6306580ae4 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -155,11 +155,6 @@ typedef struct {
erts_lcnt_time_t timer; /* timer */
int timer_set; /* bool */
int lock_in_conflict; /* bool */
-
- /* function pointer */
- void *(*function)(void *);
- void *argument;
-
} erts_lcnt_thread_data_t;
/* globals */
@@ -169,6 +164,11 @@ extern Uint16 erts_lcnt_rt_options;
/* function declarations */
void erts_lcnt_init(void);
+void erts_lcnt_late_init(void);
+
+/* thread operations */
+void erts_lcnt_thread_setup(void);
+void erts_lcnt_thread_exit_handler(void);
/* list operations (local) */
erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
@@ -194,12 +194,7 @@ void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
-/* thread operations */
-
-int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts);
-
/* bif interface */
-
Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
void erts_lcnt_clear_counters(void);
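With erts_lcnt_thr_create() gone, per-thread lock-count data is now installed by the thread itself and reclaimed by the exit handler that erts_lcnt_late_init() registers (see erl_lock_count.c above). A sketch of how a thread entry point is expected to cooperate with this, assuming a hypothetical thread_main():

static void *thread_main(void *arg)
{
#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_thread_setup();   /* allocate and install this thread's lcnt data */
#endif
    /* ... thread body taking and releasing instrumented locks ... */
    return NULL;                /* the registered exit handler frees the lcnt data */
}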
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index a056fce0c5..b63f3df7df 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -42,6 +42,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
#undef HARD_DEBUG
#endif
+
+
+
+static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp)
+{
+ return ((unsigned)(ptr - bp->mem) < bp->used_size);
+}
+
+
void
init_message(void)
{
@@ -81,9 +90,12 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
#endif
ErlHeapFragment* nbp;
+ /* ToDo: Make use of 'used_size' to avoid realloc
+ when shrinking just a few words */
+
#ifdef DEBUG
{
- Uint off_sz = size < bp->size ? size : bp->size;
+ Uint off_sz = size < bp->used_size ? size : bp->used_size;
for (i = 0; i < brefs_size; i++) {
Eterm *ptr;
if (is_immed(brefs[i]))
@@ -95,12 +107,12 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
}
#endif
- if (size == bp->size)
+ if (size == bp->used_size)
return bp;
#ifdef HARD_DEBUG
dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
- dbg_bp = new_message_buffer(bp->size);
+ dbg_bp = new_message_buffer(bp->used_size);
dbg_hp = dbg_bp->mem;
dbg_tot_size = 0;
for (i = 0; i < brefs_size; i++) {
@@ -109,15 +121,15 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
&dbg_bp->off_heap);
}
- ASSERT(dbg_tot_size == (size < bp->size ? size : bp->size));
+ ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif
nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
(void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size),
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
ERTS_HEAP_FRAG_SIZE(size));
if (bp != nbp) {
- Uint off_sz = size < nbp->size ? size : nbp->size;
+ Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
Eterm *sp = &bp->mem[0];
Eterm *ep = sp + off_sz;
Sint offs = &nbp->mem[0] - sp;
@@ -135,7 +147,7 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
}
#endif
}
- nbp->size = size;
+ nbp->alloc_size = size;
nbp->used_size = size;
#ifdef HARD_DEBUG
@@ -168,10 +180,15 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
void
free_message_buffer(ErlHeapFragment* bp)
{
- erts_cleanup_offheap(&bp->off_heap);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG,
- (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ASSERT(bp != NULL);
+ do {
+ ErlHeapFragment* next_bp = bp->next;
+
+ erts_cleanup_offheap(&bp->off_heap);
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
+ bp = next_bp;
+ } while (bp != NULL);
}
static ERTS_INLINE void
@@ -181,7 +198,7 @@ link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
/* Link the message buffer */
bp->next = MBUF(proc);
MBUF(proc) = bp;
- MBUF_SIZE(proc) += bp->size;
+ MBUF_SIZE(proc) += bp->used_size;
FLAGS(proc) |= F_FORCE_GC;
/* Move any binaries into the process */
@@ -242,7 +259,7 @@ erts_msg_distext2heap(Process *pp,
goto decode_error;
if (is_not_nil(*tokenp)) {
ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
- tok_sz = heap_frag->size;
+ tok_sz = heap_frag->used_size;
sz += tok_sz;
}
if (pp)
@@ -283,12 +300,13 @@ erts_msg_distext2heap(Process *pp,
erts_cleanup_offheap(&heap_frag->off_heap);
}
erts_free_dist_ext_copy(dist_extp);
- if (*bpp)
+ if (*bpp) {
free_message_buffer(*bpp);
+ *bpp = NULL;
+ }
else if (hp) {
HRelease(pp, hp_end, hp);
}
- *bpp = NULL;
return THE_NON_VALUE;
}
@@ -436,11 +454,10 @@ erts_queue_message(Process* receiver,
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
mp->next = NULL;
+ mp->data.heap_frag = bp;
#ifdef ERTS_SMP
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
- mp->data.heap_frag = bp;
-
/*
* We move 'in queue' to 'private queue' and place
* message at the end of 'private queue' in order
@@ -453,11 +470,9 @@ erts_queue_message(Process* receiver,
LINK_MESSAGE_PRIVQ(receiver, mp);
}
else {
- mp->data.heap_frag = bp;
LINK_MESSAGE(receiver, mp);
}
#else
- mp->data.heap_frag = bp;
LINK_MESSAGE(receiver, mp);
#endif
@@ -530,32 +545,27 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#ifdef HARD_DEBUG
dbg_term_sz = size_object(term);
dbg_token_sz = size_object(token);
- ASSERT(bp->size == dbg_term_sz + dbg_token_sz);
-
- dbg_bp = new_message_buffer(bp->size);
+ /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
+ Copied size may be smaller due to removed SubBins or garbage.
+ Copied size may be larger due to duplicated shared terms.
+ */
+ dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
dbg_hp = dbg_bp->mem;
dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
dbg_thp_start = *hpp;
#endif
- ASSERT(bp);
- msg->data.attached = NULL;
+ if (bp->next != NULL) {
+ move_multi_frags(hpp, off_heap, bp, msg->m, 2);
+ goto copy_done;
+ }
off_heap->overhead += bp->off_heap.overhead;
- sz = bp->size;
+ sz = bp->used_size;
-#ifdef DEBUG
- if (is_not_immed(term)) {
- ASSERT(bp->mem <= ptr_val(term));
- ASSERT(bp->mem + bp->size > ptr_val(term));
- }
-
- if (is_not_immed(token)) {
- ASSERT(bp->mem <= ptr_val(token));
- ASSERT(bp->mem + bp->size > ptr_val(token));
- }
-#endif
+ ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
+ ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));
fhp = bp->mem;
hp = *hpp;
@@ -574,8 +584,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
break;
case TAG_PRIMARY_LIST:
case TAG_PRIMARY_BOXED:
- ASSERT(bp->mem <= ptr_val(val));
- ASSERT(bp->mem + bp->size > ptr_val(val));
+ ASSERT(in_heapfrag(ptr_val(val), bp));
*hp++ = offset_ptr(val, offs);
break;
case TAG_PRIMARY_HEADER:
@@ -609,6 +618,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
cpy_sz = header_arity(val);
cpy_words:
+ ASSERT(sz >= cpy_sz);
sz -= cpy_sz;
while (cpy_sz >= 8) {
cpy_sz -= 8;
@@ -676,12 +686,11 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
}
- ASSERT(bp->size == hp - *hpp);
+ ASSERT(bp->used_size == hp - *hpp);
*hpp = hp;
if (is_not_immed(token)) {
- ASSERT(bp->mem <= ptr_val(token));
- ASSERT(bp->mem + bp->size > ptr_val(token));
+ ASSERT(in_heapfrag(ptr_val(token), bp));
ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
@@ -690,8 +699,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
if (is_not_immed(term)) {
- ASSERT(bp->mem <= ptr_val(term));
- ASSERT(bp->mem + bp->size > ptr_val(term));
+ ASSERT(in_heapfrag(ptr_val(term),bp));
ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
@@ -699,10 +707,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
}
+copy_done:
#ifdef HARD_DEBUG
{
int i, j;
+ ErlHeapFragment* frag;
{
ProcBin *mso = off_heap->mso;
i = j = 0;
@@ -710,10 +720,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
mso = mso->next;
i++;
}
- mso = bp->off_heap.mso;
- while (mso) {
- mso = mso->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ mso = frag->off_heap.mso;
+ while (mso) {
+ mso = mso->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -724,10 +736,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
fun = fun->next;
i++;
}
- fun = bp->off_heap.funs;
- while (fun) {
- fun = fun->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ fun = frag->off_heap.funs;
+ while (fun) {
+ fun = fun->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -738,10 +752,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
external = external->next;
i++;
}
- external = bp->off_heap.externals;
- while (external) {
- external = external->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ external = frag->off_heap.externals;
+ while (external) {
+ external = external->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -755,6 +771,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
bp->off_heap.externals = NULL;
free_message_buffer(bp);
+ msg->data.heap_frag = NULL;
#ifdef HARD_DEBUG
ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
@@ -764,6 +781,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
+
Uint
erts_msg_attached_data_size_aux(ErlMessage *msg)
{
@@ -789,7 +807,7 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
if (is_not_nil(msg->m[1])) {
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->size;
+ sz += heap_frag->used_size;
}
return sz;
}
@@ -805,7 +823,7 @@ erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *ms
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
- heap_frag->size,
+ heap_frag->used_size,
hpp,
ohp);
erts_cleanup_offheap(&heap_frag->off_heap);
@@ -1062,3 +1080,4 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
erts_queue_message(to, to_locksp, bp, save, NIL);
}
}
+
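The in_heapfrag() helper added near the top of erl_message.c needs only one comparison: casting the pointer difference to unsigned turns any pointer below bp->mem into a huge value, so the single test against bp->used_size covers both bounds. It is equivalent to the two-sided check that the removed ASSERTs spelled out; a long-hand form for illustration only:

static int in_heapfrag_longhand(const Eterm *ptr, const ErlHeapFragment *bp)
{
    /* same result as ((unsigned)(ptr - bp->mem) < bp->used_size) */
    return bp->mem <= ptr && ptr < bp->mem + bp->used_size;
}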
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 489dee7b37..f478572ac2 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -49,7 +49,7 @@ typedef struct erl_heap_fragment ErlHeapFragment;
struct erl_heap_fragment {
ErlHeapFragment* next; /* Next heap fragment */
ErlOffHeap off_heap; /* Offset heap data. */
- unsigned size; /* Size in (half)words of mem */
+ unsigned alloc_size; /* Size in (half)words of mem */
unsigned used_size; /* With terms to be moved to heap by GC */
Eterm mem[1]; /* Data */
};
@@ -199,7 +199,7 @@ do { \
#define ERTS_INIT_HEAP_FRAG(HEAP_FRAG_P, DATA_WORDS) \
do { \
(HEAP_FRAG_P)->next = NULL; \
- (HEAP_FRAG_P)->size = (DATA_WORDS); \
+ (HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
(HEAP_FRAG_P)->used_size = (DATA_WORDS); \
(HEAP_FRAG_P)->off_heap.mso = NULL; \
(HEAP_FRAG_P)->off_heap.funs = NULL; \
@@ -227,14 +227,25 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
Eterm *, ErtsDistExternal *);
+ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg)
+{
+ const ErlHeapFragment *bp;
+ Uint sz = 0;
+ for (bp = msg->data.heap_frag; bp!=NULL; bp=bp->next) {
+ sz += bp->used_size;
+ }
+ return sz;
+}
+
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
{
ASSERT(msg->data.attached);
if (is_value(ERL_MESSAGE_TERM(msg)))
- return msg->data.heap_frag->size;
+ return erts_msg_used_frag_sz(msg);
else if (msg->data.dist_ext->heap_size < 0)
return erts_msg_attached_data_size_aux(msg);
else {
@@ -242,7 +253,7 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->size;
+ sz += heap_frag->used_size;
}
return sz;
}
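The single size field of ErlHeapFragment is split above: alloc_size is the number of words allocated for mem and is what freeing and reallocating code should use, while used_size is the number of words currently holding live terms and is what copying, sizing and GC code should use. For a fragment chain, the live size is the sum of used_size over the chain, exactly as erts_msg_used_frag_sz() computes; a standalone restatement:

static Uint frag_chain_live_words(const ErlHeapFragment *bp)
{
    Uint sz = 0;
    for (; bp != NULL; bp = bp->next)
        sz += bp->used_size;    /* live words only, not alloc_size */
    return sz;
}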
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 7095ae03e7..3d63fa1caf 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -48,7 +48,7 @@ struct erl_module_nif {
struct enif_entry_t* entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
- int is_orphan; /* if erlang module has been purged */
+ Module* mod; /* Can be NULL if orphan with dtor-resources left */
};
#ifdef DEBUG
@@ -61,6 +61,10 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif
+#ifdef DEBUG
+static int is_offheap(const ErlOffHeap* off_heap);
+#endif
+
#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
@@ -84,7 +88,8 @@ static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
HEAP_TOP(env->proc) = env->hp;
}
else {
- HRelease(env->proc, env->hp_end, env->hp);
+ env->heap_frag->used_size = hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
frag_sz = need + MIN_HEAP_FRAG_SZ;
hp = erts_heap_alloc(env->proc, frag_sz);
@@ -143,8 +148,9 @@ void erts_post_nif(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
- HRelease(env->proc, env->hp_end, env->hp);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
free_tmp_objs(env);
}
@@ -158,7 +164,7 @@ static void post_nif_noproc(ErlNifEnv* env)
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
-static void enable_halloc(ErlNifEnv* env)
+static void flush_env(ErlNifEnv* env)
{
if (env->heap_frag == NULL) {
ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
@@ -168,14 +174,15 @@ static void enable_halloc(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
- HRelease(env->proc, env->hp_end, env->hp);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
}
-/* Restore cached heap pointers
+/* Restore cached heap pointers to allow alloc_heap again.
*/
-static void disable_halloc(ErlNifEnv* env)
+static void cache_env(ErlNifEnv* env)
{
if (env->heap_frag == NULL) {
ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
@@ -185,34 +192,192 @@ static void disable_halloc(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
env->heap_frag = MBUF(env->proc);
ASSERT(env->heap_frag != NULL);
env->hp = env->heap_frag->mem + env->heap_frag->used_size;
- env->hp_end = env->heap_frag->mem + env->heap_frag->size;
+ env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
}
-
void* enif_priv_data(ErlNifEnv* env)
{
return env->mod_nif->priv_data;
}
-void* enif_alloc(ErlNifEnv* env, size_t size)
+void* enif_alloc(size_t size)
{
return erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size);
}
-void* enif_realloc(ErlNifEnv* env, void* ptr, size_t size)
+void* enif_realloc(void* ptr, size_t size)
{
return erts_realloc_fnf(ERTS_ALC_T_NIF, ptr, size);
}
-void enif_free(ErlNifEnv* env, void* ptr)
+void enif_free(void* ptr)
{
erts_free(ERTS_ALC_T_NIF, ptr);
}
+struct enif_msg_environment_t
+{
+ ErlNifEnv env;
+ Process phony_proc;
+};
+
+ErlNifEnv* enif_alloc_env(void)
+{
+ struct enif_msg_environment_t* msg_env =
+ erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+ Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
+
+ msg_env->env.hp = phony_heap;
+ msg_env->env.hp_end = phony_heap;
+ msg_env->env.heap_frag = NULL;
+ msg_env->env.mod_nif = NULL;
+ msg_env->env.tmp_obj_list = (struct enif_tmp_obj_t*) 1; /* invalid non-NULL */
+ msg_env->env.proc = &msg_env->phony_proc;
+ memset(&msg_env->phony_proc, 0, sizeof(Process));
+ HEAP_START(&msg_env->phony_proc) = phony_heap;
+ HEAP_TOP(&msg_env->phony_proc) = phony_heap;
+ HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
+ HEAP_END(&msg_env->phony_proc) = phony_heap;
+ MBUF(&msg_env->phony_proc) = NULL;
+ msg_env->phony_proc.id = ERTS_INVALID_PID;
+#ifdef FORCE_HEAP_FRAGS
+ msg_env->phony_proc.space_verified = 0;
+ msg_env->phony_proc.space_verified_from = NULL;
+#endif
+ return &msg_env->env;
+}
+void enif_free_env(ErlNifEnv* env)
+{
+ enif_clear_env(env);
+ erts_free(ERTS_ALC_T_NIF, env);
+}
+
+static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
+{
+ oh->mso = NULL;
+ oh->externals = NULL;
+ oh->funs = NULL;
+ oh->overhead = 0;
+}
+
+void enif_clear_env(ErlNifEnv* env)
+{
+ struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
+ Process* p = &menv->phony_proc;
+ ASSERT(p == menv->env.proc);
+ ASSERT(p->id == ERTS_INVALID_PID);
+ ASSERT(MBUF(p) == menv->env.heap_frag);
+ if (MBUF(p) != NULL) {
+ erts_cleanup_offheap(&MSO(p));
+ clear_offheap(&MSO(p));
+ free_message_buffer(MBUF(p));
+ MBUF(p) = NULL;
+ menv->env.heap_frag = NULL;
+ }
+ ASSERT(HEAP_TOP(p) == HEAP_END(p));
+ menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
+
+ ASSERT(!is_offheap(&MSO(p)));
+}
+int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+{
+ struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
+ ErtsProcLocks rp_locks = 0;
+ Process* rp;
+ Process* c_p;
+ ErlHeapFragment* frags;
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ ErtsProcLocks rp_had_locks;
+#endif
+ Eterm receiver = to_pid->pid;
+ int flush_me = 0;
+
+ if (env != NULL) {
+ c_p = env->proc;
+ if (receiver == c_p->id) {
+ rp_locks = ERTS_PROC_LOCK_MAIN;
+ flush_me = 1;
+ }
+ }
+ else {
+#ifdef ERTS_SMP
+ c_p = NULL;
+#else
+ erl_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
+#endif
+ }
+
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ rp_had_locks = rp_locks;
+#endif
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp == NULL) {
+ ASSERT(env == NULL || receiver != c_p->id);
+ return 0;
+ }
+ flush_env(msg_env);
+ frags = menv->env.heap_frag;
+ ASSERT(frags == MBUF(&menv->phony_proc));
+ if (frags != NULL) {
+ /* Move all off-heap data from the phony proc to the first fragment.
+ Quick and dirty, but erts_move_msg_mbuf_to_heap doesn't care. */
+ ASSERT(!is_offheap(&frags->off_heap));
+ frags->off_heap = MSO(&menv->phony_proc);
+ clear_offheap(&MSO(&menv->phony_proc));
+ menv->env.heap_frag = NULL;
+ MBUF(&menv->phony_proc) = NULL;
+ }
+ ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
+
+ if (flush_me) {
+ flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ }
+ erts_queue_message(rp, &rp_locks, frags, msg, am_undefined);
+ if (rp_locks) {
+ ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ |
+ ERTS_PROC_LOCK_STATUS)));
+ erts_smp_proc_unlock(rp, (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS));
+ }
+ erts_smp_proc_dec_refc(rp);
+ if (flush_me) {
+ cache_env(env);
+ }
+ return 1;
+}
+
+ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
+{
+ Uint sz;
+ Eterm* hp;
+ sz = size_object(src_term);
+ hp = alloc_heap(dst_env, sz);
+ return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+}
+
+
+#ifdef DEBUG
+static int is_offheap(const ErlOffHeap* oh)
+{
+ return oh->mso != NULL || oh->funs != NULL || oh->externals != NULL;
+}
+#endif
+
+ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
+{
+ pid->pid = caller_env->proc->id;
+ return pid;
+}
+int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
+{
+ return is_internal_pid(term) ? (pid->pid=term, 1) : 0;
+}
+
int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
{
return is_atom(term);
@@ -324,7 +489,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
return 1;
}
-int enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)
+int enif_alloc_binary(size_t size, ErlNifBinary* bin)
{
Binary* refbin;
@@ -343,7 +508,7 @@ int enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)
return 1;
}
-int enif_realloc_binary(ErlNifEnv* env, ErlNifBinary* bin, unsigned size)
+int enif_realloc_binary(ErlNifBinary* bin, size_t size)
{
if (bin->ref_bin != NULL) {
Binary* oldbin;
@@ -361,15 +526,15 @@ int enif_realloc_binary(ErlNifEnv* env, ErlNifBinary* bin, unsigned size)
}
else {
unsigned char* old_data = bin->data;
- unsigned cpy_sz = (size < bin->size ? size : bin->size);
- enif_alloc_binary(env, size, bin);
+ size_t cpy_sz = (size < bin->size ? size : bin->size);
+ enif_alloc_binary(size, bin);
sys_memcpy(bin->data, old_data, cpy_sz);
}
return 1;
}
-void enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)
+void enif_release_binary(ErlNifBinary* bin)
{
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
@@ -385,21 +550,21 @@ void enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)
#endif
}
-unsigned char* enif_make_new_binary(ErlNifEnv* env, unsigned size,
+unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
ERL_NIF_TERM* termp)
{
- enable_halloc(env);
+ flush_env(env);
*termp = new_binary(env->proc, NULL, size);
- disable_halloc(env);
+ cache_env(env);
return binary_bytes(*termp);
}
-int enif_is_identical(ErlNifEnv* env, Eterm lhs, Eterm rhs)
+int enif_is_identical(Eterm lhs, Eterm rhs)
{
return EQ(lhs,rhs);
}
-int enif_compare(ErlNifEnv* env, Eterm lhs, Eterm rhs)
+int enif_compare(Eterm lhs, Eterm rhs)
{
return cmp(lhs,rhs);
}
@@ -478,15 +643,15 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
return bin_term;
}
else {
- enable_halloc(env);
+ flush_env(env);
bin->bin_term = new_binary(env->proc, bin->data, bin->size);
- disable_halloc(env);
+ cache_env(env);
return bin->bin_term;
}
}
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
- unsigned pos, unsigned size)
+ size_t pos, size_t size)
{
ErlSubBin* sb;
Eterm orig;
@@ -516,9 +681,11 @@ Eterm enif_make_badarg(ErlNifEnv* env)
BIF_ERROR(env->proc, BADARG);
}
-int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len)
+int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len,
+ ErlNifCharEncoding encoding)
{
Atom* ap;
+ ASSERT(encoding == ERL_NIF_LATIN1);
if (is_not_atom(atom)) {
return 0;
}
@@ -566,10 +733,8 @@ int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
return term_to_Sint(term, ip);
#elif SIZEOF_INT == ERTS_SIZEOF_ETERM
- Uint u;
- term_to_Sint(term, u);
- *ip = (long) u;
- return 1;
+ Sint i;
+ return term_to_Sint(term, &i) ? (*ip = (long) i, 1) : 0;
#else
# error Unknown long word size
#endif
@@ -581,10 +746,7 @@ int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
return term_to_Uint(term, ip);
#elif SIZEOF_INT == ERTS_SIZEOF_ETERM
Uint u;
- int r;
- r = term_to_Uint(term, &u);
- *ip = (unsigned long) u;
- return r;
+ return term_to_Uint(term, &u) ? (*ip = (unsigned long) u, 1) : 0;
#else
# error Unknown long word size
#endif
@@ -601,9 +763,11 @@ int enif_get_double(ErlNifEnv* env, Eterm term, double* dp)
return 1;
}
-int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len)
+int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len,
+ ErlNifCharEncoding enc)
{
Atom* ap;
+ ASSERT(enc == ERL_NIF_LATIN1);
if (is_not_atom(atom)) return 0;
ap = atom_tab(atom_val(atom));
*len = ap->len;
@@ -674,14 +838,16 @@ ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)
return am_atom_put(name, len);
}
-int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom)
+int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom,
+ ErlNifCharEncoding enc)
{
- return enif_make_existing_atom_len(env, name, sys_strlen(name), atom);
+ return enif_make_existing_atom_len(env, name, sys_strlen(name), atom, enc);
}
int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
- ERL_NIF_TERM* atom)
+ ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)
{
+ ASSERT(encoding == ERL_NIF_LATIN1);
return erts_atom_get(name, len, atom);
}
@@ -841,7 +1007,8 @@ struct enif_resource_type_t
ErlNifResourceDtor* dtor; /* user destructor function */
erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+1 for active erl_module_nif */
- char name[1];
+ Eterm module;
+ Eterm name;
};
/* dummy node in circular list */
@@ -859,14 +1026,14 @@ typedef struct enif_resource_t
#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-static ErlNifResourceType* find_resource_type(const char* name)
+static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
for (type = resource_type_list.next;
type != &resource_type_list;
type = type->next) {
- if (sys_strcmp(type->name, name) == 0) {
+ if (type->module == module && type->name == name) {
return type;
}
}
@@ -899,33 +1066,42 @@ static void steal_resource_type(ErlNifResourceType* type)
if (type->dtor != NULL
&& erts_refc_dectest(&lib->rt_dtor_cnt, 0) == 0
- && lib->is_orphan) {
+ && lib->mod == NULL) {
/* last type with destructor gone, close orphan lib */
close_lib(lib);
}
if (erts_refc_dectest(&lib->rt_cnt, 0) == 0
- && lib->is_orphan) {
+ && lib->mod == NULL) {
erts_free(ERTS_ALC_T_NIF, lib);
}
}
ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env, const char* type_name,
- ErlNifResourceDtor* dtor,
- enum ErlNifResourceFlags flags,
- enum ErlNifResourceFlags* tried)
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
{
- ErlNifResourceType* type = find_resource_type(type_name);
- enum ErlNifResourceFlags op = flags;
+ ErlNifResourceType* type = NULL;
+ ErlNifResourceFlags op = flags;
+ Eterm module_am, name_am;
+
ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(module_str == NULL); /* for now... */
+ module_am = make_atom(env->mod_nif->mod->module);
+ name_am = enif_make_atom(env, name_str);
+
+ type = find_resource_type(module_am, name_am);
if (type == NULL) {
if (flags & ERL_NIF_RT_CREATE) {
type = erts_alloc(ERTS_ALC_T_NIF,
- sizeof(struct enif_resource_type_t)
- + sys_strlen(type_name));
+ sizeof(struct enif_resource_type_t));
type->dtor = dtor;
- sys_strcpy(type->name, type_name);
+ type->module = module_am;
+ type->name = name_am;
erts_refc_init(&type->refc, 1);
type->owner = env->mod_nif;
type->prev = &resource_type_list;
@@ -973,13 +1149,13 @@ static void nif_resource_dtor(Binary* bin)
if (erts_refc_dectest(&type->refc, 0) == 0) {
ASSERT(type->next == NULL);
ASSERT(type->owner != NULL);
- ASSERT(type->owner->is_orphan);
+ ASSERT(type->owner->mod == NULL);
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
}
-void* enif_alloc_resource(ErlNifEnv* env, ErlNifResourceType* type, unsigned size)
+void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
{
Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
@@ -992,7 +1168,7 @@ void* enif_alloc_resource(ErlNifEnv* env, ErlNifResourceType* type, unsigned siz
return resource->data;
}
-void enif_release_resource(ErlNifEnv* env, void* obj)
+void enif_release_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
@@ -1006,6 +1182,18 @@ void enif_release_resource(ErlNifEnv* env, void* obj)
}
}
+void enif_keep_resource(void* obj)
+{
+ ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+#ifdef DEBUG
+ erts_refc_inc(&resource->nif_refc, 1);
+#endif
+ erts_refc_inc(&bin->binary.refc, 2);
+}
+
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
@@ -1014,15 +1202,30 @@ ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
}
+ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
+ const void* data, size_t size)
+{
+ Eterm bin = enif_make_resource(env, obj);
+ ProcBin* pb = (ProcBin*) binary_val(bin);
+ pb->bytes = (byte*) data;
+ pb->size = size;
+ return bin;
+}
+
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
+ ProcBin* pb;
Binary* mbin;
ErlNifResource* resource;
if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
return 0;
}
- mbin = ((ProcBin*) binary_val(term))->val;
+ pb = (ProcBin*) binary_val(term);
+ /*if (pb->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }*/
+ mbin = pb->val;
resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin);
if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
|| resource->type != type) {
@@ -1032,7 +1235,7 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
return 1;
}
-unsigned enif_sizeof_resource(ErlNifEnv* env, void* obj)
+size_t enif_sizeof_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
Binary* bin = &ERTS_MAGIC_BIN_FROM_DATA(resource)->binary;
@@ -1262,7 +1465,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
lib->entry = entry;
erts_refc_init(&lib->rt_cnt, 0);
erts_refc_init(&lib->rt_dtor_cnt, 0);
- lib->is_orphan = 0;
+ lib->mod = mod;
env.mod_nif = lib;
if (mod->nif != NULL) { /* Reload */
int k;
@@ -1343,7 +1546,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData* bp = (BpData*) code_ptr[1];
+ BpData** bps = (BpData**) code_ptr[1];
+ BpData* bp = (BpData*) bps[bp_sched2ix()];
bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
@@ -1376,7 +1580,7 @@ erts_unload_nif(struct erl_module_nif* lib)
ErlNifResourceType* next;
ASSERT(erts_smp_is_system_blocked(0));
ASSERT(lib != NULL);
- ASSERT(!lib->is_orphan);
+ ASSERT(lib->mod != NULL);
for (rt = resource_type_list.next;
rt != &resource_type_list;
rt = next) {
@@ -1406,7 +1610,7 @@ erts_unload_nif(struct erl_module_nif* lib)
else {
ASSERT(erts_refc_read(&lib->rt_cnt, 1) > 0);
}
- lib->is_orphan = 1;
+ lib->mod = NULL; /* orphan lib */
}
void erl_nif_init()
@@ -1415,7 +1619,8 @@ void erl_nif_init()
resource_type_list.prev = &resource_type_list;
resource_type_list.dtor = NULL;
resource_type_list.owner = NULL;
- resource_type_list.name[0] = '\0';
+ resource_type_list.module = THE_NON_VALUE;
+ resource_type_list.name = THE_NON_VALUE;
}
#ifdef READONLY_CHECK
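
Note: the erl_nif.c hunks above rework the resource API: types are now keyed on (module, name) atoms instead of a string, enif_open_resource_type() gains a module_str argument (NULL for the calling module, which the patch still asserts), and alloc/keep/release/sizeof no longer take an ErlNifEnv*. Below is a minimal sketch, not part of the patch, of a NIF using the reworked calls; the my_handle type and function names are illustrative. Registering these functions with ERL_NIF_INIT is sketched after the erl_nif.h section below.

    #include "erl_nif.h"

    typedef struct { int fd; } my_handle;        /* illustrative payload */

    static ErlNifResourceType* my_res_type;

    static void my_handle_dtor(ErlNifEnv* env, void* obj)
    {
        /* release whatever the handle owns */
    }

    static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
    {
        ErlNifResourceFlags tried;
        my_res_type = enif_open_resource_type(env, NULL, "my_handle",
                                              my_handle_dtor,
                                              ERL_NIF_RT_CREATE, &tried);
        return my_res_type == NULL;   /* non-zero aborts loading */
    }

    static ERL_NIF_TERM new_handle(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        my_handle* h = enif_alloc_resource(my_res_type, sizeof(my_handle));
        ERL_NIF_TERM term;
        h->fd = -1;                   /* not opened yet */
        term = enif_make_resource(env, h);
        enif_release_resource(h);     /* the term now keeps the resource alive */
        return term;
    }

    static ERL_NIF_TERM handle_fd(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        my_handle* h;
        if (!enif_get_resource(env, argv[0], my_res_type, (void**) &h))
            return enif_make_badarg(env);
        return enif_make_int(env, h->fd);
    }
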
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index a345837569..936f03bce1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -23,14 +23,16 @@
#ifndef __ERL_NIF_H__
#define __ERL_NIF_H__
+
#include "erl_drv_nif.h"
/* Version history:
** 0.1: R13B03
** 1.0: R13B04
+** 2.0: R14A
*/
-#define ERL_NIF_MAJOR_VERSION 1
-#define ERL_NIF_MINOR_VERSION 1
+#define ERL_NIF_MAJOR_VERSION 2
+#define ERL_NIF_MINOR_VERSION 0
#include <stdlib.h>
@@ -60,6 +62,10 @@
#endif
#include "erl_int_sizes_config.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifdef HALFWORD_HEAP_EMULATOR
typedef unsigned int ERL_NIF_TERM;
#else
@@ -93,7 +99,7 @@ typedef struct enif_entry_t
typedef struct
{
- unsigned size;
+ size_t size;
unsigned char* data;
/* Internals (avert your eyes) */
@@ -103,17 +109,22 @@ typedef struct
typedef struct enif_resource_type_t ErlNifResourceType;
typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
-enum ErlNifResourceFlags
+typedef enum
{
ERL_NIF_RT_CREATE = 1,
ERL_NIF_RT_TAKEOVER = 2
-};
+}ErlNifResourceFlags;
typedef enum
{
ERL_NIF_LATIN1 = 1
}ErlNifCharEncoding;
+typedef struct
+{
+ ERL_NIF_TERM pid; /* internal, may change */
+}ErlNifPid;
+
typedef ErlDrvSysInfo ErlNifSysInfo;
typedef struct ErlDrvTid_ *ErlNifTid;
@@ -146,8 +157,6 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#endif
-
-
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
@@ -163,7 +172,18 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#endif
+#ifdef __cplusplus
+}
+# define ERL_NIF_INIT_PROLOGUE extern "C" {
+# define ERL_NIF_INIT_EPILOGUE }
+#else
+# define ERL_NIF_INIT_PROLOGUE
+# define ERL_NIF_INIT_EPILOGUE
+#endif
+
+
#define ERL_NIF_INIT(NAME, FUNCS, LOAD, RELOAD, UPGRADE, UNLOAD) \
+ERL_NIF_INIT_PROLOGUE \
ERL_NIF_INIT_GLOB \
ERL_NIF_INIT_DECL(NAME) \
{ \
@@ -178,7 +198,9 @@ ERL_NIF_INIT_DECL(NAME) \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
-}
+} \
+ERL_NIF_INIT_EPILOGUE
+
#endif /* __ERL_NIF_H__ */
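
Note: the header bumps the NIF API to version 2.0 and wraps the code emitted by ERL_NIF_INIT in an extern "C" prologue/epilogue so a NIF can be compiled as C++. A sketch of a complete module skeleton the resource example above could be registered with; the module name my_nif and the hello function are illustrative.

    #include "erl_nif.h"

    static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        return enif_make_atom(env, "hello");
    }

    static ErlNifFunc nif_funcs[] = {
        {"hello", 0, hello}
    };

    /* Compiles unchanged as C or C++; when built as C++ the macro now opens
     * and closes an extern "C" block around the generated nif_init(). */
    ERL_NIF_INIT(my_nif, nif_funcs, NULL, NULL, NULL, NULL)
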
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 44bcca9ca4..ef4e9580b0 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -23,29 +23,29 @@
#ifdef ERL_NIF_API_FUNC_DECL
ERL_NIF_API_FUNC_DECL(void*,enif_priv_data,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(ErlNifEnv*, size_t size));
-ERL_NIF_API_FUNC_DECL(void,enif_free,(ErlNifEnv*, void* ptr));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_free,(void* ptr));
ERL_NIF_API_FUNC_DECL(int,enif_is_atom,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_binary,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_ref,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_inspect_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(ErlNifEnv*, unsigned size, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifEnv*, ErlNifBinary* bin, unsigned size));
-ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifEnv*, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(size_t size, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifBinary* bin, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifBinary* bin));
ERL_NIF_API_FUNC_DECL(int,enif_get_int,(ErlNifEnv*, ERL_NIF_TERM term, int* ip));
ERL_NIF_API_FUNC_DECL(int,enif_get_ulong,(ErlNifEnv*, ERL_NIF_TERM term, unsigned long* ip));
ERL_NIF_API_FUNC_DECL(int,enif_get_double,(ErlNifEnv*, ERL_NIF_TERM term, double* dp));
ERL_NIF_API_FUNC_DECL(int,enif_get_list_cell,(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM* head, ERL_NIF_TERM* tail));
ERL_NIF_API_FUNC_DECL(int,enif_get_tuple,(ErlNifEnv* env, ERL_NIF_TERM tpl, int* arity, const ERL_NIF_TERM** array));
-ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
-ERL_NIF_API_FUNC_DECL(int,enif_compare,(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_compare,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_binary,(ErlNifEnv* env, ErlNifBinary* bin));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_badarg,(ErlNifEnv* env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int,(ErlNifEnv* env, int i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ulong,(ErlNifEnv* env, unsigned long i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_double,(ErlNifEnv* env, double d));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_atom,(ErlNifEnv* env, const char* name));
-ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom));
+ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple,(ErlNifEnv* env, unsigned cnt, ...));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list,(ErlNifEnv* env, unsigned cnt, ...));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_cell,(ErlNifEnv* env, ERL_NIF_TERM car, ERL_NIF_TERM cdr));
@@ -82,13 +82,13 @@ ERL_NIF_API_FUNC_DECL(int,enif_equal_tids,(ErlNifTid tid1, ErlNifTid tid2));
ERL_NIF_API_FUNC_DECL(void,enif_thread_exit,(void *resp));
ERL_NIF_API_FUNC_DECL(int,enif_thread_join,(ErlNifTid, void **respp));
-ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(ErlNifEnv*, void* ptr, size_t size));
+ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(void* ptr, size_t size));
ERL_NIF_API_FUNC_DECL(void,enif_system_info,(ErlNifSysInfo *sip, size_t si_size));
ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(void/* FILE* */ *filep, const char *format, ...));
ERL_NIF_API_FUNC_DECL(int,enif_inspect_iolist_as_binary,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, unsigned pos, unsigned size));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, size_t pos, size_t size));
ERL_NIF_API_FUNC_DECL(int,enif_get_string,(ErlNifEnv*, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding));
-ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(int,enif_is_fun,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_pid,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_port,(ErlNifEnv*, ERL_NIF_TERM term));
@@ -99,20 +99,29 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_long,(ErlNifEnv*, long i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
ERL_NIF_API_FUNC_DECL(int,enif_is_empty_list,(ErlNifEnv*, ERL_NIF_TERM term));
-ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* type_name, void (*dtor)(ErlNifEnv*,void *), enum ErlNifResourceFlags flags, enum ErlNifResourceFlags* tried));
-ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifEnv*, ErlNifResourceType* type, unsigned size));
-ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(ErlNifEnv*, void* obj));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* module_str, const char* name_str, void (*dtor)(ErlNifEnv*,void *), ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifResourceType* type, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(void* obj));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource,(ErlNifEnv*, void* obj));
ERL_NIF_API_FUNC_DECL(int,enif_get_resource,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp));
-ERL_NIF_API_FUNC_DECL(unsigned,enif_sizeof_resource,(ErlNifEnv*, void* obj));
-ERL_NIF_API_FUNC_DECL(unsigned char*,enif_make_new_binary,(ErlNifEnv*,unsigned size,ERL_NIF_TERM* termp));
+ERL_NIF_API_FUNC_DECL(size_t,enif_sizeof_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(unsigned char*,enif_make_new_binary,(ErlNifEnv*,size_t size,ERL_NIF_TERM* termp));
ERL_NIF_API_FUNC_DECL(int,enif_is_list,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_tuple,(ErlNifEnv*, ERL_NIF_TERM term));
-ERL_NIF_API_FUNC_DECL(int,enif_get_atom_length,(ErlNifEnv*, ERL_NIF_TERM atom, unsigned* len));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom_length,(ErlNifEnv*, ERL_NIF_TERM atom, unsigned* len, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(int,enif_get_list_length,(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_atom_len,(ErlNifEnv* env, const char* name, size_t len));
-ERL_NIF_API_FUNC_DECL(int, enif_make_existing_atom_len,(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom));
+ERL_NIF_API_FUNC_DECL(int, enif_make_existing_atom_len,(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ErlNifEnv*,enif_alloc_env,(void));
+ERL_NIF_API_FUNC_DECL(void,enif_free_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(void,enif_clear_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(int,enif_send,(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_copy,(ErlNifEnv* dst_env, ERL_NIF_TERM src_term));
+ERL_NIF_API_FUNC_DECL(ErlNifPid*,enif_self,(ErlNifEnv* caller_env, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(int,enif_get_local_pid,(ErlNifEnv* env, ERL_NIF_TERM, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(void,enif_keep_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* obj,const void* data, size_t size));
/*
** Add last to keep compatibility on Windows!!!
@@ -212,6 +221,15 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const c
# define enif_make_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_atom_len)
# define enif_make_existing_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom_len)
# define enif_make_string_len ERL_NIF_API_FUNC_MACRO(enif_make_string_len)
+# define enif_alloc_env ERL_NIF_API_FUNC_MACRO(enif_alloc_env)
+# define enif_free_env ERL_NIF_API_FUNC_MACRO(enif_free_env)
+# define enif_clear_env ERL_NIF_API_FUNC_MACRO(enif_clear_env)
+# define enif_send ERL_NIF_API_FUNC_MACRO(enif_send)
+# define enif_make_copy ERL_NIF_API_FUNC_MACRO(enif_make_copy)
+# define enif_self ERL_NIF_API_FUNC_MACRO(enif_self)
+# define enif_get_local_pid ERL_NIF_API_FUNC_MACRO(enif_get_local_pid)
+# define enif_keep_resource ERL_NIF_API_FUNC_MACRO(enif_keep_resource)
+# define enif_make_resource_binary ERL_NIF_API_FUNC_MACRO(enif_make_resource_binary)
#endif
#ifndef enif_make_list1
@@ -233,9 +251,7 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const c
# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
-#endif
-#ifndef enif_get_data
-# define enif_get_data enif_priv_data /* deprecated */
+# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
#endif
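
Note: new in this table are process-independent environments (enif_alloc_env, enif_clear_env, enif_free_env) together with enif_make_copy, enif_self, enif_get_local_pid and enif_send. A hedged sketch of sending a message from a NIF, based only on the declarations above; the "notify" tuple and the two-argument arity are illustrative.

    #include "erl_nif.h"

    static ERL_NIF_TERM notify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifPid to;
        ErlNifEnv* msg_env;
        ERL_NIF_TERM msg;

        if (!enif_get_local_pid(env, argv[0], &to))
            return enif_make_badarg(env);

        msg_env = enif_alloc_env();              /* heap owned by no process */
        msg = enif_make_tuple2(msg_env,
                               enif_make_atom(msg_env, "notify"),
                               enif_make_copy(msg_env, argv[1]));
        enif_send(env, &to, msg_env, msg);       /* msg_env contents are consumed */
        enif_free_env(msg_env);
        return enif_make_atom(env, "ok");
    }
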
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 0b6bb0d8e9..967a14f0d1 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -969,11 +969,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_port_release(pp);
#else
{
- long refc = erts_smp_atomic_dectest(&pp->refc);
+ long refc;
+ erts_smp_mtx_unlock(pp->lock);
+ refc = erts_smp_atomic_dectest(&pp->refc);
ASSERT(refc >= 0);
- if (refc > 0)
- erts_smp_mtx_unlock(pp->lock);
- else {
+ if (refc == 0) {
erts_smp_runq_unlock(runq);
 	    erts_port_cleanup(pp); /* Might acquire runq lock */
erts_smp_runq_lock(runq);
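
Note: the erl_port_task.c hunk changes the release order so the port lock is dropped before the reference count is decremented; the thread then never holds a lock that the cleanup path is about to destroy. A generic sketch of that ordering, outside ERTS (obj_t and the pthread/stdatomic details are assumptions, not emulator code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        pthread_mutex_t lock;
        atomic_long refc;
        /* ... payload ... */
    } obj_t;

    static void obj_destroy(obj_t* o)
    {
        pthread_mutex_destroy(&o->lock);
        free(o);
    }

    /* Called with o->lock held and one reference owned by the caller. */
    static void obj_release(obj_t* o)
    {
        pthread_mutex_unlock(&o->lock);          /* unlock first ...            */
        if (atomic_fetch_sub(&o->refc, 1) == 1)  /* ... then drop the reference */
            obj_destroy(o);                      /* last reference: free it     */
    }
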
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 11ca85a41c..bae17d1569 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -38,6 +38,7 @@
#include "erl_instrument.h"
#include "erl_threads.h"
#include "erl_binary.h"
+#include "beam_bp.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -180,6 +181,7 @@ static ErtsCpuBindData *scheduler2cpu_map;
erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
typedef enum {
+ ERTS_CPU_BIND_UNDEFINED,
ERTS_CPU_BIND_SPREAD,
ERTS_CPU_BIND_PROCESSOR_SPREAD,
ERTS_CPU_BIND_THREAD_SPREAD,
@@ -190,6 +192,9 @@ typedef enum {
ERTS_CPU_BIND_NONE
} ErtsCpuBindOrder;
+#define ERTS_CPU_BIND_DEFAULT_BIND \
+ ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD
+
ErtsCpuBindOrder cpu_bind_order;
static erts_cpu_topology_t *user_cpudata;
@@ -388,7 +393,12 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
= ERTS_PSD_DIST_ENTRY_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
- = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
+ = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
+ = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -2078,12 +2088,14 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_aligned_run_queues = erts_alloc(ERTS_ALC_T_RUNQS,
(sizeof(ErtsAlignedRunQueue)*(n+1)));
- if ((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0)
+ if ((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) != 0)
erts_aligned_run_queues = ((ErtsAlignedRunQueue *)
((((UWord) erts_aligned_run_queues)
& ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE));
+ ASSERT((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0);
+
#ifdef ERTS_SMP
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
@@ -2175,11 +2187,14 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
- if ((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) == 0)
+ if ((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) != 0)
erts_aligned_scheduler_data = ((ErtsAlignedSchedulerData *)
((((UWord) erts_aligned_scheduler_data)
& ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) == 0);
+
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
#ifdef ERTS_SMP
@@ -2769,7 +2784,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
- if (erts_is_multi_scheduling_blocked()) {
+ if (schdlr_sspnd.msb.procs) {
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
@@ -2975,8 +2990,11 @@ erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
int
erts_is_multi_scheduling_blocked(void)
{
- return (erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing)
- && erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ int res;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ res = schdlr_sspnd.msb.procs != NULL;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return res;
}
Eterm
@@ -2985,7 +3003,7 @@ erts_multi_scheduling_blockers(Process *p)
Eterm res = NIL;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (erts_is_multi_scheduling_blocked()) {
+ if (schdlr_sspnd.msb.procs) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
Uint max_size;
@@ -3096,11 +3114,7 @@ erts_start_schedulers(void)
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
actual++;
ASSERT(actual == esdp->no);
-#ifdef ERTS_ENABLE_LOCK_COUNT
- res = erts_lcnt_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
-#else
res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
-#endif
if (res != 0) {
actual--;
break;
@@ -3503,14 +3517,15 @@ erts_init_scheduler_bind_type(char *how)
if (!system_cpudata && !user_cpudata)
return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY;
- if (sys_strcmp(how, "s") == 0)
+ if (sys_strcmp(how, "db") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (sys_strcmp(how, "s") == 0)
cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (sys_strcmp(how, "ps") == 0)
cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (sys_strcmp(how, "ts") == 0)
cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (sys_strcmp(how, "db") == 0
- || sys_strcmp(how, "tnnps") == 0)
+ else if (sys_strcmp(how, "tnnps") == 0)
cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (sys_strcmp(how, "nnps") == 0)
cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4153,14 +4168,15 @@ erts_bind_schedulers(Process *c_p, Eterm how)
old_cpu_bind_order = cpu_bind_order;
- if (ERTS_IS_ATOM_STR("spread", how))
+ if (ERTS_IS_ATOM_STR("default_bind", how))
+ cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (ERTS_IS_ATOM_STR("spread", how))
cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (ERTS_IS_ATOM_STR("processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("thread_spread", how))
cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (ERTS_IS_ATOM_STR("default_bind", how)
- || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4206,14 +4222,15 @@ erts_fake_scheduler_bindings(Process *p, Eterm how)
int cpudata_size;
Eterm res;
- if (ERTS_IS_ATOM_STR("spread", how))
+ if (ERTS_IS_ATOM_STR("default_bind", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (ERTS_IS_ATOM_STR("spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (ERTS_IS_ATOM_STR("processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("thread_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (ERTS_IS_ATOM_STR("default_bind", how)
- || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4438,7 +4455,7 @@ early_cpu_bind_init(void)
(sizeof(erts_cpu_topology_t)
* system_cpudata_size));
- cpu_bind_order = ERTS_CPU_BIND_NONE;
+ cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
@@ -4464,6 +4481,17 @@ late_cpu_bind_init(void)
scheduler2cpu_map[ix].bound_id = -1;
}
+ if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED) {
+ int ncpus = erts_get_cpu_configured(erts_cpuinfo);
+ if (ncpus < 1 || erts_no_schedulers < ncpus)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else
+ cpu_bind_order = ((system_cpudata || user_cpudata)
+ && (erts_bind_to_cpu(erts_cpuinfo, -1) != -ENOTSUP)
+ ? ERTS_CPU_BIND_DEFAULT_BIND
+ : ERTS_CPU_BIND_NONE);
+ }
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -5846,6 +5874,9 @@ Process *schedule(Process *p, int calls)
}
if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && p->status != P_FREE) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+ }
switch (p->status) {
case P_EXITING:
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
@@ -6282,7 +6313,11 @@ Process *schedule(Process *p, int calls)
trace_virtual_sched(p, am_in);
break;
}
+ if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+ }
}
+
if (p->status != P_EXITING)
p->status = P_RUNNING;
@@ -7187,8 +7222,6 @@ erts_debug_verify_clean_empty_process(Process* p)
void
erts_cleanup_empty_process(Process* p)
{
- ErlHeapFragment* mbufp;
-
/* We only check fields that are known to be used... */
erts_cleanup_offheap(&p->off_heap);
@@ -7199,13 +7232,10 @@ erts_cleanup_empty_process(Process* p)
p->off_heap.externals = NULL;
p->off_heap.overhead = 0;
- mbufp = p->mbuf;
- while (mbufp) {
- ErlHeapFragment *next = mbufp->next;
- free_message_buffer(mbufp);
- mbufp = next;
+ if (p->mbuf != NULL) {
+ free_message_buffer(p->mbuf);
+ p->mbuf = NULL;
}
- p->mbuf = NULL;
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
erts_lcnt_proc_lock_destroy(p);
#endif
@@ -7221,7 +7251,6 @@ static void
delete_process(Process* p)
{
ErlMessage* mp;
- ErlHeapFragment* bp;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id));
@@ -7271,11 +7300,8 @@ delete_process(Process* p)
/*
* Free all pending message buffers.
*/
- bp = p->mbuf;
- while (bp != NULL) {
- ErlHeapFragment* next_bp = bp->next;
- free_message_buffer(bp);
- bp = next_bp;
+ if (p->mbuf != NULL) {
+ free_message_buffer(p->mbuf);
}
erts_erase_dicts(p);
@@ -8034,8 +8060,13 @@ erts_do_exit_process(Process* p, Eterm reason)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
#endif
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
+ if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
+
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, p, am_exit, reason);
+ }
erts_trace_check_exiting(p->id);
@@ -8084,6 +8115,8 @@ continue_exit_process(Process *p
Eterm reason = p->fvalue;
DistEntry *dep;
struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
+
#ifdef DEBUG
int yield_allowed = 1;
#endif
@@ -8223,6 +8256,7 @@ continue_exit_process(Process *p
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
+ pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
processes_busy--;
@@ -8265,6 +8299,9 @@ continue_exit_process(Process *p
if (scb)
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
delete_process(p);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
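
Note: two hunks in erts_init_scheduling() fix the cache-line alignment test: the pointer is now adjusted when it is not already aligned (the old test only ran the adjustment on pointers that were already aligned). A standalone sketch of the round-up idiom, with an illustrative 64-byte line size:

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHE_LINE_SIZE 64                    /* illustrative value */
    #define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

    /* Round up only when needed; keep the raw pointer if the block is ever freed. */
    static void* cache_line_align(void* raw)
    {
        if ((((uintptr_t) raw) & CACHE_LINE_MASK) != 0)
            raw = (void*) ((((uintptr_t) raw) & ~(uintptr_t) CACHE_LINE_MASK)
                           + CACHE_LINE_SIZE);
        return raw;
    }

    /* Over-allocate by one cache line so the rounded pointer stays in bounds. */
    static void* cache_aligned_alloc(size_t size)
    {
        void* raw = malloc(size + CACHE_LINE_SIZE);
        return raw ? cache_line_align(raw) : NULL;
    }
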
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index cbcdec4ba7..8f9f7f004e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -397,8 +397,9 @@ extern ErtsSchedulerData *erts_scheduler_data;
#define ERTS_PSD_SAVED_CALLS_BUF 1
#define ERTS_PSD_SCHED_ID 2
#define ERTS_PSD_DIST_ENTRY 3
+#define ERTS_PSD_CALL_TIME_BP 4
-#define ERTS_PSD_SIZE 4
+#define ERTS_PSD_SIZE 5
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -419,6 +420,9 @@ typedef struct {
#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -765,7 +769,7 @@ ERTS_GLB_INLINE void erts_heap_frag_shrink(Process* p, Eterm* hp)
{
ErlHeapFragment* hf = MBUF(p);
- ASSERT(hf!=NULL && (hp - hf->mem < (unsigned long)hf->size));
+ ASSERT(hf!=NULL && (hp - hf->mem < (unsigned long)hf->alloc_size));
hf->used_size = hp - hf->mem;
}
@@ -1208,6 +1212,12 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, L, SCB) \
((struct saved_calls *) erts_psd_set((P), (L), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
+#define ERTS_PROC_GET_CALL_TIME(P) \
+ ((process_breakpoint_time_t *) erts_psd_get((P), ERTS_PSD_CALL_TIME_BP))
+#define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \
+ ((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
+
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
ErtsProcLocks plocks,
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index d635916dd8..35b338c6eb 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,6 +34,8 @@
#include "erl_lock_count.h"
#include "erl_term.h"
+#define ERTS_THR_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+
#ifdef ERTS_ENABLE_LOCK_COUNT
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
@@ -122,6 +124,8 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#else /* #ifdef USE_THREADS */
+#define ERTS_THR_MEMORY_BARRIER
+
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
@@ -290,11 +294,7 @@ erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
#ifdef USE_THREADS
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int res = erts_lcnt_thr_create(tid, func, arg, opts);
-#else
int res = ethr_thr_create(tid, func, arg, opts);
-#endif
if (res)
erts_thr_fatal_error(res, "create thread");
#endif
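
Note: erl_threads.h now exposes ERTS_THR_MEMORY_BARRIER, which maps to ETHR_MEMORY_BARRIER in threaded builds and expands to nothing otherwise. The same compile-out pattern, sketched with a hypothetical macro name and a usage line:

    /* Hypothetical name; mirrors the pattern above so call sites need no #ifdefs. */
    #ifdef USE_THREADS
    #  define MY_MEMORY_BARRIER ETHR_MEMORY_BARRIER
    #else
    #  define MY_MEMORY_BARRIER
    #endif

    static void publish_ready(volatile int* ready)
    {
        MY_MEMORY_BARRIER;    /* order earlier stores before the flag store */
        *ready = 1;
    }
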
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8addfcf5ad..3043bb1e8c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -43,8 +43,9 @@
#undef DEBUG_PRINTOUTS
#endif
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
@@ -1941,11 +1942,13 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
Eterm* hp;
int need;
+ ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0) || erts_is_system_blocked(0));
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mess = TUPLE4(hp, am_trace, t_p->id, what, data);
hp += 5;
@@ -2114,6 +2117,7 @@ erts_bif_trace(int bif_index, Process* p,
Uint32 flags = 0, flags_meta = 0;
int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
+ int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
Eterm meta_tracer_pid = NIL;
int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
* is actually in the
@@ -2137,6 +2141,17 @@ erts_bif_trace(int bif_index, Process* p,
flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
&meta_tracer_pid);
}
+ if (time) {
+ BpDataTime *bdt = NULL;
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+
+ bdt = (BpDataTime *) erts_get_time_break(p, pc);
+ ASSERT(bdt);
+
+ if (!bdt->pause) {
+ erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
+ }
+ }
/* Restore original continuation pointer (if changed). */
p->cp = cp;
@@ -2145,8 +2160,9 @@ erts_bif_trace(int bif_index, Process* p,
result = func(p, arg1, arg2, arg3, I);
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- Uint i_return_trace = beam_return_trace[0];
- Uint i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
Eterm *cpp;
/* Maybe advance cp to skip trace stack frames */
for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
@@ -2154,6 +2170,10 @@ erts_bif_trace(int bif_index, Process* p,
/* Skip stack frame variables */
while (is_not_CP(*cpp)) cpp++;
cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
} else if (*cp == i_return_to_trace) {
/* A return_to trace message is going to be generated
* by normal means, so we do not have to.
@@ -2727,6 +2747,8 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
Eterm mess;
Eterm* hp;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_is_system_blocked(0));
+
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index eeeeb7ccfd..cd63401581 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -84,6 +84,7 @@
#define ErtsHAllocLockCheck(P) \
ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks((P))) \
+ || ((P)->id == ERTS_INVALID_PID) \
|| ((P)->scheduler_data \
&& (P) == (P)->scheduler_data->match_pseudo_process) \
|| erts_is_system_blocked(0))
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 87c1d483f4..c604fdf7c3 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -37,7 +37,7 @@ typedef struct export
void* address; /* Pointer to code for function. */
struct binary* match_prog_set; /* Match program for tracing. */
- Eterm fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
* code[0]: Tagged atom for module.
* code[1]: Tagged atom for function.
@@ -52,11 +52,7 @@ typedef struct export
* on_load function that has not been run yet.
* Otherwise: 0.
*/
-#if HALFWORD_HEAP
BeamInstr code[5];
-#else
- Eterm code[5];
-#endif
} Export;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index a7990e1799..b4a7a22082 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -881,6 +881,8 @@ Eterm copy_object(Eterm, Process*);
Uint size_object(Eterm);
Eterm copy_struct(Eterm, Uint, Eterm**, ErlOffHeap*);
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
+void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs);
#ifdef HYBRID
#define RRMA_DEFAULT_SIZE 256
@@ -1078,6 +1080,7 @@ Eterm erts_heap_sizes(Process* p);
void erts_offset_off_heap(ErlOffHeap *, Sint, Eterm*, Eterm*);
void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_free_heap_frags(Process* p);
#ifdef HYBRID
int erts_global_garbage_collect(Process*, int, Eterm*, int);
@@ -1192,12 +1195,11 @@ erts_smp_port_unlock(Port *prt)
{
#ifdef ERTS_SMP
long refc;
+ erts_smp_mtx_unlock(prt->lock);
refc = erts_smp_atomic_dectest(&prt->refc);
ASSERT(refc >= 0);
if (refc == 0)
erts_port_cleanup(prt);
- else
- erts_smp_mtx_unlock(prt->lock);
#endif
}
@@ -1720,7 +1722,6 @@ int erts_print_system_version(int to, void *arg, Process *c_p);
* Interface to erl_init
*/
void erl_init(void);
-void erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv);
#define seq_trace_output(token, msg, type, receiver, process) \
seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
@@ -1740,8 +1741,10 @@ struct trace_pattern_flags {
unsigned int local : 1; /* Local call trace breakpoint */
unsigned int meta : 1; /* Metadata trace breakpoint */
unsigned int call_count : 1; /* Fast call count breakpoint */
+ unsigned int call_time : 1; /* Fast call time breakpoint */
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+extern int erts_call_time_breakpoint_tracing;
int erts_set_trace_pattern(Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
@@ -1785,18 +1788,20 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
void (*)(ErlOffHeap *, void *),
void *);
-#define MATCH_SET_RETURN_TRACE 0x1 /* return trace requested */
-#define MATCH_SET_RETURN_TO_TRACE 0x2 /* Misleading name, it is not actually
- set by the match program, but by the
- breakpoint functions */
-#define MATCH_SET_EXCEPTION_TRACE 0x4 /* exception trace requested */
+#define MATCH_SET_RETURN_TRACE (0x1) /* return trace requested */
+#define MATCH_SET_RETURN_TO_TRACE (0x2) /* Misleading name, it is not actually
+ set by the match program, but by the
+ breakpoint functions */
+#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
/*
* Flag values when tracing bif
+ * Future note: flag field is 8 bits
*/
-#define BIF_TRACE_AS_LOCAL 0x1
-#define BIF_TRACE_AS_GLOBAL 0x2
-#define BIF_TRACE_AS_META 0x4
+#define BIF_TRACE_AS_LOCAL (0x1)
+#define BIF_TRACE_AS_GLOBAL (0x2)
+#define BIF_TRACE_AS_META (0x4)
+#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 10f1082039..68625801cf 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -280,10 +280,36 @@ erts_test_next_port(int set, Uint next)
return res;
}
+
+static void port_cleanup(Port *prt);
+
+#ifdef ERTS_SMP
+
+static void
+sched_port_cleanup(void *vprt)
+{
+ Port *prt = (Port *) vprt;
+ erts_smp_mtx_lock(prt->lock);
+ port_cleanup(prt);
+}
+
+#endif
+
void
erts_port_cleanup(Port *prt)
{
#ifdef ERTS_SMP
+ if (erts_smp_mtx_trylock(prt->lock) == EBUSY)
+ erts_schedule_misc_op(sched_port_cleanup, (void *) prt);
+ else
+#endif
+ port_cleanup(prt);
+}
+
+void
+port_cleanup(Port *prt)
+{
+#ifdef ERTS_SMP
Uint32 port_specific;
erts_smp_mtx_t *mtx;
#endif
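
Note: erts_port_cleanup() now takes the port lock with a trylock and, if it is busy, defers the real work as a scheduled misc op (sched_port_cleanup). A generic sketch of the same try-or-defer shape; schedule_deferred() is only a placeholder standing in for erts_schedule_misc_op():

    #include <errno.h>
    #include <pthread.h>

    typedef struct { pthread_mutex_t lock; /* ... */ } port_t;

    /* Placeholder for a "run this later" hook such as erts_schedule_misc_op(). */
    void schedule_deferred(void (*fn)(void*), void* arg);

    static void do_cleanup(port_t* prt)          /* expects prt->lock to be held */
    {
        /* tear down the port's state ... */
        pthread_mutex_unlock(&prt->lock);
    }

    static void deferred_cleanup(void* vprt)
    {
        port_t* prt = (port_t*) vprt;
        pthread_mutex_lock(&prt->lock);          /* blocking is fine in the deferred job */
        do_cleanup(prt);
    }

    void request_cleanup(port_t* prt)
    {
        if (pthread_mutex_trylock(&prt->lock) == EBUSY)
            schedule_deferred(deferred_cleanup, prt);   /* busy: try again later */
        else
            do_cleanup(prt);                            /* got it: clean up now  */
    }
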
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 42c84989c6..a2439d5582 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -84,6 +84,8 @@ i_trace_breakpoint
i_mtrace_breakpoint
i_debug_breakpoint
i_count_breakpoint
+i_time_breakpoint
+i_return_time_trace
i_return_to_trace
i_yield
i_global_cons
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 964c10a380..900ebcbbf7 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -476,8 +476,9 @@ int erts_unregister_name(Process *c_p,
* on c_prt.
*/
- if (!c_p)
+ if (!c_p) {
c_p_locks = 0;
+ }
current_c_p_locks = c_p_locks;
restart:
@@ -489,9 +490,15 @@ int erts_unregister_name(Process *c_p,
if (is_non_value(name)) {
/* Unregister current process name */
ASSERT(c_p);
- if (c_p->reg)
+#ifdef ERTS_SMP
+ if (current_c_p_locks != c_p_locks) {
+ erts_smp_proc_lock(c_p, c_p_locks);
+ current_c_p_locks = c_p_locks;
+ }
+#endif
+ if (c_p->reg) {
r.name = c_p->reg->name;
- else {
+ } else {
/* Name got unregistered while main lock was released */
res = 0;
goto done;
@@ -533,24 +540,25 @@ int erts_unregister_name(Process *c_p,
}
} else if (rp->p) {
- Process* p = rp->p;
+
#ifdef ERTS_SMP
erts_proc_safelock(c_p,
current_c_p_locks,
c_p_locks,
rp->p,
- 0,
+ (c_p == rp->p) ? current_c_p_locks : 0,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
#endif
- p->reg = NULL;
+ rp->p->reg = NULL;
+ if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
+ trace_proc(c_p, rp->p, am_unregister, r.name);
+ }
#ifdef ERTS_SMP
- if (rp->p != c_p)
+ if (rp->p != c_p) {
erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
-#endif
- if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
- trace_proc(c_p, p, am_unregister, r.name);
}
+#endif
}
hash_erase(&process_reg, (void*) &r);
res = 1;
@@ -560,14 +568,17 @@ int erts_unregister_name(Process *c_p,
reg_write_unlock();
if (c_prt != port) {
- if (port)
+ if (port) {
erts_smp_port_unlock(port);
- if (c_prt)
+ }
+ if (c_prt) {
erts_smp_port_lock(c_prt);
+ }
}
#ifdef ERTS_SMP
- if (c_p && !current_c_p_locks)
+ if (c_p && !current_c_p_locks) {
erts_smp_proc_lock(c_p, c_p_locks);
+ }
#endif
return res;
}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 0f20d36167..ca87d3d70f 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -648,7 +648,7 @@ extern char *erts_sys_ddll_error(int code);
/*
- * System interfaces for startup/sae code (functions found in respective sys.c)
+ * System interfaces for startup.
*/
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 51c12a0b69..da6f9ed12f 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -32,6 +32,7 @@
#include "erl_binary.h"
#include "erl_bits.h"
#include "packet_parser.h"
+#include "erl_gc.h"
#define ERTS_WANT_DB_INTERNAL__
#include "erl_db.h"
#include "erl_threads.h"
@@ -125,7 +126,7 @@ erts_heap_alloc(Process* p, Uint need)
n = need;
bp = MBUF(p);
- if (bp != NULL && need <= (bp->size - bp->used_size)) {
+ if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
Eterm* ret = bp->mem + bp->used_size;
bp->used_size += need;
return ret;
@@ -158,7 +159,7 @@ erts_heap_alloc(Process* p, Uint need)
bp->next = MBUF(p);
MBUF(p) = bp;
- bp->size = n;
+ bp->alloc_size = n;
bp->used_size = n;
MBUF_SIZE(p) += n;
bp->off_heap.mso = NULL;
@@ -509,7 +510,7 @@ erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
if (hpp) {
res = NIL;
while (--i >= 0) {
- res = CONS(*hpp, make_small(str[i]), res);
+ res = CONS(*hpp, make_small((byte) str[i]), res);
*hpp += 2;
}
}
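
Note: the last utils.c hunk casts the string byte before building a small integer. On targets where plain char is signed, Latin-1 bytes above 0x7F would otherwise turn into negative integers in the resulting list; a short standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        char c = (char) 0xE5;                                   /* "å" in Latin-1 */
        printf("signed char : %d\n", (int) c);                  /* may print -27  */
        printf("as byte     : %d\n", (int) (unsigned char) c);  /* prints 229     */
        return 0;
    }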