Diffstat (limited to 'erts/emulator/beam')
 erts/emulator/beam/beam_emu.c      |  20
 erts/emulator/beam/bif.c           |   4
 erts/emulator/beam/big.c           |   9
 erts/emulator/beam/big.h           |   4
 erts/emulator/beam/erl_alloc.c     |   6
 erts/emulator/beam/erl_alloc.types |   4
 erts/emulator/beam/erl_db_util.c   |   4
 erts/emulator/beam/erl_gc.c        |  20
 erts/emulator/beam/erl_init.c      |  30
 erts/emulator/beam/erl_port_task.c |   5
 erts/emulator/beam/erl_process.c   | 452
 erts/emulator/beam/erl_process.h   | 116
 erts/emulator/beam/erl_term.h      |   3
 erts/emulator/beam/erl_vm.h        |   3
 erts/emulator/beam/external.c      | 207
 erts/emulator/beam/global.h        | 346
 erts/emulator/beam/utils.c         |  69
 17 files changed, 853 insertions(+), 449 deletions(-)
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 78ab6fa30f..b413f0e859 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -48,7 +48,7 @@
# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
# define OpCode(OpCode) ((Uint*)op_##OpCode)
-# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
+# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;}
# define LabelAddr(Addr) &&##Addr
#else
# define OpCase(OpCode) lb_##OpCode
@@ -133,7 +133,7 @@ do { \
/* We don't check the range if an ordinary switch is used */
#ifdef NO_JUMP_TABLE
-#define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
+#define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10))
#else
#define VALID_INSTR(IP) \
((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
@@ -4326,7 +4326,19 @@ void process_main(void)
flags = Arg(2);
BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
if (size >= SMALL_BITS) {
- Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ Uint wordsneeded;
+ /* Check the bit size before a potential GC.
+ * We do not want to GC and then realize we don't need
+ * the allocated space (i.e. if the op fails).
+ *
+ * Remember to reacquire the matchbuffer after the GC.
+ */
+
+ mb = ms_matchbuffer(tmp_arg1);
+ if (mb->size - mb->offset < size) {
+ ClauseFail();
+ }
+ wordsneeded = 1+WSIZE(NBYTES((Uint) size));
TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
}
mb = ms_matchbuffer(tmp_arg1);
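
The hunk above is an instance of a general ERTS ordering rule: validate the operation against the match buffer before any heap test that can trigger a GC, and reacquire GC-movable pointers afterwards. A minimal standalone sketch of that ordering (hypothetical names, not the beam_emu internals):

    #include <stddef.h>

    typedef struct { size_t size, offset; } MatchBuf;

    /* Hypothetical sketch: fail before growing the heap, so a failing
     * match never pays for a GC, and reacquire any GC-movable pointer
     * after the heap test (which may move terms). */
    static int get_bits(MatchBuf *(*lookup)(void), size_t bits)
    {
        MatchBuf *mb = lookup();
        if (mb->size - mb->offset < bits)
            return 0;          /* op fails: no allocation, no GC */
        /* test_heap_preserve(...);  -- may GC and move the matchstate */
        mb = lookup();         /* reacquire, as ms_matchbuffer() above */
        mb->offset += bits;
        return 1;
    }
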
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 96666d98ed..61c1abedb5 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -4488,7 +4488,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_P->group_leader,
"A call to erlang:system_flag(cpu_topology, _) was made.\n"
"The cpu_topology argument is deprecated and scheduled\n"
- "for removal in erts-5.10/OTP-R16. For more information\n"
+ "for removal in Erlang/OTP 18. For more information\n"
"see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
@@ -4496,7 +4496,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_P->group_leader,
"A call to erlang:system_flag(scheduler_bind_type, _) was\n"
"made. The scheduler_bind_type argument is deprecated and\n"
- "scheduled for removal in erts-5.10/OTP-R16. For more\n"
+ "scheduled for removal in Erlang/OTP 18. For more\n"
"information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
}
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 2b27b111d8..41a041eba6 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1603,9 +1603,11 @@ big_to_double(Wterm x, double* resp)
/*
* Logic has been copied from erl_bif_guard.c and slightly
* modified to use a static instead of dynamic heap
+ *
+ * HALFWORD: Return relative term with 'heap' as base.
*/
Eterm
-double_to_big(double x, Eterm *heap)
+double_to_big(double x, Eterm *heap, Uint hsz)
{
int is_negative;
int ds;
@@ -1633,9 +1635,10 @@ double_to_big(double x, Eterm *heap)
sz = BIG_NEED_SIZE(ds); /* number of words including arity */
hp = heap;
- res = make_big(hp);
+ res = make_big_rel(hp, heap);
xp = (ErtsDigit*) (hp + 1);
+ ASSERT(ds < hsz);
for (i = ds - 1; i >= 0; i--) {
ErtsDigit d;
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 1a7b14170f..d80111822e 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -141,7 +141,7 @@ Eterm big_lshift(Eterm, Sint, Eterm*);
int big_comp (Wterm, Wterm);
int big_ucomp (Eterm, Eterm);
int big_to_double(Wterm x, double* resp);
-Eterm double_to_big(double, Eterm*);
+Eterm double_to_big(double, Eterm*, Uint hsz);
Eterm small_to_big(Sint, Eterm*);
Eterm uint_to_big(Uint, Eterm*);
Eterm uword_to_big(UWord, Eterm*);
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index b5ba9bb94a..8094c6ee2e 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -75,9 +75,9 @@
#define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
#define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
-#define ERTS_ALC_DEFAULT_ACUL 0
-#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0
-#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0
+#define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
+#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
+#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
#ifndef ERTS_SMP
# undef ERTS_ALC_DEFAULT_ACUL
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 32308fae9b..b4e52770e3 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2013. All Rights Reserved.
+# Copyright Ericsson AB 2003-2014. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -150,7 +150,7 @@ type LINK_LH STANDARD PROCESSES link_lh
type SUSPEND_MON STANDARD PROCESSES suspend_monitor
type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
-type EXTRA_ROOT SHORT_LIVED PROCESSES extra_root
+type SAVED_ESTACK SHORT_LIVED PROCESSES saved_estack
type FUN_ENTRY LONG_LIVED CODE fun_entry
type ATOM_TXT LONG_LIVED ATOM atom_text
type BEAM_REGISTER EHEAP PROCESSES beam_register
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index ef3749a2c4..a358ecf326 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1838,7 +1838,7 @@ restart:
ep = termp;
break;
case matchArrayBind: /* When the array size is unknown. */
- ASSERT(termp);
+ ASSERT(termp || arity==0);
n = *pc++;
variables[n].term = dpm_array_to_list(psp, termp, arity);
break;
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index c5585d39e8..ab8448e8a1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1157,7 +1157,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
old_htop = sweep_one_area(OLD_HTOP(p), old_htop, heap, heap_size);
}
OLD_HTOP(p) = old_htop;
- HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
+ HIGH_WATER(p) = n_htop;
if (MSO(p).first) {
sweep_off_heap(p, 0);
@@ -1975,17 +1975,6 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
++n;
}
- /*
- * A trapping BIF can add to rootset by setting the extra_root
- * in the process_structure.
- */
- if (p->extra_root != NULL) {
- roots[n].v = p->extra_root->objv;
- roots[n].sz = p->extra_root->sz;
- ++n;
- }
-
-
ASSERT((is_nil(p->seq_trace_token) ||
is_tuple(follow_moved(p->seq_trace_token)) ||
is_atom(p->seq_trace_token)));
@@ -2563,11 +2552,6 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
p->dictionary->used,
offs, area, area_size);
}
- if (p->extra_root != NULL) {
- offset_heap_ptr(p->extra_root->objv,
- p->extra_root->sz,
- offs, area, area_size);
- }
offset_heap_ptr(&p->fvalue, 1, offs, area, area_size);
offset_heap_ptr(&p->ftrace, 1, offs, area, area_size);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 1af80dd04b..19088fd913 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -537,6 +537,12 @@ void erts_usage(void)
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+ erts_fprintf(stderr, "-sub bool enable/disable scheduler utilization balancing,\n");
+#else
+ erts_fprintf(stderr, "-sub false disable scheduler utilization balancing,\n");
+#endif
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n");
erts_fprintf(stderr, " default|legacy.\n");
erts_fprintf(stderr, "-swct val set scheduler wake cleanup threshold, valid values are:\n");
@@ -1433,8 +1439,10 @@ erl_start(int argc, char **argv)
}
else if (has_prefix("cl", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
- if (sys_strcmp("true", arg) == 0)
+ if (sys_strcmp("true", arg) == 0) {
erts_sched_compact_load = 1;
+ erts_sched_balance_util = 0;
+ }
else if (sys_strcmp("false", arg) == 0)
erts_sched_compact_load = 0;
else {
@@ -1512,6 +1520,26 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("ub", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp("true", arg) == 0) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+ erts_sched_balance_util = 1;
+#else
+ erts_fprintf(stderr,
+ "scheduler utilization balancing not "
+ "supported on this system\n");
+ erts_usage();
+#endif
+ }
+ else if (sys_strcmp("false", arg) == 0)
+ erts_sched_balance_util = 0;
+ else {
+ erts_fprintf(stderr, "bad scheduler utilization balancing "
+ " value '%s'\n", arg);
+ erts_usage();
+ }
+ }
else if (has_prefix("wct", sub_param)) {
arg = get_arg(sub_param+3, argv[i+1], &i);
if (erts_sched_set_wake_cleanup_threshold(arg) != 0) {
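
For reference, the new switch is parsed under the scheduling ('s') options, so it is spelled +sub on the command line; note from the 'cl' hunk above that it is mutually exclusive with load compaction: +scl true clears erts_sched_balance_util, and (as shown later in erts_init_scheduling) enabling balancing clears erts_sched_compact_load. A usage sketch:

    erl +sub true      # enable scheduler utilization balancing
    erl +sub false     # default: balance run-queue lengths instead
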
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 547a42beb2..d4108067d0 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -877,6 +877,11 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
ASSERT(runq->ports.start && runq->ports.end);
erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+
+#ifdef ERTS_SMP
+ if (runq->halt_in_progress)
+ erts_non_empty_runq(runq);
+#endif
}
static ERTS_INLINE Port *
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 21fd8dd50a..74cd84a998 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -144,6 +144,7 @@ extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
int erts_sched_compact_load;
+int erts_sched_balance_util = 0;
Uint erts_no_schedulers;
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024)
@@ -608,6 +609,7 @@ erts_late_init_process(void)
static void
init_sched_wall_time(ErtsSchedWallTime *swtp)
{
+ swtp->need = erts_sched_balance_util;
swtp->enabled = 0;
swtp->start = 0;
swtp->working.total = 0;
@@ -630,27 +632,253 @@ sched_wall_time_ts(void)
#endif
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+
+#ifdef ARCH_64
+
+static ERTS_INLINE Uint64
+aschedtime_read(ErtsAtomicSchedTime *var)
+{
+ return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
+}
+
+static ERTS_INLINE void
+aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
+{
+ erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
+}
+
+static ERTS_INLINE void
+aschedtime_init(ErtsAtomicSchedTime *var)
+{
+ erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
+}
+
+#elif defined(ARCH_32)
+
+static ERTS_INLINE Uint64
+aschedtime_read(ErtsAtomicSchedTime *var)
+{
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw.dw_sint;
+#else
+ {
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+ }
+#endif
+}
+
+static ERTS_INLINE void
+aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
+{
+ erts_dw_aint_t dw;
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
+ dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+ erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
+}
+
+static ERTS_INLINE void
+aschedtime_init(ErtsAtomicSchedTime *var)
+{
+ erts_dw_aint_t dw;
+ dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
+ dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
+ erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
+}
+
+#else
+# error :-/
+#endif
+
+#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
+#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
+
+/* Intervals in nanoseconds */
+#define ERTS_SCHED_UTIL_SHORT_INTERVAL ((Uint64) 1*1000*1000*1000)
+#define ERTS_SCHED_UTIL_LONG_INTERVAL ((Uint64) 10*1000*1000*1000)
+
+
+#define ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF 5000 /* ppm */
+
+static ERTS_INLINE Uint64
+calc_sched_worktime(int is_working, Uint64 now, Uint64 last,
+ Uint64 interval, Uint64 old_worktime)
+{
+ Uint64 worktime;
+ Uint64 new;
+
+ if (now <= last)
+ return old_worktime;
+
+ new = now - last;
+
+ if (new >= interval)
+ return is_working ? interval : (Uint64) 0;
+
+
+ /*
+ * Division by 1000 in order to avoid
+ * overflow. If this is changed, update the
+ * assertions in init_runq_sched_util().
+ */
+ worktime = old_worktime;
+ worktime *= (interval - new)/1000;
+ worktime /= (interval/1000);
+ if (is_working)
+ worktime += new;
+
+ ASSERT(0 <= worktime && worktime <= interval);
+
+ return worktime;
+}
+
+static ERTS_INLINE void
+update_avg_sched_util(ErtsSchedulerData *esdp, Uint64 now, int is_working)
+{
+ ErtsRunQueue *rq;
+ int worked;
+ Uint64 swt, lwt, last;
+
+ rq = esdp->run_queue;
+ last = aschedtime_read(&rq->sched_util.last);
+
+ if (now <= last) {
+ ASSERT(last == ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ return;
+ }
+
+ ASSERT(now >= last);
+
+ worked = rq->sched_util.is_working;
+
+ swt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_SHORT_INTERVAL,
+ rq->sched_util.worktime.short_interval);
+ lwt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_LONG_INTERVAL,
+ rq->sched_util.worktime.long_interval);
+
+ aschedtime_set(&rq->sched_util.last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ rq->sched_util.is_working = is_working;
+ rq->sched_util.worktime.short_interval = swt;
+ rq->sched_util.worktime.long_interval = lwt;
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ aschedtime_set(&rq->sched_util.last, now);
+}
+
+int
+erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval)
+{
+ /* Average scheduler utilization in ppm */
+ int util, is_working, try = 0, locked = initially_locked;
+ Uint64 worktime, old_worktime, now, last, interval, *old_worktimep;
+
+ if (short_interval) {
+ old_worktimep = &rq->sched_util.worktime.short_interval;
+ interval = ERTS_SCHED_UTIL_SHORT_INTERVAL;
+ }
+ else {
+ old_worktimep = &rq->sched_util.worktime.long_interval;
+ interval = ERTS_SCHED_UTIL_LONG_INTERVAL;
+ }
+
+ while (1) {
+ Uint64 chk_last;
+ last = aschedtime_read(&rq->sched_util.last);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ is_working = rq->sched_util.is_working;
+ old_worktime = *old_worktimep;
+ ERTS_THR_READ_MEMORY_BARRIER;
+ chk_last = aschedtime_read(&rq->sched_util.last);
+ if (chk_last == last)
+ break;
+ if (!locked) {
+ if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) {
+ /* Writer will eventually block on runq-lock */
+ erts_smp_runq_lock(rq);
+ locked = 1;
+ }
+ }
+ }
+
+ if (!initially_locked && locked)
+ erts_smp_runq_unlock(rq);
+
+ now = sched_wall_time_ts();
+ worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime);
+
+ util = (int) ((worktime * 1000000)/interval);
+
+ ASSERT(0 <= util && util <= 1000000);
+
+ return util;
+}
+
+static void
+init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
+{
+ aschedtime_init(&rqsu->last);
+ if (!enabled)
+ aschedtime_set(&rqsu->last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER);
+ rqsu->is_working = 0;
+ rqsu->worktime.short_interval = (Uint64) 0;
+ rqsu->worktime.long_interval = (Uint64) 0;
+
+#ifdef DEBUG
+ {
+ Uint64 intrvl;
+ /*
+ * If one of these asserts fails, we may get an
+ * overflow in calc_sched_worktime(). That has to
+ * be fixed either by shrinking the interval size
+ * or by fixing the worktime calculation in
+ * calc_sched_worktime().
+ */
+ intrvl = ERTS_SCHED_UTIL_SHORT_INTERVAL;
+ ASSERT(intrvl*(intrvl/1000) > intrvl);
+ intrvl = ERTS_SCHED_UTIL_LONG_INTERVAL;
+ ASSERT(intrvl*(intrvl/1000) > intrvl);
+ }
+#endif
+}
+
+#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
+
static ERTS_INLINE void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.enabled) {
+ if (esdp->sched_wall_time.need) {
Uint64 ts = sched_wall_time_ts();
- if (working) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ update_avg_sched_util(esdp, ts, working);
+#endif
+ if (esdp->sched_wall_time.enabled) {
+ if (working) {
#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
+ ASSERT(!esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 1;
#endif
- ts -= esdp->sched_wall_time.start;
- esdp->sched_wall_time.working.start = ts;
- }
- else {
+ ts -= esdp->sched_wall_time.start;
+ esdp->sched_wall_time.working.start = ts;
+ }
+ else {
#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
+ ASSERT(esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 0;
#endif
- ts -= esdp->sched_wall_time.start;
- ts -= esdp->sched_wall_time.working.start;
- esdp->sched_wall_time.working.total += ts;
+ ts -= esdp->sched_wall_time.start;
+ ts -= esdp->sched_wall_time.working.start;
+ esdp->sched_wall_time.working.total += ts;
+ }
}
}
}
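
calc_sched_worktime() above maintains a decaying average over a fixed window: the old worktime is scaled by (interval - elapsed)/interval, and the elapsed time is added when the scheduler was busy. Both division operands are pre-scaled by 1000 so the 64-bit multiplication cannot overflow for the 1 s and 10 s windows used here. A condensed standalone version of the same arithmetic (a sketch, not the erts code):

    #include <stdint.h>

    /* Decaying-window worktime, in nanoseconds.  'interval' is the window
     * length (1e9 or 1e10 above); the /1000 keeps old*(interval-elapsed)
     * below 2^63 for those window sizes. */
    static uint64_t decay_worktime(int working, uint64_t elapsed,
                                   uint64_t interval, uint64_t old)
    {
        uint64_t w;
        if (elapsed >= interval)
            return working ? interval : 0;
        w = old * ((interval - elapsed) / 1000) / (interval / 1000);
        return working ? w + elapsed : w;
    }

    /* Utilization in ppm, as in erts_get_sched_util():          */
    /*   util = (int)((worktime * 1000000) / interval);          */
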
@@ -705,10 +933,13 @@ reply_sched_wall_time(void *vswtrp)
ASSERT(esdp);
if (swtrp->set) {
- if (!swtrp->enable && esdp->sched_wall_time.enabled)
+ if (!swtrp->enable && esdp->sched_wall_time.enabled) {
+ esdp->sched_wall_time.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
+ }
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
+ esdp->sched_wall_time.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
@@ -2084,9 +2315,8 @@ ongoing_multi_scheduling_block(void)
}
static ERTS_INLINE void
-empty_runq(ErtsRunQueue *rq)
+empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
{
- Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED);
if (old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
#ifdef DEBUG
erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
@@ -2107,6 +2337,23 @@ empty_runq(ErtsRunQueue *rq)
}
static ERTS_INLINE void
+empty_runq(ErtsRunQueue *rq)
+{
+ Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq_aux(rq, old_flags);
+}
+
+static ERTS_INLINE Uint32
+empty_protected_runq(ErtsRunQueue *rq)
+{
+ Uint32 old_flags = ERTS_RUNQ_FLGS_BSET(rq,
+ ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED,
+ ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq_aux(rq, old_flags);
+ return old_flags;
+}
+
+static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY);
@@ -2130,6 +2377,18 @@ non_empty_runq(ErtsRunQueue *rq)
}
}
+void
+erts_empty_runq(ErtsRunQueue *rq)
+{
+ empty_runq(rq);
+}
+
+void
+erts_non_empty_runq(ErtsRunQueue *rq)
+{
+ non_empty_runq(rq);
+}
+
static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
@@ -2632,7 +2891,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq)
{
ErtsSchedulerSleepInfo *ssi;
erts_aint32_t flgs;
@@ -2651,9 +2910,6 @@ wake_scheduler(ErtsRunQueue *rq, int incq)
flgs = ssi_flags_set_wake(ssi);
erts_sched_finish_poke(ssi, flgs);
-
- if (incq && (flgs & ERTS_SSI_FLG_WAITING))
- non_empty_runq(rq);
}
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -2744,7 +3000,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
if (try_inc_no_active_runqs(ix+1))
(void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
}
- wake_scheduler(wrq, 0);
+ wake_scheduler(wrq);
return 1;
}
return 0;
@@ -2792,7 +3048,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
if (runq)
- wake_scheduler(runq, 1);
+ wake_scheduler(runq);
#endif
}
@@ -2810,7 +3066,7 @@ erts_sched_notify_check_cpu_bind(void)
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
#else
erts_sched_check_cpu_bind(erts_get_scheduler_data());
@@ -2938,6 +3194,11 @@ check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
if (!f_rq)
return NULL;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (mp->sched_util)
+ return NULL;
+#endif
+
f_rq_flags = ERTS_RUNQ_FLGS_GET(f_rq);
if (f_rq_flags & ERTS_RUNQ_FLG_PROTECTED)
return NULL;
@@ -3077,7 +3338,7 @@ suspend_run_queue(ErtsRunQueue *rq)
ERTS_SSI_FLG_SUSPENDED);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
static void scheduler_ix_resume_wake(Uint ix);
@@ -3169,6 +3430,9 @@ evacuate_run_queue(ErtsRunQueue *rq,
to_rq->misc.start = start;
to_rq->misc.end = end;
+
+ non_empty_runq(to_rq);
+
erts_smp_runq_unlock(to_rq);
smp_notify_inc_runq(to_rq);
erts_smp_runq_lock(to_rq);
@@ -3381,7 +3645,7 @@ try_steal_task(ErtsRunQueue *rq)
Uint32 flags;
/* Protect jobs we steal from getting stolen from us... */
- flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ flags = empty_protected_runq(rq);
if (flags & ERTS_RUNQ_FLG_SUSPENDED)
return 0; /* go suspend instead... */
@@ -3460,6 +3724,9 @@ typedef struct {
int full_reds_history_change;
int oowc;
int max_len;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util;
+#endif
} ErtsRunQueueBalance;
static ErtsRunQueueBalance *run_queue_info;
@@ -3623,6 +3890,9 @@ check_balance(ErtsRunQueue *c_rq)
Sint64 scheds_reds, full_scheds_reds;
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util_balancing;
+#endif
if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
@@ -3678,6 +3948,10 @@ check_balance(ErtsRunQueue *c_rq)
return;
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ sched_util_balancing = 0;
+#endif
+
freds_hist_ix = balance_info.full_reds_history_index;
balance_info.full_reds_history_index++;
if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
@@ -3708,7 +3982,12 @@ check_balance(ErtsRunQueue *c_rq)
run_queue_info[qix].oowc = rq->out_of_work_count;
run_queue_info[qix].max_len = rq->max_len;
rq->check_balance_reds = INT_MAX;
-
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util)
+ run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0);
+#endif
+
erts_smp_runq_unlock(rq);
}
@@ -3778,8 +4057,38 @@ check_balance(ErtsRunQueue *c_rq)
mmax_len = run_queue_info[qix].max_len;
}
- if (!erts_sched_compact_load)
+ if (!erts_sched_compact_load) {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util && full_scheds < blnc_no_rqs) {
+ int avg_util = 0;
+
+ for (qix = 0; qix < blnc_no_rqs; qix++)
+ avg_util += run_queue_info[qix].sched_util;
+
+ avg_util /= blnc_no_rqs; /* in ppm */
+
+ sched_util_balancing = 1;
+ /*
+ * In order to avoid renaming a large number of fields
+ * we write utilization values instead of length values
+ * in the 'max_len' and 'migration_limit' fields...
+ */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ run_queue_info[qix].flags = 0; /* Reset for later use... */
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].emigrate_to = -1;
+ run_queue_info[qix].prio[pix].immigrate_from = -1;
+ run_queue_info[qix].prio[pix].avail = 100;
+ run_queue_info[qix].prio[pix].max_len = run_queue_info[qix].sched_util;
+ run_queue_info[qix].prio[pix].migration_limit = avg_util;
+ }
+ }
+ active = blnc_no_rqs;
+ goto setup_migration_paths;
+ }
+#endif
goto all_active;
+ }
if (!forced && half_full_scheds != blnc_no_rqs) {
int min = 1;
@@ -3896,15 +4205,30 @@ check_balance(ErtsRunQueue *c_rq)
}
}
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ setup_migration_paths:
+#endif
+
/* Setup migration paths for all priorities */
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
int low = 0, high = 0;
for (qix = 0; qix < blnc_no_rqs; qix++) {
int len_diff = run_queue_info[qix].prio[pix].max_len;
len_diff -= run_queue_info[qix].prio[pix].migration_limit;
+
#ifdef DBG_PRINT
if (pix == 2) erts_fprintf(stderr, "%d ", len_diff);
#endif
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (sched_util_balancing
+ && -ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF <= len_diff
+ && len_diff <= ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF) {
+ /* ignore minor imbalance */
+ len_diff = 0;
+ }
+#endif
+
run_queue_compare[qix].qix = qix;
run_queue_compare[qix].len = len_diff;
if (len_diff != 0) {
@@ -4031,6 +4355,9 @@ erts_fprintf(stderr, "--------------------------------\n");
Uint32 flags = run_queue_info[qix].flags;
ErtsMigrationPath *mp = &new_mpaths->mpath[qix];
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ mp->sched_util = sched_util_balancing;
+#endif
mp->flags = flags;
mp->misc_evac_runq = NULL;
@@ -4628,6 +4955,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
set_wakeup_other_data();
#endif
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (erts_sched_balance_util)
+ erts_sched_compact_load = 0;
+#endif
+
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
@@ -4696,6 +5028,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->ports.info.reds = 0;
rq->ports.start = NULL;
rq->ports.end = NULL;
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ init_runq_sched_util(&rq->sched_util, erts_sched_balance_util);
+#endif
+
}
#ifdef ERTS_SMP
@@ -4794,6 +5131,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
esdp->reductions = 0;
init_sched_wall_time(&esdp->sched_wall_time);
+
erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
}
@@ -5761,7 +6099,7 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
}
}
@@ -5860,7 +6198,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
for (ix = 1; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0);
+ wake_scheduler(rq);
}
if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
@@ -7265,19 +7603,13 @@ Process *schedule(Process *p, int calls)
#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
-
-#ifdef ERTS_SMP
- {
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
-
+ ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
+ if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
+ rq->procs.pending_exiters = NULL;
+ erts_smp_runq_unlock(rq);
+ handle_pending_exiters(pnd_xtrs);
+ erts_smp_runq_lock(rq);
}
-#endif
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -7294,7 +7626,7 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
-
+ ASSERT(flags & ERTS_RUNQ_FLG_NONEMPTY);
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
@@ -7346,20 +7678,16 @@ Process *schedule(Process *p, int calls)
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- empty_runq(rq);
-
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED)
goto continue_check_activities_to_run_known_flags;
- }
- else if (!(flags & ERTS_RUNQ_FLG_INACTIVE)) {
- if (try_steal_task(rq)) {
- non_empty_runq(rq);
+ if (flags & ERTS_RUNQ_FLG_INACTIVE)
+ empty_runq(rq);
+ else {
+ if (try_steal_task(rq))
goto continue_check_activities_to_run;
- }
- (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ empty_runq(rq);
/*
* Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
@@ -7368,10 +7696,10 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
goto continue_check_activities_to_run_known_flags;
}
}
-
#endif
scheduler_wait(&fcalls, esdp, rq);
@@ -8486,6 +8814,10 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
rq->misc.start = molp;
rq->misc.end = molp;
+#ifdef ERTS_SMP
+ non_empty_runq(rq);
+#endif
+
erts_smp_runq_unlock(rq);
smp_notify_inc_runq(rq);
@@ -8755,7 +9087,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->htop = p->heap;
p->heap_sz = sz;
p->catches = 0;
- p->extra_root = NULL;
p->bin_vheap_sz = p->min_vheap_size;
p->bin_old_vheap_sz = p->min_vheap_size;
@@ -9372,8 +9703,11 @@ save_pending_exiter(Process *p)
erts_proclist_store_last(&rq->procs.pending_exiters, plp);
+ non_empty_runq(rq);
+
erts_smp_runq_unlock(rq);
- wake_scheduler(rq, 1);
+
+ wake_scheduler(rq);
}
#endif
@@ -10219,12 +10553,6 @@ erts_continue_exit_process(Process *p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
- if (p->extra_root != NULL) {
- (p->extra_root->cleanup)(p->extra_root); /* Should deallocate
- whole structure */
- p->extra_root = NULL;
- }
-
delete_process(p);
#ifdef ERTS_SMP
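
update_avg_sched_util() and erts_get_sched_util() above form a seqlock: the writer parks an all-ones marker in 'last' around its update, and a reader accepts the plain fields only if it observes the same 'last' before and after reading them, falling back to the run-queue lock after ERTS_GET_AVG_MAX_UNLOCKED_TRY spins. A minimal sketch of the reader side, written with C11 atomics instead of the erts wrappers (an assumption; the real code uses erts barriers and dw-atomics on 32-bit):

    #include <stdatomic.h>
    #include <stdint.h>

    struct util {
        _Atomic uint64_t last;  /* timestamp; ~0 while a write is in flight */
        uint64_t work;          /* plain fields, published via 'last' */
        int working;
    };

    /* Retry until 'last' is stable across the field reads.  The bounded
     * retry/runq-lock fallback of the real code is omitted here. */
    static uint64_t read_consistent(struct util *u,
                                    uint64_t *work, int *working)
    {
        uint64_t before, after;
        do {
            before = atomic_load_explicit(&u->last, memory_order_acquire);
            *work = u->work;
            *working = u->working;
            atomic_thread_fence(memory_order_acquire);
            after = atomic_load_explicit(&u->last, memory_order_relaxed);
        } while (before != after);
        return before;
    }
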
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 043621125c..24832d59b5 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -70,6 +70,9 @@ typedef struct process Process;
struct ErtsNodesMonitor_;
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 0
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT 0
+
#define ERTS_MAX_NO_OF_SCHEDULERS 1024
#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18)
@@ -98,6 +101,7 @@ struct saved_calls {
extern Export exp_send, exp_receive, exp_timeout;
extern int erts_sched_compact_load;
+extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
@@ -198,6 +202,10 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ (erts_aint32_t) (MSK), \
+ (erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
@@ -316,9 +324,40 @@ typedef struct {
int reds;
} ErtsRunQueueInfo;
+
+#ifdef HAVE_GETHRTIME
+# undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
+#endif
+
#ifdef ERTS_SMP
+#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
+
+#ifdef ARCH_64
+typedef erts_atomic_t ErtsAtomicSchedTime;
+#elif defined(ARCH_32)
+typedef erts_dw_atomic_t ErtsAtomicSchedTime;
+#else
+# error :-/
+#endif
+
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+typedef struct {
+ ErtsAtomicSchedTime last;
+ struct {
+ Uint64 short_interval;
+ Uint64 long_interval;
+ } worktime;
+ int is_working;
+} ErtsRunQueueSchedUtil;
+#endif
+
typedef struct {
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ int sched_util;
+#endif
Uint32 flags;
ErtsRunQueue *misc_evac_runq;
struct {
@@ -385,6 +424,9 @@ struct ErtsRunQueue_ {
Port *start;
Port *end;
} ports;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ ErtsRunQueueSchedUtil sched_util;
+#endif
};
#ifdef ERTS_SMP
@@ -414,6 +456,7 @@ do { \
} while (0)
typedef struct {
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
int enabled;
Uint64 start;
struct {
@@ -499,7 +542,6 @@ struct ErtsSchedulerData_ {
Eterm tmp_heap[TMP_HEAP_SIZE];
int num_tmp_heap_used;
Eterm beam_emu_tmp_heap[BEAM_EMU_TMP_HEAP_SIZE];
- Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE];
Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
#endif
ErtsSchedulerSleepInfo *ssi;
@@ -542,6 +584,12 @@ int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#ifdef ERTS_SMP
+void erts_empty_runq(ErtsRunQueue *rq);
+void erts_non_empty_runq(ErtsRunQueue *rq);
+#endif
+
+
/*
* Run queue locked during modifications. We use atomic ops since
* other threads peek at values without run queue lock.
@@ -574,6 +622,10 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
erts_smp_atomic32_set_relb(&rqi->len, len);
+#ifdef ERTS_SMP
+ if (rq->len == 0)
+ erts_non_empty_runq(rq);
+#endif
rq->len++;
if (rq->max_len < rq->len)
rq->max_len = len;
@@ -711,13 +763,6 @@ struct ErtsPendingSuspend_ {
#endif
-typedef struct ErlExtraRootSet_ ErlExtraRootSet;
-struct ErlExtraRootSet_ {
- Eterm *objv;
- Uint sz;
- void (*cleanup)(ErlExtraRootSet *);
-};
-
/* Defines to ease the change of memory architecture */
# define HEAP_START(p) (p)->heap
# define HEAP_TOP(p) (p)->htop
@@ -811,8 +856,6 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- ErlExtraRootSet *extra_root; /* Used by trapping BIF's */
-
union {
ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
void *terminate;
@@ -1695,6 +1738,13 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
extern erts_atomic_t erts_migration_paths;
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+int erts_get_sched_util(ErtsRunQueue *rq,
+ int initially_locked,
+ int short_interval);
+#endif
+
+
ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths_managed(void);
ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths(void);
ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq,
@@ -1746,22 +1796,36 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
return mp->prio[prio].runq;
}
-
- if (prio == ERTS_PORT_PRIO_LEVEL)
- len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
+ if (mp->sched_util) {
+ ErtsRunQueue *rq = mp->prio[prio].runq;
+ /* No migration if other is non-empty */
+ if (!(ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY)
+ && erts_get_sched_util(rq, 0, 1) < mp->prio[prio].limit.other
+ && erts_get_sched_util(c_rq, 0, 1) > mp->prio[prio].limit.this) {
+ return rq;
+ }
+ }
else
- len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
-
- if (len > mp->prio[prio].limit.this) {
- ErtsRunQueue *n_rq = mp->prio[prio].runq;
- if (n_rq) {
- if (prio == ERTS_PORT_PRIO_LEVEL)
- len = RUNQ_READ_LEN(&n_rq->ports.info.len);
- else
- len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len);
-
- if (len < mp->prio[prio].limit.other)
- return n_rq;
+#endif
+ {
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
+
+ if (len > mp->prio[prio].limit.this) {
+ ErtsRunQueue *n_rq = mp->prio[prio].runq;
+ if (n_rq) {
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&n_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len);
+
+ if (len < mp->prio[prio].limit.other)
+ return n_rq;
+ }
}
}
}
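
With mp->sched_util set, erts_check_emigration_need() above switches from comparing queue lengths to comparing utilizations: emigrate only to a target queue that is currently empty, whose utilization is under the 'other' limit while the current queue's is over the 'this' limit (all in ppm, as filled in by check_balance()). Condensed to its decision core (a sketch with invented names):

    /* Both limits come from check_balance(); utilizations are ppm values
     * from erts_get_sched_util(rq, 0, 1) over the short (1 s) window. */
    struct q { int util_ppm; int nonempty; };

    static int should_emigrate(const struct q *self, const struct q *target,
                               int this_limit, int other_limit)
    {
        return !target->nonempty
            && target->util_ppm < other_limit
            && self->util_ppm  > this_limit;
    }
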
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 953edf79ea..50d3e63c58 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1126,6 +1126,7 @@ extern unsigned tag_val_def(Wterm);
#define make_tuple_rel make_boxed_rel
#define make_external_rel make_boxed_rel
#define make_internal_ref_rel make_boxed_rel
+#define make_big_rel make_boxed_rel
#define binary_val_rel(RTERM, BASE) binary_val(rterm2wterm(RTERM, BASE))
#define list_val_rel(RTERM, BASE) list_val(rterm2wterm(RTERM, BASE))
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 337422eead..b7de8208ad 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,7 +46,6 @@
heap data on the C stack or if we use the buffers in the scheduler data. */
#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
small heap for transient heap data */
-#define CMP_TMP_HEAP_SIZE 32 /* cmp wants its own tmp-heap... */
#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 2cb44a5b64..5e7a5cab6e 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -87,7 +87,8 @@
static Export term_to_binary_trap_export;
static byte* enc_term(ErtsAtomCacheMap *, Eterm, byte*, Uint32, struct erl_off_heap_header** off_heap);
-static int enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
+struct TTBEncodeContext_;
+static int enc_term_int(struct TTBEncodeContext_*,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
struct erl_off_heap_header** off_heap, Sint *reds, byte **res);
static Uint is_external_string(Eterm obj, int* p_is_string);
static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
@@ -103,7 +104,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
Binary *context_b);
static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
-static int encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj,
+struct TTBSizeContext_;
+static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acmp, Eterm obj,
unsigned dflags, Sint *reds, Uint *res);
static Export binary_to_term_trap_export;
@@ -1086,7 +1088,6 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
int level = 0;
Uint flags = TERM_TO_BINARY_DFLAGS;
Eterm res;
- Binary *bin = NULL;
while (is_list(Flags)) {
Eterm arg = CAR(list_val(Flags));
@@ -1123,7 +1124,7 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
goto error;
}
- res = erts_term_to_binary_int(p, Term, level, flags, bin);
+ res = erts_term_to_binary_int(p, Term, level, flags, NULL);
if (is_tuple(res)) {
erts_set_gc_state(p, 0);
BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
@@ -1726,14 +1727,20 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState;
-typedef struct {
+typedef struct TTBSizeContext_ {
Uint flags;
int level;
+ Uint result;
+ Eterm obj;
+ ErtsEStack estack;
} TTBSizeContext;
-typedef struct {
+typedef struct TTBEncodeContext_ {
Uint flags;
int level;
+ byte* ep;
+ Eterm obj;
+ ErtsWStack wstack;
Binary *result_bin;
} TTBEncodeContext;
@@ -1763,8 +1770,10 @@ static void ttb_context_destructor(Binary *context_bin)
context->alive = 0;
switch (context->state) {
case TTBSize:
+ DESTROY_SAVED_ESTACK(&context->s.sc.estack);
break;
case TTBEncode:
+ DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
erts_bin_free(context->s.ec.result_bin);
@@ -1829,6 +1838,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
/* Setup enough to get started */
context->state = TTBSize;
context->alive = 1;
+ context->s.sc.estack.start = NULL;
context->s.sc.flags = flags;
context->s.sc.level = level;
} else {
@@ -1844,7 +1854,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
int level;
Uint flags;
/* Try for fast path */
- if (encode_size_struct_int(p, NULL, Term, context->s.sc.flags, &reds, &size) < 0) {
+ if (encode_size_struct_int(&context->s.sc, NULL, Term,
+ context->s.sc.flags, &reds, &size) < 0) {
EXPORT_CONTEXT();
/* Same state */
RETURN_STATE();
@@ -1870,6 +1881,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->state = TTBEncode;
context->s.ec.flags = flags;
context->s.ec.level = level;
+ context->s.ec.wstack.wstart = NULL;
context->s.ec.result_bin = result_bin;
break;
}
@@ -1881,7 +1893,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
Binary *result_bin;
flags = context->s.ec.flags;
- if (enc_term_int(p,NULL,Term, bytes+1, flags, NULL, &reds, &endp) < 0) {
+ if (enc_term_int(&context->s.ec, NULL,Term, bytes+1, flags, NULL, &reds, &endp) < 0) {
EXPORT_CONTEXT();
RETURN_STATE();
}
@@ -2289,27 +2301,6 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
#define ENC_PATCH_FUN_SIZE ((Eterm) 2)
#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3)
-/* Free extra rootset (used when trapping) */
-static void cleanup_ttb_extra_root(ErlExtraRootSet *rs)
-{
- if (rs->objv != NULL) {
- erts_free(ERTS_ALC_T_EXTRA_ROOT, rs->objv);
- }
- erts_free(ERTS_ALC_T_EXTRA_ROOT, rs);
-}
-
-/* Same as above, but we have an extra "stack" beyond GC reach, i.e. an array of two extra roots */
-static void cleanup_ttb_extra_root_2(ErlExtraRootSet *rs)
-{
- if (rs->objv != NULL) {
- erts_free(ERTS_ALC_T_EXTRA_ROOT, rs->objv);
- }
- if (rs[1].objv != NULL) {
- erts_free(ERTS_ALC_T_EXTRA_ROOT, rs[1].objv);
- }
-
- erts_free(ERTS_ALC_T_EXTRA_ROOT, rs);
-}
static byte*
enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
@@ -2321,39 +2312,43 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
}
static int
-enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
+enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
struct erl_off_heap_header** off_heap, Sint *reds, byte **res)
{
- DECLARE_ESTACK(s);
- DECLARE_WSTACK(com);
+ DECLARE_WSTACK(s);
Uint n;
Uint i;
Uint j;
Uint* ptr;
Eterm val;
FloatDef f;
- int count_reds = (p != NULL && reds != NULL);
Sint r = 0;
+#if HALFWORD_HEAP
+ UWord wobj;
+#endif
+
- if (count_reds) {
- ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_EXTRA_ROOT);
- WSTACK_CHANGE_ALLOCATOR(com, ERTS_ALC_T_EXTRA_ROOT);
+ if (ctx) {
+ WSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
r = *reds;
- }
- if (p && p->extra_root) { /* restore saved stacks and byte pointer */
- ESTACK_RESTORE(s,p->extra_root[0].objv, p->extra_root[0].sz);
- obj = ESTACK_POP(s);
- WSTACK_RESTORE(com, p->extra_root[1].objv, p->extra_root[1].sz);
- ep = (byte *) WSTACK_POP(com);
+ if (ctx->wstack.wstart) { /* restore saved stacks and byte pointer */
+ WSTACK_RESTORE(s, &ctx->wstack);
+ ep = ctx->ep;
+ obj = ctx->obj;
+ }
}
goto L_jump_start;
outer_loop:
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- switch (val = WSTACK_POP(com)) {
+ while (!WSTACK_ISEMPTY(s)) {
+#if HALFWORD_HEAP
+ obj = (Eterm) (wobj = WSTACK_POP(s));
+#else
+ obj = WSTACK_POP(s);
+#endif
+ switch (val = WSTACK_POP(s)) {
case ENC_TERM:
break;
case ENC_ONE_CONS:
@@ -2364,55 +2359,52 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla
obj = CAR(cons);
tl = CDR(cons);
- WSTACK_PUSH(com, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
- ESTACK_PUSH(s, tl);
+ WSTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
+ WSTACK_PUSH(s, tl);
}
break;
case ENC_PATCH_FUN_SIZE:
- /* obj will be discarded, it was NIL */
{
- byte* size_p = (byte *) WSTACK_POP(com);
+#if HALFWORD_HEAP
+ byte* size_p = (byte *) wobj;
+#else
+ byte* size_p = (byte *) obj;
+#endif
put_int32(ep - size_p, size_p);
}
goto outer_loop;
case ENC_LAST_ARRAY_ELEMENT:
/* obj is the tuple */
{
- Eterm* ptr = tuple_val(obj);
- i = arityval(*ptr);
- obj = ptr[i];
+#if HALFWORD_HEAP
+ Eterm* ptr = (Eterm *) wobj;
+#else
+ Eterm* ptr = (Eterm *) obj;
+#endif
+ obj = *ptr;
}
break;
default: /* ENC_LAST_ARRAY_ELEMENT+1 and upwards */
{
- Eterm* ptr = tuple_val(obj);
- i = arityval(*ptr);
- ESTACK_PUSH(s, obj); /* put back tuple and next element index */
- WSTACK_PUSH(com, val-1);
- obj = ptr[i - (val - ENC_LAST_ARRAY_ELEMENT)]; /* the index is counting down */
+#if HALFWORD_HEAP
+ Eterm* ptr = (Eterm *) wobj;
+#else
+ Eterm* ptr = (Eterm *) obj;
+#endif
+ WSTACK_PUSH(s, val-1);
+ obj = *ptr++;
+ WSTACK_PUSH(s, (UWord)ptr);
}
break;
}
L_jump_start:
- if (count_reds && --r == 0) {
+ if (ctx && --r == 0) {
*reds = r;
- ESTACK_PUSH(s,obj); /* push back current object, to be popped on restore */
- WSTACK_PUSH(com,((UWord) ep));
- if (p->extra_root == NULL) {
- /* NB. Allocate an array of two "extra-roots", of which only the first element
- is seen and handled by the GC. Index 1 holds the Wstack. */
- p->extra_root = erts_alloc(ERTS_ALC_T_EXTRA_ROOT, sizeof(ErlExtraRootSet)*2);
- p->extra_root->objv = NULL;
- p->extra_root->sz = 0;
- p->extra_root->cleanup = cleanup_ttb_extra_root_2;
- p->extra_root[1].objv = NULL;
- p->extra_root[1].sz = 0;
- p->extra_root[1].cleanup = NULL; /* Never used */
- }
- ESTACK_SAVE(s, p->extra_root[0].objv, p->extra_root[0].sz);
- WSTACK_SAVE(com, p->extra_root[1].objv, (p->extra_root[1].sz));
+ ctx->obj = obj;
+ ctx->ep = ep;
+ WSTACK_SAVE(s, &ctx->wstack);
return -1;
}
switch(tag_val_def(obj)) {
@@ -2558,8 +2550,8 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla
ep += 4;
}
if (i > 0) {
- WSTACK_PUSH(com, ENC_LAST_ARRAY_ELEMENT+i-1);
- ESTACK_PUSH(s, obj);
+ WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
+ WSTACK_PUSH(s, (UWord)ptr);
}
break;
@@ -2703,9 +2695,8 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla
int ei;
*ep++ = NEW_FUN_EXT;
- WSTACK_PUSH(com, (UWord) ep); /* Position for patching in size */
- WSTACK_PUSH(com, ENC_PATCH_FUN_SIZE);
- ESTACK_PUSH(s,NIL); /* Will be thrown away */
+ WSTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
+ WSTACK_PUSH(s, (UWord) ep); /* Position for patching in size */
ep += 4;
*ep = funp->arity;
ep += 1;
@@ -2722,8 +2713,8 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla
fun_env:
for (ei = funp->num_free-1; ei > 0; ei--) {
- WSTACK_PUSH(com, ENC_TERM);
- ESTACK_PUSH(s, (UWord) funp->env[ei]);
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) funp->env[ei]);
}
if (funp->num_free != 0) {
obj = funp->env[0];
@@ -2766,13 +2757,9 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla
break;
}
}
- DESTROY_ESTACK(s);
- DESTROY_WSTACK(com);
- if (p && p->extra_root) {
- cleanup_ttb_extra_root_2(p->extra_root);
- p->extra_root = NULL;
- }
- if (count_reds) {
+ DESTROY_WSTACK(s);
+ if (ctx) {
+ ASSERT(ctx->wstack.wstart == NULL);
*reds = r;
}
*res = ep;
@@ -3742,26 +3729,24 @@ static Uint encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dfla
}
static int
-encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj,
+encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
unsigned dflags, Sint *reds, Uint *res)
{
DECLARE_ESTACK(s);
Uint m, i, arity;
Uint result = 0;
- int count_reds = (p != NULL && reds != 0);
Sint r = 0;
- if (count_reds) {
- ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_EXTRA_ROOT);
+ if (ctx) {
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
r = *reds;
- }
-
- if (p && p->extra_root) { /* restore saved stack */
- ESTACK_RESTORE(s,p->extra_root->objv, p->extra_root->sz + 1);
- result = ESTACK_POP(s); /*Untagged, beyond p->extra_root->sz */
- obj = ESTACK_POP(s);
- }
+ if (ctx->estack.start) { /* restore saved stack */
+ ESTACK_RESTORE(s, &ctx->estack);
+ result = ctx->result;
+ obj = ctx->obj;
+ }
+ }
goto L_jump_start;
@@ -3787,18 +3772,11 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj,
}
L_jump_start:
- if (count_reds && --r == 0) {
+ if (ctx && --r == 0) {
*reds = r;
- ESTACK_PUSH(s,obj); /* push back current object */
- ESTACK_PUSH(s,result); /* Untagged, will be out of GC reach */
- if (p->extra_root == NULL) {
- p->extra_root = erts_alloc(ERTS_ALC_T_EXTRA_ROOT, sizeof(ErlExtraRootSet));
- p->extra_root->objv = NULL;
- p->extra_root->sz = 0;
- p->extra_root->cleanup = cleanup_ttb_extra_root;
- }
- ESTACK_SAVE(s, p->extra_root->objv, p->extra_root->sz);
- --p->extra_root->sz; /* Hide result from GC */
+ ctx->obj = obj;
+ ctx->result = result;
+ ESTACK_SAVE(s, &ctx->estack);
return -1;
}
switch (tag_val_def(obj)) {
@@ -4001,11 +3979,8 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj,
}
DESTROY_ESTACK(s);
- if (p && p->extra_root) {
- cleanup_ttb_extra_root(p->extra_root);
- p->extra_root = NULL;
- }
- if (count_reds) {
+ if (ctx) {
+ ASSERT(ctx->estack.start == NULL);
*reds = r;
}
*res = result;
@@ -4074,7 +4049,9 @@ init_done:
switch (tag) {
case INTEGER_EXT:
SKIP(4);
+#if !defined(ARCH_64) || HALFWORD_HEAP
heap_size += BIG_UINT_HEAP_SIZE;
+#endif
break;
case SMALL_INTEGER_EXT:
SKIP(1);
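
The net effect in external.c is that term_to_binary's trapping state moves out of the process structure and into the TTB contexts: the GC is switched off across the trap (erts_set_gc_state(p, 0) before BIF_TRAP1 above), so the saved stack no longer needs to be part of the rootset and the extra_root machinery can go. The yield/resume shape, reduced to a self-contained sketch (hypothetical names; a fixed-size stack stands in for the growable WSTACK):

    #include <stddef.h>

    typedef struct node { int val; struct node *kids[2]; } node;
    typedef struct { node *stk[64]; int sp; int sum; int live; } ctx;

    /* Yield/resume shape: process at most 'budget' nodes; on exhaustion
     * park the explicit stack in ctx and return -1, resume by calling
     * again with the same ctx.  Returns 0 when done. */
    static int sum_tree(ctx *c, node *root, int budget)
    {
        if (!c->live) {                     /* fresh start */
            c->sp = 0;
            c->sum = 0;
            if (root) c->stk[c->sp++] = root;
            c->live = 1;
        }
        while (c->sp > 0) {
            node *n;
            if (budget-- == 0)
                return -1;                  /* yield: state lives in ctx */
            n = c->stk[--c->sp];
            c->sum += n->val;
            if (n->kids[0]) c->stk[c->sp++] = n->kids[0];
            if (n->kids[1]) c->stk[c->sp++] = n->kids[1];
        }
        c->live = 0;
        return 0;                           /* c->sum is complete */
    }
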
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 6e5d352e5b..83a8911a36 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -370,231 +370,233 @@ extern int stackdump_on_exit;
* DESTROY_ESTACK(Stack)
*/
+typedef struct {
+ Eterm* start;
+ Eterm* sp;
+ Eterm* end;
+ ErtsAlcType_t alloc_type;
+} ErtsEStack;
-void erl_grow_stack(ErtsAlcType_t a_type, Eterm** start, Eterm** sp, Eterm** end);
-#define ESTK_CONCAT(a,b) a##b
-#define ESTK_SUBSCRIPT(s,i) *((Eterm *)((byte *)ESTK_CONCAT(s,_start) + (i)))
#define DEF_ESTACK_SIZE (16)
-#define DECLARE_ESTACK(s) \
- Eterm ESTK_CONCAT(s,_default_stack)[DEF_ESTACK_SIZE]; \
- Eterm* ESTK_CONCAT(s,_start) = ESTK_CONCAT(s,_default_stack); \
- Eterm* ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start); \
- Eterm* ESTK_CONCAT(s,_end) = ESTK_CONCAT(s,_start) + DEF_ESTACK_SIZE;\
- ErtsAlcType_t ESTK_CONCAT(s,_alloc_type) = ERTS_ALC_T_ESTACK
+void erl_grow_estack(ErtsEStack*, Eterm* def_stack);
+#define ESTK_CONCAT(a,b) a##b
+#define ESTK_DEF_STACK(s) ESTK_CONCAT(s,_default_estack)
+
+#define DECLARE_ESTACK(s) \
+ Eterm ESTK_DEF_STACK(s)[DEF_ESTACK_SIZE]; \
+ ErtsEStack s = { \
+ ESTK_DEF_STACK(s), /* start */ \
+ ESTK_DEF_STACK(s), /* sp */ \
+ ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
#define ESTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \
+ if (s.start != ESTK_DEF_STACK(s)) { \
erl_exit(1, "Internal error - trying to change allocator " \
"type of active estack\n"); \
} \
- ESTK_CONCAT(s,_alloc_type) = (t); \
+ s.alloc_type = (t); \
} while (0)
+#define DESTROY_ESTACK(s) \
+do { \
+ if (s.start != ESTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.start); \
+ } \
+} while(0)
+
+
/*
- * Do not free the stack after this, it may have pointers into what
- * was saved in 'v'. 'v' and 'vsize' are changed by this macro. If
- * 'v' points to anything, it should have been allocated by a previous
- * call to this macro. Be careful to set a correct allocator prior to
- * saving.
- * 'v' can be any lvalue pointer, it will point to an array of UWord
- * after calling this macro.
+ * Do not free the stack after this, it may have pointers into what
+ * was saved in 'dst'.
*/
-#define ESTACK_SAVE(s,v,vsize) /* v and vsize are "name parameters" */ \
-do { \
- Uint _esz = ESTACK_COUNT(s); \
- if (ESTK_CONCAT(s,_start) == ESTK_CONCAT(s,_default_stack)) { \
- if ((v) == NULL) { \
- (v) = erts_alloc(ESTK_CONCAT(s,_alloc_type), \
- DEF_ESTACK_SIZE * sizeof(Eterm)); \
- } \
- memcpy((v),ESTK_CONCAT(s,_start),_esz*sizeof(Eterm)); \
- } else { \
- (v) = (void *) ESTK_CONCAT(s,_start); \
- } \
- (vsize) = _esz; \
+#define ESTACK_SAVE(s,dst)\
+do {\
+ if (s.start == ESTK_DEF_STACK(s)) {\
+ UWord _wsz = ESTACK_COUNT(s);\
+ (dst)->start = erts_alloc(s.alloc_type,\
+ DEF_ESTACK_SIZE * sizeof(Eterm));\
+ memcpy((dst)->start, s.start,_wsz*sizeof(Eterm));\
+ (dst)->sp = (dst)->start + _wsz;\
+ (dst)->end = (dst)->start + DEF_ESTACK_SIZE;\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
} while (0)
-/*
- * Use on empty stack, only the allocator can be changed before this
- * The vector parameter is reset to NULL if the vector is moved to stack,
- * otherwise it's kept for reuse, so a saved and restored vector might
- * need freeing using the correct allocator parameter.
- * 'v' can be any lvalue pointer, it's cast to an (Eterm *).
+#define DESTROY_SAVED_ESTACK(estack)\
+do {\
+ if ((estack)->start) {\
+ erts_free((estack)->alloc_type, (estack)->start);\
+ (estack)->start = NULL;\
+ }\
+} while(0)
+
+/*
+ * Use on empty stack, only the allocator can be changed before this.
+ * The src stack is reset to NULL.
*/
-#define ESTACK_RESTORE(s, v, vsize) /*v is a "name parameter"*/ \
-do { \
- if ((vsize) > DEF_ESTACK_SIZE) { \
- Uint _ca = DEF_ESTACK_SIZE; \
- while (_ca < (vsize)) \
- _ca = _ca * 2; \
- ESTK_CONCAT(s,_start) = (Eterm *) (v); \
- ESTK_CONCAT(s,_end) = ((Eterm *)(v)) + _ca; \
- ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start) + (vsize); \
- (v) = NULL; \
- } else { \
- memcpy(ESTK_CONCAT(s,_start),(v),(vsize)*sizeof(Eterm));\
- ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start) + (vsize); \
- } \
- } while (0)
+#define ESTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.start == ESTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->start = NULL; \
+ ASSERT(s.sp >= s.start); \
+ ASSERT(s.sp <= s.end); \
+} while (0)
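Taken together, ESTACK_SAVE/ESTACK_RESTORE/DESTROY_SAVED_ESTACK now move the whole ErtsEStack struct across a yield instead of the old (vector, size) pair. A hedged sketch of the intended pattern; MyTrapState is a hypothetical structure kept alive between scheduler slices:

    typedef struct {
        ErtsEStack stack;   /* set stack.start = NULL before first use */
    } MyTrapState;

    /* When yielding: 's' must not be destroyed after the save. */
    ESTACK_SAVE(s, &state->stack);

    /* When resuming: restore into a freshly declared (empty) stack. */
    DECLARE_ESTACK(s);
    ESTACK_RESTORE(s, &state->stack);  /* state->stack.start is reset to NULL */

    /* When aborting: a no-op if nothing was saved (start == NULL). */
    DESTROY_SAVED_ESTACK(&state->stack);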
-#define ESTACK_IS_STATIC(s) (ESTK_CONCAT(s,_start) == ESTK_CONCAT(s,_default_stack))
+#define ESTACK_IS_STATIC(s) (s.start == ESTK_DEF_STACK(s))
-#define DESTROY_ESTACK(s) \
-do { \
- if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \
- erts_free(ESTK_CONCAT(s,_alloc_type), ESTK_CONCAT(s,_start)); \
- } \
+#define ESTACK_PUSH(s, x) \
+do { \
+ if (s.sp == s.end) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
} while(0)
-#define ESTACK_PUSH(s, x) \
-do { \
- if (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_end)) { \
- erl_grow_stack(ESTK_CONCAT(s,_alloc_type),&ESTK_CONCAT(s,_start), \
- &ESTK_CONCAT(s,_sp), &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
+#define ESTACK_PUSH2(s, x, y) \
+do { \
+ if (s.sp > s.end - 2) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
+ *s.sp++ = (y); \
} while(0)
-#define ESTACK_PUSH2(s, x, y) \
-do { \
- if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 2) { \
- erl_grow_stack(ESTK_CONCAT(s,_alloc_type),&ESTK_CONCAT(s,_start), \
- &ESTK_CONCAT(s,_sp), &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
- *ESTK_CONCAT(s,_sp)++ = (y); \
+#define ESTACK_PUSH3(s, x, y, z) \
+do { \
+ if (s.sp > s.end - 3) { \
+ erl_grow_estack(&s, ESTK_DEF_STACK(s)); \
+ } \
+ *s.sp++ = (x); \
+ *s.sp++ = (y); \
+ *s.sp++ = (z); \
} while(0)
-#define ESTACK_PUSH3(s, x, y, z) \
-do { \
- if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 3) { \
- erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
- &ESTK_CONCAT(s,_end)); \
- } \
- *ESTK_CONCAT(s,_sp)++ = (x); \
- *ESTK_CONCAT(s,_sp)++ = (y); \
- *ESTK_CONCAT(s,_sp)++ = (z); \
-} while(0)
+#define ESTACK_COUNT(s) (s.sp - s.start)
+#define ESTACK_ISEMPTY(s) (s.sp == s.start)
+#define ESTACK_POP(s) (*(--s.sp))
-#define ESTACK_COUNT(s) (ESTK_CONCAT(s,_sp) - ESTK_CONCAT(s,_start))
-#define ESTACK_ISEMPTY(s) (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_start))
-#define ESTACK_POP(s) (*(--ESTK_CONCAT(s,_sp)))
+/*
+ * WSTACK: same as ESTACK, but with UWord elements instead of Eterm.
+ */
+typedef struct {
+ UWord* wstart;
+ UWord* wsp;
+ UWord* wend;
+ ErtsAlcType_t alloc_type;
+} ErtsWStack;
-void erl_grow_wstack(ErtsAlcType_t a_type, UWord** start, UWord** sp, UWord** end);
-#define WSTK_CONCAT(a,b) a##b
-#define WSTK_SUBSCRIPT(s,i) *((UWord *)((byte *)WSTK_CONCAT(s,_start) + (i)))
#define DEF_WSTACK_SIZE (16)
-#define DECLARE_WSTACK(s) \
- UWord WSTK_CONCAT(s,_default_stack)[DEF_WSTACK_SIZE]; \
- UWord* WSTK_CONCAT(s,_start) = WSTK_CONCAT(s,_default_stack); \
- UWord* WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start); \
- UWord* WSTK_CONCAT(s,_end) = WSTK_CONCAT(s,_start) + DEF_WSTACK_SIZE; \
- ErtsAlcType_t WSTK_CONCAT(s,_alloc_type) = ERTS_ALC_T_ESTACK
+void erl_grow_wstack(ErtsWStack*, UWord* def_stack);
+#define WSTK_CONCAT(a,b) a##b
+#define WSTK_DEF_STACK(s) WSTK_CONCAT(s,_default_wstack)
+
+#define DECLARE_WSTACK(s) \
+ UWord WSTK_DEF_STACK(s)[DEF_WSTACK_SIZE]; \
+ ErtsWStack s = { \
+ WSTK_DEF_STACK(s), /* wstart */ \
+ WSTK_DEF_STACK(s), /* wsp */ \
+ WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \
+ ERTS_ALC_T_ESTACK /* alloc_type */ \
+ }
#define WSTACK_CHANGE_ALLOCATOR(s,t) \
do { \
- if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \
+ if (s.wstart != WSTK_DEF_STACK(s)) { \
erl_exit(1, "Internal error - trying to change allocator " \
"type of active wstack\n"); \
} \
- WSTK_CONCAT(s,_alloc_type) = (t); \
+ s.alloc_type = (t); \
} while (0)
-#define DESTROY_WSTACK(s) \
-do { \
- if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \
- erts_free(WSTK_CONCAT(s,_alloc_type), WSTK_CONCAT(s,_start)); \
- } \
+#define DESTROY_WSTACK(s) \
+do { \
+ if (s.wstart != WSTK_DEF_STACK(s)) { \
+ erts_free(s.alloc_type, s.wstart); \
+ } \
} while(0)
+
/*
- * Do not free the stack after this, it may have pointers into what
- * was saved in 'v'. 'v' and 'vsize' are changed by this macro. If
- * 'v' points to anything, it should have been allocated by a previous
- * call to this macro. Be careful to set a correct allocator prior to
- * saving.
- * 'v' can be any lvalue pointer, it will point to an array of UWord
- * after calling this macro.
+ * Do not free the stack after this; it may have pointers into what
+ * was saved in 'dst'.
*/
-#define WSTACK_SAVE(s,v,vsize) /* v and vsize are "name parameters" */ \
-do { \
- Uint _wsz = WSTACK_COUNT(s); \
- if (WSTK_CONCAT(s,_start) == WSTK_CONCAT(s,_default_stack)) { \
- if ((v) == NULL) { \
- (v) = erts_alloc(WSTK_CONCAT(s,_alloc_type), \
- DEF_WSTACK_SIZE * sizeof(UWord)); \
- } \
- memcpy((v),WSTK_CONCAT(s,_start),_wsz*sizeof(UWord)); \
- } else { \
- (v) = (void *) WSTK_CONCAT(s,_start); \
- } \
- (vsize) = _wsz; \
+#define WSTACK_SAVE(s,dst)\
+do {\
+ if (s.wstart == WSTK_DEF_STACK(s)) {\
+ UWord _wsz = WSTACK_COUNT(s);\
+ (dst)->wstart = erts_alloc(s.alloc_type,\
+ DEF_WSTACK_SIZE * sizeof(UWord));\
+ memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
+ (dst)->wsp = (dst)->wstart + _wsz;\
+ (dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
} while (0)
-/*
- * Use on empty stack, only the allocator can be changed before this
- * The vector parameter is reset to NULL if the vector is moved to stack,
- * otherwise it's kept for reuse, so a saved and restored vector might
- * need freeing using the correct allocator parameter.
- * 'v' can be any lvalue pointer, it's cast to an (UWord *).
+#define DESTROY_SAVED_WSTACK(wstack)\
+do {\
+ if ((wstack)->wstart) {\
+ erts_free((wstack)->alloc_type, (wstack)->wstart);\
+ (wstack)->wstart = NULL;\
+ }\
+} while(0)
+
+/*
+ * Use only on an empty stack; only the allocator may be changed before
+ * this. The 'src' stack is reset to NULL.
*/
-#define WSTACK_RESTORE(s, v, vsize) /*v is a "name parameter"*/ \
-do { \
- if ((vsize) > DEF_WSTACK_SIZE) { \
- Uint _ca = DEF_WSTACK_SIZE; \
- while (_ca < (vsize)) \
- _ca = _ca * 2; \
- WSTK_CONCAT(s,_start) = (UWord *) (v); \
- WSTK_CONCAT(s,_end) = ((UWord *)(v)) + _ca; \
- WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start) + (vsize); \
- (v) = NULL; \
- } else { \
- memcpy(WSTK_CONCAT(s,_start),(v),(vsize)*sizeof(UWord));\
- WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start) + (vsize); \
- } \
- } while (0)
+#define WSTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.wstart == WSTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->wstart = NULL; \
+ ASSERT(s.wsp >= s.wstart); \
+ ASSERT(s.wsp <= s.wend); \
+} while (0)
-#define WSTACK_IS_STATIC(s) (WSTK_CONCAT(s,_start) == WSTK_CONCAT(s,_default_stack))
+#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s))
-#define WSTACK_PUSH(s, x) \
-do { \
- if (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_end)) { \
- erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \
- &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
+#define WSTACK_PUSH(s, x) \
+do { \
+ if (s.wsp == s.wend) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
} while(0)
-#define WSTACK_PUSH2(s, x, y) \
-do { \
- if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 2) { \
- erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \
- &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
- *WSTK_CONCAT(s,_sp)++ = (y); \
+#define WSTACK_PUSH2(s, x, y) \
+do { \
+ if (s.wsp > s.wend - 2) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
+ *s.wsp++ = (y); \
} while(0)
-#define WSTACK_PUSH3(s, x, y, z) \
-do { \
- if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 3) { \
- erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \
- &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \
- } \
- *WSTK_CONCAT(s,_sp)++ = (x); \
- *WSTK_CONCAT(s,_sp)++ = (y); \
- *WSTK_CONCAT(s,_sp)++ = (z); \
+#define WSTACK_PUSH3(s, x, y, z) \
+do { \
+ if (s.wsp > s.wend - 3) { \
+ erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \
+ } \
+ *s.wsp++ = (x); \
+ *s.wsp++ = (y); \
+ *s.wsp++ = (z); \
} while(0)
-#define WSTACK_COUNT(s) (WSTK_CONCAT(s,_sp) - WSTK_CONCAT(s,_start))
+#define WSTACK_COUNT(s) (s.wsp - s.wstart)
+#define WSTACK_ISEMPTY(s) (s.wsp == s.wstart)
+#define WSTACK_POP(s) (*(--s.wsp))
-#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start))
-#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp)))
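WSTACK mirrors ESTACK one-for-one; only the element type (UWord) differs. A short sketch, assuming ERTS_ALC_T_TMP (an existing allocator type, chosen here purely for illustration) and showing that the allocator may only be changed while the stack still sits on its default buffer:

    DECLARE_WSTACK(offs);
    WSTACK_CHANGE_ALLOCATOR(offs, ERTS_ALC_T_TMP); /* legal: no growth yet */
    WSTACK_PUSH2(offs, (UWord) 0, (UWord) 8);
    while (!WSTACK_ISEMPTY(offs)) {
        UWord off = WSTACK_POP(offs);
        /* ... consume 'off' ... */
        (void) off;
    }
    DESTROY_WSTACK(offs);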
/* binary.c */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 297c4bf439..e0776cf67d 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -185,39 +185,41 @@ erts_set_hole_marker(Eterm* ptr, Uint sz)
* Helper function for the ESTACK macros defined in global.h.
*/
void
-erl_grow_stack(ErtsAlcType_t a_type, Eterm** start, Eterm** sp, Eterm** end)
+erl_grow_estack(ErtsEStack* s, Eterm* default_estack)
{
- Uint old_size = (*end - *start);
+ Uint old_size = (s->end - s->start);
Uint new_size = old_size * 2;
- Uint sp_offs = *sp - *start;
- if (new_size > 2 * DEF_ESTACK_SIZE) {
- *start = erts_realloc(a_type, (void *) *start, new_size*sizeof(Eterm));
+ Uint sp_offs = s->sp - s->start;
+ if (s->start != default_estack) {
+ s->start = erts_realloc(s->alloc_type, s->start,
+ new_size*sizeof(Eterm));
} else {
- Eterm* new_ptr = erts_alloc(a_type, new_size*sizeof(Eterm));
- sys_memcpy(new_ptr, *start, old_size*sizeof(Eterm));
- *start = new_ptr;
+ Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm));
+ sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm));
+ s->start = new_ptr;
}
- *end = *start + new_size;
- *sp = *start + sp_offs;
+ s->end = s->start + new_size;
+ s->sp = s->start + sp_offs;
}
/*
- * Helper function for the ESTACK macros defined in global.h.
+ * Helper function for the WSTACK macros defined in global.h.
*/
void
-erl_grow_wstack(ErtsAlcType_t a_type, UWord** start, UWord** sp, UWord** end)
+erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
{
- Uint old_size = (*end - *start);
+ Uint old_size = (s->wend - s->wstart);
Uint new_size = old_size * 2;
- Uint sp_offs = *sp - *start;
- if (new_size > 2 * DEF_ESTACK_SIZE) {
- *start = erts_realloc(a_type, (void *) *start, new_size*sizeof(UWord));
+ Uint sp_offs = s->wsp - s->wstart;
+ if (s->wstart != default_wstack) {
+ s->wstart = erts_realloc(s->alloc_type, s->wstart,
+ new_size*sizeof(UWord));
} else {
- UWord* new_ptr = erts_alloc(a_type, new_size*sizeof(UWord));
- sys_memcpy(new_ptr, *start, old_size*sizeof(UWord));
- *start = new_ptr;
+ UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord));
+ sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord));
+ s->wstart = new_ptr;
}
- *end = *start + new_size;
- *sp = *start + sp_offs;
+ s->wend = s->wstart + new_size;
+ s->wsp = s->wstart + sp_offs;
}
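Both grow helpers double the capacity on every call; the first growth copies the contents off the declarer's default buffer (which lives on the C stack and must never be freed), while later growths realloc the heap buffer. A small sketch of when growth triggers, using the DEF_WSTACK_SIZE (16) default defined above:

    DECLARE_WSTACK(s);
    UWord i;
    for (i = 0; i <= DEF_WSTACK_SIZE; i++)
        WSTACK_PUSH(s, i);           /* 17th push calls erl_grow_wstack() */
    ASSERT(!WSTACK_IS_STATIC(s));    /* stack now lives on the heap */
    DESTROY_WSTACK(s);               /* frees the 32-entry grown buffer */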
/* CTYPE macros */
@@ -2686,11 +2688,6 @@ tailrecur_ne:
{
FloatDef f1, f2;
Eterm big;
-#if HEAP_ON_C_STACK
- Eterm big_buf[CMP_TMP_HEAP_SIZE]; /* If HEAP_ON_C_STACK */
-#else
- Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
-#endif
#if HALFWORD_HEAP
Wterm aw = is_immed(a) ? a : rterm2wterm(a,a_base);
Wterm bw = is_immed(b) ? b : rterm2wterm(b,b_base);
@@ -2701,6 +2698,8 @@ tailrecur_ne:
#define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
#define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
#define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
+ Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
+
b_tag = tag_val_def(bw);
switch(_NUMBER_CODE(a_tag, b_tag)) {
@@ -2716,8 +2715,9 @@ tailrecur_ne:
/* Float is within the no loss limit */
f1.fd = signed_val(aw);
j = float_comp(f1.fd, f2.fd);
+ }
#if ERTS_SIZEOF_ETERM == 8
- } else if (f2.fd > (double) (MAX_SMALL + 1)) {
+ else if (f2.fd > (double) (MAX_SMALL + 1)) {
/* Float is a positive bignum, i.e. bigger */
j = -1;
} else if (f2.fd < (double) (MIN_SMALL - 1)) {
@@ -2728,7 +2728,7 @@ tailrecur_ne:
j = signed_val(aw) - (Sint) f2.fd;
}
#else
- } else {
+ else {
/* If float is positive it is bigger than small */
j = (f2.fd > 0.0) ? -1 : 1;
}
@@ -2762,8 +2762,8 @@ tailrecur_ne:
j = float_comp(f1.fd, f2.fd);
}
} else {
- big = double_to_big(f2.fd, big_buf);
- j = big_comp(aw, big);
+ big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
+ j = big_comp(aw, rterm2wterm(big,big_buf));
}
if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
j = -j;
@@ -2775,8 +2775,9 @@ tailrecur_ne:
/* Float is within the no loss limit */
f2.fd = signed_val(bw);
j = float_comp(f1.fd, f2.fd);
+ }
#if ERTS_SIZEOF_ETERM == 8
- } else if (f1.fd > (double) (MAX_SMALL + 1)) {
+ else if (f1.fd > (double) (MAX_SMALL + 1)) {
/* Float is a positive bignum, i.e. bigger */
j = 1;
} else if (f1.fd < (double) (MIN_SMALL - 1)) {
@@ -2787,7 +2788,7 @@ tailrecur_ne:
j = (Sint) f1.fd - signed_val(bw);
}
#else
- } else {
+ else {
/* If float is positive it is bigger than small */
j = (f1.fd > 0.0) ? 1 : -1;
}
@@ -2846,7 +2847,7 @@ pop_next:
return 0;
not_equal:
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return j;
#undef CMP_NODES
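The erts_cmp hunk above also reflects the new three-argument double_to_big(), which takes the buffer size and lets the caller keep the temporary bignum in a stack buffer sized for the largest possible float bignum, instead of the removed per-scheduler cmp_tmp_heap. A hedged sketch of the call shape (BIG_ARITY_FLOAT_MAX is a macro local to this function, shown standalone only for illustration):

    FloatDef f2;  /* assume f2.fd holds a finite double */
    Eterm buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
    Eterm big = double_to_big(f2.fd, buf, sizeof(buf)/sizeof(Eterm));
    /* 'big' is a tagged pointer into buf; on HALFWORD_HEAP builds it is
     * mapped through rterm2wterm(big, buf) before being passed to big_comp(). */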