author     Lukas Larsson <[email protected]>    2016-08-11 10:35:24 +0200
committer  Lukas Larsson <[email protected]>    2016-08-12 11:51:18 +0200
commit     ca4ad75e8f2058d965465d612ee1b5dcd364200e (patch)
tree       2d21c568e126d4fb570e0f9056cd8bf128d74bd4 /erts/emulator/beam/erl_trace.c
parent     1db9e32445dc368c6073e3b412567d81a2b5eeb2 (diff)
erts: Align ErtsThrPrgrLaterOp when free'ing tracer
On Sparc, all structs that contain a double-word element are assumed to be allocated on a two-word boundary, so we have to make sure that the ErtsThrPrgrLaterOp * points to 8/16-byte-aligned memory. As it only costs one word, I've done it on all architectures in case some other, unknown arch also needs this.
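A minimal, self-contained sketch of the same pattern, compiled outside the emulator. Eterm, DemoLaterOp and demo_place_later_op are illustrative stand-ins, not the real ERTS definitions; only the alignment test and the one word of slack mirror the lines added in the diff below.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t Eterm;   /* stand-in for the real Eterm */

typedef struct {
    double later_time;     /* a double-word member, as ErtsThrPrgrLaterOp has */
    void (*func)(void *);
    void *data;
} DemoLaterOp;             /* illustrative stand-in, not the real struct */

/* Place a DemoLaterOp `offs` words into a word-aligned Eterm buffer,
   bumping `offs` by one word when needed so the struct starts on a
   two-word boundary (8 bytes on 32-bit, 16 bytes on 64-bit). */
static DemoLaterOp *demo_place_later_op(Eterm *base, unsigned offs)
{
    if ((uintptr_t)(base + offs) % (2 * sizeof(uintptr_t)) == sizeof(uintptr_t))
        offs += 1;                             /* costs at most one word */

    assert((uintptr_t)(base + offs) % (2 * sizeof(uintptr_t)) == 0);
    return (DemoLaterOp *)(base + offs);
}

int main(void)
{
    /* One extra word of slack, mirroring the "+ 1" the diff adds to the
       new_message_buffer() size, so the alignment bump always fits. */
    static Eterm buf[2 /* cons cell */
                     + (sizeof(DemoLaterOp) + sizeof(Eterm) - 1) / sizeof(Eterm)
                     + 1 /* alignment slack */];

    DemoLaterOp *lop = demo_place_later_op(buf, 2);
    printf("later op placed at %p\n", (void *)lop);
    return 0;
}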
Diffstat (limited to 'erts/emulator/beam/erl_trace.c')
-rw-r--r--  erts/emulator/beam/erl_trace.c | 20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index d14d237ca6..c4396488f5 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -3108,6 +3108,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
if (is_not_nil(*tracer)) {
Uint offs = 2;
UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+ ErtsThrPrgrLaterOp *lop;
ASSERT(is_list(*tracer));
if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
@@ -3115,6 +3116,16 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
ASSERT(offs == size_object(*tracer));
}
+
+ /* sparc assumes that all structs are double word aligned, so we
+ have to align the ErtsThrPrgrLaterOp struct otherwise it may
+ segfault.*/
+ if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord))
+ offs += 1;
+
+ lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs);
+ ASSERT((UWord)lop % (sizeof(UWord)*2) == 0);
+
/* We schedule the free:ing of the tracer until after a thread progress
has been made so that we know that no schedulers have any references
to it. Because we do this, it is possible to release all locks of a
@@ -3122,9 +3133,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
without having to worry if it is free'd.
*/
erts_schedule_thr_prgr_later_cleanup_op(
- free_tracer, (void*)(*tracer),
- (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs),
- size);
+ free_tracer, (void*)(*tracer), lop, size);
}
if (is_nil(new_tracer)) {
@@ -3141,9 +3150,10 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
tracer_module = ERTS_TRACER_MODULE(new_tracer);
Uint sz = size_object(tracer_state);
- hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm));
+ hf = new_message_buffer(sz + 2 /* cons cell */ +
+ (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1);
hp = hf->mem + 2;
- hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm);
+ hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1;
*tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
*tracer = CONS(hf->mem, tracer_module, *tracer);
ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
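
For a concrete feel for the arithmetic (assuming a 64-bit system, where sizeof(Eterm) is 8 and a double word is 16 bytes): ptr_val(*tracer) + offs is always word aligned, so its address is either 0 or 8 modulo 16; in the latter case the check above bumps offs by one word, moving the ErtsThrPrgrLaterOp onto a 16-byte boundary. The extra Eterm added to the new_message_buffer() size, and subtracted again from used_size, is exactly what guarantees that this one-word bump still fits inside the heap fragment.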