Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/erl_db.c      |  4 ++--
-rw-r--r--  erts/emulator/beam/erl_db_hash.c |  2 +-
-rw-r--r--  erts/emulator/beam/erl_trace.c   | 20 +++++++++++++++-----
3 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index bad34211a5..128a7b3865 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1094,7 +1094,7 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
 
     CHECK_TABLES();
 
-    /* Write lock table if more than one object to keep atomicy */
+    /* Write lock table if more than one object to keep atomicity */
     kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
             ? LCK_WRITE : LCK_WRITE_REC);
 
@@ -1164,7 +1164,7 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
     Eterm lookup_ret;
     DbTableMethod* meth;
 
-    /* More than one object, use LCK_WRITE to keep atomicy */
+    /* More than one object, use LCK_WRITE to keep atomicity */
     kind = LCK_WRITE;
     tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
     if (tb == NULL) {
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 12ae086b31..5e6fe4f460 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -40,7 +40,7 @@
 ** DB_FINE_LOCKED set. The table variable is_thread_safe will then indicate
 ** if operations need to obtain fine grained locks or not. Some operations
 ** will for example always use exclusive table lock to guarantee
-** a higher level of atomicy.
+** a higher level of atomicity.
 */
 
 /* FIXATION:
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index d14d237ca6..c4396488f5 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -3108,6 +3108,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
     if (is_not_nil(*tracer)) {
         Uint offs = 2;
         UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+        ErtsThrPrgrLaterOp *lop;
         ASSERT(is_list(*tracer));
         if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
             hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
@@ -3115,6 +3116,16 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
             size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
             ASSERT(offs == size_object(*tracer));
         }
+
+        /* sparc assumes that all structs are double word aligned, so we
+           have to align the ErtsThrPrgrLaterOp struct otherwise it may
+           segfault.*/
+        if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord))
+            offs += 1;
+
+        lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs);
+        ASSERT((UWord)lop % (sizeof(UWord)*2) == 0);
+
         /* We schedule the free:ing of the tracer until after a thread progress
            has been made so that we know that no schedulers have any references
            to it. Because we do this, it is possible to release all locks of a
@@ -3122,9 +3133,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
            without having to worry if it is free'd.
         */
         erts_schedule_thr_prgr_later_cleanup_op(
-            free_tracer, (void*)(*tracer),
-            (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs),
-            size);
+            free_tracer, (void*)(*tracer), lop, size);
     }
 
     if (is_nil(new_tracer)) {
@@ -3141,9 +3150,10 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
         Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
             tracer_module = ERTS_TRACER_MODULE(new_tracer);
         Uint sz = size_object(tracer_state);
-        hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm));
+        hf = new_message_buffer(sz + 2 /* cons cell */ +
+                                (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1);
         hp = hf->mem + 2;
-        hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm);
+        hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1;
         *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
         *tracer = CONS(hf->mem, tracer_module, *tracer);
         ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
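
For reference, a minimal standalone sketch of the alignment arithmetic the patch
adds: one spare word is allocated, and the offset is bumped by one word whenever
the candidate address is exactly one word off a double-word boundary. Eterm,
UWord, LaterOp and the value of sz below are hypothetical stand-ins, not the
real ERTS types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uintptr_t Eterm;
typedef uintptr_t UWord;
typedef struct { void (*func)(void *); void *data; void *next; } LaterOp;

int main(void)
{
    /* Mirror the new_message_buffer() sizing: state words, cons cell, the
       LaterOp rounded up to whole Eterm words, plus 1 spare word that a
       possible alignment bump can consume. */
    size_t sz = 3;  /* hypothetical size_object(tracer_state) result */
    size_t lop_words = (sizeof(LaterOp) + sizeof(Eterm) - 1) / sizeof(Eterm);
    size_t total = sz + 2 /* cons cell */ + lop_words + 1;
    Eterm *mem = malloc(total * sizeof(Eterm));
    size_t offs = 2 + sz;  /* first free word after cons cell and state */
    if (!mem)
        return 1;

    /* The sparc fix: a struct must start on a double-word boundary, so
       skip one word when the candidate address is one word off. */
    if ((UWord)(mem + offs) % (sizeof(UWord) * 2) == sizeof(UWord))
        offs += 1;

    LaterOp *lop = (LaterOp *)(mem + offs);
    assert((UWord)lop % (sizeof(UWord) * 2) == 0);
    assert(offs + lop_words <= total);  /* the spare word keeps this true */
    printf("LaterOp at word %zu of %zu\n", offs, total);
    free(mem);
    return 0;
}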
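
The reason the ErtsThrPrgrLaterOp can live inside the tracer's own heap
fragment is that the thread-progress machinery only needs the descriptor until
the scheduled callback runs, and that callback is what frees the fragment. A
toy, single-threaded model of that life cycle follows; schedule_later,
thread_progress and the list layout are inventions for illustration, not the
ERTS API.

#include <stdio.h>
#include <stdlib.h>

/* Toy descriptor playing the role of ErtsThrPrgrLaterOp: it is embedded
   in the very block it will eventually free. */
typedef struct later_op {
    void (*func)(void *);
    void *data;
    struct later_op *next;
} later_op;

static later_op *pending = NULL;

/* Queue a cleanup to run "after thread progress"; the descriptor storage
   is supplied by the caller, as with erts_schedule_thr_prgr_later_cleanup_op. */
static void schedule_later(void (*func)(void *), void *data, later_op *lop)
{
    lop->func = func;
    lop->data = data;
    lop->next = pending;
    pending = lop;
}

/* Stand-in for "all schedulers have made thread progress": no one can
   still hold a reference, so the queued frees are safe to run. */
static void thread_progress(void)
{
    while (pending) {
        later_op *lop = pending;
        pending = lop->next;   /* unlink before the callback ...        */
        lop->func(lop->data);  /* ... which may free lop's own storage  */
    }
}

static void free_block(void *p)
{
    printf("freeing %p\n", p);
    free(p);
}

int main(void)
{
    /* Allocate a block with room for a trailing descriptor, as the tracer
       heap fragment does for its LaterOp. */
    char *block = malloc(64 + sizeof(later_op));
    if (!block)
        return 1;
    schedule_later(free_block, block, (later_op *)(block + 64));
    /* block is still usable here; only after progress is it reclaimed. */
    thread_progress();
    return 0;
}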