author    Rickard Green <[email protected]>  2018-03-08 11:29:14 +0100
committer Rickard Green <[email protected]>  2018-03-13 18:03:56 +0100
commit    4cf4044313ae5a1a349fcedd3d2472c3b6ed3fe7
tree      44aee11d9d29b7f467725b7a06126228df6a7fe4 /erts
parent    fbb10ebc4a37555c7ea7f99e14286d862993976a

Force 64-bit alignment for pre-allocators unless x86
Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in                              |  7
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c  | 13
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/erts/configure.in b/erts/configure.in
index b2435f1ac9..820247b4b8 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2721,6 +2721,13 @@ LIBS=$saved_libs
 dnl restore CPPFLAGS
 CPPFLAGS=$saved_cppflags
 
+case $ARCH in
+    x86|amd64)
+        AC_DEFINE(ERTS_STRUCTURE_ALIGNED_ALLOC, 1, [Define if structure alignment is enough for allocators. If not defined, 64-bit alignment will be forced.]);;
+    *)
+        ;;
+esac
+
 LM_SYS_IPV6
 LM_SYS_MULTICAST
 ERL_TIME_CORRECTION
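
On x86 and amd64 configure defines ERTS_STRUCTURE_ALIGNED_ALLOC, so the pre-allocator keeps the caller's block size; every other architecture leaves it undefined and falls through to the forced 64-bit alignment in the C change below. A minimal sketch of how such a configure-generated gate is consumed on the C side (pick_block_stride is a name introduced here for illustration, not part of the commit):

/* Sketch: consuming the configure-generated feature macro.
 * ERTS_STRUCTURE_ALIGNED_ALLOC would come from config.h when
 * configure matched x86|amd64; the helper name is illustrative. */
#include <stddef.h>
#include <stdio.h>

static size_t pick_block_stride(size_t blk_sz)
{
#if defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
    return blk_sz;                      /* structure alignment suffices */
#else
    return ((blk_sz - 1) / 8) * 8 + 8;  /* round up to a multiple of 8 */
#endif
}

int main(void)
{
    printf("stride for 20-byte blocks: %zu\n", pick_block_stride(20));
    return 0;
}
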
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index ab204303d7..4a6e02281a 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -47,6 +47,15 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
     int cix;
     int no_blocks = pa_size;
     int no_blocks_per_chunk;
+    size_t aligned_blk_sz;
+
+#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
+    /* Force 64-bit alignment... */
+    aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
+#else
+    /* Alignment of structure is enough... */
+    aligned_blk_sz = blk_sz;
+#endif
 
     if (!name) { /* schedulers only variant */
         ASSERT(!nthreads);
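
The forced-alignment expression rounds blk_sz up to the nearest multiple of 8 and leaves exact multiples unchanged. A small self-checking sketch of that arithmetic (round_up_8 is an illustrative name, not from the source):

/* Self-checking sketch of the round-up used in the hunk above. */
#include <assert.h>
#include <stddef.h>

static size_t round_up_8(size_t blk_sz)
{
    /* Smallest multiple of 8 that is >= blk_sz (blk_sz > 0). */
    return ((blk_sz - 1) / 8) * 8 + 8;
}

int main(void)
{
    assert(round_up_8(1)  == 8);
    assert(round_up_8(8)  == 8);   /* exact multiples are unchanged */
    assert(round_up_8(9)  == 16);
    assert(round_up_8(20) == 24);
    return 0;
}
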
@@ -68,7 +77,7 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
     }
     no_blocks = no_blocks_per_chunk * nthreads;
     chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
-    chunk_mem_size += blk_sz * no_blocks_per_chunk;
+    chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
     chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
     tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
     tot_size += chunk_mem_size * nthreads;
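
With the aligned stride, every block in the cache-line-aligned chunk starts on an 8-byte boundary. A worked example of the sizing arithmetic, assuming a 64-byte cache line and made-up sizes (CL_ALIGN only mimics what ERTS_ALC_CACHE_LINE_ALIGN_SIZE does; none of these numbers are from the commit):

/* Sketch of the chunk sizing with illustrative numbers. */
#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE 64
#define CL_ALIGN(sz) ((((sz) - 1) / CACHE_LINE + 1) * CACHE_LINE)

int main(void)
{
    size_t header_sz = 40;            /* stand-in for the chunk header */
    size_t aligned_blk_sz = 24;       /* e.g. blk_sz = 20 rounded up   */
    int    no_blocks_per_chunk = 100;

    size_t chunk_mem_size = CL_ALIGN(header_sz);            /* 64        */
    chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk; /* 64 + 2400 */
    chunk_mem_size = CL_ALIGN(chunk_mem_size);              /* 2496      */

    printf("chunk_mem_size = %zu\n", chunk_mem_size);       /* 2496 */
    return 0;
}
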
@@ -115,7 +124,7 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
     blk = (erts_sspa_blk_t *) p;
     for (i = 0; i < no_blocks_per_chunk; i++) {
         blk = (erts_sspa_blk_t *) p;
-        p += blk_sz;
+        p += aligned_blk_sz;
         blk->next_ptr = (erts_sspa_blk_t *) p;
     }
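
This loop reuses each block's first word as the next pointer of a singly linked free list, so with an unaligned stride the next_ptr store would land on a misaligned address, which can trap on strict-alignment architectures such as SPARC; that is what the new stride prevents. A standalone sketch of the same threading, with illustrative buffer and stride sizes:

/* Sketch of the free-list threading above: each block's first word
 * is reused as a next pointer, which is why the stride must keep
 * pointer alignment on non-x86 machines. Sizes are made up. */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

typedef struct blk { struct blk *next_ptr; } blk_t;

int main(void)
{
    enum { NBLKS = 4, STRIDE = 24 };   /* assumed 8-byte-aligned stride */
    static alignas(max_align_t) char buf[NBLKS * STRIDE];

    char *p = buf;
    blk_t *blk;
    for (int i = 0; i < NBLKS; i++) {
        blk = (blk_t *) p;
        p += STRIDE;
        blk->next_ptr = (blk_t *) p;   /* last block points past buf */
    }

    /* Walk all but the last link to show the chain. */
    int n = 0;
    for (blk_t *b = (blk_t *) buf;
         (char *) b < buf + (NBLKS - 1) * STRIDE;
         b = b->next_ptr)
        n++;
    printf("walked %d links\n", n);    /* prints: walked 3 */
    return 0;
}
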