author     Rickard Green <[email protected]>    2018-03-13 18:09:40 +0100
committer  Rickard Green <[email protected]>    2018-03-13 18:09:40 +0100
commit     dfd7a2956a465dccd754243e3b1bf5c24931132e (patch)
tree       8522a4670e25581e4a45588facb32944990e6236 /erts
parent     f812c271fb755c105cd618737f3035acb796b0a2 (diff)
parent     4cf4044313ae5a1a349fcedd3d2472c3b6ed3fe7 (diff)
Merge branch 'rickard/pre-alloc-alignment/OTP-14977'
* rickard/pre-alloc-alignment/OTP-14977:
  Force 64-bit alignment for pre-allocators unless x86
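The scheduler-specific pre-allocators carve fixed-size blocks out of a contiguous chunk and thread a free list through them, so on strict-alignment architectures each block must start on a 64-bit boundary. The patch therefore rounds the block size up to the next multiple of 8 bytes, ((blk_sz - 1) / 8) * 8 + 8, unless configure has determined that ordinary structure alignment already suffices. A minimal standalone sketch of that arithmetic (align8 is a hypothetical helper name, not from the patch; blk_sz is assumed non-zero):

#include <stdio.h>

/* Round blk_sz up to the next multiple of 8; sizes that are already
 * multiples of 8 are returned unchanged. */
static size_t align8(size_t blk_sz)
{
    return ((blk_sz - 1) / 8) * 8 + 8;
}

int main(void)
{
    /* 8 -> 8, 9 -> 16, 20 -> 24 */
    printf("%zu %zu %zu\n", align8(8), align8(9), align8(20));
    return 0;
}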
Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in                              |  7 +++++++
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c  | 13 ++++++++++++-
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/erts/configure.in b/erts/configure.in
index b2435f1ac9..820247b4b8 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2721,6 +2721,13 @@ LIBS=$saved_libs
dnl restore CPPFLAGS
CPPFLAGS=$saved_cppflags
+case $ARCH in
+ x86|amd64)
+ AC_DEFINE(ERTS_STRUCTURE_ALIGNED_ALLOC, 1, [Define if structure alignment is enough for allocators. If not defined, 64-bit alignment will be forced.]);;
+ *)
+ ;;
+esac
+
LM_SYS_IPV6
LM_SYS_MULTICAST
ERL_TIME_CORRECTION
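The configure check above defines ERTS_STRUCTURE_ALIGNED_ALLOC only for x86 and amd64, where the hardware handles unaligned word accesses; on every other architecture the macro stays undefined and the C code falls back to forced 64-bit padding. A minimal sketch of how the generated macro gates the policy (the SSPA_ALIGN_BLK_SZ helper is a hypothetical name, not part of the patch):

#ifdef ERTS_STRUCTURE_ALIGNED_ALLOC
/* x86/amd64: the block size as given is safe to use directly. */
#define SSPA_ALIGN_BLK_SZ(SZ) (SZ)
#else
/* Other architectures: pad the block size to a 64-bit boundary. */
#define SSPA_ALIGN_BLK_SZ(SZ) ((((SZ) - 1) / 8) * 8 + 8)
#endif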
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index ab204303d7..4a6e02281a 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -47,6 +47,15 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
int cix;
int no_blocks = pa_size;
int no_blocks_per_chunk;
+ size_t aligned_blk_sz;
+
+#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
+ /* Force 64-bit alignment... */
+ aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
+#else
+ /* Alignment of structure is enough... */
+ aligned_blk_sz = blk_sz;
+#endif
if (!name) { /* schedulers only variant */
ASSERT(!nthreads);
@@ -68,7 +77,7 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
}
no_blocks = no_blocks_per_chunk * nthreads;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
- chunk_mem_size += blk_sz * no_blocks_per_chunk;
+ chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
tot_size += chunk_mem_size * nthreads;
@@ -115,7 +124,7 @@ erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
blk = (erts_sspa_blk_t *) p;
for (i = 0; i < no_blocks_per_chunk; i++) {
blk = (erts_sspa_blk_t *) p;
- p += blk_sz;
+ p += aligned_blk_sz;
blk->next_ptr = (erts_sspa_blk_t *) p;
}
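The last two hunks switch the chunk sizing and the free-list threading over to the aligned stride. Without that, a blk_sz that is not a multiple of 8 would leave every subsequent block, and the next_ptr field written through it, misaligned; on strict-alignment hardware such a store can trap with a bus error. A minimal sketch of the threading loop under those assumptions (hypothetical types and sizes, not the OTP structs):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Each block is reinterpreted as a free-list node whose first field
 * is a pointer, so the block stride decides its alignment. */
typedef struct blk { struct blk *next_ptr; } blk_t;

int main(void)
{
    size_t blk_sz = 20;                         /* not a multiple of 8 */
    size_t stride = ((blk_sz - 1) / 8) * 8 + 8; /* forced to 24 */
    size_t n = 4;
    char *p = malloc(stride * n);               /* malloc gives >= 8-byte alignment */
    char *q = p;

    if (!p)
        return 1;
    for (size_t i = 0; i < n; i++) {
        blk_t *blk = (blk_t *) q;
        q += stride;
        blk->next_ptr = (blk_t *) q;            /* thread the free list */
        /* With stride == blk_sz == 20, blocks 1 and 3 would start at
         * addresses not divisible by 8 and this store could trap on
         * strict-alignment hardware. */
        assert(((uintptr_t) blk & 7) == 0);
    }
    free(p);
    return 0;
}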