author     Rickard Green <[email protected]>    2011-05-10 14:26:44 +0200
committer  Rickard Green <[email protected]>    2011-05-13 14:15:47 +0200
commit     498a1d56be241458c85231b5f9da43f4eac0b033 (patch)
tree       8dcf15456dae0fe0f3c8c9b818046d0d0eb2f17e /erts/emulator/beam/erl_db_hash.c
parent     5bcdb3ac4ea27ca47e18628aa147e7544043fa84 (diff)
Add needed barriers for write_concurrency tables
ETS tables using the write_concurrency option could get into an internally inconsistent state.
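The changes pair acquire barriers on the reader side (erts_smp_atomic_read_acqb in the SEGTAB macro and in hash_to_ix) with release barriers on the writer side (erts_smp_atomic_set_relb in alloc_seg, free_seg, grow and shrink): a thread that observes a newly published segment table pointer or size mask is then guaranteed to also observe the writes that preceded its publication. Below is a minimal sketch of that publish/consume pattern, expressed with C11 atomics rather than the erts_smp_atomic_* wrappers; the struct and function names are illustrative, not taken from the OTP sources.

/* Sketch of the release/acquire publication pattern this commit
 * enforces, written with C11 atomics. All names are illustrative;
 * the real code uses the erts_smp_atomic_* wrappers. */
#include <stdatomic.h>

struct segment { void *buckets[256]; };

struct table {
    _Atomic(struct segment **) segtab;   /* published segment table */
};

/* Writer (cf. alloc_seg/free_seg): fully initialize the new segment
 * table first, then publish the pointer with a release store so the
 * initialization cannot be reordered past the publication
 * (cf. erts_smp_atomic_set_relb). */
void publish_segtab(struct table *tb, struct segment **new_tab)
{
    atomic_store_explicit(&tb->segtab, new_tab, memory_order_release);
}

/* Reader (cf. the SEGTAB macro): load the pointer with an acquire
 * load so every subsequent dereference sees the writer's
 * initialization (cf. erts_smp_atomic_read_acqb). */
struct segment **read_segtab(struct table *tb)
{
    return atomic_load_explicit(&tb->segtab, memory_order_acquire);
}

Without the barrier pair, a reader on a weakly ordered machine could dereference the newly published pointer and still see stale segment contents.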
Diffstat (limited to 'erts/emulator/beam/erl_db_hash.c')
-rw-r--r--  erts/emulator/beam/erl_db_hash.c  |  23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 9ef990cc4f..9092fa8785 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -105,7 +105,7 @@
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_acqb(&(tb)->segtab))
#define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive))
#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
@@ -122,8 +122,8 @@
*/
static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
- Uint mask = erts_smp_atomic_read(&tb->szm);
- Uint ix = hval & mask;
+ Uint mask = erts_smp_atomic_read_acqb(&tb->szm);
+ Uint ix = hval & mask;
if (ix >= erts_smp_atomic_read(&tb->nactive)) {
ix &= mask>>1;
ASSERT(ix < erts_smp_atomic_read(&tb->nactive));
@@ -668,6 +668,7 @@ int db_create_hash(Process *p, DbTable *tbl)
else { /* coarse locking */
tb->locks = NULL;
}
+ ERTS_THR_MEMORY_BARRIER;
#endif /* ERST_SMP */
return DB_ERROR_NONE;
}
@@ -2342,7 +2343,7 @@ static int alloc_seg(DbTableHash *tb)
struct ext_segment* eseg;
eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
+ erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t) eseg->segtab);
tb->nsegs = eseg->nsegs;
}
ASSERT(seg_ix < tb->nsegs);
@@ -2414,7 +2415,7 @@ static int free_seg(DbTableHash *tb, int free_records)
MY_ASSERT(newtop->s.is_ext_segment);
if (newtop->prev_segtab != NULL) {
/* Time to use a smaller segtab */
- erts_smp_atomic_set(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
+ erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
tb->nsegs = seg_ix;
ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
}
@@ -2431,7 +2432,7 @@ static int free_seg(DbTableHash *tb, int free_records)
if (seg_ix > 0) {
if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
} else {
- erts_smp_atomic_set(&tb->segtab, (erts_aint_t)NULL);
+ erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)NULL);
}
#endif
tb->nslots -= SEGSZ;
@@ -2526,9 +2527,9 @@ static void grow(DbTableHash* tb, int nactive)
}
erts_smp_atomic_inc(&tb->nactive);
if (from_ix == 0) {
- erts_smp_atomic_set(&tb->szm, szm);
+ erts_smp_atomic_set_relb(&tb->szm, szm);
}
- erts_smp_atomic_set(&tb->is_resizing, 0);
+ erts_smp_atomic_set_relb(&tb->is_resizing, 0);
/* Finally, let's split the bucket. We try to do it in a smart way
to keep link order and avoid unnecessary updates of next-pointers */
@@ -2560,7 +2561,7 @@ static void grow(DbTableHash* tb, int nactive)
return;
abort:
- erts_smp_atomic_set(&tb->is_resizing, 0);
+ erts_smp_atomic_set_relb(&tb->is_resizing, 0);
}
@@ -2604,7 +2605,7 @@ static void shrink(DbTableHash* tb, int nactive)
erts_smp_atomic_set(&tb->nactive, src_ix);
if (dst_ix == 0) {
- erts_smp_atomic_set(&tb->szm, low_szm);
+ erts_smp_atomic_set_relb(&tb->szm, low_szm);
}
WUNLOCK_HASH(lck);
@@ -2618,7 +2619,7 @@ static void shrink(DbTableHash* tb, int nactive)
}
/*else already done */
- erts_smp_atomic_set(&tb->is_resizing, 0);
+ erts_smp_atomic_set_relb(&tb->is_resizing, 0);
}
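The same pairing covers the other shared fields: hash_to_ix() now reads szm with an acquire barrier matching the release stores in grow() and shrink(), so a reader that observes an updated size mask also observes the preceding change to nactive, keeping the computed bucket index within the active range. Likewise, is_resizing is cleared with a release store so one resize's writes are visible before another thread can begin the next, and the full ERTS_THR_MEMORY_BARRIER added at the end of db_create_hash() ensures the table, including its lock array, is fully initialized before it can become reachable from other threads.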