From 6cba7d35d3322db8acc25c45889c2b03f1b2f4c2 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Fri, 19 Aug 2016 20:16:20 +0200 Subject: erts: Add ErtsContainerStruct_ for array members that otherwise may produce warning from compilers that think T* and T[] are incompatible types (?). --- erts/emulator/beam/sys.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index dfe82cab44..2d0628f70e 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -99,6 +99,10 @@ #define ErtsContainerStruct(ptr, type, member) \ ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))) +/* Use this variant when the member is an array */ +#define ErtsContainerStruct_(ptr, type, memberv) \ + ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv))) + #if defined (__WIN32__) # include "erl_win_sys.h" #else -- cgit v1.2.3 From f1ffa5e90e5eecaac890e876760932d1bb1d9c86 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Mon, 19 Sep 2016 15:47:18 +0200 Subject: erts: Add ErtsSizeofMember macro (in case it matters) --- erts/emulator/beam/sys.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 2d0628f70e..16e6c33367 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -103,6 +103,8 @@ #define ErtsContainerStruct_(ptr, type, memberv) \ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv))) +#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member) + #if defined (__WIN32__) # include "erl_win_sys.h" #else -- cgit v1.2.3 From ff7eb12d002afbffacae5429bd9bb0819aa2d146 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 1 Jun 2016 16:19:04 +0200 Subject: erts: Remove unnecessary access of 'is_resizing' in tables without write_concurrency and remove it totally #ifndef ERTS_SMP --- erts/emulator/beam/erl_db_hash.c | 21 ++++++++++----------- erts/emulator/beam/erl_db_hash.h | 2 +- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 12ae086b31..16d97f05ee 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -670,8 +670,8 @@ int db_create_hash(Process *p, DbTable *tbl) tb->nsegs = NSEG_1; tb->nslots = SEGSZ; - erts_smp_atomic_init_nob(&tb->is_resizing, 0); #ifdef ERTS_SMP + erts_smp_atomic_init_nob(&tb->is_resizing, 0); if (tb->common.type & DB_FINE_LOCKED) { erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; int i; @@ -2604,23 +2604,22 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, static ERTS_INLINE int begin_resizing(DbTableHash* tb) { +#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) - return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1); - else { - if (erts_smp_atomic_read_nob(&tb->is_resizing)) - return 0; - erts_smp_atomic_set_nob(&tb->is_resizing, 1); - return 1; - } + return !erts_atomic_xchg_acqb(&tb->is_resizing, 1); + else + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); +#endif + return 1; } static ERTS_INLINE void done_resizing(DbTableHash* tb) { +#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->is_resizing, 0); - else - erts_smp_atomic_set_nob(&tb->is_resizing, 0); + erts_atomic_set_relb(&tb->is_resizing, 0); +#endif } /* Grow table with one new bucket. 
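
The two sys.h macros introduced at the top of this series are container_of-style helpers: given a pointer to a struct member they recover a pointer to the enclosing struct, and the `1 ? (ptr) : ...` conditional exists only as a compile-time type check. An array member decays to a pointer to its first element rather than to a pointer-to-array, which is why the array variant drops the `&`. Below is a minimal standalone sketch of how the three macros behave; the macro bodies are copied from the patch, while `struct table` and `main` are invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    /* copied from the sys.h hunks above */
    #define ErtsContainerStruct(ptr, type, member) \
        ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
    #define ErtsContainerStruct_(ptr, type, memberv) \
        ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv)))
    #define ErtsSizeofMember(type, member) sizeof(((type *)0)->member)

    struct table {               /* invented example struct */
        int nslots;
        void* first_segtab[2];   /* array member: decays to void**, so the
                                  * plain ErtsContainerStruct type check
                                  * (void* (*)[2]) would not match */
    };

    int main(void)
    {
        struct table t = { 17, { NULL, NULL } };
        void** segtab = t.first_segtab;  /* pointer to the array member */
        struct table* back =
            ErtsContainerStruct_(segtab, struct table, first_segtab);

        /* back == &t: the member offset is subtracted from the member pointer */
        printf("%d %u\n", back->nslots,
               (unsigned)(ErtsSizeofMember(struct table, first_segtab)
                          / sizeof(void*)));
        return 0;
    }

Both helpers reappear later in this series: free_seg() uses ErtsContainerStruct_ to recover a struct ext_segtab from its segtab[] array member, and NSEG_1 is computed with ErtsSizeofMember over DbTableHash.first_segtab.
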
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index e654363cd5..081ff8fafc 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -60,8 +60,8 @@ typedef struct db_table_hash { /* List of slots where elements have been deleted while table was fixed */ erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ erts_smp_atomic_t nactive; /* Number of "active" slots */ - erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ #ifdef ERTS_SMP + erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; #endif #ifdef VALGRIND -- cgit v1.2.3 From abc2f4fdae2d62b5d2843082dbb4437595973b38 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Thu, 23 Jun 2016 19:30:37 +0200 Subject: erts: Redesign ets with separate segment tables * Keep it simple(r) * To prepare for both dynamic sized segments and segtabs --- erts/emulator/beam/erl_db.h | 2 - erts/emulator/beam/erl_db_hash.c | 246 +++++++++++++++------------------------ erts/emulator/beam/erl_db_hash.h | 4 +- 3 files changed, 97 insertions(+), 155 deletions(-) diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 1d26c49652..2f3c4a8e1b 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -267,7 +267,5 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#undef ERTS_DB_ALC_MEM_UPDATE_ - #endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 16d97f05ee..82f03e458a 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -84,14 +84,6 @@ #include "erl_db_hash.h" -#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */ -# define IF_DEBUG(x) x -# define MY_ASSERT(x) ASSERT(x) -#else -# define IF_DEBUG(x) -# define MY_ASSERT(x) -#endif - /* * The following symbols can be manipulated to "tune" the linear hash array */ @@ -102,7 +94,7 @@ #define SEGSZ (1 << SEGSZ_EXP) #define SEGSZ_MASK (SEGSZ-1) -#define NSEG_1 2 /* Size of first segment table (must be at least 2) */ +#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*)) #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ @@ -318,27 +310,21 @@ struct mp_info { /* A table segment */ struct segment { - HashDbTerm* buckets[SEGSZ]; -#ifdef MYDEBUG - int is_ext_segment; -#endif + HashDbTerm* buckets[1]; }; +#define SIZEOF_SEGMENT(N) \ + (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N)) -/* A segment that also contains a segment table */ -struct ext_segment { - struct segment s; /* The segment itself. 
Must be first */ - +/* An extended segment table */ +struct ext_segtab { + ErtsThrPrgrLaterOp lop; struct segment** prev_segtab; /* Used when table is shrinking */ - int nsegs; /* Size of segtab */ + int prev_nsegs; /* Size of prev_segtab */ + int nsegs; /* Size of this segtab */ struct segment* segtab[1]; /* The segment table */ }; -#define SIZEOF_EXTSEG(NSEGS) \ - (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS)) - -#if defined(DEBUG) || defined(VALGRIND) -# define EXTSEG(SEGTAB_PTR) \ - ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab))) -#endif +#define SIZEOF_EXT_SEGTAB(NSEGS) \ + (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS)) static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, @@ -348,41 +334,14 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); else erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); -#ifdef VALGRIND - tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab); -#endif } -/* How the table segments relate to each other: - - ext_segment: ext_segment: "plain" segment - #=================# #================# #=============# - | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] | - | bucket[1] | | | | [257] | | | [513] | - : : | | : : | : : - | bucket[255] | | | | [511] | | | [767] | - |-----------------| | | |----------------| | #=============# - | prev_segtab=NULL| | | +--<---prev_segtab | | - | nsegs = 2 | | | | | nsegs = 256 | | -+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ | -| | segtab[1] -->-----------+---|--<---segtab[1] | | | -| #=================# | | segtab[2] -->-----|--+ ext_segment: -| | : : | #================# -+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]| - #================# | | | - | : : - | |----------------| - +----<---prev_segtab | - : : -*/ - /* ** Forward decl's (static functions) */ -static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, - struct segment** old_segtab); +static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); static int alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, @@ -666,9 +625,13 @@ int db_create_hash(Process *p, DbTable *tbl) erts_smp_atomic_init_nob(&tb->nactive, SEGSZ); erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); - SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab); + SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; tb->nslots = SEGSZ; + tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_SEGMENT(SEGSZ)); + sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(SEGSZ)); #ifdef ERTS_SMP erts_smp_atomic_init_nob(&tb->is_resizing, 0); @@ -2414,34 +2377,29 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, return DB_ERROR_NONE; } -static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, - struct segment** old_segtab) +static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) { - int nsegs; - struct ext_segment* eseg; + struct segment** old_segtab = SEGTAB(tb); + int nsegs = 0; + struct ext_segtab* est; + ASSERT(seg_ix >= NSEG_1); switch (seg_ix) { - case 0: nsegs = NSEG_1; break; - case 1: nsegs = NSEG_2; break; - default: nsegs = seg_ix + NSEG_INC; break; - } - eseg = (struct ext_segment*) 
erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, - (DbTable *) tb, - SIZEOF_EXTSEG(nsegs)); - ASSERT(eseg != NULL); - sys_memset(&eseg->s, 0, sizeof(struct segment)); - IF_DEBUG(eseg->s.is_ext_segment = 1); - eseg->prev_segtab = old_segtab; - eseg->nsegs = nsegs; - if (old_segtab) { - ASSERT(nsegs > tb->nsegs); - sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*)); - } + case NSEG_1: nsegs = NSEG_2; break; + default: nsegs = seg_ix + NSEG_INC; break; + } + ASSERT(nsegs > tb->nsegs); + est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_EXT_SEGTAB(nsegs)); + est->nsegs = nsegs; + est->prev_segtab = old_segtab; + est->prev_nsegs = tb->nsegs; + sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*)); #ifdef DEBUG - sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*)); + sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*)); #endif - eseg->segtab[seg_ix] = &eseg->s; - return eseg; + return est; } /* Extend table with one new segment @@ -2449,36 +2407,32 @@ static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, static int alloc_seg(DbTableHash *tb) { int seg_ix = tb->nslots >> SEGSZ_EXP; - - if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */ - struct segment** segtab = SEGTAB(tb); - struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab); - if (seg == NULL) return 0; - segtab[seg_ix] = &seg->s; - /* We don't use the new segtab until next call (see "shrink race") */ - } - else { /* Just a new plain segment */ - struct segment** segtab; - if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */ - struct ext_segment* eseg; - eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1]; - MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment); - SET_SEGTAB(tb, eseg->segtab); - tb->nsegs = eseg->nsegs; - } - ASSERT(seg_ix < tb->nsegs); - segtab = SEGTAB(tb); - ASSERT(segtab[seg_ix] == NULL); - segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, - (DbTable *) tb, - sizeof(struct segment)); - if (segtab[seg_ix] == NULL) return 0; - sys_memset(segtab[seg_ix], 0, sizeof(struct segment)); - } + struct segment** segtab; + + if (seg_ix == tb->nsegs) { /* New segtab needed */ + struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix); + SET_SEGTAB(tb, est->segtab); + tb->nsegs = est->nsegs; + } + ASSERT(seg_ix < tb->nsegs); + segtab = SEGTAB(tb); + segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_SEGMENT(SEGSZ)); + sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(SEGSZ)); tb->nslots += SEGSZ; return 1; } +#ifdef ERTS_SMP +static void dealloc_ext_segtab(void* lop_data) +{ + struct ext_segtab* est = (struct ext_segtab*) lop_data; + + erts_free(ERTS_ALC_T_DB_SEG, est); +} +#endif + /* Shrink table by freeing the top segment ** free_records: 1=free any records in segment, 0=assume segment is empty */ @@ -2486,18 +2440,17 @@ static int free_seg(DbTableHash *tb, int free_records) { const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1; struct segment** const segtab = SEGTAB(tb); - struct ext_segment* const top = (struct ext_segment*) segtab[seg_ix]; - int bytes; + struct segment* const segp = segtab[seg_ix]; int nrecords = 0; - ASSERT(top != NULL); + ASSERT(segp != NULL); #ifndef DEBUG if (free_records) #endif { int i; for (i=0; is.buckets[i]; + HashDbTerm* p = segp->buckets[i]; while(p != 0) { HashDbTerm* nxt = p->next; ASSERT(free_records); /* segment not empty as assumed? 
*/ @@ -2507,53 +2460,46 @@ static int free_seg(DbTableHash *tb, int free_records) } } } - - /* The "shrink race": - * We must avoid deallocating an extended segment while its segtab may - * still be used by other threads. - * The trick is to stop use a segtab one call earlier. That is, stop use - * a segtab when the segment above it is deallocated. When the segtab is - * later deallocated, it has not been used for a very long time. - * It is even theoretically safe as we have by then rehashed the entire - * segment, seizing *all* locks, so there cannot exist any retarded threads - * still hanging in BUCKET macro with an old segtab pointer. - * For this to work, we must of course allocate a new segtab one call - * earlier in alloc_seg() as well. And this is also the reason why - * the minimum size of the first segtab is 2 and not 1 (NSEG_1). - */ - if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */ - MY_ASSERT(top->s.is_ext_segment); - ASSERT(segtab != top->segtab || seg_ix==0); - bytes = SIZEOF_EXTSEG(top->nsegs); - } - else { /* Dealloc plain segment */ - struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1]; - MY_ASSERT(!top->s.is_ext_segment); - - if (segtab == newtop->segtab) { /* New top segment is extended */ - MY_ASSERT(newtop->s.is_ext_segment); - if (newtop->prev_segtab != NULL) { - /* Time to use a smaller segtab */ - SET_SEGTAB(tb, newtop->prev_segtab); - tb->nsegs = seg_ix; - ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs); - } - else { - ASSERT(NSEG_1 > 2 && seg_ix==1); - } - } - bytes = sizeof(struct segment); + if (seg_ix >= NSEG_1) { + struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab); + + if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */ + ASSERT(est->prev_segtab != NULL); + SET_SEGTAB(tb, est->prev_segtab); + tb->nsegs = est->prev_nsegs; + +#ifdef ERTS_SMP + if (!tb->common.is_thread_safe) { + /* + * Table is doing a graceful shrink operation and we must avoid + * deallocating this segtab while it may still be read by other + * threads. Schedule deallocation with thread progress to make + * sure no lingering threads are still hanging in BUCKET macro + * with an old segtab pointer. + */ + Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs); + ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est)); + ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0); + erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab, + est, + &est->lop, + sz); + } + else +#endif + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, + SIZEOF_EXT_SEGTAB(est->nsegs)); + } + else { + + } } + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(SEGSZ)); - erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, - (void*)top, bytes); #ifdef DEBUG - if (seg_ix > 0) { - segtab[seg_ix] = NULL; - } else { - SET_SEGTAB(tb, NULL); - } + if (seg_ix < tb->nsegs) + SEGTAB(tb)[seg_ix] = NULL; #endif tb->nslots -= SEGSZ; ASSERT(tb->nslots >= 0); diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 081ff8fafc..2d9b5e308a 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -51,6 +51,7 @@ typedef struct db_table_hash { DbTableCommon common; erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ + struct segment* first_segtab[1]; erts_smp_atomic_t szm; /* current size mask. 
*/ /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ @@ -64,9 +65,6 @@ typedef struct db_table_hash { erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; #endif -#ifdef VALGRIND - struct ext_segment* top_ptr_to_segment_with_active_segtab; -#endif } DbTableHash; -- cgit v1.2.3 From 0d7a001039dbcab096397f27213b518113a9e5d0 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 24 Aug 2016 16:28:53 +0200 Subject: erts: Enable a smaller first hash segment for ets --- erts/emulator/beam/erl_db_hash.c | 67 ++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 82f03e458a..2d24f438ca 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -89,10 +89,20 @@ */ #define CHAIN_LEN 6 /* Medium bucket chain len */ -/* Number of slots per segment */ -#define SEGSZ_EXP 8 -#define SEGSZ (1 << SEGSZ_EXP) -#define SEGSZ_MASK (SEGSZ-1) +/* +** We want the first mandatory segment to be small (to reduce minimal footprint) +** and larger extra segments (to reduce number of alloc/free calls). +*/ + +/* Number of slots in first segment */ +#define FIRST_SEGSZ_EXP 8 +#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP) +#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1) + +/* Number of slots per extra segment */ +#define EXT_SEGSZ_EXP 11 +#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP) +#define EXT_SEGSZ_MASK (EXT_SEGSZ-1) #define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*)) #define NSEG_2 256 /* Size of second segment table */ @@ -115,7 +125,9 @@ #define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) #define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) -#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK] +#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP) + +#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK] /* * When deleting a table, the number of records to delete. 
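
The SLOT_IX_TO_SEG_IX arithmetic above deserves a worked check: adding EXT_SEGSZ-FIRST_SEGSZ before the shift makes the 256-slot first segment come out as segment index 0, while every later group of 2048 slots maps to one extended segment. The in-segment bucket index (i & EXT_SEGSZ_MASK) is then rotated by FIRST_SEGSZ relative to the slot index, but the mapping stays one-to-one, so no bucket of an extended segment is wasted. A small self-checking sketch; the constants and the mapping macro are copied from the patch, the main() driver is illustrative:

    #include <assert.h>
    #include <stdio.h>

    /* copied from the hunk above */
    #define FIRST_SEGSZ_EXP 8
    #define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)
    #define EXT_SEGSZ_EXP 11
    #define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)
    #define EXT_SEGSZ_MASK (EXT_SEGSZ - 1)
    #define SLOT_IX_TO_SEG_IX(i) (((i) + (EXT_SEGSZ - FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)

    int main(void)
    {
        int used[EXT_SEGSZ] = {0};
        int i;

        /* slots 0..FIRST_SEGSZ-1 live in the small built-in segment 0 */
        for (i = 0; i < FIRST_SEGSZ; i++)
            assert(SLOT_IX_TO_SEG_IX(i) == 0);

        /* the next EXT_SEGSZ slots all land in segment 1, and the rotated
         * bucket index hits every bucket of that segment exactly once */
        for (i = FIRST_SEGSZ; i < FIRST_SEGSZ + EXT_SEGSZ; i++) {
            assert(SLOT_IX_TO_SEG_IX(i) == 1);
            used[i & EXT_SEGSZ_MASK]++;
        }
        for (i = 0; i < EXT_SEGSZ; i++)
            assert(used[i] == 1);

        puts("slot -> segment mapping is consistent");
        return 0;
    }
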
@@ -422,7 +434,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) + if (nactive > FIRST_SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) && !IS_FIXED(tb)) { shrink(tb, nactive); } @@ -621,17 +633,17 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK); - erts_smp_atomic_init_nob(&tb->nactive, SEGSZ); + erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); + erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; - tb->nslots = SEGSZ; + tb->nslots = FIRST_SEGSZ; tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, (DbTable *) tb, - SIZEOF_SEGMENT(SEGSZ)); - sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(SEGSZ)); + SIZEOF_SEGMENT(FIRST_SEGSZ)); + sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ)); #ifdef ERTS_SMP erts_smp_atomic_init_nob(&tb->is_resizing, 0); @@ -649,7 +661,7 @@ int db_create_hash(Process *p, DbTable *tbl) erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt, "db_hash_slot", make_small(i)); } - /* This important property is needed to guarantee that the buckets + /* This important property is needed to guarantee the two buckets * involved in a grow/shrink operation it protected by the same lock: */ ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); @@ -2213,12 +2225,12 @@ static int db_free_table_continue_hash(DbTable *tbl) done /= 2; while(tb->nslots != 0) { - free_seg(tb, 1); + done += 1 + EXT_SEGSZ/64 + free_seg(tb, 1); /* * If we have done enough work, get out here. 
*/ - if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) { + if (done >= DELETE_RECORD_LIMIT) { return 0; /* Not done */ } } @@ -2406,9 +2418,10 @@ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) */ static int alloc_seg(DbTableHash *tb) { - int seg_ix = tb->nslots >> SEGSZ_EXP; + int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots); struct segment** segtab; + ASSERT(seg_ix > 0); if (seg_ix == tb->nsegs) { /* New segtab needed */ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix); SET_SEGTAB(tb, est->segtab); @@ -2418,9 +2431,9 @@ static int alloc_seg(DbTableHash *tb) segtab = SEGTAB(tb); segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, (DbTable *) tb, - SIZEOF_SEGMENT(SEGSZ)); - sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(SEGSZ)); - tb->nslots += SEGSZ; + SIZEOF_SEGMENT(EXT_SEGSZ)); + sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ)); + tb->nslots += EXT_SEGSZ; return 1; } @@ -2438,9 +2451,10 @@ static void dealloc_ext_segtab(void* lop_data) */ static int free_seg(DbTableHash *tb, int free_records) { - const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1; + const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1; struct segment** const segtab = SEGTAB(tb); struct segment* const segp = segtab[seg_ix]; + Uint seg_sz; int nrecords = 0; ASSERT(segp != NULL); @@ -2448,8 +2462,8 @@ static int free_seg(DbTableHash *tb, int free_records) if (free_records) #endif { - int i; - for (i=0; ibuckets[i]; while(p != 0) { HashDbTerm* nxt = p->next; @@ -2495,13 +2509,14 @@ static int free_seg(DbTableHash *tb, int free_records) } } - erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(SEGSZ)); + seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ; + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz)); #ifdef DEBUG if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL; #endif - tb->nslots -= SEGSZ; + tb->nslots -= seg_sz; ASSERT(tb->nslots >= 0); return nrecords; } @@ -2589,7 +2604,7 @@ static void grow(DbTableHash* tb, int nactive) /* Ensure that the slot nactive exists */ if (nactive == tb->nslots) { /* Time to get a new segment */ - ASSERT((nactive & SEGSZ_MASK) == 0); + ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); if (!alloc_seg(tb)) goto abort; } ASSERT(nactive < tb->nslots); @@ -2668,7 +2683,7 @@ static void shrink(DbTableHash* tb, int nactive) int dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); - ASSERT(nactive > SEGSZ); + ASSERT(nactive > FIRST_SEGSZ); lck = WLOCK_HASH(tb, dst_ix); /* Double check for racing table fixers */ if (!IS_FIXED(tb)) { @@ -2697,7 +2712,7 @@ static void shrink(DbTableHash* tb, int nactive) } WUNLOCK_HASH(lck); - if (tb->nslots - src_ix >= SEGSZ) { + if (tb->nslots - src_ix >= EXT_SEGSZ) { free_seg(tb, 0); } } -- cgit v1.2.3 From 92c98a138638541a710f17f21073b568362502f8 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 24 Aug 2016 18:26:55 +0200 Subject: erts: Reduce ets hash load factor for faster lookup/insert/delete at the expense of about one word per object. 
--- erts/emulator/beam/erl_db_hash.c | 9 ++++---- lib/stdlib/test/ets_SUITE.erl | 46 ++++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 2d24f438ca..1752ec5191 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -87,7 +87,8 @@ /* * The following symbols can be manipulated to "tune" the linear hash array */ -#define CHAIN_LEN 6 /* Medium bucket chain len */ +#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1) +#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2) /* ** We want the first mandatory segment to be small (to reduce minimal footprint) @@ -434,7 +435,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > FIRST_SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) + if (nactive > FIRST_SEGSZ && NITEMS(tb) < SHRINK_LIMIT(nactive) && !IS_FIXED(tb)) { shrink(tb, nactive); } @@ -837,7 +838,7 @@ Lnew: WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); - if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) { + if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { grow(tb, nactive); } } @@ -2891,7 +2892,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) WUNLOCK_HASH(lck); nactive = NACTIVE(tb); - if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) { + if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { grow(tb, nactive); } } else { diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index b02d17bdb6..6835e745bc 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -607,11 +607,11 @@ memory(Config) when is_list(Config) -> memory_do(Opts) -> L = [T1,T2,T3,T4] = fill_sets_int(1000,Opts), XR1 = case mem_mode(T1) of - {normal,_} -> {13836,13046,13046,13052}; %{13862,13072,13072,13078}; - {compressed,4} -> {11041,10251,10251,10252}; %{11067,10277,10277,10278}; - {compressed,8} -> {10050,9260,9260,9260} %{10076,9286,9286,9286} + {normal,_} -> {13836, 15346, 15346, 15346+6}; + {compressed,4} -> {11041, 12551, 12551, 12551+1}; + {compressed,8} -> {10050, 11560, 11560, 11560} end, - XRes1 = adjust_xmem(L, XR1), + XRes1 = adjust_xmem(L, XR1, 1), Res1 = {?S(T1),?S(T2),?S(T3),?S(T4)}, lists:foreach(fun(T) -> Before = ets:info(T,size), @@ -623,11 +623,11 @@ memory_do(Opts) -> end, L), XR2 = case mem_mode(T1) of - {normal,_} -> {13826,13037,13028,13034}; %{13852,13063,13054,13060}; - {compressed,4} -> {11031,10242,10233,10234}; %{11057,10268,10259,10260}; - {compressed,8} -> {10040,9251,9242,9242} %10066,9277,9268,9268} + {normal,_} -> {13826, 15337, 15337-9, 15337-3}; + {compressed,4} -> {11031, 12542, 12542-9, 12542-8}; + {compressed,8} -> {10040, 11551, 11551-9, 11551-9} end, - XRes2 = adjust_xmem(L, XR2), + XRes2 = adjust_xmem(L, XR2, 1), Res2 = {?S(T1),?S(T2),?S(T3),?S(T4)}, lists:foreach(fun(T) -> Before = ets:info(T,size), @@ -639,17 +639,17 @@ memory_do(Opts) -> end, L), XR3 = case mem_mode(T1) of - {normal,_} -> {13816,13028,13010,13016}; %{13842,13054,13036,13042}; - {compressed,4} -> {11021,10233,10215,10216}; %{11047,10259,10241,10242}; - {compressed,8} -> {10030,9242,9224,9224} %{10056,9268,9250,9250} + {normal,_} -> {13816, 15328, 15328-18, 15328-12}; + {compressed,4} -> {11021, 12533, 12533-18, 12533-17}; + {compressed,8} -> {10030, 11542, 11542-18, 11542-18} end, - XRes3 = adjust_xmem(L, XR3), + XRes3 = adjust_xmem(L, XR3, 1), Res3 = {?S(T1),?S(T2),?S(T3),?S(T4)}, lists:foreach(fun(T) 
-> ets:delete_all_objects(T) end, L), - XRes4 = adjust_xmem(L, {50,260,260,260}), %{76,286,286,286}), + XRes4 = adjust_xmem(L, {50, 256, 256, 256}, 0), Res4 = {?S(T1),?S(T2),?S(T3),?S(T4)}, lists:foreach(fun(T) -> ets:delete(T) @@ -660,7 +660,7 @@ memory_do(Opts) -> ets:select_delete(T,[{'_',[],[true]}]) end, L2), - XRes5 = adjust_xmem(L2, {50,260,260,260}), %{76,286,286,286}), + XRes5 = adjust_xmem(L2, {50, 256, 256, 256}, 0), Res5 = {?S(T11),?S(T12),?S(T13),?S(T14)}, io:format("XRes1 = ~p~n" " Res1 = ~p~n~n" @@ -701,12 +701,22 @@ chk_normal_tab_struct_size() -> io:format("?TAB_STRUCT_SZ=~p~n", [?TAB_STRUCT_SZ]), ok. -adjust_xmem([_T1,_T2,_T3,_T4], {A0,B0,C0,D0} = _Mem0) -> +sizeof_ext_segtab() -> + case {erlang:system_info(wordsize), + erlang:system_info(smp_support)} of + {4,true} -> 5 + 3; + {4,false} -> 3 + 3; + {8,true} -> 4 + 2; + {8,false} -> 3 + 2 + end. + +adjust_xmem([_T1,_T2,_T3,_T4], {A0,B0,C0,D0} = _Mem0, EstCnt) -> %% Adjust for 64-bit, smp, and os: %% Table struct size may differ. TabDiff = ?TAB_STRUCT_SZ, - {A0+TabDiff, B0+TabDiff, C0+TabDiff, D0+TabDiff}. + HTabDiff = TabDiff + EstCnt*sizeof_ext_segtab(), + {A0+TabDiff, B0+HTabDiff, C0+HTabDiff, D0+HTabDiff}. %% Misc. whitebox tests t_whitebox(Config) when is_list(Config) -> @@ -5363,12 +5373,12 @@ verify_table_load(T) -> Stats = ets:info(T,stats), {Buckets,AvgLen,StdDev,ExpSD,_MinLen,_MaxLen,_} = Stats, ok = if - AvgLen > 7 -> + AvgLen > 1.2 -> io:format("Table overloaded: Stats=~p\n~p\n", [Stats, ets:info(T)]), false; - Buckets>256, AvgLen < 6 -> + Buckets>256, AvgLen < 0.47 -> io:format("Table underloaded: Stats=~p\n~p\n", [Stats, ets:info(T)]), false; -- cgit v1.2.3 From 25eb3fe353cb0f5c381107e43a865d3a312c8c25 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 30 Aug 2016 11:49:20 +0200 Subject: erts: Suppress failed ETS memory checks due to the grow/shrink hysteresis of the meta tables --- erts/emulator/beam/erl_bif_info.c | 8 ++++++++ erts/emulator/beam/erl_db.c | 30 +++++++++++++++++++++++++++ erts/emulator/beam/erl_db.h | 2 ++ erts/emulator/beam/erl_db_hash.c | 43 ++++++++++++++++++++++++++++++++++++++- erts/emulator/beam/erl_db_hash.h | 2 ++ lib/stdlib/test/ets_SUITE.erl | 34 +++++++++++++++++++++++-------- 6 files changed, 110 insertions(+), 9 deletions(-) diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 3fb866733c..abf20a90e4 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3547,6 +3547,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint); BIF_RET(make_small((Uint) words)); } + else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { + /* Used by ets_SUITE (stdlib) */ + BIF_RET(erts_ets_get_meta_state(BIF_P)); + } else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) { /* Used by driver_SUITE (emulator) */ Uint sz, *szp; @@ -4280,6 +4284,10 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } BIF_RET(am_ok); } + else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { + /* Used by ets_SUITE (stdlib) */ + BIF_RET(erts_ets_restore_meta_state(BIF_P, BIF_ARG_2)); + } } BIF_ERROR(BIF_P, BADARG); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index bad34211a5..df4e34511f 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -3978,6 +3978,36 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt) return list; } +/* + * For testing only + * Retreive meta table size state + */ +Eterm 
erts_ets_get_meta_state(Process* p) +{ + Eterm* hp = HAlloc(p, 3); + return TUPLE2(hp, + erts_ets_hash_get_memstate(p, &meta_pid_to_tab->hash), + erts_ets_hash_get_memstate(p, &meta_pid_to_fixed_tab->hash)); +} +/* + * For testing only + * Restore a previously retrieved meta table size state. + * We do this to suppress failed memory checks + * caused by the hysteresis of meta tables grow/shrink limits. + */ +Eterm erts_ets_restore_meta_state(Process* p, Eterm meta_state) +{ + Eterm* tv; + Eterm* hp; + if (!is_tuple_arity(meta_state, 2)) + return am_badarg; + + tv = tuple_val(meta_state); + hp = HAlloc(p, 3); + return TUPLE2(hp, + erts_ets_hash_restore_memstate(&meta_pid_to_tab->hash, tv[1]), + erts_ets_hash_restore_memstate(&meta_pid_to_fixed_tab->hash, tv[2])); +} #ifdef HARDDEBUG /* Here comes some debug functions */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 2f3c4a8e1b..f7eb3dc45c 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -90,6 +90,8 @@ extern Export ets_select_continue_exp; extern erts_smp_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); +Eterm erts_ets_get_meta_state(Process* p); +Eterm erts_ets_restore_meta_state(Process* p, Eterm target_state); Uint erts_db_get_max_tabs(void); diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 1752ec5191..581b135233 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2726,7 +2726,6 @@ static void shrink(DbTableHash* tb, int nactive) done_resizing(tb); } - /* Search a list of tuples for a matching key */ static HashDbTerm* search_list(DbTableHash* tb, Eterm key, @@ -2977,6 +2976,48 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb))); stats->kept_items = kept_items; } + +/* For testing only */ +Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) +{ + Eterm seg_cnt; + while (!begin_resizing(tb)) + /*spinn*/; + + seg_cnt = make_small(SLOT_IX_TO_SEG_IX(tb->nslots)); + done_resizing(tb); + return seg_cnt; +} +/* For testing only */ +Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) +{ + int seg_cnt, target; + int nactive; + + if (!is_small(memstate)) + return make_small(__LINE__); + + target = signed_val(memstate); + if (target < 1) + return make_small(__LINE__); + while (1) { + while (!begin_resizing(tb)) + /*spin*/; + seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots); + nactive = NACTIVE(tb); + done_resizing(tb); + + if (target == seg_cnt) + return am_ok; + if (IS_FIXED(tb)) + return make_small(__LINE__); + if (target < seg_cnt) + shrink(tb, nactive); + else + grow(tb, nactive); + } +} + #ifdef HARDDEBUG void db_check_table_hash(DbTable *tbl) diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 2d9b5e308a..e209037878 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -107,5 +107,7 @@ typedef struct { }DbHashStats; void db_calc_stats_hash(DbTableHash* tb, DbHashStats*); +Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb); +Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate); #endif /* _DB_HASH_H */ diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index 6835e745bc..ea555f9c39 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -218,7 +218,7 @@ memory_check_summary(_Config) -> receive {get_failed_memchecks, 
FailedMemchecks} -> ok end, io:format("Failed memchecks: ~p\n",[FailedMemchecks]), NoFailedMemchecks = length(FailedMemchecks), - if NoFailedMemchecks > 3 -> + if NoFailedMemchecks > 1 -> ct:fail("Too many failed (~p) memchecks", [NoFailedMemchecks]); true -> ok @@ -5728,27 +5728,45 @@ etsmem() -> {Bl0+Bl,BlSz0+BlSz} end, {0,0}, CS) end}, - {Mem,AllTabs}. + {Mem,AllTabs, erts_debug:get_internal_state('DbTable_meta')}. -verify_etsmem({MemInfo,AllTabs}) -> +verify_etsmem(EtsMem) -> wait_for_test_procs(), + verify_etsmem(EtsMem, false). + +verify_etsmem({MemInfo,AllTabs,MetaState}=EtsMem, Adjusted) -> case etsmem() of - {MemInfo,_} -> + {MemInfo,_,_} -> io:format("Ets mem info: ~p", [MemInfo]), case MemInfo of {ErlMem,EtsAlloc} when ErlMem == notsup; EtsAlloc == undefined -> %% Use 'erl +Mea max' to do more complete memory leak testing. {comment,"Incomplete or no mem leak testing"}; _ -> - ok + case Adjusted of + true -> + {comment, "Meta state adjusted"}; + false -> + ok + end end; - {MemInfo2, AllTabs2} -> + + {MemInfo2, AllTabs2, MetaState2} -> io:format("Expected: ~p", [MemInfo]), io:format("Actual: ~p", [MemInfo2]), io:format("Changed tables before: ~p\n",[AllTabs -- AllTabs2]), io:format("Changed tables after: ~p\n", [AllTabs2 -- AllTabs]), - ets_test_spawn_logger ! {failed_memcheck, get('__ETS_TEST_CASE__')}, - {comment, "Failed memory check"} + io:format("Meta state before: ~p\n", [MetaState]), + io:format("Meta state after: ~p\n", [MetaState2]), + case {MetaState =:= MetaState2, Adjusted} of + {false, false} -> + io:format("Adjust meta state and retry...\n\n",[]), + {ok,ok} = erts_debug:set_internal_state('DbTable_meta', MetaState), + verify_etsmem(EtsMem, true); + _ -> + ets_test_spawn_logger ! {failed_memcheck, get('__ETS_TEST_CASE__')}, + {comment, "Failed memory check"} + end end. 
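
The "hysteresis" in the commit message is a direct consequence of the asymmetric GROW_LIMIT/SHRINK_LIMIT pair introduced in the previous commit: a table grows as soon as the item count exceeds the number of active buckets, but does not shrink until the item count falls below half of it. The number of active slots, and therefore the table's memory footprint, thus depends on the table's history and not only on its current size, which is what makes a naive before/after memory comparison flaky. A standalone sketch of the controller behaviour; the two limit macros match the patch, the driver loop is invented:

    #include <stdio.h>

    /* limits as defined in erl_db_hash.c above */
    #define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
    #define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
    #define FIRST_SEGSZ 256

    int main(void)
    {
        int nactive = FIRST_SEGSZ, nitems = 0, i;

        /* insert 10000 items, growing one bucket at a time as grow() does */
        for (i = 0; i < 10000; i++) {
            nitems++;
            while (nitems > GROW_LIMIT(nactive))
                nactive++;
        }
        /* delete back down to 1000 items; shrinking starts below half load */
        while (nitems > 1000) {
            nitems--;
            while (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))
                nactive--;
        }
        /* prints items=1000 nactive=2001: about twice the active slots of a
         * freshly filled table with the same content - the hysteresis band */
        printf("items=%d nactive=%d\n", nitems, nactive);
        return 0;
    }
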
-- cgit v1.2.3 From fdc2f2b4a6f6314ae7c183dc4e39e19d05ac89e4 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Fri, 2 Sep 2016 20:04:47 +0200 Subject: erts: Fix ets_SUITE:memory by simply asking for the size of struct ext_segtab --- erts/emulator/beam/erl_bif_info.c | 4 +++- erts/emulator/beam/erl_db_hash.c | 5 +++++ erts/emulator/beam/erl_db_hash.h | 1 + lib/stdlib/test/ets_SUITE.erl | 22 +++------------------- 4 files changed, 12 insertions(+), 20 deletions(-) diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index abf20a90e4..13391b7c67 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3545,7 +3545,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) { /* Used by ets_SUITE (stdlib) */ size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint); - BIF_RET(make_small((Uint) words)); + Eterm* hp = HAlloc(BIF_P ,3); + BIF_RET(TUPLE2(hp, make_small((Uint) words), + erts_ets_hash_sizeof_ext_segtab())); } else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { /* Used by ets_SUITE (stdlib) */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 581b135233..3f7e14d15d 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2977,6 +2977,11 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) stats->kept_items = kept_items; } +/* For testing only */ +Eterm erts_ets_hash_sizeof_ext_segtab(void) +{ + return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1); +} /* For testing only */ Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) { diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index e209037878..11bd6aa32a 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -107,6 +107,7 @@ typedef struct { }DbHashStats; void db_calc_stats_hash(DbTableHash* tb, DbHashStats*); +Eterm erts_ets_hash_sizeof_ext_segtab(void); Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb); Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate); diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index ea555f9c39..ecdac0deff 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -591,12 +591,6 @@ select_fail_do(Opts) -> -define(S(T),ets:info(T,memory)). --define(TAB_STRUCT_SZ, erts_debug:get_internal_state('DbTable_words')). -%%-define(NORMAL_TAB_STRUCT_SZ, 26). %% SunOS5.8, 32-bit, non smp, private heap -%% -%% The hardcoded expected memory sizes (in words) are the ones we expect on: -%% SunOS5.8, 32-bit, non smp, private heap -%% %% Whitebox test of ets:info(X, memory). memory(Config) when is_list(Config) -> @@ -698,25 +692,15 @@ chk_normal_tab_struct_size() -> erlang:system_info(smp_support), erlang:system_info(heap_type)}, io:format("System = ~p~n", [System]), - io:format("?TAB_STRUCT_SZ=~p~n", [?TAB_STRUCT_SZ]), ok. -sizeof_ext_segtab() -> - case {erlang:system_info(wordsize), - erlang:system_info(smp_support)} of - {4,true} -> 5 + 3; - {4,false} -> 3 + 3; - {8,true} -> 4 + 2; - {8,false} -> 3 + 2 - end. - adjust_xmem([_T1,_T2,_T3,_T4], {A0,B0,C0,D0} = _Mem0, EstCnt) -> %% Adjust for 64-bit, smp, and os: %% Table struct size may differ. - TabDiff = ?TAB_STRUCT_SZ, - HTabDiff = TabDiff + EstCnt*sizeof_ext_segtab(), - {A0+TabDiff, B0+HTabDiff, C0+HTabDiff, D0+HTabDiff}. 
+ {TabSz, EstSz} = erts_debug:get_internal_state('DbTable_words'), + HTabSz = TabSz + EstCnt*EstSz, + {A0+TabSz, B0+HTabSz, C0+HTabSz, D0+HTabSz}. %% Misc. whitebox tests t_whitebox(Config) when is_list(Config) -> -- cgit v1.2.3 From 1573603177f5ba641390c67b0515defb2786ecfb Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 6 Sep 2016 17:42:30 +0200 Subject: erts: Tweak ets grow/shrink to keep up at contention --- erts/emulator/beam/erl_db_hash.c | 287 +++++++++++++++++++++------------------ erts/emulator/beam/erl_db_hash.h | 8 +- 2 files changed, 161 insertions(+), 134 deletions(-) diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 3f7e14d15d..698e4c301b 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -194,6 +194,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, #ifdef ERTS_SMP # define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1) # define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck) +# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval)) /* Fine grained read lock */ static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) @@ -330,7 +331,9 @@ struct segment { /* An extended segment table */ struct ext_segtab { +#ifdef ERTS_SMP ErtsThrPrgrLaterOp lop; +#endif struct segment** prev_segtab; /* Used when table is shrinking */ int prev_nsegs; /* Size of prev_segtab */ int nsegs; /* Size of this segtab */ @@ -355,14 +358,14 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, ** Forward decl's (static functions) */ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); -static int alloc_seg(DbTableHash *tb); +static void alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, HashDbTerm *list); static HashDbTerm* search_list(DbTableHash* tb, Eterm key, HashValue hval, HashDbTerm *list); -static void shrink(DbTableHash* tb, int nactive); -static void grow(DbTableHash* tb, int nactive); +static void shrink(DbTableHash* tb, int nitems); +static void grow(DbTableHash* tb, int nitems); static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, Uint sz, DbTableHash*); static int analyze_pattern(DbTableHash *tb, Eterm pattern, @@ -435,9 +438,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > FIRST_SEGSZ && NITEMS(tb) < SHRINK_LIMIT(nactive) + int nitems = NITEMS(tb); + if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive) && !IS_FIXED(tb)) { - shrink(tb, nactive); + shrink(tb, nitems); } } @@ -839,7 +843,7 @@ Lnew: { int nactive = NACTIVE(tb); if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { - grow(tb, nactive); + grow(tb, nitems); } } CHECK_TABLES(); @@ -2417,7 +2421,7 @@ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) /* Extend table with one new segment */ -static int alloc_seg(DbTableHash *tb) +static void alloc_seg(DbTableHash *tb) { int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots); struct segment** segtab; @@ -2435,7 +2439,6 @@ static int alloc_seg(DbTableHash *tb) SIZEOF_SEGMENT(EXT_SEGSZ)); sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ)); tb->nslots += EXT_SEGSZ; - return 1; } #ifdef ERTS_SMP @@ -2506,9 +2509,6 @@ static int free_seg(DbTableHash *tb, int free_records) erts_db_free(ERTS_ALC_T_DB_SEG, 
(DbTable*)tb, est, SIZEOF_EXT_SEGTAB(est->nsegs)); } - else { - - } } seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ; erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz)); @@ -2584,85 +2584,93 @@ done_resizing(DbTableHash* tb) #endif } -/* Grow table with one new bucket. +/* Grow table with one or more new buckets. ** Allocate new segment if needed. */ -static void grow(DbTableHash* tb, int nactive) +static void grow(DbTableHash* tb, int nitems) { HashDbTerm** pnext; HashDbTerm** to_pnext; HashDbTerm* p; erts_smp_rwmtx_t* lck; - int from_ix; + int nactive; + int from_ix, to_ix; int szm; + int loop_limit = 5; - if (!begin_resizing(tb)) - return; /* already in progress */ - if (NACTIVE(tb) != nactive) { - goto abort; /* already done (race) */ - } + do { + if (!begin_resizing(tb)) + return; /* already in progress */ + nactive = NACTIVE(tb); + if (nitems <= GROW_LIMIT(nactive)) { + goto abort; /* already done (race) */ + } - /* Ensure that the slot nactive exists */ - if (nactive == tb->nslots) { - /* Time to get a new segment */ - ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); - if (!alloc_seg(tb)) goto abort; - } - ASSERT(nactive < tb->nslots); + /* Ensure that the slot nactive exists */ + if (nactive == tb->nslots) { + /* Time to get a new segment */ + ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); + alloc_seg(tb); + } + ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read_nob(&tb->szm); - if (nactive <= szm) { - from_ix = nactive & (szm >> 1); - } else { - ASSERT(nactive == szm+1); - from_ix = 0; - szm = (szm<<1) | 1; - } + szm = erts_smp_atomic_read_nob(&tb->szm); + if (nactive <= szm) { + from_ix = nactive & (szm >> 1); + } else { + ASSERT(nactive == szm+1); + from_ix = 0; + szm = (szm<<1) | 1; + } + to_ix = nactive; + + lck = WLOCK_HASH(tb, from_ix); + ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); + /* Now a final double check (with the from_ix lock held) + * that we did not get raced by a table fixer. + */ + if (IS_FIXED(tb)) { + WUNLOCK_HASH(lck); + goto abort; + } + erts_smp_atomic_set_nob(&tb->nactive, ++nactive); + if (from_ix == 0) { + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->szm, szm); + else + erts_smp_atomic_set_nob(&tb->szm, szm); + } + done_resizing(tb); - lck = WLOCK_HASH(tb, from_ix); - /* Now a final double check (with the from_ix lock held) - * that we did not get raced by a table fixer. - */ - if (IS_FIXED(tb)) { - WUNLOCK_HASH(lck); - goto abort; - } - erts_smp_atomic_inc_nob(&tb->nactive); - if (from_ix == 0) { - if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->szm, szm); - else - erts_smp_atomic_set_nob(&tb->szm, szm); - } - done_resizing(tb); + /* Finally, let's split the bucket. We try to do it in a smart way + to keep link order and avoid unnecessary updates of next-pointers */ + pnext = &BUCKET(tb, from_ix); + p = *pnext; + to_pnext = &BUCKET(tb, to_ix); + while (p != NULL) { + if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */ + *pnext = p->next; + free_term(tb, p); + p = *pnext; + } + else { + int ix = p->hvalue & szm; + if (ix != from_ix) { + ASSERT(ix == (from_ix ^ ((szm+1)>>1))); + *to_pnext = p; + /* Swap "from" and "to": */ + from_ix = ix; + to_pnext = pnext; + } + pnext = &p->next; + p = *pnext; + } + } + *to_pnext = NULL; + WUNLOCK_HASH(lck); - /* Finally, let's split the bucket. 
We try to do it in a smart way - to keep link order and avoid unnecessary updates of next-pointers */ - pnext = &BUCKET(tb, from_ix); - p = *pnext; - to_pnext = &BUCKET(tb, nactive); - while (p != NULL) { - if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */ - *pnext = p->next; - free_term(tb, p); - p = *pnext; - } - else { - int ix = p->hvalue & szm; - if (ix != from_ix) { - ASSERT(ix == (from_ix ^ ((szm+1)>>1))); - *to_pnext = p; - /* Swap "from" and "to": */ - from_ix = ix; - to_pnext = pnext; - } - pnext = &p->next; - p = *pnext; - } - } - *to_pnext = NULL; + }while (--loop_limit && nitems > GROW_LIMIT(nactive)); - WUNLOCK_HASH(lck); return; abort: @@ -2673,56 +2681,75 @@ abort: /* Shrink table by joining top bucket. ** Remove top segment if it gets empty. */ -static void shrink(DbTableHash* tb, int nactive) -{ - if (!begin_resizing(tb)) - return; /* already in progress */ - if (NACTIVE(tb) == nactive) { - erts_smp_rwmtx_t* lck; - int src_ix = nactive - 1; - int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; - int dst_ix = src_ix & low_szm; - - ASSERT(dst_ix < src_ix); - ASSERT(nactive > FIRST_SEGSZ); - lck = WLOCK_HASH(tb, dst_ix); - /* Double check for racing table fixers */ - if (!IS_FIXED(tb)) { - HashDbTerm** src_bp = &BUCKET(tb, src_ix); - HashDbTerm** dst_bp = &BUCKET(tb, dst_ix); - HashDbTerm** bp = src_bp; - - /* Q: Why join lists by appending "dst" at the end of "src"? - A: Must step through "src" anyway to purge pseudo deleted. */ - while(*bp != NULL) { - if ((*bp)->hvalue == INVALID_HASH) { - HashDbTerm* deleted = *bp; - *bp = deleted->next; - free_term(tb, deleted); - } else { - bp = &(*bp)->next; - } - } - *bp = *dst_bp; - *dst_bp = *src_bp; - *src_bp = NULL; - - erts_smp_atomic_set_nob(&tb->nactive, src_ix); - if (dst_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, low_szm); - } - WUNLOCK_HASH(lck); - - if (tb->nslots - src_ix >= EXT_SEGSZ) { - free_seg(tb, 0); - } - } - else { - WUNLOCK_HASH(lck); - } +static void shrink(DbTableHash* tb, int nitems) +{ + HashDbTerm** src_bp; + HashDbTerm** dst_bp; + HashDbTerm** bp; + erts_smp_rwmtx_t* lck; + int src_ix, dst_ix, low_szm; + int nactive; + int loop_limit = 5; - } - /*else already done */ + do { + if (!begin_resizing(tb)) + return; /* already in progress */ + nactive = NACTIVE(tb); + if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) { + goto abort; /* already done (race) */ + } + src_ix = nactive - 1; + low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; + dst_ix = src_ix & low_szm; + + ASSERT(dst_ix < src_ix); + ASSERT(nactive > FIRST_SEGSZ); + lck = WLOCK_HASH(tb, dst_ix); + ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); + /* Double check for racing table fixers */ + if (IS_FIXED(tb)) { + WUNLOCK_HASH(lck); + goto abort; + } + + src_bp = &BUCKET(tb, src_ix); + dst_bp = &BUCKET(tb, dst_ix); + bp = src_bp; + + /* + * We join lists by appending "dst" at the end of "src" + * as we must step through "src" anyway to purge pseudo deleted. 
+ */ + while(*bp != NULL) { + if ((*bp)->hvalue == INVALID_HASH) { + HashDbTerm* deleted = *bp; + *bp = deleted->next; + free_term(tb, deleted); + } else { + bp = &(*bp)->next; + } + } + *bp = *dst_bp; + *dst_bp = *src_bp; + *src_bp = NULL; + + nactive = src_ix; + erts_smp_atomic_set_nob(&tb->nactive, nactive); + if (dst_ix == 0) { + erts_smp_atomic_set_relb(&tb->szm, low_szm); + } + WUNLOCK_HASH(lck); + + if (tb->nslots - src_ix >= EXT_SEGSZ) { + free_seg(tb, 0); + } + done_resizing(tb); + + } while (--loop_limit + && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)); + return; + +abort: done_resizing(tb); } @@ -2892,7 +2919,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) nactive = NACTIVE(tb); if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { - grow(tb, nactive); + grow(tb, nitems); } } else { WUNLOCK_HASH(lck); @@ -2997,7 +3024,6 @@ Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) { int seg_cnt, target; - int nactive; if (!is_small(memstate)) return make_small(__LINE__); @@ -3009,7 +3035,6 @@ Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) while (!begin_resizing(tb)) /*spin*/; seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots); - nactive = NACTIVE(tb); done_resizing(tb); if (target == seg_cnt) @@ -3017,9 +3042,9 @@ Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) if (IS_FIXED(tb)) return make_small(__LINE__); if (target < seg_cnt) - shrink(tb, nactive); + shrink(tb, 0); else - grow(tb, nactive); + grow(tb, INT_MAX); } } @@ -3031,7 +3056,7 @@ void db_check_table_hash(DbTable *tbl) HashDbTerm* list; int j; - for (j = 0; j < tb->nactive; j++) { + for (j = 0; j < NACTIVE(tb); j++) { if ((list = BUCKET(tb,j)) != 0) { while (list != 0) { if (!is_tuple(make_tuple(list->dbterm.tpl))) { diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 11bd6aa32a..6d25c73549 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -50,17 +50,19 @@ typedef struct db_table_hash_fine_locks { typedef struct db_table_hash { DbTableCommon common; + /* SMP: szm and nactive are write-protected by is_resizing or table write lock */ + erts_smp_atomic_t szm; /* current size mask. */ + erts_smp_atomic_t nactive; /* Number of "active" slots */ + erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ struct segment* first_segtab[1]; - erts_smp_atomic_t szm; /* current size mask. */ - + /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ int nslots; /* Total number of slots */ int nsegs; /* Size of segment table */ /* List of slots where elements have been deleted while table was fixed */ erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ - erts_smp_atomic_t nactive; /* Number of "active" slots */ #ifdef ERTS_SMP erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; -- cgit v1.2.3 From 150d41a7bbd75463d388b464f9e658caebe5f1f5 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 6 Sep 2016 17:43:53 +0200 Subject: stdlib: Fix ets_SUITE:smp_select_delete no point in checking table load as select_delete does not shrink. 
--- lib/stdlib/test/ets_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index ecdac0deff..0f61ec9d40 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -5436,7 +5436,8 @@ smp_select_delete(Config) when is_list(Config) -> Eq+1 end, 0, TotCnts), - verify_table_load(T), + %% May fail as select_delete does not shrink table (enough) + %%verify_table_load(T), LeftInTab = ets:select_delete(T, [{{'$1','$1'}, [], [true]}]), 0 = ets:info(T,size), false = ets:info(T,fixed), -- cgit v1.2.3 From 938781e3fe5eb5b9ffaf77292fea9d10f92b3050 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 6 Sep 2016 17:44:17 +0200 Subject: stdlib: Cuddle ets_SUITE for valgrind --- lib/stdlib/test/ets_SUITE.erl | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index 0f61ec9d40..e726a29c9e 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -1902,7 +1902,7 @@ evil_counter(I,Opts) -> end, Start = Start0 + rand:uniform(100000), ets:insert(T, {dracula,Start}), - Iter = 40000, + Iter = 40000 div syrup_factor(), End = Start + Iter, End = evil_counter_1(Iter, T), ets:delete(T). @@ -3296,7 +3296,8 @@ evil_delete_owner(Name, Flags, Data, Fix) -> exit_large_table_owner(Config) when is_list(Config) -> %%Data = [{erlang:phash2(I, 16#ffffff),I} || I <- lists:seq(1, 500000)], - FEData = fun(Do) -> repeat_while(fun(500000) -> {false,ok}; + Laps = 500000 div syrup_factor(), + FEData = fun(Do) -> repeat_while(fun(I) when I =:= Laps -> {false,ok}; (I) -> Do({erlang:phash2(I, 16#ffffff),I}), {true, I+1} end, 1) @@ -3312,7 +3313,8 @@ exit_large_table_owner_do(Opts,{FEData,Config}) -> exit_many_large_table_owner(Config) when is_list(Config) -> ct:timetrap({minutes,30}), %% valgrind needs a lot %%Data = [{erlang:phash2(I, 16#ffffff),I} || I <- lists:seq(1, 500000)], - FEData = fun(Do) -> repeat_while(fun(500000) -> {false,ok}; + Laps = 500000 div syrup_factor(), + FEData = fun(Do) -> repeat_while(fun(I) when I =:= Laps -> {false,ok}; (I) -> Do({erlang:phash2(I, 16#ffffff),I}), {true, I+1} end, 1) @@ -4265,7 +4267,8 @@ heavy_lookup_element_do(Opts) -> Tab = ets_new(foobar_table, [set, protected, {keypos, 2} | Opts]), ok = fill_tab2(Tab, 0, 7000), %% lookup ALL elements 50 times - _ = [do_lookup_element(Tab, 6999, 1) || _ <- lists:seq(1, 50)], + Laps = 50 div syrup_factor(), + _ = [do_lookup_element(Tab, 6999, 1) || _ <- lists:seq(1, Laps)], true = ets:delete(Tab), verify_etsmem(EtsMem). @@ -4289,6 +4292,7 @@ heavy_concurrent(Config) when is_list(Config) -> do_heavy_concurrent(Opts) -> Size = 10000, + Laps = 10000 div syrup_factor(), EtsMem = etsmem(), Tab = ets_new(blupp, [set, public, {keypos, 2} | Opts]), ok = fill_tab2(Tab, 0, Size), @@ -4296,7 +4300,7 @@ do_heavy_concurrent(Opts) -> fun (N) -> my_spawn_link( fun () -> - do_heavy_concurrent_proc(Tab, Size, N) + do_heavy_concurrent_proc(Tab, Laps, N) end) end, lists:seq(1, 500)), @@ -6274,3 +6278,9 @@ do_tc(Do, Report) -> T2 = erlang:monotonic_time(), Elapsed = erlang:convert_time_unit(T2 - T1, native, milli_seconds), Report(Elapsed). + +syrup_factor() -> + case erlang:system_info(build_type) of + valgrind -> 20; + _ -> 1 + end. 
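
syrup_factor/0 above divides iteration counts by 20 when the emulator is a valgrind build, since instrumented runs are an order of magnitude slower, presumably to keep runtimes within the suite's timetraps. The same pattern expressed in C as a sketch; the environment-variable probe is a made-up stand-in for the suite's erlang:system_info(build_type) call:

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical probe for "are we running instrumented?" */
    static int syrup_factor(void)
    {
        return getenv("RUNNING_UNDER_VALGRIND") ? 20 : 1;
    }

    int main(void)
    {
        int laps = 40000 / syrup_factor(); /* cf. evil_counter's Iter above */
        int i;

        for (i = 0; i < laps; i++) {
            /* ... one lap of the workload under test ... */
        }
        printf("ran %d laps\n", laps);
        return 0;
    }
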
-- cgit v1.2.3


From 1573603177f5ba641390c67b0515defb2786ecfb Mon Sep 17 00:00:00 2001
From: Sverker Eriksson
Date: Tue, 13 Sep 2016 18:28:09 +0200
Subject: erts: Unify reduction count for ets:select to be per object, like the
 other select variants, and not per table slot.

---
 erts/emulator/beam/erl_db_hash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 698e4c301b..40df1c5356 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1494,6 +1494,7 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl,
                     match_list = CONS(hp, match_res, match_list);
                     ++got;
                 }
+                --num_left;
             }
             current = current->next;
         }
@@ -1511,7 +1512,6 @@
         }
         else { /* Key is variable */
-            --num_left;
             if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
                 slot_ix = -1;
-- cgit v1.2.3
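
The final one-line move is easy to misread in diff form: --num_left leaves the per-slot branch and joins the per-object match loop, so the yield budget of ets:select now drains per inspected object, as in the other select variants, regardless of how the objects are distributed over slots. A distilled sketch of the resulting control flow; types and bookkeeping are heavily simplified (the real function also juggles locks, match programs and continuation terms):

    #include <stdio.h>

    typedef struct hash_term {       /* simplified bucket-chain node */
        struct hash_term* next;
    } HashTerm;

    /* Returns 1 when the scan completed, 0 when the budget is spent and the
     * caller should yield, resuming later at *slot_ix. */
    static int select_chunk(HashTerm** slots, int nslots, int* slot_ix, int budget)
    {
        for (; *slot_ix < nslots; (*slot_ix)++) {
            HashTerm* p;
            for (p = slots[*slot_ix]; p != NULL; p = p->next) {
                /* ... run the match spec against p, collect the result ... */
                --budget;            /* after the patch: charged per object */
            }
            /* before the patch the charge sat here, once per slot */
            if (budget <= 0) {
                ++*slot_ix;          /* this slot is done; resume at the next */
                return 0;
            }
        }
        return 1;
    }

    int main(void)
    {
        HashTerm a = { NULL }, b = { &a }, c = { NULL };
        HashTerm* slots[2] = { &b, &c }; /* two slots, three objects in total */
        int ix = 0;

        /* a budget of 2 is exhausted by slot 0's two objects: yield at slot 1 */
        int done = select_chunk(slots, 2, &ix, 2);
        printf("done=%d resume_at=%d\n", done, ix);
        return 0;
    }
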