author    | Sverker Eriksson <[email protected]> | 2018-11-16 13:29:45 +0100
committer | Sverker Eriksson <[email protected]> | 2018-11-16 19:18:36 +0100
commit    | aff44bcc16aa6aaf3c7efcef907ca25cbdb4ec8c (patch)
tree      | e83b03c726fe6e70b6f6454579fde7b7e7317980 /erts
parent    | d02702d38264927587bc81988c6f2370a8f9a6ed (diff)
erts: Refactor erl_db_catree.c
by moving some code and removing obsolete comments.
Diffstat (limited to 'erts')
-rw-r--r-- | erts/emulator/beam/erl_db_catree.c | 81
1 file changed, 32 insertions, 49 deletions
diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c
index 639c7e5e03..153a720f07 100644
--- a/erts/emulator/beam/erl_db_catree.c
+++ b/erts/emulator/beam/erl_db_catree.c
@@ -48,24 +48,8 @@
  * activated when the options {write_concurrency, true}, public and
  * ordered_set are passed to the ets:new/2 function. This
  * implementation is expected to scale better than the default
- * implementation (located in "erl_db_tree.c") when concurrent
- * processes use the following ETS operations to operate on a table:
+ * implementation located in "erl_db_tree.c".
  *
- * delete/2, delete_object/2, first/1, insert/2 (single object),
- * insert_new/2 (single object), lookup/2, lookup_element/2, member/2,
- * next/2, take/2 and update_element/3 (single object).
- *
- * Currently, the implementation does not have scalable support for
- * the other operations (e.g., select/2). These operations are handled
- * by merging all locks so that all terms get protected by a single
- * lock. This implementation may thus perform worse than the default
- * implementation in some scenarios. For example, when concurrent
- * processes access a table with the operations insert/2, delete/2 and
- * select/2, the insert/2 and delete/2 operations will trigger splits
- * of locks (to get more fine-grained synchronization) but this will
- * quickly be undone by the select/2 operation if this operation is
- * also called frequently.
- *
  * The default implementation has a static stack optimization (see
  * get_static_stack in erl_db_tree.c). This implementation does not
  * have such an optimization as it induces bad scalability when
@@ -232,7 +216,7 @@ DbTableMethod db_catree =
 /* Helpers for reading and writing shared atomic variables */
 
 /* No memory barrier */
-#define GET_ROOT(tb) ((DbTableCATreeNode*)erts_atomic_read_nob(&(tb->root)))
+#define GET_ROOT(tb) ((DbTableCATreeNode*)erts_atomic_read_nob(&((tb)->root)))
 #define GET_LEFT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.left)))
 #define GET_RIGHT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.right)))
 #define SET_ROOT(tb, v) erts_atomic_set_nob(&((tb)->root), (erts_aint_t)(v))
@@ -241,7 +225,7 @@ DbTableMethod db_catree =
 
 
 /* Release or acquire barriers */
-#define GET_ROOT_ACQB(tb) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(tb->root)))
+#define GET_ROOT_ACQB(tb) ((DbTableCATreeNode*)erts_atomic_read_acqb(&((tb)->root)))
 #define GET_LEFT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.left)))
 #define GET_RIGHT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.right)))
 #define SET_ROOT_RELB(tb, v) erts_atomic_set_relb(&((tb)->root), (erts_aint_t)(v))
@@ -751,6 +735,35 @@ void unlock_route_node(DbTableCATreeNode *route_node)
 }
 
 static ERTS_INLINE
+Eterm copy_route_key(DbRouteKey* dst, Eterm key, Uint key_size)
+{
+    dst->size = key_size;
+    if (key_size != 0) {
+        Eterm* hp = &dst->heap[0];
+        ErlOffHeap tmp_offheap;
+        tmp_offheap.first = NULL;
+        dst->term = copy_struct(key, key_size, &hp, &tmp_offheap);
+        dst->oh = tmp_offheap.first;
+    }
+    else {
+        ASSERT(is_immed(key));
+        dst->term = key;
+        dst->oh = NULL;
+    }
+    return dst->term;
+}
+
+static ERTS_INLINE
+void destroy_route_key(DbRouteKey* key)
+{
+    if (key->oh) {
+        ErlOffHeap oh;
+        oh.first = key->oh;
+        erts_cleanup_offheap(&oh);
+    }
+}
+
+static ERTS_INLINE
 void init_root_iterator(DbTableCATree* tb,
                         CATreeRootIterator* iter, int read_only)
 {
@@ -863,36 +876,6 @@ DbTableCATreeNode* find_wlock_valid_base_node(DbTableCATree* tb, Eterm key,
     return base_node;
 }
 
-static ERTS_INLINE
-Eterm copy_route_key(DbRouteKey* dst, Eterm key, Uint key_size)
-{
-    dst->size = key_size;
-    if (key_size != 0) {
-        Eterm* hp = &dst->heap[0];
-        ErlOffHeap tmp_offheap;
-        tmp_offheap.first = NULL;
-        dst->term = copy_struct(key, key_size, &hp, &tmp_offheap);
-        dst->oh = tmp_offheap.first;
-    }
-    else {
-        ASSERT(is_immed(key));
-        dst->term = key;
-        dst->oh = NULL;
-    }
-    return dst->term;
-}
-
-static ERTS_INLINE
-void destroy_route_key(DbRouteKey* key)
-{
-    if (key->oh) {
-        ErlOffHeap oh;
-        oh.first = key->oh;
-        erts_cleanup_offheap(&oh);
-    }
-}
-
-
 #ifdef ERTS_ENABLE_LOCK_CHECK
 # define LC_ORDER(ORDER) ORDER
 #else
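
As the header comment retained in the first hunk notes, this CA tree implementation is only selected for tables created as ordered_set, public with {write_concurrency, true} passed to ets:new/2. A minimal Erlang sketch of creating and using such a table (the table name and sample objects below are illustrative only, not taken from the commit):

    %% Selects the CA tree backed ordered_set implementation refactored above;
    %% without {write_concurrency, true} the default implementation in
    %% erl_db_tree.c is used instead.
    Tab = ets:new(catree_demo, [ordered_set, public, {write_concurrency, true}]),
    true = ets:insert(Tab, {1, first_value}),
    [{1, first_value}] = ets:lookup(Tab, 1).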