author    Kjell Winblad <[email protected]>  2018-09-05 21:45:57 +0200
committer Kjell Winblad <[email protected]>  2018-09-05 21:46:40 +0200
commit    2a8e00ad72f5a0a9c73d558f247c23d27d6ffd5b (patch)
tree      c5fa25cd6618c3aab76ea0676ee9ada87316c8c3 /erts/emulator/beam/erl_db.c
parent    26e03d10c6c51868640869da8b091efdeab28bb0 (diff)
download  otp-2a8e00ad72f5a0a9c73d558f247c23d27d6ffd5b.tar.gz
          otp-2a8e00ad72f5a0a9c73d558f247c23d27d6ffd5b.tar.bz2
          otp-2a8e00ad72f5a0a9c73d558f247c23d27d6ffd5b.zip
Add a more scalable ETS ordered_set implementation
The current ETS ordered_set implementation can quickly become a
scalability bottleneck on multicore machines when an application updates
an ordered_set table from concurrent processes [1][2]. The current
implementation is based on an AVL tree protected from concurrent writes
by a single readers-writer lock. Furthermore, the current implementation
has an optimization, called the stack optimization [3], that can improve
performance when only a single process accesses a table but can cause
bad scalability even in read-only scenarios.

It is possible to pass the option {write_concurrency, true} to ets:new/2
when creating an ETS table of type ordered_set, but prior to this commit
the option has no effect for tables of type ordered_set.

The new ETS ordered_set implementation, added by this commit, is only
activated when one passes the options ordered_set and
{write_concurrency, true} to the ets:new/2 function. Thus, the previous
ordered_set implementation (from here on called the default
implementation) can still be used in applications that do not benefit
from the new implementation. The benchmark results on the following web
page show that the new implementation is many times faster than the old
implementation in some scenarios and that the old implementation is
still better than the new implementation in other scenarios.

http://winsh.me/ets_catree_benchmark/ets_ca_tree_benchmark_results.html

The new implementation is expected to scale better than the default
implementation when concurrent processes use the following ETS
operations to operate on a table: delete/2, delete_object/2, first/1,
insert/2 (single object), insert_new/2 (single object), lookup/2,
lookup_element/2, member/2, next/2, take/2 and update_element/3 (single
object). Currently, the new implementation does not have scalable
support for the other operations (e.g., select/2). However, when these
operations are used infrequently, the new implementation may still
scale better than the default implementation, as the benchmark results
at the URL above show.

Description of the New Implementation
--------------------------------------

The new implementation is based on a data structure called the
contention adapting search tree (CA tree for short). The following
publication contains a detailed description of the CA tree:

  A Contention Adapting Approach to Concurrent Ordered Sets
  Journal of Parallel and Distributed Computing, 2018
  Kjell Winblad and Konstantinos Sagonas
  https://doi.org/10.1016/j.jpdc.2017.11.007
  http://www.it.uu.se/research/group/languages/software/ca_tree/catree_proofs.pdf

A discussion of how the CA tree can be used as an ETS back-end can be
found in another publication [1]. The CA tree is a data structure that
dynamically changes its synchronization granularity based on detected
contention. Internally, the CA tree uses instances of a sequential data
structure to store items. The CA tree implementation contained in this
commit uses the same AVL tree implementation as the default ordered_set
implementation. This AVL tree implementation is reused so that much of
the existing code that implements the ETS operations can be reused.

Tests
-----

The ETS tests in `lib/stdlib/test/ets_SUITE.erl` have been extended to
also test the new ordered_set implementation. The function
ets_SUITE:throughput_benchmark/0 has also been added to this file. This
function can be used to measure and compare the performance of the
different ETS table types and options. This function writes benchmark
data to standard output that can be visualized by the HTML page
`lib/stdlib/test/ets_SUITE_data/visualize_throughput.html`.

[1] More Scalable Ordered Set for ETS Using Adaptation.
    In Thirteenth ACM SIGPLAN workshop on Erlang (2014).
    Kjell Winblad and Konstantinos Sagonas.
    https://doi.org/10.1145/2633448.2633455
    http://www.it.uu.se/research/group/languages/software/ca_tree/erlang_paper.pdf

[2] On the Scalability of the Erlang Term Storage.
    In Twelfth ACM SIGPLAN workshop on Erlang (2013).
    Kjell Winblad, David Klaftenegger and Konstantinos Sagonas.
    https://doi.org/10.1145/2505305.2505308
    http://winsh.me/papers/erlang_workshop_2013.pdf

[3] The stack optimization works by keeping one preallocated stack
    instance in every ordered_set table. This stack is updated so that
    it contains the search path in some read operations (e.g.,
    ets:next/2). This makes it possible for a subsequent ets:next/2 to
    avoid traversing some nodes in some cases. Unfortunately, the
    preallocated stack needs to be flagged so that it is not updated
    concurrently by several threads, which causes bad scalability.
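As a usage illustration (not part of the commit itself), the following
minimal Erlang sketch shows how the new back-end is selected. The module
and table names are made up for this example; note that the table must
not be private for the CA tree to be used, as the ets_new_2 change in
the diff below shows.

    -module(catree_example).
    -export([demo/0]).

    %% A minimal sketch: combining ordered_set with
    %% {write_concurrency, true} selects the new CA tree back-end,
    %% while private tables keep the default AVL tree implementation.
    demo() ->
        T = ets:new(example_table,
                    [ordered_set, public, {write_concurrency, true}]),
        %% ets:info/2 still reports the type as ordered_set
        %% (see the table_info change in the diff below).
        ordered_set = ets:info(T, type),
        %% Single-object writes and point reads are among the
        %% operations the commit lists as scaling well.
        true = ets:insert(T, {1, a}),
        [{1, a}] = ets:lookup(T, 1),
        1 = ets:first(T),
        ets:delete(T).

The table behaves like any other ordered_set table from the caller's
point of view; ets_SUITE:throughput_benchmark/0 can be used to compare
this configuration against the default one.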
Diffstat (limited to 'erts/emulator/beam/erl_db.c')
-rw-r--r--  erts/emulator/beam/erl_db.c  40
1 file changed, 33 insertions, 7 deletions
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index c009a3bde8..1df972f4b6 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -90,7 +90,8 @@ enum DbIterSafety {
ITER_SAFE /* No need to fixate at all */
};
# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
+ ((IS_TREE_TABLE((Tab)->common.status) || IS_CATREE_TABLE((Tab)->common.status) \
+ || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
@@ -359,6 +360,7 @@ typedef enum {
extern DbTableMethod db_hash;
extern DbTableMethod db_tree;
+extern DbTableMethod db_catree;
int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
@@ -414,6 +416,15 @@ free_dbtable(void *vtb)
tb->common.fixations);
}
#endif
+ if (erts_atomic_read_nob(&tb->common.memory_size) > sizeof(DbTable)) {
+        /* The CA tree implementation uses delayed freeing, so the DbTable must
+           be freed after all other memory blocks allocated by the table. */
+ erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
+ (void *) tb,
+ &tb->release.data,
+ sizeof(DbTable));
+ return;
+ }
erts_rwmtx_destroy(&tb->common.rwlock);
erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
@@ -1076,7 +1087,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
UseTmpHeap(2,BIF_P);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_tuple(BIF_ARG_3)) {
@@ -1165,7 +1176,7 @@ do_update_counter(Process *p, DbTable* tb,
UseTmpHeap(5, p);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_integer(arg3)) { /* Incr */
@@ -1647,15 +1658,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
val = CAR(list_val(list));
if (val == am_bag) {
status |= DB_BAG;
- status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_duplicate_bag) {
status |= DB_DUPLICATE_BAG;
- status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_ordered_set) {
status |= DB_ORDERED_SET;
- status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_CA_ORDERED_SET);
}
else if (is_tuple(val)) {
Eterm *tp = tuple_val(val);
@@ -1716,7 +1727,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
if (is_not_nil(list)) { /* bad opt or not a well formed list */
BIF_ERROR(BIF_P, BADARG);
}
- if (IS_HASH_TABLE(status)) {
+ if (IS_TREE_TABLE(status) && is_fine_locked && !(status & DB_PRIVATE)) {
+ meth = &db_catree;
+ status |= DB_CA_ORDERED_SET;
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status |= DB_FINE_LOCKED;
+ }
+ else if (IS_HASH_TABLE(status)) {
meth = &db_hash;
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
@@ -3506,6 +3523,7 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
+ db_initialize_catree();
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
@@ -4114,6 +4132,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_duplicate_bag;
} else if (tb->common.status & DB_ORDERED_SET) {
ret = am_ordered_set;
+ } else if (tb->common.status & DB_CA_ORDERED_SET) {
+ ret = am_ordered_set;
} else { /*TT*/
ASSERT(tb->common.status & DB_BAG);
ret = am_bag;
@@ -4409,6 +4429,12 @@ void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
if(IS_HASH_TABLE(tb->common.status)) {
erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ } else if(IS_CATREE_TABLE(tb->common.status)) {
+        /* erts_lcnt_enable_db_catree_lock_count is not thread-safe, so
+           the table must be locked. */
+ db_lock(tb, LCK_WRITE);
+ erts_lcnt_enable_db_catree_lock_count(&tb->catree, enable);
+ db_unlock(tb, LCK_WRITE);
}
}
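For context, here is a small, illustrative Erlang driver (not from the
test suite; the module name, process count, and key ranges are arbitrary)
that exercises a CA-tree-backed table from several concurrent processes,
which is the workload this commit targets:

    -module(catree_stress).
    -export([run/0]).

    %% Spawn eight workers that each perform single-object inserts and
    %% point lookups on a shared CA-tree-backed ordered_set table.
    run() ->
        T = ets:new(stress_table,
                    [ordered_set, public, {write_concurrency, true}]),
        Parent = self(),
        Pids = [spawn_link(fun() -> worker(T, Seed, Parent) end)
                || Seed <- lists:seq(1, 8)],
        [receive {done, P} -> ok end || P <- Pids],
        Size = ets:info(T, size),
        ets:delete(T),
        Size.

    %% Each worker uses a disjoint key range, so the final table size
    %% is deterministic (8 * 1000 entries).
    worker(T, Seed, Parent) ->
        [begin
             Key = Seed * 1000 + I,
             true = ets:insert(T, {Key, Seed}),
             [{Key, Seed}] = ets:lookup(T, Key)
         end || I <- lists:seq(1, 1000)],
        Parent ! {done, self()}.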