File 2722-Add-write_concurrency-auto-option-to-ETS-tables.patch of Package erlang
From cb21fa43ccc325a8dcd21af64455d21a5fdaab99 Mon Sep 17 00:00:00 2001
From: Kjell Winblad <kjellwinblad@gmail.com>
Date: Tue, 14 Sep 2021 15:40:14 +0200
Subject: [PATCH 2/8] Add {write_concurrency, auto} option to ETS tables
This commit makes it possible for users to configure ETS tables with
the {write_concurrency, auto} option. With this option, a table
automatically adjusts the number of locks it uses at run time,
depending on how much concurrency is detected.
Benchmark results comparing this option with the other ETS
optimization options are available here:
http://winsh.me/bench/ets_config_locks/ets_bench_result_lock_config.html
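For illustration, a minimal usage sketch (not part of the patch; the
table names are made up), mirroring the cases added to ets_SUITE below:

    %% The emulator adapts the number of locks at run time;
    %% ets:info/2 reports the configured alternative.
    T = ets:new(example_tab, [set, public, {write_concurrency, auto}]),
    auto = ets:info(T, write_concurrency),

    %% Private tables never use fine-grained locking, so the option
    %% has no effect and ets:info/2 reports false, just as for the
    %% other write_concurrency alternatives.
    P = ets:new(example_priv, [set, private, {write_concurrency, auto}]),
    false = ets:info(P, write_concurrency).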
---
erts/emulator/beam/atom.names | 3 +-
erts/emulator/beam/erl_db.c | 19 ++-
erts/emulator/beam/erl_db_hash.c | 228 ++++++++++++++++++++++++++++---
erts/emulator/beam/erl_db_hash.h | 11 +-
erts/emulator/beam/erl_db_util.h | 1 +
lib/stdlib/doc/src/ets.xml | 77 ++++++-----
lib/stdlib/src/ets.erl | 2 +-
lib/stdlib/test/ets_SUITE.erl | 25 ++--
8 files changed, 302 insertions(+), 64 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 55b662abc9..8715c0c440 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -733,4 +733,5 @@ atom xor
atom x86
atom yes
atom yield
-atom nifs
\ No newline at end of file
+atom nifs
+atom auto
\ No newline at end of file
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 061ccd5038..3826e2189c 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -815,6 +815,7 @@ DbTable* db_get_table_aux(Process *p,
return tb;
}
+ erl_db_hash_adapt_no_locks(tb);
db_lock(tb, kind);
if (name_lck)
erts_rwmtx_runlock(name_lck);
@@ -2258,6 +2259,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
int is_decentralized_counters;
int is_decentralized_counters_option;
int is_explicit_lock_granularity;
+ int is_write_concurrency_auto;
int cret;
DbTableMethod* meth;
@@ -2280,6 +2282,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_compressed = erts_ets_always_compress;
no_locks = -1;
is_explicit_lock_granularity = 0;
+ is_write_concurrency_auto = 0;
list = BIF_ARG_2;
while(is_list(list)) {
@@ -2312,14 +2315,22 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
no_locks_param <= 32768) {
is_fine_locked = 1;
is_explicit_lock_granularity = 1;
+ is_write_concurrency_auto = 0;
no_locks = no_locks_param;
+ } else if (tp[2] == am_auto) {
+ is_write_concurrency_auto = 1;
+ is_fine_locked = 1;
+ is_explicit_lock_granularity = 0;
+ no_locks = -1;
} else if (tp[2] == am_true) {
is_fine_locked = 1;
is_explicit_lock_granularity = 0;
+ is_write_concurrency_auto = 0;
no_locks = -1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
is_explicit_lock_granularity = 0;
+ is_write_concurrency_auto = 0;
no_locks = -1;
} else break;
if (DB_LOCK_FREE(NULL))
@@ -2391,6 +2402,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status |= DB_FINE_LOCKED;
if (is_explicit_lock_granularity) {
status |= DB_EXPLICIT_LOCK_GRANULARITY;
+ } else if (is_write_concurrency_auto) {
+ status |= DB_FINE_LOCKED_AUTO;
}
} else {
no_locks = -1;
@@ -2445,7 +2458,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
#endif
if (IS_HASH_TABLE(status)) {
- DbTableHash* hash_db = (DbTableHash*) tb;
+ DbTableHash* hash_db = (DbTableHash*) tb;
hash_db->nlocks = no_locks;
}
cret = meth->db_create(BIF_P, tb);
@@ -5053,6 +5066,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
(tb->common.status & (DB_SET | DB_BAG | DB_DUPLICATE_BAG)) &&
(tb->common.status & DB_EXPLICIT_LOCK_GRANULARITY)) {
ret = erts_make_integer(tb->hash.nlocks, p);
+ } else if ((tb->common.status & DB_FINE_LOCKED) &&
+ (tb->common.status & (DB_SET | DB_BAG | DB_DUPLICATE_BAG)) &&
+ (tb->common.status & DB_FINE_LOCKED_AUTO)) {
+ ret = am_auto;
} else {
ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
}
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index e21c833d78..56e5729867 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -270,6 +270,140 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
# define GET_LOCK_AND_CTR(tb,hval) (&(tb)->locks[(hval) & GET_LOCK_MASK(tb->nlocks)].u.lck_ctr)
# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
+# define LCK_AUTO_CONTENDED_STAT_CONTRIB 100
+# define LCK_AUTO_UNCONTENDED_STAT_CONTRIB -1
+# define LCK_AUTO_GROW_LIMIT 1000
+# define LCK_AUTO_SHRINK_LIMIT -10000000
+# define LCK_AUTO_MAX_LOCKS 8192
+# define LCK_AUTO_MIN_LOCKS 4
+# define LCK_AUTO_DEFAULT_NO_LOCKS 64
+# define LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS 128
+
+static Sint get_lock_nitems_from_prev_lock_array(int index,
+ int new_no_locks,
+ int old_no_locks,
+ DbTableHashFineLockSlot* old_locks) {
+ if (new_no_locks > old_no_locks) {
+ Sint nitems = 0;
+ Sint in_source = old_locks[index % old_no_locks].u.lck_ctr.nitems;
+ nitems += in_source / 2;
+ if (index >= old_no_locks) {
+ nitems += in_source % 2;
+ }
+ return nitems;
+ } else {
+ Sint in_source_1 = old_locks[index].u.lck_ctr.nitems;
+ Sint in_source_2 = old_locks[index + new_no_locks].u.lck_ctr.nitems;
+ return in_source_1 + in_source_2;
+ }
+
+}
+
+static void calc_shrink_limit(DbTableHash* tb);
+
+void erl_db_hash_adapt_no_locks(DbTable* tb) {
+ db_hash_lock_array_resize_state current_state;
+ DbTableHash* tbl;
+ int new_no_locks;
+ if(!(tb->common.type & DB_FINE_LOCKED_AUTO)) {
+ return;
+ }
+ current_state = erts_atomic_read_nob(&tb->hash.lock_array_resize_state);
+ if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL) {
+ return;
+ }
+ tbl = &tb->hash;
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ current_state = erts_atomic_read_nob(&tb->hash.lock_array_resize_state);
+ if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL) {
+ /* Another thread did the lock array resize job before us */
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ return;
+ }
+ if (IS_FIXED(tb)) {
+ /*
+ Do not do any adaptation if the table is fixed as this can
+ lead to missed slots when traversing over the table.
+
+ The lock statistics are kept as it is likely that we want to
+ adapt when the table is not fixed any more.
+ */
+ erts_atomic_set_nob(&tbl->lock_array_resize_state,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ return;
+ }
+ if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW &&
+ erts_atomic_read_nob(&tbl->nactive) >= (2*tbl->nlocks)) {
+ new_no_locks = 2*tbl->nlocks;
+ } else if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK) {
+ new_no_locks = tbl->nlocks / 2;
+ } else {
+ /*
+ Do not do any adaptation if the number of active buckets is
+ smaller than the resulting number of locks.
+
+ We do not want to make the table unnecessarily large just to
+ potentially reduce contention.
+ */
+ int i;
+ for (i = 0; i < tbl->nlocks; i++) {
+ tbl->locks[i].u.lck_ctr.lck_stat = 0;
+ }
+ erts_atomic_set_nob(&tbl->lock_array_resize_state,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ return;
+ }
+ {
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ int i;
+ DbTableHashFineLockSlot* old_locks = tbl->locks;
+ Uint old_no_locks = tbl->nlocks;
+ ASSERT(new_no_locks != 0);
+ tbl->nlocks = new_no_locks;
+ if (tb->common.type & DB_FREQ_READ &&
+ new_no_locks <= LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS) {
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ }
+ if (erts_ets_rwmtx_spin_count >= 0) {
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+ }
+ tbl->locks = (DbTableHashFineLockSlot*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ sizeof(DbTableHashFineLockSlot) * tbl->nlocks);
+ for (i=0; i < tbl->nlocks; i++) {
+ erts_rwmtx_init_opt(&tbl->locks[i].u.lck_ctr.lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ tbl->locks[i].u.lck_ctr.nitems =
+ get_lock_nitems_from_prev_lock_array(i, tbl->nlocks, old_no_locks, old_locks);
+ tbl->locks[i].u.lck_ctr.lck_stat = 0;
+ }
+/* #define HARD_DEBUG_ITEM_CNT_LOCK_CHANGE 1 */
+#ifdef HARD_DEBUG_ITEM_CNT_LOCK_CHANGE
+ {
+ Sint total_old = 0;
+ Sint total_new = 0;
+ int i;
+ for (i=0; i < old_no_locks; i++) {
+ total_old += old_locks[i].u.lck_ctr.nitems;
+ }
+ for (i=0; i < tbl->nlocks; i++) {
+ total_new += tbl->locks[i].u.lck_ctr.nitems;
+ }
+ /* erts_printf("%ld %ld %d\n", total_new, total_old, tbl->nlocks); */
+ ERTS_ASSERT(total_new == total_old);
+ }
+#endif
+ erts_db_free(ERTS_ALC_T_DB_SEG, tb, old_locks, sizeof(DbTableHashFineLockSlot) * old_no_locks);
+
+ calc_shrink_limit(tbl);
+
+ erts_atomic_set_nob(&tbl->lock_array_resize_state, DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ }
+}
+
/* Fine grained read lock */
static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
{
@@ -282,31 +416,81 @@ static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
return lck;
}
}
+
/* Fine grained write lock */
-static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE
+DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_rwmtx_rwlock(lck);
- return lck;
+ if (tb->common.type & DB_FINE_LOCKED_AUTO) {
+ DbTableHashLockAndCounter* lck_counter = GET_LOCK_AND_CTR(tb, hval);
+ if (EBUSY == erts_rwmtx_tryrwlock(&lck_counter->lck)) {
+ erts_rwmtx_rwlock(&lck_counter->lck);
+ lck_counter->lck_stat += LCK_AUTO_CONTENDED_STAT_CONTRIB;
+ if (lck_counter->lck_stat > LCK_AUTO_GROW_LIMIT) {
+ if (tb->nlocks < LCK_AUTO_MAX_LOCKS &&
+ !IS_FIXED(tb) &&
+ (DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL ==
+ erts_atomic_read_nob(&tb->lock_array_resize_state))) {
+ /*
+ Trigger lock array increase later when we
+ can take the table lock
+ */
+ erts_atomic_set_nob(&tb->lock_array_resize_state,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW);
+ } else if (!IS_FIXED(tb)) {
+ /*
+ Do not do any adaptation if the table is
+ fixed as this can lead to missed slots when
+ traversing over the table.
+
+ The lock statistics are kept if the table is
+ fixed as it is likely that we want to adapt
+ when the table is not fixed any more.
+ */
+ lck_counter->lck_stat = 0;
+ }
+ }
+ } else {
+ lck_counter->lck_stat += LCK_AUTO_UNCONTENDED_STAT_CONTRIB;
+ if (lck_counter->lck_stat < LCK_AUTO_SHRINK_LIMIT) {
+ if (tb->nlocks > LCK_AUTO_MIN_LOCKS &&
+ !IS_FIXED(tb) &&
+ (DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL ==
+ erts_atomic_read_nob(&tb->lock_array_resize_state))) {
+ /*
+ Trigger lock array decrease later when we
+ can take the table lock
+ */
+ erts_atomic_set_nob(&tb->lock_array_resize_state,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK);
+ } else if (!IS_FIXED(tb)) {
+ lck_counter->lck_stat = 0;
+ }
+ }
+ }
+ return lck_counter;
+ } else {
+ DbTableHashLockAndCounter* lck_ctr = GET_LOCK_AND_CTR(tb,hval);
+ ASSERT(tb->common.type & DB_FINE_LOCKED);
+ erts_rwmtx_rwlock(&lck_ctr->lck);
+ return lck_ctr;
+ }
}
}
/* Fine grained write lock */
-static ERTS_INLINE
-DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
{
- if (tb->common.is_thread_safe) {
- return NULL;
- } else {
- DbTableHashLockAndCounter* lck_ctr = GET_LOCK_AND_CTR(tb,hval);
- ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_rwmtx_rwlock(&lck_ctr->lck);
- return lck_ctr;
+ DbTableHashLockAndCounter* lck_cntr =
+ WLOCK_HASH_GET_LCK_AND_CTR(tb, hval);
+ if (lck_cntr == NULL) {
+ return NULL;
}
+ return &lck_cntr->lck;
}
static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck)
@@ -808,6 +992,8 @@ int db_create_hash(Process *p, DbTable *tbl)
sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
erts_atomic_init_nob(&tb->is_resizing, 0);
+ erts_atomic_init_nob(&tb->lock_array_resize_state,
+ (erts_aint_t)DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
if (tb->nlocks == -1 || !(tb->common.type & DB_FINE_LOCKED)) {
/*
The number of locks needs to be set even if fine grained
@@ -820,6 +1006,9 @@ int db_create_hash(Process *p, DbTable *tbl)
if (tb->common.type & DB_FINE_LOCKED) {
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FINE_LOCKED_AUTO) {
+ tb->nlocks = LCK_AUTO_DEFAULT_NO_LOCKS;
+ }
/*
nlocks needs to be a power of two so we round down to
nearest power of two
@@ -844,13 +1033,14 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_rwmtx_init_opt(&tb->locks[i].u.lck_ctr.lck, &rwmtx_opt,
"db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->locks[i].u.lck_ctr.nitems = 0;
+ tb->locks[i].u.lck_ctr.lck_stat = 0;
}
- /*
- * These important properties is needed to guarantee the two
- * buckets involved in a grow/shrink operation it protected by
- * the same lock:
- */
- ASSERT((erts_atomic_read_nob(&tb->szm) + 1) % tb->nlocks == 0);
+ /*
+ * These properties are needed to guarantee that the buckets
+ * involved in a grow/shrink operation are protected by the
+ * same lock:
+ */
+ ASSERT((erts_atomic_read_nob(&tb->szm) + 1) % tb->nlocks == 0);
ASSERT(tb->nlocks <= erts_atomic_read_nob(&tb->nactive));
ASSERT(erts_atomic_read_nob(&tb->nactive) <= tb->nslots);
ASSERT(tb->nslots <= (erts_atomic_read_nob(&tb->szm) + 1));
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 0ec781b45d..4154a2aa2b 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -55,6 +55,7 @@ typedef struct hash_db_term {
typedef struct DbTableHashLockAndCounter {
Sint nitems;
+ Sint lck_stat;
erts_rwmtx_t lck;
} DbTableHashLockAndCounter;
@@ -67,7 +68,7 @@ typedef struct db_table_hash_fine_lock_slot {
typedef struct db_table_hash {
DbTableCommon common;
-
+ erts_atomic_t lock_array_resize_state;
/* szm, nactive, shrink_limit are write-protected by is_resizing or table write lock */
erts_atomic_t szm; /* current size mask. */
erts_atomic_t nactive; /* Number of "active" slots */
@@ -87,6 +88,14 @@ typedef struct db_table_hash {
DbTableHashFineLockSlot* locks;
} DbTableHash;
+typedef enum {
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL = 0,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW = 1,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK = 2
+} db_hash_lock_array_resize_state;
+
+/* Adapt the number of locks for hash tables with {write_concurrency, auto} */
+void erl_db_hash_adapt_no_locks(DbTable* tb);
/*
** Function prototypes, looks the same (except the suffix) for all
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 325cedc24a..1fdb0275c6 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -346,6 +346,7 @@ typedef struct db_table_common {
#define DB_NAMED_TABLE (1 << 11)
#define DB_BUSY (1 << 12)
#define DB_EXPLICIT_LOCK_GRANULARITY (1 << 13)
+#define DB_FINE_LOCKED_AUTO (1 << 14)
#define DB_CATREE_FORCE_SPLIT (1 << 31) /* erts_debug */
#define DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN (1 << 30) /* erts_debug */
diff --git a/lib/stdlib/doc/src/ets.xml b/lib/stdlib/doc/src/ets.xml
index e1a051ba19..e68cf23517 100644
--- a/lib/stdlib/doc/src/ets.xml
+++ b/lib/stdlib/doc/src/ets.xml
@@ -1269,29 +1269,34 @@ ets:select(Table, MatchSpec),</code>
(and read) by concurrent processes. This is achieved to some
degree at the expense of memory consumption and the performance
of sequential access and concurrent reading.</p>
- <p>Users can explicitly control the synchronization
+ <p>The <c>auto</c> alternative for the
+ <c>write_concurrency</c> option is similar to the
+ <c>true</c> option but automatically adjusts the
+ synchronization granularity at runtime depending on how the
+ table is used. This is the recommended
+ <c>write_concurrency</c> option when using Erlang/OTP 25
+ and above as it performs well in most scenarios.</p>
+ <p>Users can also explicitly set the synchronization
granularity for tables of types <c>set</c>, <c>bag</c>,
and <c>duplicate_bag</c> by setting the
<c>write_concurrency</c> option to an integer in the range
- <c>[2, 32768]</c>. Currently, setting the
- <c>write_concurrency</c> option to a number for
- <c>ordered_set</c> tables has the same effect as setting
- its value to true. Explicitly setting the synchronization
- granularity is not recommended unless it is motivated and
- possible to do experimentation to figure out which
- synchronization granularity gives the best performance.
- Currently, the number
- automatically rounds down to the nearest power of two. A
- high number for this setting should usually not be
- combined with the <c>{read_concurrency, true}</c> setting
- as this usally lead to worse performance and high memory
- utilization. It is usually not a good idea to set this
- setting to a number that is much greater than the number
- of expected items in the table, as this can lead to slow
- table traversals. The effect of this setting might change
- in future versions of Erlang/OTP. If you are unsure what
- number to set this setting to, it is probably best to use
- {write_concurrency, true} instead.</p>
+ <c>[2, 32768]</c>. Currently, the number automatically
+ rounds down to the nearest power of two. The likelihood of
+ conflicts between operations decreases as the value
+ increases. Explicitly setting the synchronization granularity
+ is not recommended unless it is possible to
+ do experimentation to figure out which synchronization
+ granularity gives the best performance. If you are unsure
+ what number to set this setting to, it is probably best to
+ use <c>{write_concurrency, auto}</c> instead. One should be
+ careful about combining the <c>{read_concurrency, true}</c>
+ setting with explicitly setting the synchronization
+ granularity as this can lead to worse performance and high
+ memory utilization. It is usually not a good idea to set
+ this setting to a number that is much greater than the
+ number of expected items in the table, as this can lead to
+ slow table traversals. The effect of this setting might
+ change in future versions of Erlang/OTP.</p>
<p>The <c>write_concurrency</c> option can be combined with the options
<seeerl marker="#new_2_read_concurrency">
<c>read_concurrency</c></seeerl> and
@@ -1301,12 +1306,14 @@ ets:select(Table, MatchSpec),</code>
concurrent read bursts and large concurrent
write bursts are common; for more information, see option
<seeerl marker="#new_2_read_concurrency">
- <c>read_concurrency</c></seeerl>. The <c>decentralized_counters</c>
- option is turned on by default for tables of type <c>ordered_set</c>
- with the <c>write_concurrency</c> option enabled, and the
- <c>decentralized_counters</c> option is turned <em>off</em> by default for
- all other table types.
- For more information, see the documentation for the
+ <c>read_concurrency</c></seeerl>. The
+ <c>decentralized_counters</c> option is turned on by
+ default for tables of type <c>ordered_set</c> when any
+ <c>write_concurrency</c> option except
+ <c>false</c> is enabled, and the
+ <c>decentralized_counters</c> option is turned
+ <em>off</em> by default for all other table types. For
+ more information, see the documentation for the
<seeerl marker="#new_2_decentralized_counters">
<c>decentralized_counters</c></seeerl> option.</p>
<p>Notice that this option does not change any guarantees about
@@ -1314,13 +1321,17 @@ ets:select(Table, MatchSpec),</code>
Functions that makes such promises over many objects (like
<seemfa marker="#insert/2"><c>insert/2</c></seemfa>)
gain less (or nothing) from this option.</p>
- <p>The memory consumption inflicted by both <c>write_concurrency</c>
- and <c>read_concurrency</c> is a constant overhead per table for
- <c>set</c>, <c>bag</c> and <c>duplicate_bag</c>. For
- <c>ordered_set</c> the memory overhead depends on the number
- of inserted objects and the amount of actual detected
- concurrency in runtime. The memory overhead can be especially
- large when both options are combined.</p>
+ <p>The memory consumption inflicted by both
+ <c>write_concurrency</c> and <c>read_concurrency</c> is a
+ constant overhead per table for <c>set</c>, <c>bag</c> and
+ <c>duplicate_bag</c> when the <c>auto</c> alternative for
+ the <c>write_concurrency</c> option is not used. For
+ <c>ordered_set</c> as well as <c>set</c>, <c>bag</c> and
+ <c>duplicate_bag</c> with the <c>auto</c> alternative the
+ memory overhead depends on the number of inserted objects
+ and the amount of actual detected concurrency during
+ runtime. The memory overhead can be especially large when
+ both options are combined.</p>
<note>
<p>Prior to stdlib-3.7 (OTP-22.0) <c>write_concurrency</c> had no
effect on <c>ordered_set</c>.</p>
diff --git a/lib/stdlib/src/ets.erl b/lib/stdlib/src/ets.erl
index 185081f326..e25289d80f 100644
--- a/lib/stdlib/src/ets.erl
+++ b/lib/stdlib/src/ets.erl
@@ -309,7 +309,7 @@ member(_, _) ->
| {heir, Pid :: pid(), HeirData} | {heir, none} | Tweaks,
Type :: type(),
Access :: access(),
- WriteConcurrencyAlternative :: boolean() | 2..32768,
+ WriteConcurrencyAlternative :: boolean() | auto | 2..32768,
Tweaks :: {write_concurrency, WriteConcurrencyAlternative}
| {read_concurrency, boolean()}
| {decentralized_counters, boolean()}
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index bcf1ca5221..86f1dff196 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -131,7 +131,7 @@ end_per_testcase(_Func, _Config) ->
suite() ->
[{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,5}}].
+ {timetrap,{minutes,30}}].
all() ->
[{group, new}, {group, insert}, {group, lookup},
@@ -173,8 +173,8 @@ all() ->
take,
whereis_table,
delete_unfix_race,
- %test_throughput_benchmark,
- %{group, benchmark},
+ test_throughput_benchmark,
+ {group, benchmark},
test_table_size_concurrency,
test_table_memory_concurrency,
test_delete_table_while_size_snapshot,
@@ -4899,8 +4899,12 @@ info(Config) when is_list(Config) ->
true = ets:info(T3, write_concurrency),
T4 = ets:new(t4, [private, {write_concurrency, 1024}]),
false = ets:info(T4, write_concurrency),
- T5 = ets:new(t5, [private, {write_concurrency, true}]),
- false = ets:info(T5, write_concurrency),
+ T5 = ets:new(t5, [public, {write_concurrency, auto}]),
+ auto = ets:info(T5, write_concurrency),
+ T6 = ets:new(t6, [private, {write_concurrency, true}]),
+ false = ets:info(T6, write_concurrency),
+ T7 = ets:new(t7, [private, {write_concurrency, auto}]),
+ false = ets:info(T7, write_concurrency),
ok.
info_do(Opts) ->
@@ -7684,7 +7688,9 @@ prefill_insert_map_loop(T, RS0, N, ObjFun, InsertMap, NrOfSchedulers) ->
[set, public],
[set, public, {write_concurrency, true}],
[set, public, {read_concurrency, true}],
- [set, public, {write_concurrency, true}, {read_concurrency, true}]
+ [set, public, {write_concurrency, true}, {read_concurrency, true}],
+ [set, public, {write_concurrency, auto}, {read_concurrency, true}],
+ [set, public, {write_concurrency, 16384}]
],
etsmem_fun = fun() -> ok end,
verify_etsmem_fun = fun(_) -> true end,
@@ -8051,7 +8057,9 @@ long_throughput_benchmark(Config) when is_list(Config) ->
table_types =
[
[ordered_set, public, {write_concurrency, true}, {read_concurrency, true}],
- [set, public, {write_concurrency, true}, {read_concurrency, true}]
+ [set, public, {write_concurrency, true}, {read_concurrency, true}],
+ [set, public, {write_concurrency, auto}, {read_concurrency, true}],
+ [set, public, {write_concurrency, 16384}]
],
etsmem_fun = fun etsmem/0,
verify_etsmem_fun = fun verify_etsmem/1,
@@ -9283,7 +9291,8 @@ repeat_for_opts_atom2list(all_non_stim_set_types) -> [set,ordered_set,cat_ord_se
repeat_for_opts_atom2list(write_concurrency) -> [{write_concurrency,false},
{write_concurrency,true},
{write_concurrency,2},
- {write_concurrency,2048}];
+ {write_concurrency,2048},
+ {write_concurrency,auto}];
repeat_for_opts_atom2list(read_concurrency) -> [{read_concurrency,false},{read_concurrency,true}];
repeat_for_opts_atom2list(compressed) -> [void,compressed].
--
2.31.1