File 3404-erts-Micro-optimize-ets-lock-adaption.patch of Package erlang
From 7751899402583c8b358d163466d2a4d7deacd8e6 Mon Sep 17 00:00:00 2001
From: Sverker Eriksson <sverker@erlang.org>
Date: Thu, 3 Feb 2022 18:05:16 +0100
Subject: [PATCH 4/4] erts: Micro-optimize ets lock adaption
---
erts/emulator/beam/erl_db.c | 2 +-
erts/emulator/beam/erl_db_hash.c | 89 ++++++++++++++++++--------------
erts/emulator/beam/erl_db_hash.h | 11 +++-
3 files changed, 61 insertions(+), 41 deletions(-)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 861cc0d917..a070e0876f 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -818,7 +818,7 @@ DbTable* db_get_table_aux(Process *p,
return tb;
}
- erl_db_hash_adapt_number_of_locks(tb);
+ DB_HASH_ADAPT_NUMBER_OF_LOCKS(tb);
db_lock(tb, kind);
if (name_lck)
erts_rwmtx_runlock(name_lck);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index db8af4a8d3..e625c95313 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -291,17 +291,13 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
static void calc_shrink_limit(DbTableHash* tb);
-void erl_db_hash_adapt_number_of_locks(DbTable* tb) {
+void db_hash_adapt_number_of_locks(DbTable* tb) {
db_hash_lock_array_resize_state current_state;
DbTableHash* tbl;
int new_number_of_locks;
- if(!IS_HASH_WITH_AUTO_TABLE(tb->common.type)) {
- return;
- }
- current_state = erts_atomic_read_nob(&tb->hash.lock_array_resize_state);
- if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL) {
- return;
- }
+
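+ /*
+ * The cheap precondition checks are done by the caller via the
+ * DB_HASH_ADAPT_NUMBER_OF_LOCKS macro (erl_db_hash.h); we only get
+ * here for {write_concurrency,auto} tables with a requested resize.
+ */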
+ ASSERT(IS_HASH_WITH_AUTO_TABLE(tb->common.type));
+
tbl = &tb->hash;
erts_rwmtx_rwlock(&tb->common.rwlock);
current_state = erts_atomic_read_nob(&tb->hash.lock_array_resize_state);
@@ -414,6 +410,40 @@ static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
}
}
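+/*
+ * Slow path for WLOCK_HASH_GET_LCK_AND_CTR() when the trylock on a
+ * bucket lock returned EBUSY: block on the lock and record the
+ * contention. When 'lck_stat' passes LCK_AUTO_GROW_LIMIT we only
+ * request a grow here by setting the resize state; the actual lock
+ * array resize is done by db_hash_adapt_number_of_locks() once the
+ * table lock can be taken (see DB_HASH_ADAPT_NUMBER_OF_LOCKS).
+ */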
+static void
+wlock_after_failed_trylock(DbTableHash* tb, DbTableHashLockAndCounter* lock)
+{
+ erts_rwmtx_rwlock(&lock->lck);
+ lock->lck_stat += LCK_AUTO_CONTENDED_STAT_CONTRIB;
+ if (lock->lck_stat > LCK_AUTO_GROW_LIMIT) {
+ /*
+ * Do not do any adaptation if the table is
+ * fixed, as this can lead to missed slots when
+ * traversing the table.
+ */
+ if (!IS_FIXED(tb)) {
+ if (tb->nlocks < LCK_AUTO_MAX_LOCKS &&
+ (DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL ==
+ erts_atomic_read_nob(&tb->lock_array_resize_state))) {
+ /*
+ * Trigger lock array increase later when we
+ * can take the table lock
+ */
+ erts_atomic_set_nob(&tb->lock_array_resize_state,
+ DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW);
+ }
+ else {
+ /*
+ * The lock statistics are kept if the table is
+ * fixed, as it is likely that we want to adapt
+ * when the table is no longer fixed.
+ */
+ lock->lck_stat = 0;
+ }
+ }
+ }
+}
+
/* Fine grained write lock */
static ERTS_INLINE
DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue hval)
@@ -425,37 +455,12 @@ DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue
if (tb->common.type & DB_FINE_LOCKED_AUTO) {
DbTableHashLockAndCounter* lck_couter = GET_LOCK_AND_CTR(tb, hval);
if (EBUSY == erts_rwmtx_tryrwlock(&lck_couter->lck)) {
- erts_rwmtx_rwlock(&lck_couter->lck);
- lck_couter->lck_stat += LCK_AUTO_CONTENDED_STAT_CONTRIB;
- if (lck_couter->lck_stat > LCK_AUTO_GROW_LIMIT) {
- if (tb->nlocks < LCK_AUTO_MAX_LOCKS &&
- !IS_FIXED(tb) &&
- (DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL ==
- erts_atomic_read_nob(&tb->lock_array_resize_state))) {
- /*
- Trigger lock array increase later when we
- can take the table lock
- */
- erts_atomic_set_nob(&tb->lock_array_resize_state,
- DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW);
- } else if (!IS_FIXED(tb)) {
- /*
- Do not do any adaptation if the table is
- fixed as this can lead to missed slots when
- traversing over the table.
-
- The lock statistics is kept if the table is
- fixed as it is likely that we want to adapt
- when the table is not fixed any more.
- */
- lck_couter->lck_stat = 0;
- }
- }
+ wlock_after_failed_trylock(tb, lck_couter);
} else {
lck_couter->lck_stat += LCK_AUTO_UNCONTENDED_STAT_CONTRIB;
- if (lck_couter->lck_stat < LCK_AUTO_SHRINK_LIMIT) {
+ if (lck_couter->lck_stat < LCK_AUTO_SHRINK_LIMIT
+ && !IS_FIXED(tb)) {
if(tb->nlocks > LCK_AUTO_MIN_LOCKS &&
- !IS_FIXED(tb) &&
(DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL ==
erts_atomic_read_nob(&tb->lock_array_resize_state))) {
/*
@@ -464,7 +469,7 @@ DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue
*/
erts_atomic_set_nob(&tb->lock_array_resize_state,
DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK);
- } else if (!IS_FIXED(tb)) {
+ } else {
lck_couter->lck_stat = 0;
}
}
@@ -531,6 +536,13 @@ static ERTS_INLINE void WUNLOCK_HASH_LCK_CTR(DbTableHashLockAndCounter* lck_ctr)
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
erts_rwmtx_t** lck_ptr)
{
+ /*
+ * To minimize locking ops, we jump to the next bucket using the same lock.
+ * In case of {write_concurrency,auto} this is safe as 'nlocks' does not
+ * change as long as the table is fixed, which all single-call select/match
+ * operations do. Unfixed next, prev and select/1 calls are also "safe" in
+ * the sense that we will seize correct locks as 'nlocks' will not change
+ * during the calls.
+ */
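+ /*
+ * Illustration (assumed sizes): with nlocks=64 and 256 active buckets,
+ * slots 0, 64, 128 and 192 all map to the same lock and are visited in
+ * one locked pass before we unlock and continue under the next lock.
+ */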
ix += tb->nlocks;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
@@ -1923,7 +1935,8 @@ static int match_traverse_continue(traverse_context_t* ctx,
}
lck = ctx->on_lock_hash(tb, slot_ix);
- if (slot_ix >= NACTIVE(tb)) { /* Is this possible? */
+ if (slot_ix >= NACTIVE(tb)) {
+ /* Is this possible? Yes, for ets:select/1 without safe_fixtable */
ctx->on_unlock_hash(lck);
*ret = NIL;
ret_value = DB_ERROR_BADPARAM;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 1590afb931..6025cde555 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -95,8 +95,15 @@ typedef enum {
} db_hash_lock_array_resize_state;
/* To adapt number of locks if hash table with {write_concurrency, auto} */
-void erl_db_hash_adapt_number_of_locks(DbTable* tb);
-
+void db_hash_adapt_number_of_locks(DbTable* tb);
+#define DB_HASH_ADAPT_NUMBER_OF_LOCKS(TB) \
+ do { \
+ if (IS_HASH_WITH_AUTO_TABLE(TB->common.type) \
+ && (erts_atomic_read_nob(&TB->hash.lock_array_resize_state) \
+ != DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL)) { \
+ db_hash_adapt_number_of_locks(TB); \
+ } \
+ } while(0)
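+/*
+ * Typical use (see db_get_table_aux() in erl_db.c): the macro does the
+ * cheap checks inline and only calls db_hash_adapt_number_of_locks()
+ * when a lock array resize has actually been requested:
+ *
+ *     DB_HASH_ADAPT_NUMBER_OF_LOCKS(tb);
+ *     db_lock(tb, kind);
+ */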
/*
** Function prototypes, looks the same (except the suffix) for all
** table types. The process is always an [in out] parameter.
--
2.34.1