File 4495-Fixes-due-to-comments-by-sverker.patch of Package erlang

From 49c2445ade21abf1dc5c5ee557418e56aa87af50 Mon Sep 17 00:00:00 2001
From: Kjell Winblad <kjellwinblad@gmail.com>
Date: Wed, 6 Oct 2021 15:37:23 +0200
Subject: [PATCH 5/8] Fixes due to comments by @sverker

---
 erts/emulator/beam/erl_db.c      | 29 +++++++++--------
 erts/emulator/beam/erl_db_hash.c | 54 +++++++++++++++++---------------
 erts/emulator/beam/erl_db_hash.h |  2 +-
 lib/stdlib/test/ets_SUITE.erl    | 12 +++++--
 4 files changed, 55 insertions(+), 42 deletions(-)

diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index a047c6ecb5..9cfc89c8e9 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -58,6 +58,9 @@
 #include "bif.h"
 
 
+#define DB_WRITE_CONCURRENCY_MIN_LOCKS 1
+#define DB_WRITE_CONCURRENCY_MAX_LOCKS 32768
+
 erts_atomic_t erts_ets_misc_mem_size;
 
 /*
@@ -815,7 +818,7 @@ DbTable* db_get_table_aux(Process *p,
         tb = tid2tab(id);
 
     if (tb) {
-        erl_db_hash_adapt_no_locks(tb);
+        erl_db_hash_adapt_number_of_locks(tb);
 	db_lock(tb, kind);
 #ifdef ETS_DBG_FORCE_TRAP
         if (erts_atomic_read_nob(&tb->common.dbg_force_trap)) {
@@ -2255,7 +2258,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
     Sint keypos;
     int is_named, is_compressed;
     int is_fine_locked, frequent_read;
-    int no_locks;
+    int number_of_locks;
     int is_decentralized_counters;
     int is_decentralized_counters_option;
     int is_explicit_lock_granularity;
@@ -2280,7 +2283,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
     heir = am_none;
     heir_data = (UWord) am_undefined;
     is_compressed = erts_ets_always_compress;
-    no_locks = -1;
+    number_of_locks = -1;
     is_explicit_lock_granularity = 0;
     is_write_concurrency_auto = 0;
 
@@ -2308,22 +2311,22 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
 		    keypos = signed_val(tp[2]);
 		}
 		else if (tp[1] == am_write_concurrency) {
-                    Sint no_locks_param;
+                    Sint number_of_locks_param;
                     if (is_integer(tp[2]) &&
-                        term_to_Sint(tp[2], &no_locks_param) &&
-                        no_locks_param >= 1 &&
-                        no_locks_param <= 32768) {
+                        term_to_Sint(tp[2], &number_of_locks_param) &&
+                        number_of_locks_param >= DB_WRITE_CONCURRENCY_MIN_LOCKS &&
+                        number_of_locks_param <= DB_WRITE_CONCURRENCY_MAX_LOCKS) {
                         is_decentralized_counters = 1;
                         is_fine_locked = 1;
                         is_explicit_lock_granularity = 1;
                         is_write_concurrency_auto = 0;
-                        no_locks = no_locks_param;
+                        number_of_locks = number_of_locks_param;
                     } else if (tp[2] == am_auto) {
                         is_decentralized_counters = 1;
                         is_write_concurrency_auto = 1;
                         is_fine_locked = 1;
                         is_explicit_lock_granularity = 0;
-                        no_locks = -1;
+                        number_of_locks = -1;
                     } else if (tp[2] == am_true) {
                         if (!(status & DB_ORDERED_SET)) {
                             is_decentralized_counters = 0;
@@ -2331,12 +2334,12 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
                         is_fine_locked = 1;
                         is_explicit_lock_granularity = 0;
                         is_write_concurrency_auto = 0;
-                        no_locks = -1;
+                        number_of_locks = -1;
                     } else if (tp[2] == am_false) {
                         is_fine_locked = 0;
                         is_explicit_lock_granularity = 0;
                         is_write_concurrency_auto = 0;
-                        no_locks = -1;
+                        number_of_locks = -1;
                     } else break;
                     if (DB_LOCK_FREE(NULL))
 			is_fine_locked = 0;
@@ -2411,7 +2414,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
                 status |=  DB_FINE_LOCKED_AUTO;
             }
 	} else {
-            no_locks = -1;
+            number_of_locks = -1;
         }
     }
     else if (IS_TREE_TABLE(status)) {
@@ -2464,7 +2467,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
 
     if (IS_HASH_TABLE(status)) {
         DbTableHash* hash_db = (DbTableHash*) tb;
-        hash_db->nlocks = no_locks;
+        hash_db->nlocks = number_of_locks;
     }
     cret = meth->db_create(BIF_P, tb);
     ASSERT(cret == DB_ERROR_NONE); (void)cret;
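
The option parsing above accepts three forms of write_concurrency for hash tables: an explicit lock count between DB_WRITE_CONCURRENCY_MIN_LOCKS and DB_WRITE_CONCURRENCY_MAX_LOCKS (1..32768, rounded down to a power of two internally), the atom auto, and the classic booleans. A minimal usage sketch, assuming an emulator built with this patch series applied; the table names and the helper function are illustrative only, not part of the patch:

    -module(write_concurrency_example).
    -export([new_tables/0]).

    %% Hypothetical helper: creates tables with the write_concurrency
    %% forms accepted by the ets_new_2 parsing shown above.
    new_tables() ->
        %% Explicit lock count; integers in 1..32768 are accepted.
        Explicit = ets:new(explicit_locks, [set, {write_concurrency, 1024}]),
        %% Automatic adaptation of the lock array (DB_FINE_LOCKED_AUTO).
        Auto = ets:new(auto_locks, [set, {write_concurrency, auto}]),
        %% The boolean forms keep their previous meaning.
        Fine = ets:new(fine_locks, [set, {write_concurrency, true}]),
        Coarse = ets:new(coarse_locks, [set, {write_concurrency, false}]),
        {Explicit, Auto, Fine, Coarse}.
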
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index fd79dd3344..6b160eb97a 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -138,6 +138,8 @@
 #define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
 #define SHRINK_LIMIT(TB) erts_atomic_read_nob(&(TB)->shrink_limit)
 
+#define IS_POW2(x) ((x) && !((x) & ((x)-1)))
+
 /*
 ** We want the first mandatory segment to be small (to reduce minimal footprint)
 ** and larger extra segments (to reduce number of alloc/free calls).
@@ -264,7 +266,7 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
     ((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
       make_internal_hash(term, 0)) & MAX_HASH_MASK)
 
-#  define GET_LOCK_MASK(NO_LOCKS) ((NO_LOCKS)-1)
+#  define GET_LOCK_MASK(NUMBER_OF_LOCKS) ((NUMBER_OF_LOCKS)-1)
 
 #  define GET_LOCK(tb,hval) (&(tb)->locks[(hval) & GET_LOCK_MASK(tb->nlocks)].u.lck_ctr.lck)
 #  define GET_LOCK_AND_CTR(tb,hval) (&(tb)->locks[(hval) & GET_LOCK_MASK(tb->nlocks)].u.lck_ctr)
@@ -276,24 +278,24 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
 #  define LCK_AUTO_SHRINK_LIMIT                 -10000000
 #  define LCK_AUTO_MAX_LOCKS                    8192
 #  define LCK_AUTO_MIN_LOCKS                    4
-#  define LCK_AUTO_DEFAULT_NO_LOCKS             64
+#  define LCK_AUTO_DEFAULT_NUMBER_OF_LOCKS      64
 #  define LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS 128
 
 static Sint get_lock_nitems_form_prev_lock_array(int index,
-                                                 int new_no_locks,
-                                                 int old_no_locks,
+                                                 int new_number_of_locks,
+                                                 int old_number_of_locks,
                                                  DbTableHashFineLockSlot* old_locks) {
-    if (new_no_locks > old_no_locks) {
+    if (new_number_of_locks > old_number_of_locks) {
         Sint nitems = 0;
-        Sint in_source = old_locks[index % old_no_locks].u.lck_ctr.nitems;
+        Sint in_source = old_locks[index % old_number_of_locks].u.lck_ctr.nitems;
         nitems += in_source / 2;
-        if (index >= old_no_locks) {
+        if (index >= old_number_of_locks) {
             nitems += in_source % 2;
         }
         return nitems;
     } else {
         Sint in_source_1 = old_locks[index].u.lck_ctr.nitems;
-        Sint in_source_2 = old_locks[index + new_no_locks].u.lck_ctr.nitems;
+        Sint in_source_2 = old_locks[index + new_number_of_locks].u.lck_ctr.nitems;
         return in_source_1 + in_source_2;
     }
 
@@ -301,10 +303,10 @@ static Sint get_lock_nitems_form_prev_lock_array(int index,
 
 static void calc_shrink_limit(DbTableHash* tb);
 
-void erl_db_hash_adapt_no_locks(DbTable* tb) {
+void erl_db_hash_adapt_number_of_locks(DbTable* tb) {
     db_hash_lock_array_resize_state current_state;
     DbTableHash* tbl;
-    int new_no_locks;
+    int new_number_of_locks;
     if(!(tb->common.type & DB_FINE_LOCKED_AUTO)) {
         return;
     }
@@ -335,9 +337,9 @@ void erl_db_hash_adapt_no_locks(DbTable* tb) {
     }
     if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW &&
         erts_atomic_read_nob(&tbl->nactive) >= (2*tbl->nlocks)) {
-        new_no_locks = 2*tbl->nlocks;
+        new_number_of_locks = 2*tbl->nlocks;
     } else if (current_state == DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK) {
-        new_no_locks = tbl->nlocks / 2;
+        new_number_of_locks = tbl->nlocks / 2;
     } else {
         /*
           Do not do any adaptation if the number of active buckets is
@@ -359,11 +361,11 @@ void erl_db_hash_adapt_no_locks(DbTable* tb) {
         erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
         int i;
         DbTableHashFineLockSlot* old_locks = tbl->locks;
-        Uint old_no_locks = tbl->nlocks;
-        ASSERT(new_no_locks != 0);
-        tbl->nlocks = new_no_locks;
+        Uint old_number_of_locks = tbl->nlocks;
+        ASSERT(new_number_of_locks != 0);
+        tbl->nlocks = new_number_of_locks;
         if (tb->common.type & DB_FREQ_READ &&
-            new_no_locks <= LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS) {
+            new_number_of_locks <= LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS) {
             rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
         }
         if (erts_ets_rwmtx_spin_count >= 0) {
@@ -376,7 +378,7 @@ void erl_db_hash_adapt_no_locks(DbTable* tb) {
             erts_rwmtx_init_opt(&tbl->locks[i].u.lck_ctr.lck, &rwmtx_opt,
                                 "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
             tbl->locks[i].u.lck_ctr.nitems =
-                get_lock_nitems_form_prev_lock_array(i, tbl->nlocks, old_no_locks, old_locks);
+                get_lock_nitems_form_prev_lock_array(i, tbl->nlocks, old_number_of_locks, old_locks);
             tbl->locks[i].u.lck_ctr.lck_stat = 0;
         }
 /* #define HARD_DEBUG_ITEM_CNT_LOCK_CHANGE 1 */
@@ -385,7 +387,7 @@ void erl_db_hash_adapt_no_locks(DbTable* tb) {
             Sint total_old = 0;
             Sint total_new = 0;
             int i;
-            for (i=0; i < old_no_locks; i++) {
+            for (i=0; i < old_number_of_locks; i++) {
                 total_old += old_locks[i].u.lck_ctr.nitems;
             }
             for (i=0; i < tbl->nlocks; i++) {
@@ -400,10 +402,10 @@ void erl_db_hash_adapt_no_locks(DbTable* tb) {
 
         erts_atomic_set_nob(&tbl->lock_array_resize_state, DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
         erts_rwmtx_rwunlock(&tb->common.rwlock);
-        for (i = 0; i < old_no_locks; i++) {
+        for (i = 0; i < old_number_of_locks; i++) {
             erts_rwmtx_destroy(&old_locks[i].u.lck_ctr.lck);
         }
-        erts_db_free(ERTS_ALC_T_DB_SEG, tb, old_locks, sizeof(DbTableHashFineLockSlot) * old_no_locks);
+        erts_db_free(ERTS_ALC_T_DB_SEG, tb, old_locks, sizeof(DbTableHashFineLockSlot) * old_number_of_locks);
     }
 }
 
@@ -997,12 +999,14 @@ int db_create_hash(Process *p, DbTable *tbl)
     erts_atomic_init_nob(&tb->is_resizing, 0);
     erts_atomic_init_nob(&tb->lock_array_resize_state,
                          (erts_aint_t)DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL);
-    if (tb->nlocks == -1 || !(tb->common.type & DB_FINE_LOCKED)) {
+    if (!(tb->common.type & DB_FINE_LOCKED)) {
         /*
           The number of locks needs to be set even if fine grained
           locking is not used as this variable is used when iterating
-          over the table
+          over the table.
         */
+        tb->nlocks = 1;
+    } else if(tb->nlocks == -1) {
         tb->nlocks = DB_HASH_LOCK_CNT;
     }
 
@@ -1010,7 +1014,7 @@ int db_create_hash(Process *p, DbTable *tbl)
 	erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
 	int i;
         if (tb->common.type & DB_FINE_LOCKED_AUTO) {
-            tb->nlocks = LCK_AUTO_DEFAULT_NO_LOCKS;
+            tb->nlocks = LCK_AUTO_DEFAULT_NUMBER_OF_LOCKS;
         }
         /*
           nlocks needs to be a power of two so we round down to
@@ -1047,8 +1051,8 @@ int db_create_hash(Process *p, DbTable *tbl)
         ASSERT(tb->nlocks <= erts_atomic_read_nob(&tb->nactive));
         ASSERT(erts_atomic_read_nob(&tb->nactive) <= tb->nslots);
         ASSERT(tb->nslots <= (erts_atomic_read_nob(&tb->szm) + 1));
-        ASSERT((tb->nlocks % 2) == 0);
-        ASSERT((erts_atomic_read_nob(&tb->szm) + 1) % 2 == 0);
+        ASSERT(IS_POW2(tb->nlocks));
+        ASSERT(IS_POW2(erts_atomic_read_nob(&tb->szm) + 1));
     }
     else { /* coarse locking */
 	tb->locks = NULL;
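
The renamed helper get_lock_nitems_form_prev_lock_array decides how many items each slot of the resized lock array inherits: when the array doubles, every old counter is split in half and the odd leftover item is credited to the upper half, and when it halves, the counters of slot i and slot i + new_number_of_locks are merged. A rough Erlang model of that arithmetic and of the new IS_POW2 macro, for illustration only (the module and function names are made up; this is not the C implementation):

    -module(lock_split_model).
    -export([nitems_from_prev/4, is_pow2/1]).

    %% Model of get_lock_nitems_form_prev_lock_array/4: how many items a
    %% slot Index (0-based) of the new lock array inherits, given the list
    %% OldNItems of per-lock item counts in the old array.
    nitems_from_prev(Index, NewNLocks, OldNLocks, OldNItems)
      when NewNLocks > OldNLocks ->
        %% Growing: old counter (Index rem OldNLocks) is split between the
        %% lower and upper half; the odd item goes to the upper half.
        InSource = lists:nth((Index rem OldNLocks) + 1, OldNItems),
        case Index >= OldNLocks of
            true  -> InSource div 2 + InSource rem 2;
            false -> InSource div 2
        end;
    nitems_from_prev(Index, NewNLocks, _OldNLocks, OldNItems) ->
        %% Shrinking: counters Index and Index + NewNLocks are merged.
        lists:nth(Index + 1, OldNItems) +
            lists:nth(Index + NewNLocks + 1, OldNItems).

    %% Mirrors the IS_POW2(x) macro introduced above.
    is_pow2(X) when is_integer(X), X > 0 -> X band (X - 1) =:= 0;
    is_pow2(_) -> false.

For example, growing from 2 to 4 locks with old counts [10, 3], the model yields 5, 1, 5 and 2 for slots 0..3, preserving the total of 13 items.
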
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 4154a2aa2b..086ca0837b 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -95,7 +95,7 @@ typedef enum {
 } db_hash_lock_array_resize_state;
 
 /* To adapt number of locks if hash table with {write_concurrency, auto} */
-void erl_db_hash_adapt_no_locks(DbTable* tb);
+void erl_db_hash_adapt_number_of_locks(DbTable* tb);
 
 /*
 ** Function prototypes, looks the same (except the suffix) for all 
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index b44d67aec4..c7a4b38c51 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -7427,9 +7427,15 @@ whereis_table(Config) when is_list(Config) ->
     Tid = ets:whereis(whereis_test),
 
     ets:insert(whereis_test, [{hello}, {there}]),
-
-    [[{hello}],[{there}]] = ets:match(whereis_test, '$1'),
-    [[{hello}],[{there}]] = ets:match(Tid, '$1'),
+    CheckMatch =
+        fun(MatchRes) ->
+                case MatchRes of
+                    [[{there}],[{hello}]] -> ok;
+                    [[{hello}],[{there}]] -> ok
+                end
+        end,
+    CheckMatch(ets:match(whereis_test, '$1')),
+    CheckMatch(ets:match(Tid, '$1')),
 
     true = ets:delete_all_objects(Tid),
 
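
The test change above accepts both orderings of the two objects, since ets:match/2 on a (non-ordered) set table does not guarantee traversal order. An alternative, order-insensitive formulation that sorts the match result before comparing could look like the sketch below; it is not part of the patch, and the table name and helper function are hypothetical:

    %% Sketch of an order-insensitive variant of the check above.
    order_insensitive_match_check() ->
        T = ets:new(whereis_match_example, [set]),
        true = ets:insert(T, [{hello}, {there}]),
        Expected = lists:sort([[{hello}], [{there}]]),
        Expected = lists:sort(ets:match(T, '$1')),
        true = ets:delete(T).
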
-- 
2.31.1
