From acb4771f41745f8e34aa194dff5b24d75bd9faed Mon Sep 17 00:00:00 2001
From: Sverker Eriksson <sverker@erlang.org>
Date: Mon, 10 Jun 2024 14:17:18 +0200
Subject: [PATCH 1/3] erts: Lift 32-bit limit of ETS table sizes
Use word-size variables (not int) to hold table indexes and sizes.
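
The stored hash value now occupies all but one bit of a machine
word, with the remaining bit holding the pseudo_deleted flag, so
MAKE_HASH can drop the 32-bit MAX_HASH_MASK and instead discard a
single bit with ">> 1".

A minimal standalone sketch of the new layout (illustrative only,
not part of the patch; uintptr_t and "struct hdr" stand in for the
ERTS UWord type and HashDbTerm):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t UWord;  /* pointer-sized unsigned, as in ERTS */

    struct hdr {
        UWord hvalue : sizeof(UWord)*8 - 1; /* 63 bits on 64-bit */
        UWord pseudo_deleted : 1;           /* flag in the last bit */
    };

    int main(void) {
        struct hdr h = { .hvalue = (UWord)1 << 20, .pseudo_deleted = 1 };
        /* Both fields pack into one word-sized unit. */
        printf("sizeof=%zu hvalue=%lu deleted=%u\n", sizeof h,
               (unsigned long)h.hvalue, (unsigned)h.pseudo_deleted);
        return 0;
    }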
---
erts/emulator/beam/erl_db.c | 16 +--
erts/emulator/beam/erl_db_hash.c | 170 ++++++++++++++++---------------
erts/emulator/beam/erl_db_hash.h | 19 ++--
3 files changed, 101 insertions(+), 104 deletions(-)
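
Note on the number_of_locks sentinel: as a signed int the "not
configured" value was -1; as an unsigned UWord it becomes 0. That
is unambiguous because an accepted
{debug_hash_fixed_number_of_locks, N} option requires
N >= DB_WRITE_CONCURRENCY_MIN_LOCKS, and db_create_hash replaces
nlocks == 0 with DB_HASH_LOCK_CNT before rounding the count down
to a power of two.

A sketch of that rounding step (illustrative only; bit_width() and
round_down_pow2() are hypothetical stand-ins for
erts_fit_in_bits_uint() and the inline expression in
db_create_hash):

    #include <stdint.h>

    typedef uintptr_t UWord;  /* pointer-sized unsigned, as in ERTS */

    /* 1-based position of the highest set bit; 0 when v == 0. */
    static unsigned bit_width(UWord v) {
        unsigned n = 0;
        while (v) { v >>= 1; n++; }
        return n;
    }

    /* Mirrors: nlocks = 1 << (erts_fit_in_bits_uint(nlocks) - 1);
     * callers guarantee nlocks > 0. */
    static UWord round_down_pow2(UWord nlocks) {
        return (UWord)1 << (bit_width(nlocks) - 1);
    }

    int main(void) { return round_down_pow2(6) == 4 ? 0 : 1; }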
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index b8ea7a3e63..9f15b855bf 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2484,7 +2484,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Sint keypos;
int is_named, is_compressed;
int is_fine_locked, frequent_read;
- int number_of_locks;
+ UWord number_of_locks;
int is_decentralized_counters;
int is_decentralized_counters_option;
int is_explicit_lock_granularity;
@@ -2509,7 +2509,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
- number_of_locks = -1;
+ number_of_locks = 0;
is_explicit_lock_granularity = 0;
is_write_concurrency_auto = 0;
@@ -2542,7 +2542,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_write_concurrency_auto = 1;
is_fine_locked = 1;
is_explicit_lock_granularity = 0;
- number_of_locks = -1;
+ number_of_locks = 0;
} else if (tp[2] == am_true) {
if (!(status & DB_ORDERED_SET)) {
is_decentralized_counters = 0;
@@ -2550,18 +2550,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 1;
is_explicit_lock_granularity = 0;
is_write_concurrency_auto = 0;
- number_of_locks = -1;
+ number_of_locks = 0;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
is_explicit_lock_granularity = 0;
is_write_concurrency_auto = 0;
- number_of_locks = -1;
+ number_of_locks = 0;
} else if (is_tuple(tp[2])) {
Eterm *stp = tuple_val(tp[2]);
- Sint number_of_locks_param;
+ UWord number_of_locks_param;
if (arityval(stp[0]) == 2 &&
stp[1] == am_debug_hash_fixed_number_of_locks &&
- term_to_Sint(stp[2], &number_of_locks_param) &&
+ term_to_UWord(stp[2], &number_of_locks_param) &&
number_of_locks_param >= DB_WRITE_CONCURRENCY_MIN_LOCKS &&
number_of_locks_param <= DB_WRITE_CONCURRENCY_MAX_LOCKS) {
@@ -2664,7 +2664,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status |= DB_FINE_LOCKED_AUTO;
}
} else {
- number_of_locks = -1;
+ number_of_locks = 0;
}
}
else if (IS_TREE_TABLE(status)) {
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 7def06e96b..d19289c252 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -100,7 +100,7 @@
#define LCK_AUTO_MAX_LOCKS_FREQ_READ_RW_LOCKS 128
-static ERTS_INLINE int
+static ERTS_INLINE Sint
NITEMS_ESTIMATE(DbTableHash* DB, DbTableHashLockAndCounter* LCK_CTR, HashValue HASH)
{
if (IS_DECENTRALIZED_CTRS(DB)) {
@@ -181,7 +181,7 @@ DEC_NITEMS(DbTableHash* DB, DbTableHashLockAndCounter* LCK_CTR, HashValue HASH)
? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \
: ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
#endif
-#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
+#define NACTIVE(tb) ((UWord)erts_atomic_read_nob(&(tb)->nactive))
#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
@@ -276,7 +276,7 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_internal_hash(term, 0)) & MAX_HASH_MASK)
+ make_internal_hash(term, 0)) >> 1)
# define GET_LOCK_MASK(NUMBER_OF_LOCKS) ((NUMBER_OF_LOCKS)-1)
@@ -294,7 +294,7 @@ static void calc_shrink_limit(DbTableHash* tb);
void db_hash_adapt_number_of_locks(DbTable* tb) {
db_hash_lock_array_resize_state current_state;
DbTableHash* tbl;
- int new_number_of_locks;
+ UWord new_number_of_locks;
ASSERT(IS_HASH_WITH_AUTO_TABLE(tb->common.type));
@@ -332,7 +332,7 @@ void db_hash_adapt_number_of_locks(DbTable* tb) {
We do not want to make the table unnecessary large just to
potentially reduce contention.
*/
- int i;
+ UWord i;
for (i = 0; i < tbl->nlocks; i++) {
tbl->locks[i].u.lck_ctr.lck_stat = 0;
}
@@ -343,7 +343,7 @@ void db_hash_adapt_number_of_locks(DbTable* tb) {
}
{
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
- int i;
+ UWord i;
DbTableHashFineLockSlot* old_locks = tbl->locks;
Uint old_number_of_locks = tbl->nlocks;
ASSERT(new_number_of_locks != 0);
@@ -373,7 +373,7 @@ void db_hash_adapt_number_of_locks(DbTable* tb) {
{
Sint total_old = 0;
Sint total_new = 0;
- int i;
+ UWord i;
for (i=0; i < old_number_of_locks; i++) {
total_old += old_locks[i].u.lck_ctr.nitems;
}
@@ -584,7 +584,7 @@ static ERTS_INLINE void free_term_list(DbTableHash *tb, HashDbTerm* p)
*/
struct mp_prefound {
HashDbTerm** bucket;
- int ix;
+ UWord ix;
};
struct mp_info {
@@ -609,8 +609,8 @@ struct segment {
struct ext_segtab {
ErtsThrPrgrLaterOp lop;
struct segment** prev_segtab; /* Used when table is shrinking */
- int prev_nsegs; /* Size of prev_segtab */
- int nsegs; /* Size of this segtab */
+ UWord prev_nsegs; /* Size of prev_segtab */
+ UWord nsegs; /* Size of this segtab */
struct segment* segtab[1]; /* The segment table */
};
#define SIZEOF_EXT_SEGTAB(NSEGS) \
@@ -627,20 +627,20 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
}
/* Used by select_replace on analyze_pattern */
-typedef int ExtraMatchValidatorF(int keypos, Eterm match, Eterm guard, Eterm body);
+typedef bool ExtraMatchValidatorF(int keypos, Eterm match, Eterm guard, Eterm body);
/*
** Forward decl's (static functions)
*/
-static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
static void alloc_seg(DbTableHash *tb, int activate_new_seg);
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, UWord seg_ix);
static int free_seg(DbTableHash *tb);
-static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+static HashDbTerm* next_live(DbTableHash *tb, UWord *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
-static void shrink(DbTableHash* tb, int nitems);
-static void grow(DbTableHash* tb, int nitems);
+static void shrink(DbTableHash* tb, UWord nitems);
+static void grow(DbTableHash* tb, UWord nitems);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
Uint sz, DbTableHash*);
static Eterm get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
@@ -673,7 +673,7 @@ static int db_next_lookup_hash(Process *p,
static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret);
static int db_get_element_hash(Process *p, DbTable *tbl,
- Eterm key, int ndex, Eterm *ret);
+ Eterm key, int pos, Eterm *ret);
static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
@@ -906,7 +906,6 @@ static void DEBUG_WAIT(void)
when "unfixer" gets interrupted by "fixer" */
static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
- /*int tries = 0;*/
DEBUG_WAIT();
if (erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
@@ -1029,20 +1028,20 @@ int db_create_hash(Process *p, DbTable *tbl)
}
else {
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
- int i;
+ UWord i;
if (tb->common.type & DB_FINE_LOCKED_AUTO) {
tb->nlocks = LCK_AUTO_DEFAULT_NUMBER_OF_LOCKS;
}
else {
- if (tb->nlocks < 1) {
+ if (tb->nlocks == 0) {
tb->nlocks = DB_HASH_LOCK_CNT;
}
/*
* nlocks needs to be a power of two so we round down to
* nearest power of two
*/
- tb->nlocks = 1 << (erts_fit_in_bits_int64(tb->nlocks)-1);
+ tb->nlocks = 1 << (erts_fit_in_bits_uint(tb->nlocks)-1);
if (tb->nlocks < NLOCKS_WITH_ITEM_COUNTERS) {
tb->nlocks = NLOCKS_WITH_ITEM_COUNTERS;
}
@@ -1063,7 +1062,7 @@ int db_create_hash(Process *p, DbTable *tbl)
tb->locks = (DbTableHashFineLockSlot*) erts_db_alloc(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLockSlot) * tb->nlocks);
- for (i=0; i<tb->nlocks; ++i) {
+ for (i=0; i < tb->nlocks; ++i) {
erts_rwmtx_init_opt(
GET_LOCK(tb,i), &rwmtx_opt,
"db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
@@ -1115,11 +1114,11 @@ static ERTS_INLINE Eterm db_copy_key_and_objects_hash(Process* p, DbTable* tbl,
static int db_first_hash_common(Process *p, DbTable *tbl, Eterm *ret, Eterm (*func)(Process *, DbTable *, HashDbTerm *))
{
DbTableHash *tb = &tbl->hash;
- Uint ix = 0;
+ UWord ix = 0;
erts_rwmtx_t* lck = RLOCK_HASH(tb,ix);
HashDbTerm* list;
- list = BUCKET(tb,ix);
+ list = BUCKET(tb, ix);
list = next_live(tb, &ix, &lck, list);
if (list != NULL) {
@@ -1146,7 +1145,7 @@ static int db_next_hash_common(Process *p, DbTable *tbl, Eterm key, Eterm *ret,
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- Uint ix;
+ UWord ix;
HashDbTerm* b;
erts_rwmtx_t* lck;
@@ -1246,13 +1245,13 @@ static int db_put_dbterm_hash(DbTable* tbl,
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
Eterm key;
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* q;
DbTableHashLockAndCounter* lck_ctr;
- int nitems;
+ UWord nitems;
int ret = DB_ERROR_NONE;
HashDbTerm *value_to_insert = ob;
Uint size_to_insert = db_term_size(tbl, value_to_insert, offsetof(HashDbTerm, dbterm));
@@ -1346,7 +1345,7 @@ Lnew:
nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
WUNLOCK_HASH_LCK_CTR(lck_ctr);
{
- int nactive = NACTIVE(tb);
+ UWord nactive = NACTIVE(tb);
if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
grow(tb, nitems);
}
@@ -1363,7 +1362,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail,
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
Eterm key;
HashDbTerm** bp;
HashDbTerm* b;
@@ -1452,7 +1451,7 @@ Lnew:
nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
WUNLOCK_HASH_LCK_CTR(lck_ctr);
{
- int nactive = NACTIVE(tb);
+ const UWord nactive = NACTIVE(tb);
if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
grow(tb, nitems);
}
@@ -1491,7 +1490,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm* b;
erts_rwmtx_t* lck;
@@ -1517,7 +1516,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm* b1;
erts_rwmtx_t* lck;
@@ -1541,12 +1540,12 @@ done:
static int db_get_element_hash(Process *p, DbTable *tbl,
Eterm key,
- int ndex,
+ int pos,
Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm* b1;
erts_rwmtx_t* lck;
int retval;
@@ -1559,7 +1558,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
while(b1 != 0) {
if (has_live_key(tb,b1,key,hval)) {
- if (ndex > arityval(b1->dbterm.tpl[0])) {
+ if (pos > arityval(b1->dbterm.tpl[0])) {
retval = DB_ERROR_BADITEM;
goto done;
}
@@ -1569,7 +1568,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
Eterm elem_list = NIL;
while(b2 != NULL && has_key(tb,b2,key,hval)) {
- if (ndex > arityval(b2->dbterm.tpl[0])
+ if (pos > arityval(b2->dbterm.tpl[0])
&& !is_pseudo_deleted(b2)) {
retval = DB_ERROR_BADITEM;
goto done;
@@ -1581,7 +1580,8 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
if (!is_pseudo_deleted(b)) {
Eterm *hp;
Eterm copy = db_copy_element_from_ets(&tb->common, p,
- &b->dbterm, ndex, &hp, 2);
+ &b->dbterm, pos,
+ &hp, 2);
elem_list = CONS(hp, copy, elem_list);
}
b = b->next;
@@ -1590,7 +1590,8 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
}
else {
Eterm* hp;
- *ret = db_copy_element_from_ets(&tb->common, p, &b1->dbterm, ndex, &hp, 0);
+ *ret = db_copy_element_from_ets(&tb->common, p, &b1->dbterm,
+ pos, &hp, 0);
}
retval = DB_ERROR_NONE;
goto done;
@@ -1610,7 +1611,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* free_us = NULL;
@@ -1665,7 +1666,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* free_us = NULL;
@@ -1730,7 +1731,7 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
erts_rwmtx_t* lck;
Sint slot;
int retval;
- int nactive;
+ UWord nactive;
if (is_not_small(slot_term) || ((slot = signed_val(slot_term)) < 0)) {
return DB_ERROR_BADPARAM;
@@ -2621,7 +2622,7 @@ static int select_delete_on_match_res(traverse_context_t* ctx_base, Sint slot_ix
select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
HashDbTerm* del;
DbTableHashLockAndCounter* lck_ctr;
- Uint32 hval;
+ UWord hval;
if (match_res != am_true)
return 0;
hval = (*current_ptr)->hvalue;
@@ -2651,7 +2652,7 @@ static int select_delete_on_match_res(traverse_context_t* ctx_base, Sint slot_ix
static Sint get_nitems_from_locks_or_counter(DbTableHash* tb)
{
if (IS_DECENTRALIZED_CTRS(tb)) {
- int i;
+ UWord i;
Sint total = 0;
for (i=0; i < NLOCKS_WITH_ITEM_COUNTERS; ++i) {
total += erts_atomic_read_nob(&tb->locks[i].u.lck_ctr.nitems);
@@ -2947,7 +2948,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashDbTerm *free_us = NULL;
HashValue hval = MAKE_HASH(key);
DbTableHashLockAndCounter *lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb, hval);
- int ix = hash_to_ix(tb, hval);
+ UWord ix = hash_to_ix(tb, hval);
int nitems_diff = 0;
Sint nitems;
@@ -3003,7 +3004,7 @@ static SWord db_mark_all_deleted_hash(DbTable *tbl, SWord reds)
DbTableHash *tb = &tbl->hash;
FixedDeletion* fixdel;
SWord loops = reds * LOOPS_PER_REDUCTION;
- int i;
+ UWord i;
ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
@@ -3050,11 +3051,11 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
DbHashStats stats;
- int i;
+ int was_thread_safe;
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
- i = tbl->common.is_thread_safe;
+ was_thread_safe = tbl->common.is_thread_safe;
/* If crash dumping we set table to thread safe in order to
avoid taking any locks */
if (ERTS_IS_CRASH_DUMPING)
@@ -3062,7 +3063,7 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
db_calc_stats_hash(&tbl->hash, &stats);
- tbl->common.is_thread_safe = i;
+ tbl->common.is_thread_safe = was_thread_safe;
erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
@@ -3078,6 +3079,7 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
erts_print(to, to_arg, "Fixed: false\n");
if (show) {
+ UWord i;
for (i = 0; i < NACTIVE(tb); i++) {
HashDbTerm* list = BUCKET(tb,i);
if (list == NULL)
@@ -3140,8 +3142,8 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
}
}
if (tb->locks != NULL) {
- int i;
- for (i=0; i<tb->nlocks; ++i) {
+ UWord i;
+ for (i=0; i < tb->nlocks; ++i) {
ERTS_DB_ALC_MEM_UPDATE_(tb, erts_rwmtx_size(GET_LOCK(tb,i)), 0);
erts_rwmtx_destroy(GET_LOCK(tb,i));
}
@@ -3249,7 +3251,8 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
key = db_getkey(tb->common.keypos, tpl);
if (is_value(key)) {
if (db_is_fully_bound(key)) {
- int ix, search_slot;
+ UWord ix;
+ int search_slot;
HashDbTerm** bp;
erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
@@ -3257,7 +3260,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
ix = hash_to_ix(tb, hval);
bp = &BUCKET(tb,ix);
if (lck == NULL) {
- search_slot = search_list(tb,key,hval,*bp) != NULL;
+ search_slot = (search_list(tb,key,hval,*bp) != NULL);
} else {
/* No point to verify if key exist now as there may be
concurrent inserters/deleters anyway */
@@ -3313,10 +3316,10 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, UWord seg_ix)
{
struct segment** old_segtab = SEGTAB(tb);
- int nsegs = 0;
+ UWord nsegs = 0;
struct ext_segtab* est;
ASSERT(seg_ix >= NSEG_1);
@@ -3363,7 +3366,7 @@ static void calc_shrink_limit(DbTableHash* tb)
/* const double d = n*x / (x + n - 1) + 1; */
/* printf("Cochran_formula=%f size=%d mod_with_size=%f\n", x, n, d); */
/* } */
- const int needed_slots = 100 * NLOCKS_WITH_ITEM_COUNTERS;
+ const UWord needed_slots = 100 * NLOCKS_WITH_ITEM_COUNTERS;
if (tb->nslots < needed_slots) {
sample_size_is_enough = 0;
}
@@ -3399,7 +3402,7 @@ static void calc_shrink_limit(DbTableHash* tb)
*/
static void alloc_seg(DbTableHash *tb, int activate_buckets)
{
- int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
+ UWord seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
struct segment** segtab;
ASSERT(seg_ix > 0);
@@ -3415,7 +3418,7 @@ static void alloc_seg(DbTableHash *tb, int activate_buckets)
SIZEOF_SEGMENT(EXT_SEGSZ));
#ifdef DEBUG
{
- int i;
+ UWord i;
for (i = 0; i < EXT_SEGSZ; i++) {
segtab[seg_ix]->buckets[i] = DBG_BUCKET_INACTIVE;
}
@@ -3459,7 +3462,7 @@ struct dealloc_seg_ops {
static int remove_seg(DbTableHash *tb, int free_records,
struct dealloc_seg_ops *ds_ops)
{
- const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
+ const UWord seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
struct segment** const segtab = SEGTAB(tb);
struct segment* const segp = segtab[seg_ix];
Uint seg_sz;
@@ -3470,7 +3473,7 @@ static int remove_seg(DbTableHash *tb, int free_records,
ASSERT(segp != NULL);
if (free_records) {
- int ix, n;
+ UWord ix, n;
if (seg_ix == 0) {
/* First segment (always fully active) */
n = FIRST_SEGSZ;
@@ -3498,7 +3501,7 @@ static int remove_seg(DbTableHash *tb, int free_records,
}
#ifdef DEBUG
else {
- int ix = (seg_ix == 0) ? FIRST_SEGSZ-1 : EXT_SEGSZ-1;
+ SWord ix = (seg_ix == 0) ? FIRST_SEGSZ-1 : EXT_SEGSZ-1;
for ( ; ix >= 0; ix--) {
ASSERT(segp->buckets[ix] == DBG_BUCKET_INACTIVE);
}
@@ -3620,8 +3623,9 @@ begin_resizing(DbTableHash* tb)
return
!erts_atomic_read_acqb(&tb->is_resizing) &&
!erts_atomic_xchg_acqb(&tb->is_resizing, 1);
- } else
+ } else {
ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ }
return 1;
}
@@ -3635,15 +3639,15 @@ done_resizing(DbTableHash* tb)
/* Grow table with one or more new buckets.
** Allocate new segment if needed.
*/
-static void grow(DbTableHash* tb, int nitems)
+static void grow(DbTableHash* tb, UWord nitems)
{
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
erts_rwmtx_t* lck;
- int nactive;
- int from_ix, to_ix;
- int szm;
+ UWord nactive;
+ UWord from_ix, to_ix;
+ UWord szm;
int loop_limit = 5;
do {
@@ -3703,7 +3707,7 @@ static void grow(DbTableHash* tb, int nitems)
p = *pnext;
}
else {
- int ix = p->hvalue & szm;
+ const UWord ix = p->hvalue & szm;
if (ix != from_ix) {
ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
*to_pnext = p;
@@ -3730,15 +3734,15 @@ abort:
/* Shrink table by joining top bucket.
** Remove top segment if it gets empty.
*/
-static void shrink(DbTableHash* tb, int nitems)
+static void shrink(DbTableHash* tb, UWord nitems)
{
struct dealloc_seg_ops ds_ops;
HashDbTerm* src;
HashDbTerm* tail;
HashDbTerm** bp;
erts_rwmtx_t* lck;
- int src_ix, dst_ix, low_szm;
- int nactive;
+ UWord src_ix, dst_ix, low_szm;
+ UWord nactive;
int loop_limit = 5;
ds_ops.segp = NULL;
@@ -3826,14 +3830,14 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
}
-/* This function is called by the next AND the select BIF */
+/* This function is called by the 'next' AND the 'select' BIF */
/* It return the next live object in a table, NULL if no more */
/* In-bucket: RLOCKED */
/* Out-bucket: RLOCKED unless NULL */
-static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+static HashDbTerm* next_live(DbTableHash *tb, UWord *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list)
{
- int i;
+ UWord i;
ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
@@ -3950,7 +3954,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp = (HashDbTerm **) handle->bp;
HashDbTerm *b = *bp;
- Uint32 hval = b->hvalue;
+ const UWord hval = b->hvalue;
DbTableHashLockAndCounter* lck_ctr = handle->u.hash.lck_ctr;
HashDbTerm* free_me = NULL;
Sint nitems;
@@ -3979,8 +3983,8 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
free_me = b;
}
if (handle->flags & DB_INC_TRY_GROW) {
- int nactive;
- int nitems;
+ UWord nactive;
+ UWord nitems;
ASSERT(cret == DB_ERROR_NONE);
INC_NITEMS(tb, lck_ctr, hval);
nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
@@ -4037,8 +4041,8 @@ void db_foreach_offheap_hash(DbTable *tbl,
{
DbTableHash *tb = &tbl->hash;
HashDbTerm* list;
- int i;
- int nactive = NACTIVE(tb);
+ UWord i;
+ UWord nactive = NACTIVE(tb);
if (nactive > tb->nslots) {
/* Table is being emptied by delete/1 or delete_all_objects/1 */
@@ -4063,11 +4067,11 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
{
HashDbTerm* b;
erts_rwmtx_t* lck;
- int sum = 0;
- int sq_sum = 0;
- int kept_items = 0;
- int ix;
- int len;
+ UWord sum = 0;
+ UWord sq_sum = 0;
+ UWord kept_items = 0;
+ UWord ix;
+ UWord len;
if (tb->nslots < NACTIVE(tb)) {
ASSERT(ERTS_IS_CRASH_DUMPING);
@@ -4108,7 +4112,7 @@ static int db_get_binary_info_hash(Process *p, DbTable *tbl, Eterm key, Eterm *r
{
DbTableHash *tb = &tbl->hash;
HashValue hval;
- int ix;
+ UWord ix;
HashDbTerm *b, *first, *end;
erts_rwmtx_t* lck;
Eterm *hp, *hp_end;
@@ -4281,7 +4285,7 @@ erts_db_foreach_thr_prgr_offheap_hash(void (*func)(ErlOffHeap *, void *),
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
- int i;
+ UWord i;
if(tb->locks == NULL) {
return;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 974715f5d2..2c1b973ad2 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -38,15 +38,8 @@ typedef Uint32 HashVal;
typedef struct hash_db_term {
struct hash_db_term* next; /* next bucket */
-#if SIZEOF_VOID_P == 4
- Uint32 hvalue : 31; /* stored hash value */
- Uint32 pseudo_deleted : 1;
-# define MAX_HASH_MASK (((Uint32)1 << 31)-1)
-#elif SIZEOF_VOID_P == 8
- Uint32 hvalue;
- Uint32 pseudo_deleted;
-# define MAX_HASH_MASK ((Uint32)(Sint32)-1)
-#endif
+    UWord hvalue : sizeof(UWord)*8 - 1;   /* stored hash value */
+    UWord pseudo_deleted : 1;
DbTerm dbterm; /* The actual term */
} HashDbTerm;
@@ -81,9 +74,9 @@ typedef struct db_table_hash {
struct segment* first_segtab[1];
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
- int nlocks; /* Needs to be smaller or equal to nactive */
- int nslots; /* Total number of slots */
- int nsegs; /* Size of segment table */
+ UWord nlocks; /* Needs to be smaller or equal to nactive */
+ UWord nslots; /* Total number of slots */
+ UWord nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
erts_atomic_t fixdel; /* (FixedDeletion*) */
@@ -131,7 +124,7 @@ typedef struct {
float std_dev_expected;
int max_chain_len;
int min_chain_len;
- int kept_items;
+ UWord kept_items;
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
--
2.35.3