File 0429-erts-Use-regular-ASSERT-in-erl_process_lock.c.patch of Package erlang
From 1f0d813f386afc0bdc2e427991269676bcde448b Mon Sep 17 00:00:00 2001
From: Sverker Eriksson <sverker@erlang.org>
Date: Wed, 10 Feb 2021 20:48:00 +0100
Subject: [PATCH 29/34] erts: Use regular ASSERT in erl_process_lock.c
to do internal consistency checks.
ERTS_LC_ASSERT contains an exception while crash dumping
and should only be used to check that the correct locks are held.
---
erts/emulator/beam/erl_lock_check.h | 6 +++
erts/emulator/beam/erl_process_lock.c | 62 +++++++++++++--------------
erts/emulator/beam/erl_process_lock.h | 21 +++++----
3 files changed, 47 insertions(+), 42 deletions(-)
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 3f3ddcca78..4ffce00db0 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -97,6 +97,12 @@ int erts_lc_is_emu_thr(void);
Eterm erts_lc_dump_graph(void);
+/*
+ * ERTS_LC_ASSERT should only be used to check expected locks or other
+ * thread synchronization primitives are held.
+ * Do not use it for data invariants like function contracts
+ * as it contains an exception for crash dumping thread.
+ */
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index af2924ac17..1541d26559 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -149,7 +149,7 @@ erts_init_proc_lock(int cpus)
#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_CHECK
-#define CHECK_UNUSED_TSE(W) ERTS_LC_ASSERT((W)->uflgs == 0)
+#define CHECK_UNUSED_TSE(W) ASSERT((W)->uflgs == 0)
#else
#define CHECK_UNUSED_TSE(W)
#endif
@@ -193,7 +193,7 @@ enqueue_waiter(erts_proc_lock_t *lck, int ix, erts_tse_t *wtr)
wtr->prev = wtr;
}
else {
- ERTS_LC_ASSERT(lck->queue[ix]->next && lck->queue[ix]->prev);
+ ASSERT(lck->queue[ix]->next && lck->queue[ix]->prev);
wtr->next = lck->queue[ix];
wtr->prev = lck->queue[ix]->prev;
wtr->prev->next = wtr;
@@ -205,14 +205,14 @@ static erts_tse_t *
dequeue_waiter(erts_proc_lock_t *lck, int ix)
{
erts_tse_t *wtr = lck->queue[ix];
- ERTS_LC_ASSERT(lck->queue[ix]);
+ ASSERT(lck->queue[ix]);
if (wtr->next == wtr) {
- ERTS_LC_ASSERT(lck->queue[ix]->prev == wtr);
+ ASSERT(lck->queue[ix]->prev == wtr);
lck->queue[ix] = NULL;
}
else {
- ERTS_LC_ASSERT(wtr->next != wtr);
- ERTS_LC_ASSERT(wtr->prev != wtr);
+ ASSERT(wtr->next != wtr);
+ ASSERT(wtr->prev != wtr);
wtr->next->prev = wtr->prev;
wtr->prev->next = wtr->next;
lck->queue[ix] = wtr->next;
@@ -236,7 +236,7 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
ErtsProcLocks locks = wtr->uflgs;
int lock_no;
- ERTS_LC_ASSERT(got_locks != locks);
+ ASSERT(got_locks != locks);
for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
@@ -245,7 +245,7 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
if (lck->queue[lock_no]) {
/* Others already waiting */
enqueue:
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(lck)
+ ASSERT(ERTS_PROC_LOCK_FLGS_READ_(lck)
& (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
enqueue_waiter(lck, lock_no, wtr);
break;
@@ -259,7 +259,7 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
else {
/* Got the lock */
got_locks |= lock;
- ERTS_LC_ASSERT(!(old_lflgs & wflg));
+ ASSERT(!(old_lflgs & wflg));
/* No one else can be waiting for the lock; remove wait flag */
(void) ERTS_PROC_LOCK_FLGS_BAND_(lck, ~wflg);
if (got_locks == locks)
@@ -303,7 +303,7 @@ transfer_locks(Process *p,
#ifdef ERTS_ENABLE_LOCK_CHECK
tlocks &= ~lock;
#endif
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
+ ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
& (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
transferred++;
wtr = dequeue_waiter(&p->lock, lock_no);
@@ -335,7 +335,7 @@ transfer_locks(Process *p,
check_queue(&p->lock);
#endif
- ERTS_LC_ASSERT(tlocks == 0); /* We should have transferred all of them */
+ ASSERT(tlocks == 0); /* We should have transferred all of them */
if (!wake) {
if (unlock)
@@ -455,7 +455,7 @@ wait_for_locks(Process *p,
ASSERT(wtr->uflgs == 0);
}
- ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+ ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
tse_return(wtr);
}
@@ -592,7 +592,7 @@ proc_safelock(int is_managed,
ErtsProcLocks unlock_mask;
int lock_no, refc1 = 0, refc2 = 0;
- ERTS_LC_ASSERT(b_proc);
+ ASSERT(b_proc);
/* Determine inter process lock order...
@@ -628,8 +628,8 @@ proc_safelock(int is_managed,
have_locks2 = a_have_locks;
}
else {
- ERTS_LC_ASSERT(a_proc == b_proc);
- ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
+ ASSERT(a_proc == b_proc);
+ ASSERT(a_proc->common.id == b_proc->common.id);
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = a_proc->common.id;
@@ -774,21 +774,21 @@ proc_safelock(int is_managed,
if (p1 && p2) {
if (p1 == a_proc) {
- ERTS_LC_ASSERT(a_need_locks == have_locks1);
- ERTS_LC_ASSERT(b_need_locks == have_locks2);
+ ASSERT(a_need_locks == have_locks1);
+ ASSERT(b_need_locks == have_locks2);
}
else {
- ERTS_LC_ASSERT(a_need_locks == have_locks2);
- ERTS_LC_ASSERT(b_need_locks == have_locks1);
+ ASSERT(a_need_locks == have_locks2);
+ ASSERT(b_need_locks == have_locks1);
}
}
else {
- ERTS_LC_ASSERT(p1);
+ ASSERT(p1);
if (a_proc) {
- ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
+ ASSERT(have_locks1 == (a_need_locks | b_need_locks));
}
else {
- ERTS_LC_ASSERT(have_locks1 == b_need_locks);
+ ASSERT(have_locks1 == b_need_locks);
}
}
#endif
@@ -846,7 +846,7 @@ erts_pid2proc_opt(Process *c_p,
return NULL;
pix = internal_pid_index(pid);
- ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
+ ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
need_locks = pid_need_locks;
if (c_p && c_p->common.id == pid) {
@@ -985,11 +985,11 @@ erts_pid2proc_opt(Process *c_p,
erts_proc_dec_refc(dec_refc_proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
- ERTS_LC_ASSERT(!proc
- || proc == ERTS_PROC_LOCK_BUSY
- || (pid_need_locks ==
- (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
- & pid_need_locks)));
+ ASSERT(!proc
+ || proc == ERTS_PROC_LOCK_BUSY
+ || (pid_need_locks ==
+ (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
+ & pid_need_locks)));
#endif
return proc;
@@ -1686,7 +1686,7 @@ check_queue(erts_proc_lock_t *lck)
if (lflgs & bit) {
int n;
erts_tse_t *wtr;
- ERTS_LC_ASSERT(lck->queue[lock_no]);
+ ERTS_ASSERT(lck->queue[lock_no]);
wtr = lck->queue[lock_no];
n = 0;
do {
@@ -1697,10 +1697,10 @@ check_queue(erts_proc_lock_t *lck)
wtr = wtr->prev;
n--;
} while (wtr != lck->queue[lock_no]);
- ERTS_LC_ASSERT(n == 0);
+ ERTS_ASSERT(n == 0);
}
else {
- ERTS_LC_ASSERT(!lck->queue[lock_no]);
+ ERTS_ASSERT(!lck->queue[lock_no]);
}
}
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index c3651c8e3c..1e577cf004 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -558,13 +558,13 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
- ERTS_LC_ASSERT(pixlck);
+ ASSERT(pixlck);
erts_mtx_lock(&pixlck->u.mtx);
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
- ERTS_LC_ASSERT(pixlck);
+ ASSERT(pixlck);
erts_mtx_unlock(&pixlck->u.mtx);
}
@@ -671,7 +671,7 @@ erts_proc_lock__(Process *p,
erts_lcnt_proc_lock(&(p->lock), locks);
#endif
- ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+ ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_proc_lc_lock(p, locks, file, line);
@@ -690,7 +690,7 @@ erts_proc_lock__(Process *p,
#if !ERTS_PROC_LOCK_ATOMIC_IMPL
else {
- ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+ ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
erts_pix_unlock(pix_lck);
}
#endif
@@ -755,8 +755,8 @@ erts_proc_unlock__(Process *p,
old_lflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
- ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
- ERTS_LC_ASSERT(locks == (old_lflgs & locks));
+ ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+ ASSERT(locks == (old_lflgs & locks));
while (1) {
/*
@@ -826,7 +826,7 @@ erts_proc_trylock__(Process *p,
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+ ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
if (erts_proc_lc_trylock_force_busy(p, locks)) {
res = EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation to occur */
@@ -850,8 +850,7 @@ erts_proc_trylock__(Process *p,
else {
res = 0;
- ERTS_LC_ASSERT(locks
- == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+ ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
#if !ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_unlock(pix_lck);
@@ -898,11 +897,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
erts_aint32_t lock_count;
if (locked) {
lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]);
- ERTS_LC_ASSERT(lock_count == 1);
+ ASSERT(lock_count == 1);
}
else {
lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]);
- ERTS_LC_ASSERT(lock_count == 0);
+ ASSERT(lock_count == 0);
}
}
}
--
2.26.2