File xsa314.patch of Package xen.26345

Subject: xen/rwlock: Add missing memory barrier in the unlock path of rwlock
From: Julien Grall <jgrall@amazon.com>
Date: Tue Apr 14 15:11:38 2020 +0200
Git: 38e589d4b0eeb917e14cc021e62669f04cabd31b

The rwlock unlock paths are using atomic_sub() to release the lock.
However the implementation of atomic_sub() rightfully doesn't contain a
memory barrier. On Arm, this means a processor is allowed to re-order
the memory access with the preceding accesses.

In other words, the unlock may be seen by another processor before all
the memory accesses within the "critical" section.

The rwlock lock paths already contain barriers indirectly, but they are
not very useful without the counterpart in the unlock paths.

The memory barriers are not necessary on x86 because loads/stores are
not re-ordered with lock instructions.

So add arch_lock_release_barrier() in the unlock paths that will only
add memory barrier on Arm.
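
For illustration only (not part of this patch), the per-architecture
definitions are expected to look roughly like the sketch below; the
exact macro bodies live in each architecture's asm/spinlock.h:

    /* Sketch, for illustration only: per-arch barrier macros used by the
     * generic lock code. Exact definitions live in asm/spinlock.h. */

    /* x86: loads/stores are not re-ordered with lock instructions, so a
     * compiler barrier is sufficient. */
    #define arch_lock_acquire_barrier() barrier()
    #define arch_lock_release_barrier() barrier()

    /* Arm: a full memory barrier is needed to order the critical section
     * against the lock/unlock accesses. */
    #define arch_lock_acquire_barrier() smp_mb()
    #define arch_lock_release_barrier() smp_mb()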

Take the opportunity to document each lock path, explaining why a
barrier is not necessary.

This is XSA-314.

Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
master commit: 6890a04072e664c25447a297fe663b45ecfd6398
master date: 2020-04-14 14:37:11 +0200

diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 35657c56c4..15c3f8155e 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -47,6 +47,10 @@ static inline int _read_trylock(rwlock_t *lock)
     if ( likely(!(cnts & _QW_WMASK)) )
     {
         cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+        /*
+         * atomic_add_return() is a full barrier so no need for an
+         * arch_lock_acquire_barrier().
+         */
         if ( likely(!(cnts & _QW_WMASK)) )
             return 1;
         atomic_sub(_QR_BIAS, &lock->cnts);
@@ -63,11 +67,19 @@ static inline void _read_lock(rwlock_t *lock)
     u32 cnts;
 
     cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+    /*
+     * atomic_add_return() is a full barrier so no need for an
+     * arch_lock_acquire_barrier().
+     */
     if ( likely(!(cnts & _QW_WMASK)) )
         return;
 
     /* The slowpath will decrement the reader count, if necessary. */
     queue_read_lock_slowpath(lock);
+    /*
+     * queue_read_lock_slowpath() is using spinlock and therefore is a
+     * full barrier. So no need for an arch_lock_acquire_barrier().
+     */
 }
 
 static inline void _read_lock_irq(rwlock_t *lock)
@@ -91,6 +103,7 @@ static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
  */
 static inline void _read_unlock(rwlock_t *lock)
 {
+    arch_lock_release_barrier();
     /*
      * Atomically decrement the reader count
      */
@@ -120,11 +133,20 @@ static inline int _rw_is_locked(rwlock_t *lock)
  */
 static inline void _write_lock(rwlock_t *lock)
 {
-    /* Optimize for the unfair lock case where the fair flag is 0. */
+    /*
+     * Optimize for the unfair lock case where the fair flag is 0.
+     *
+     * atomic_cmpxchg() is a full barrier so no need for an
+     * arch_lock_acquire_barrier().
+     */
     if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
         return;
 
     queue_write_lock_slowpath(lock);
+    /*
+     * queue_write_lock_slowpath() is using spinlock and therefore is a
+     * full barrier. So no need for an arch_lock_acquire_barrier().
+     */
 }
 
 static inline void _write_lock_irq(rwlock_t *lock)
@@ -156,11 +178,16 @@ static inline int _write_trylock(rwlock_t *lock)
     if ( unlikely(cnts) )
         return 0;
 
+    /*
+     * atomic_cmpxchg() is a full barrier so no need for an
+     * arch_lock_acquire_barrier().
+     */
     return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
 }
 
 static inline void _write_unlock(rwlock_t *lock)
 {
+    arch_lock_release_barrier();
     /*
      * If the writer field is atomic, it can be cleared directly.
      * Otherwise, an atomic subtraction will be used to clear it.
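
As a usage illustration (not part of the patch), the following sketch
shows a writer/reader pair where the release barrier added in
_write_unlock() matters; DEFINE_RWLOCK() and the read_lock()/write_lock()
wrappers around the _read_*()/_write_*() helpers patched above are
assumed to be the usual ones from xen/rwlock.h:

    /* Illustration only: why the release barrier in the unlock path
     * matters. Assumes the usual rwlock wrappers from xen/rwlock.h. */
    static DEFINE_RWLOCK(example_lock);
    static int shared_data;

    void writer(void)
    {
        write_lock(&example_lock);
        shared_data = 42;            /* store inside the critical section */
        write_unlock(&example_lock); /* without arch_lock_release_barrier(),
                                      * Arm could make the unlock visible
                                      * before the store above */
    }

    void reader(void)
    {
        int val;

        read_lock(&example_lock);
        val = shared_data;           /* must observe 42 once the writer has
                                      * released the lock */
        read_unlock(&example_lock);
        (void)val;
    }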