File xsa453-6.patch of Package xen.33625
From: Roger Pau Monné <roger.pau@citrix.com>
Subject: locking: attempt to ensure lock wrappers are always inline

In order to prevent the locking speculation barriers from being inside
`call`ed functions that could be speculatively bypassed.

While there, also add an extra locking barrier to _mm_write_lock() in the
branch taken when the lock is already held.

Note that some functions are switched to use the unsafe variants (without a
speculation barrier) of the locking primitives, but a speculation barrier is
always added to the exposed public lock-wrapping helper. That's the case with
sched_spin_lock_double() and pcidevs_lock(), for example.

This is part of XSA-453 / CVE-2024-2193

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 197ecd838a2aaf959a469df3696d4559c4f8b762)
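Note (not part of the applied patch): a minimal, self-contained sketch of the
pattern this change applies throughout follows. The internal primitive takes
the lock without a speculation barrier, and the public wrapper is forced
inline so the single barrier it issues lands in the caller's body instead of
behind a `call` that speculation could bypass. The demo_lock_t type,
_demo_lock_unsafe() and demo_lock() names are illustrative stand-ins, and the
lfence is an x86 stand-in for Xen's block_lock_speculation(); it builds with
GCC/Clang C11 atomics.

#include <stdatomic.h>

/* Stand-in for Xen's always_inline attribute macro. */
#define always_inline inline __attribute__((__always_inline__))

/* Stand-in for block_lock_speculation(): an architectural speculation barrier. */
static always_inline void demo_block_lock_speculation(void)
{
    asm volatile ( "lfence" ::: "memory" );
}

typedef struct { atomic_flag busy; } demo_lock_t; /* init with ATOMIC_FLAG_INIT */

/* "Unsafe" primitive: acquires the lock but issues no speculation barrier. */
static inline void _demo_lock_unsafe(demo_lock_t *l)
{
    while ( atomic_flag_test_and_set_explicit(&l->busy, memory_order_acquire) )
        ; /* spin */
}

/*
 * Public wrapper: always_inline guarantees the barrier is emitted in the
 * caller's function body rather than inside a called helper.
 */
static always_inline void demo_lock(demo_lock_t *l)
{
    _demo_lock_unsafe(l);
    demo_block_lock_speculation();
}

This mirrors, for example, how pcidevs_lock() in xen/include/xen/pci.h below
becomes an always_inline wrapper that calls pcidevs_lock_unsafe() and then
block_lock_speculation().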
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -161,7 +161,7 @@ static int pt_irq_masked(struct periodic
* pt->vcpu field, because another thread holding the pt_migrate lock
* may already be spinning waiting for your vcpu lock.
*/
-static void pt_vcpu_lock(struct vcpu *v)
+static always_inline void pt_vcpu_lock(struct vcpu *v)
{
spin_lock(&v->arch.hvm.tm_lock);
}
@@ -180,9 +180,13 @@ static void pt_vcpu_unlock(struct vcpu *
* need to take an additional lock that protects against pt->vcpu
* changing.
*/
-static void pt_lock(struct periodic_time *pt)
+static always_inline void pt_lock(struct periodic_time *pt)
{
- read_lock(&pt->vcpu->domain->arch.hvm.pl_time->pt_migrate);
+ /*
+ * Use the speculation unsafe variant for the first lock, as the following
+ * lock taking helper already includes a speculation barrier.
+ */
+ _read_lock(&pt->vcpu->domain->arch.hvm.pl_time->pt_migrate);
spin_lock(&pt->vcpu->arch.hvm.tm_lock);
}
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -88,8 +88,8 @@ static inline void _set_lock_level(int l
this_cpu(mm_lock_level) = l;
}
-static inline void _mm_lock(const struct domain *d, mm_lock_t *l,
- const char *func, int level, int rec)
+static always_inline void _mm_lock(const struct domain *d, mm_lock_t *l,
+ const char *func, int level, int rec)
{
if ( !((mm_locked_by_me(l)) && rec) )
_check_lock_level(d, level);
@@ -139,8 +139,8 @@ static inline int mm_write_locked_by_me(
return (l->locker == get_processor_id());
}
-static inline void _mm_write_lock(const struct domain *d, mm_rwlock_t *l,
- const char *func, int level)
+static always_inline void _mm_write_lock(const struct domain *d, mm_rwlock_t *l,
+ const char *func, int level)
{
if ( !mm_write_locked_by_me(l) )
{
@@ -151,6 +151,8 @@ static inline void _mm_write_lock(const
l->unlock_level = _get_lock_level();
_set_lock_level(_lock_level(d, level));
}
+ else
+ block_lock_speculation();
l->recurse_count++;
}
@@ -164,8 +166,8 @@ static inline void mm_write_unlock(mm_rw
percpu_write_unlock(p2m_percpu_rwlock, &l->lock);
}
-static inline void _mm_read_lock(const struct domain *d, mm_rwlock_t *l,
- int level)
+static always_inline void _mm_read_lock(const struct domain *d, mm_rwlock_t *l,
+ int level)
{
_check_lock_level(d, level);
percpu_read_lock(p2m_percpu_rwlock, &l->lock);
@@ -180,15 +182,15 @@ static inline void mm_read_unlock(mm_rwl
/* This wrapper uses the line number to express the locking order below */
#define declare_mm_lock(name) \
- static inline void mm_lock_##name(const struct domain *d, mm_lock_t *l, \
- const char *func, int rec) \
+ static always_inline void mm_lock_##name( \
+ const struct domain *d, mm_lock_t *l, const char *func, int rec) \
{ _mm_lock(d, l, func, MM_LOCK_ORDER_##name, rec); }
#define declare_mm_rwlock(name) \
- static inline void mm_write_lock_##name(const struct domain *d, \
- mm_rwlock_t *l, const char *func) \
+ static always_inline void mm_write_lock_##name( \
+ const struct domain *d, mm_rwlock_t *l, const char *func) \
{ _mm_write_lock(d, l, func, MM_LOCK_ORDER_##name); } \
- static inline void mm_read_lock_##name(const struct domain *d, \
- mm_rwlock_t *l) \
+ static always_inline void mm_read_lock_##name(const struct domain *d, \
+ mm_rwlock_t *l) \
{ _mm_read_lock(d, l, MM_LOCK_ORDER_##name); }
/* These capture the name of the calling function */
#define mm_lock(name, d, l) mm_lock_##name(d, l, __func__, 0)
@@ -321,7 +323,7 @@ declare_mm_lock(altp2mlist)
#define MM_LOCK_ORDER_altp2m 40
declare_mm_rwlock(altp2m);
-static inline void p2m_lock(struct p2m_domain *p)
+static always_inline void p2m_lock(struct p2m_domain *p)
{
if ( p2m_is_altp2m(p) )
mm_write_lock(altp2m, p->domain, &p->lock);
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -32,7 +32,7 @@
#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)
/* Enforce lock ordering when grabbing the "external" page_alloc lock */
-static inline void lock_page_alloc(struct p2m_domain *p2m)
+static always_inline void lock_page_alloc(struct p2m_domain *p2m)
{
page_alloc_mm_pre_lock(p2m->domain);
spin_lock(&(p2m->domain->page_alloc_lock));
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -57,7 +57,7 @@
* just assume the event channel is free or unbound at the moment when the
* evtchn_read_trylock() returns false.
*/
-static inline void evtchn_write_lock(struct evtchn *evtchn)
+static always_inline void evtchn_write_lock(struct evtchn *evtchn)
{
write_lock(&evtchn->lock);
@@ -317,8 +317,8 @@ static long evtchn_alloc_unbound(evtchn_
return rc;
}
-
-static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
+static always_inline void double_evtchn_lock(struct evtchn *lchn,
+ struct evtchn *rchn)
{
if ( lchn <= rchn )
{
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -382,7 +382,7 @@ static inline void act_set_gfn(struct ac
static DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
-static inline void grant_read_lock(struct grant_table *gt)
+static always_inline void grant_read_lock(struct grant_table *gt)
{
percpu_read_lock(grant_rwlock, &gt->lock);
}
@@ -392,7 +392,7 @@ static inline void grant_read_unlock(str
percpu_read_unlock(grant_rwlock, &gt->lock);
}
-static inline void grant_write_lock(struct grant_table *gt)
+static always_inline void grant_write_lock(struct grant_table *gt)
{
percpu_write_lock(grant_rwlock, &gt->lock);
}
@@ -429,7 +429,7 @@ nr_active_grant_frames(struct grant_tabl
return num_act_frames_from_sha_frames(nr_grant_frames(gt));
}
-static inline struct active_grant_entry *
+static always_inline struct active_grant_entry *
active_entry_acquire(struct grant_table *t, grant_ref_t e)
{
struct active_grant_entry *act;
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -348,23 +348,28 @@ uint64_t get_cpu_idle_time(unsigned int
* This avoids dead- or live-locks when this code is running on both
* cpus at the same time.
*/
-static void sched_spin_lock_double(spinlock_t *lock1, spinlock_t *lock2,
- unsigned long *flags)
+static always_inline void sched_spin_lock_double(
+ spinlock_t *lock1, spinlock_t *lock2, unsigned long *flags)
{
+ /*
+ * In order to avoid extra overhead, use the locking primitives without the
+ * speculation barrier, and introduce a single barrier here.
+ */
if ( lock1 == lock2 )
{
- spin_lock_irqsave(lock1, *flags);
+ *flags = _spin_lock_irqsave(lock1);
}
else if ( lock1 < lock2 )
{
- spin_lock_irqsave(lock1, *flags);
- spin_lock(lock2);
+ *flags = _spin_lock_irqsave(lock1);
+ _spin_lock(lock2);
}
else
{
- spin_lock_irqsave(lock2, *flags);
- spin_lock(lock1);
+ *flags = _spin_lock_irqsave(lock2);
+ _spin_lock(lock1);
}
+ block_lock_speculation();
}
static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -206,8 +206,24 @@ DECLARE_PER_CPU(cpumask_t, cpumask_scrat
#define cpumask_scratch (&this_cpu(cpumask_scratch))
#define cpumask_scratch_cpu(c) (&per_cpu(cpumask_scratch, c))
+/*
+ * Deal with _spin_lock_irqsave() returning the flags value instead of storing
+ * it in a passed parameter.
+ */
+#define _sched_spinlock0(lock, irq) _spin_lock##irq(lock)
+#define _sched_spinlock1(lock, irq, arg) ({ \
+ BUILD_BUG_ON(sizeof(arg) != sizeof(unsigned long)); \
+ (arg) = _spin_lock##irq(lock); \
+})
+
+#define _sched_spinlock__(nr) _sched_spinlock ## nr
+#define _sched_spinlock_(nr) _sched_spinlock__(nr)
+#define _sched_spinlock(lock, irq, args...) \
+ _sched_spinlock_(count_args(args))(lock, irq, ## args)
+
#define sched_lock(kind, param, cpu, irq, arg...) \
-static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+static always_inline spinlock_t \
+*kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
{ \
for ( ; ; ) \
{ \
@@ -219,10 +235,16 @@ static inline spinlock_t *kind##_schedul
* \
* It may also be the case that v->processor may change but the \
* lock may be the same; this will succeed in that case. \
+ * \
+ * Use the speculation unsafe locking helper, there's a speculation \
+ * barrier before returning to the caller. \
*/ \
- spin_lock##irq(lock, ## arg); \
+ _sched_spinlock(lock, irq, ## arg); \
if ( likely(lock == get_sched_res(cpu)->schedule_lock) ) \
+ { \
+ block_lock_speculation(); \
return lock; \
+ } \
spin_unlock##irq(lock, ## arg); \
} \
}
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -240,7 +240,7 @@ static inline void deactivate_timer(stru
list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive);
}
-static inline bool_t timer_lock(struct timer *timer)
+static inline bool_t timer_lock_unsafe(struct timer *timer)
{
unsigned int cpu;
@@ -254,7 +254,8 @@ static inline bool_t timer_lock(struct t
rcu_read_unlock(&timer_cpu_read_lock);
return 0;
}
- spin_lock(&per_cpu(timers, cpu).lock);
+ /* Use the speculation unsafe variant, the wrapper has the barrier. */
+ _spin_lock(&per_cpu(timers, cpu).lock);
if ( likely(timer->cpu == cpu) )
break;
spin_unlock(&per_cpu(timers, cpu).lock);
@@ -267,8 +268,9 @@ static inline bool_t timer_lock(struct t
#define timer_lock_irqsave(t, flags) ({ \
bool_t __x; \
local_irq_save(flags); \
- if ( !(__x = timer_lock(t)) ) \
+ if ( !(__x = timer_lock_unsafe(t)) ) \
local_irq_restore(flags); \
+ block_lock_speculation(); \
__x; \
})
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -54,9 +54,10 @@ struct pci_seg {
static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
-void pcidevs_lock(void)
+/* Do not use, as it has no speculation barrier, use pcidevs_lock() instead. */
+void pcidevs_lock_unsafe(void)
{
- spin_lock_recursive(&_pcidevs_lock);
+ _spin_lock_recursive(&_pcidevs_lock);
}
void pcidevs_unlock(void)
@@ -69,11 +70,6 @@ bool_t pcidevs_locked(void)
return !!spin_is_locked(&_pcidevs_lock);
}
-bool_t pcidevs_trylock(void)
-{
- return !!spin_trylock_recursive(&_pcidevs_lock);
-}
-
static struct radix_tree_root pci_segments;
static inline struct pci_seg *get_pseg(u16 seg)
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -177,6 +177,7 @@ extern void irq_complete_move(struct irq
extern struct irq_desc *irq_desc;
+/* Not speculation safe, only used for AP bringup. */
void lock_vector_lock(void);
void unlock_vector_lock(void);
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -111,12 +111,12 @@ static inline unsigned int max_evtchns(c
: BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
}
-static inline void evtchn_read_lock(struct evtchn *evtchn)
+static always_inline void evtchn_read_lock(struct evtchn *evtchn)
{
read_lock(&evtchn->lock);
}
-static inline bool evtchn_read_trylock(struct evtchn *evtchn)
+static always_inline bool evtchn_read_trylock(struct evtchn *evtchn)
{
return read_trylock(&evtchn->lock);
}
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -144,11 +144,14 @@ struct pci_dev {
* devices, it also sync the access to the msi capability that is not
* interrupt handling related (the mask bit register).
*/
-
-void pcidevs_lock(void);
+void pcidevs_lock_unsafe(void);
+static always_inline void pcidevs_lock(void)
+{
+ pcidevs_lock_unsafe();
+ block_lock_speculation();
+}
void pcidevs_unlock(void);
bool_t __must_check pcidevs_locked(void);
-bool_t __must_check pcidevs_trylock(void);
bool_t pci_known_segment(u16 seg);
bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func);