From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Subject: x86: protect conditional lock taking from speculative execution
Conditionally taken locks that use the pattern:
if ( lock )
spin_lock(...);
need an else branch in order to issue a speculation barrier in the else case,
just as is done when the lock is acquired.
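In open-coded form that means adding an explicit else branch that issues the
barrier, e.g. (illustrative sketch, using the existing block_lock_speculation()
helper):

    if ( lock )
        spin_lock(...);
    else
        block_lock_speculation();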
eval_nospec() could be used on the condition itself, but that would result in a
double barrier on the branch where the lock is taken.
Introduce a new pair of helpers, {gfn,spin}_lock_if(), that can be used to
conditionally take a lock in a speculation-safe way.
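As an illustration (the concrete conversions are in the diff below), a call
site that currently reads:

    if ( locking )
        spin_lock(&map_pgdir_lock);

becomes:

    spin_lock_if(locking, &map_pgdir_lock);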
This is part of XSA-453 / CVE-2024-2193
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 03cf7ca23e0e876075954c558485b267b7d02406)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5150,8 +5150,7 @@ static l3_pgentry_t *virt_to_xen_l3e(uns
if ( !l3t )
return NULL;
clear_page(l3t);
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
{
l4_pgentry_t l4e = l4e_from_paddr(__pa(l3t), __PAGE_HYPERVISOR);
@@ -5185,8 +5184,7 @@ static l2_pgentry_t *virt_to_xen_l2e(uns
if ( !l2t )
return NULL;
clear_page(l2t);
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
{
l3e_write(pl3e, l3e_from_paddr(__pa(l2t), __PAGE_HYPERVISOR));
@@ -5218,8 +5216,7 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned l
if ( !l1t )
return NULL;
clear_page(l1t);
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
{
l2e_write(pl2e, l2e_from_paddr(__pa(l1t), __PAGE_HYPERVISOR));
@@ -5254,6 +5251,8 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned l
do { \
if ( locking ) \
l3t_lock(page); \
+ else \
+ block_lock_speculation(); \
} while ( false )
#define L3T_UNLOCK(page) \
@@ -5406,8 +5405,7 @@ int map_pages_to_xen(
if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
flush_flags |= FLUSH_TLB_GLOBAL;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
(l3e_get_flags(*pl3e) & _PAGE_PSE) )
{
@@ -5506,8 +5504,7 @@ int map_pages_to_xen(
if ( l2e_get_flags(*pl2e) & _PAGE_GLOBAL )
flush_flags |= FLUSH_TLB_GLOBAL;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
(l2e_get_flags(*pl2e) & _PAGE_PSE) )
{
@@ -5546,8 +5543,7 @@ int map_pages_to_xen(
unsigned long base_mfn;
const l1_pgentry_t *l1t;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
ol2e = *pl2e;
/*
@@ -5600,8 +5596,7 @@ int map_pages_to_xen(
unsigned long base_mfn;
const l2_pgentry_t *l2t;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
ol3e = *pl3e;
/*
@@ -5736,8 +5731,8 @@ int modify_xen_mappings(unsigned long s,
l2e_from_pfn(l3e_get_pfn(*pl3e) +
(i << PAGETABLE_ORDER),
l3e_get_flags(*pl3e)));
- if ( locking )
- spin_lock(&map_pgdir_lock);
+
+ spin_lock_if(locking, &map_pgdir_lock);
if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
(l3e_get_flags(*pl3e) & _PAGE_PSE) )
{
@@ -5793,8 +5788,8 @@ int modify_xen_mappings(unsigned long s,
l1e_write(&l1t[i],
l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
l2e_get_flags(*pl2e) & ~_PAGE_PSE));
- if ( locking )
- spin_lock(&map_pgdir_lock);
+
+ spin_lock_if(locking, &map_pgdir_lock);
if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
(l2e_get_flags(*pl2e) & _PAGE_PSE) )
{
@@ -5836,8 +5831,7 @@ int modify_xen_mappings(unsigned long s,
*/
if ( (nf & _PAGE_PRESENT) || ((v != e) && (l1_table_offset(v) != 0)) )
continue;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
/*
* L2E may be already cleared, or set to a superpage, by
@@ -5882,8 +5876,7 @@ int modify_xen_mappings(unsigned long s,
if ( (nf & _PAGE_PRESENT) ||
((v != e) && (l2_table_offset(v) + l1_table_offset(v) != 0)) )
continue;
- if ( locking )
- spin_lock(&map_pgdir_lock);
+ spin_lock_if(locking, &map_pgdir_lock);
/*
* L3E may be already cleared, or set to a superpage, by
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -347,6 +347,15 @@ static inline void p2m_unlock(struct p2m
#define p2m_locked_by_me(p) mm_write_locked_by_me(&(p)->lock)
#define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
+static always_inline void gfn_lock_if(bool condition, struct p2m_domain *p2m,
+ gfn_t gfn, unsigned int order)
+{
+ if ( condition )
+ gfn_lock(p2m, gfn, order);
+ else
+ block_lock_speculation();
+}
+
/* PoD lock (per-p2m-table)
*
* Protects private PoD data structs: entry and cache
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -502,9 +502,8 @@ mfn_t __get_gfn_type_access(struct p2m_d
return _mfn(gfn_l);
}
- if ( locked )
- /* Grab the lock here, don't release until put_gfn */
- gfn_lock(p2m, gfn, 0);
+ /* Grab the lock here, don't release until put_gfn */
+ gfn_lock_if(locked, p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -208,6 +208,14 @@ static always_inline void spin_lock_irq(
block_lock_speculation(); \
})
+/* Conditionally take a spinlock in a speculation safe way. */
+static always_inline void spin_lock_if(bool condition, spinlock_t *l)
+{
+ if ( condition )
+ _spin_lock(l);
+ block_lock_speculation();
+}
+
#define spin_unlock(l) _spin_unlock(l)
#define spin_unlock_irq(l) _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f) _spin_unlock_irqrestore(l, f)