File xsa456-0n.patch of Package xen.33138

# Commit 97c5b8b657e41a6645de9d40713b881234417b49
# Date 2024-04-09 16:37:30 +0100
# Author Roger Pau Monne <roger.pau@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/vmx: Add support for virtualize SPEC_CTRL

The feature is defined in the tertiary exec control, and is available starting
from Sapphire Rapids and Alder Lake CPUs.

When enabled, two extra VMCS fields are used: SPEC_CTRL mask and shadow.  Bits
set in mask are not allowed to be toggled by the guest (either set or clear)
and the value in the shadow field is the value the guest expects to be in the
SPEC_CTRL register.

By using it the hypervisor can force the value of SPEC_CTRL bits behind the
guest back without having to trap all accesses to SPEC_CTRL, note that no bits
are forced into the guest as part of this patch.  It also allows getting rid of
SPEC_CTRL in the guest MSR load list, since the value in the shadow field will
be loaded by the hardware on vmentry.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -186,6 +186,7 @@ static void __init vmx_display_features(
     P(cpu_has_vmx_virt_exceptions, "Virtualisation Exceptions");
     P(cpu_has_vmx_pml, "Page Modification Logging");
     P(cpu_has_vmx_tsc_scaling, "TSC Scaling");
+    P(cpu_has_vmx_virt_spec_ctrl, "Virtualize SPEC_CTRL");
 #undef P
 
     if ( !printed )
@@ -333,7 +334,7 @@ static int vmx_init_vmcs_config(void)
 
     if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS )
     {
-        uint64_t opt = 0;
+        uint64_t opt = TERTIARY_EXEC_VIRT_SPEC_CTRL;
 
         _vmx_tertiary_exec_control = adjust_vmx_controls2(
             "Tertiary Exec Control", 0, opt,
@@ -1346,6 +1347,12 @@ static int construct_vmcs(struct vcpu *v
     if ( cpu_has_vmx_tsc_scaling )
         __vmwrite(TSC_MULTIPLIER, d->arch.hvm.tsc_scaling_ratio);
 
+    if ( cpu_has_vmx_virt_spec_ctrl )
+    {
+        __vmwrite(SPEC_CTRL_MASK, 0);
+        __vmwrite(SPEC_CTRL_SHADOW, 0);
+    }
+
     /* will update HOST & GUEST_CR3 as reqd */
     paging_update_paging_modes(v);
 
@@ -2054,6 +2061,9 @@ void vmcs_dump_vcpu(struct vcpu *v)
     if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
         printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
+    if ( cpu_has_vmx_virt_spec_ctrl )
+        printk("SPEC_CTRL mask = 0x%016lx  shadow = 0x%016lx\n",
+               vmr(SPEC_CTRL_MASK), vmr(SPEC_CTRL_SHADOW));
 
     printk("*** Host State ***\n");
     printk("RIP = 0x%016lx (%ps)  RSP = 0x%016lx\n",
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -573,6 +573,10 @@ static void vmx_cpuid_policy_changed(str
     /*
      * We can safely pass MSR_SPEC_CTRL through to the guest, even if STIBP
      * isn't enumerated in hardware, as SPEC_CTRL_STIBP is ignored.
+     *
+     * If VMX_VIRT_SPEC_CTRL is available, it is activated by default and the
+     * guest MSR_SPEC_CTRL value lives in the VMCS.  Otherwise, it lives in
+     * the MSR load/save list.
      */
     if ( cp->feat.ibrsb )
         vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
@@ -1179,6 +1183,24 @@ static bool vmx_get_guest_bndcfgs(struct
     return true;
 }
 
+static void vmx_set_guest_spec_ctrl(struct vcpu *v, u64 val)
+{
+    ASSERT(cpu_has_vmx_virt_spec_ctrl);
+
+    vmx_vmcs_enter(v);
+    __vmwrite(SPEC_CTRL_SHADOW, val);
+    vmx_vmcs_exit(v);
+}
+
+static void vmx_get_guest_spec_ctrl(struct vcpu *v, u64 *val)
+{
+    ASSERT(cpu_has_vmx_virt_spec_ctrl);
+
+    vmx_vmcs_enter(v);
+    __vmread(SPEC_CTRL_SHADOW, val);
+    vmx_vmcs_exit(v);
+}
+
 static void vmx_handle_cd(struct vcpu *v, unsigned long value)
 {
     if ( !paging_mode_hap(v->domain) )
@@ -2554,6 +2576,12 @@ const struct hvm_function_table * __init
         vmx_function_table.get_guest_bndcfgs = vmx_get_guest_bndcfgs;
     }
 
+    if ( cpu_has_vmx_virt_spec_ctrl )
+    {
+        vmx_function_table.set_guest_spec_ctrl = vmx_set_guest_spec_ctrl;
+        vmx_function_table.get_guest_spec_ctrl = vmx_get_guest_spec_ctrl;
+    }
+
     setup_vmcs_dump();
 
     lbr_tsx_fixup_check();
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -116,7 +116,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t
     case MSR_SPEC_CTRL:
         if ( !cp->feat.ibrsb && !cp->extd.ibrs )
             goto gp_fault;
-        *val = msrs->spec_ctrl.raw;
+        if ( !is_hvm_domain(d) || !hvm_get_guest_spec_ctrl(v, val) )
+            *val = msrs->spec_ctrl.raw;
         break;
 
     case MSR_INTEL_PLATFORM_INFO:
@@ -336,6 +337,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t
              (val & ~msr_spec_ctrl_valid_bits(cp)) )
             goto gp_fault;
 
+        if ( is_hvm_domain(d) )
+            hvm_set_guest_spec_ctrl(v, val);
         msrs->spec_ctrl.raw = val;
         break;
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -148,6 +148,9 @@ struct hvm_function_table {
     bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
     bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
 
+    void (*get_guest_spec_ctrl)(struct vcpu *v, u64 *);
+    void (*set_guest_spec_ctrl)(struct vcpu *v, u64);
+
     void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
 
     void (*inject_event)(const struct x86_event *event);
@@ -448,6 +451,20 @@ static inline bool hvm_get_guest_bndcfgs
            alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
 }
 
+static inline bool hvm_get_guest_spec_ctrl(struct vcpu *v, u64 *val)
+{
+    if ( !hvm_funcs.get_guest_spec_ctrl )
+        return false;
+    alternative_vcall(hvm_funcs.get_guest_spec_ctrl, v, val);
+    return true;
+}
+
+static inline void hvm_set_guest_spec_ctrl(struct vcpu *v, u64 val)
+{
+    if ( hvm_funcs.set_guest_spec_ctrl )
+        alternative_vcall(hvm_funcs.set_guest_spec_ctrl, v, val);
+}
+
 #define has_hvm_params(d) \
     ((d)->arch.hvm.params != NULL)
 
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -278,6 +278,9 @@ extern u32 vmx_secondary_exec_control;
 #define TERTIARY_EXEC_VIRT_SPEC_CTRL            BIT(7, UL)
 extern uint64_t vmx_tertiary_exec_control;
 
+#define cpu_has_vmx_virt_spec_ctrl \
+     (vmx_tertiary_exec_control & TERTIARY_EXEC_VIRT_SPEC_CTRL)
+
 #define VMX_EPT_EXEC_ONLY_SUPPORTED                         0x00000001
 #define VMX_EPT_WALK_LENGTH_4_SUPPORTED                     0x00000040
 #define VMX_EPT_MEMORY_TYPE_UC                              0x00000100
@@ -434,6 +437,8 @@ enum vmcs_field {
     XSS_EXIT_BITMAP                 = 0x0000202c,
     TSC_MULTIPLIER                  = 0x00002032,
     TERTIARY_VM_EXEC_CONTROL        = 0x00002034,
+    SPEC_CTRL_MASK                  = 0x0000204a,
+    SPEC_CTRL_SHADOW                = 0x0000204c,
     GUEST_PHYSICAL_ADDRESS          = 0x00002400,
     VMCS_LINK_POINTER               = 0x00002800,
     GUEST_IA32_DEBUGCTL             = 0x00002802,