File xsa435-0-31.patch of Package xen.31136
From 994c1553a158ada9db5ab64c9178a0d23c0a42ce Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 29 Mar 2023 13:07:03 +0100
Subject: x86: Remove temporary {cpuid,msr}_policy defines
With all code areas updated, drop the temporary defines and adjust all
remaining users.
No practical change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -35,7 +35,7 @@ void guest_cpuid(const struct vcpu *v, u
uint32_t subleaf, struct cpuid_leaf *res)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
*res = EMPTY_LEAF;
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -49,7 +49,7 @@ static int gdbsx_guest_mem_io(domid_t do
static int update_domain_cpuid_info(struct domain *d,
const struct xen_domctl_cpuid *ctl)
{
- struct cpuid_policy *p = d->arch.cpuid;
+ struct cpu_policy *p = d->arch.cpu_policy;
const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
int old_vendor = p->x86_vendor;
unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -892,7 +892,7 @@ const char *hvm_efer_valid(const struct
signed int cr0_pg)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
if ( value & ~EFER_KNOWN_MASK )
return "Unknown bits set";
@@ -929,7 +929,7 @@ const char *hvm_efer_valid(const struct
/* These bits in CR4 can be set by the guest. */
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore)
{
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
bool mce, vmxe;
/* Logic broken out simply to aid readability below. */
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -593,7 +593,7 @@ static void svm_cpuid_policy_changed(str
{
struct svm_vcpu *svm = &v->arch.hvm.svm;
struct vmcb_struct *vmcb = svm->vmcb;
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
if ( opt_hvm_fep ||
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1074,7 +1074,7 @@ static void set_x2apic_id(struct vlapic
int guest_wrmsr_apic_base(struct vcpu *v, uint64_t value)
{
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
struct vlapic *vlapic = vcpu_vlapic(v);
if ( !has_vlapic(v->domain) )
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -552,7 +552,7 @@ void vmx_update_exception_bitmap(struct
static void vmx_cpuid_policy_changed(struct vcpu *v)
{
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
if ( opt_hvm_fep ||
(v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
@@ -3249,7 +3249,7 @@ void vmx_vlapic_msr_changed(struct vcpu
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
struct vcpu *v = current;
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x, msr_value=%#"PRIx64, msr, msr_content);
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -52,8 +52,7 @@ int guest_rdmsr(const struct vcpu *v, ui
{
const struct vcpu *curr = current;
const struct domain *d = v->domain;
- const struct cpuid_policy *cp = d->arch.cpuid;
- const struct msr_policy *mp = d->arch.msr;
+ const struct cpu_policy *cp = d->arch.cpu_policy;
const struct vcpu_msrs *msrs = v->arch.msrs;
int ret = X86EMUL_OKAY;
@@ -119,13 +118,13 @@ int guest_rdmsr(const struct vcpu *v, ui
break;
case MSR_INTEL_PLATFORM_INFO:
- *val = mp->platform_info.raw;
+ *val = cp->platform_info.raw;
break;
case MSR_ARCH_CAPABILITIES:
if ( !cp->feat.arch_caps )
goto gp_fault;
- *val = mp->arch_caps.raw;
+ *val = cp->arch_caps.raw;
break;
case MSR_INTEL_MISC_FEATURES_ENABLES:
@@ -224,7 +223,7 @@ int guest_rdmsr(const struct vcpu *v, ui
* separate CPUID features for this functionality, but only set will be
* active.
*/
-uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp)
+uint64_t msr_spec_ctrl_valid_bits(const struct cpu_policy *cp)
{
bool ssbd = cp->feat.ssbd || cp->extd.amd_ssbd;
bool psfd = cp->feat.intel_psfd || cp->extd.psfd;
@@ -243,8 +242,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
{
const struct vcpu *curr = current;
struct domain *d = v->domain;
- const struct cpuid_policy *cp = d->arch.cpuid;
- const struct msr_policy *mp = d->arch.msr;
+ const struct cpu_policy *cp = d->arch.cpu_policy;
struct vcpu_msrs *msrs = v->arch.msrs;
int ret = X86EMUL_OKAY;
@@ -286,7 +284,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
* for backwards compatiblity, the OS should write 0 to it before
* trying to access the current microcode version.
*/
- if ( d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL || val != 0 )
+ if ( cp->x86_vendor != X86_VENDOR_INTEL || val != 0 )
goto gp_fault;
break;
@@ -296,7 +294,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
* to AMD CPUs as well (at least the architectural/CPUID part does).
*/
if ( is_pv_domain(d) ||
- d->arch.cpuid->x86_vendor != X86_VENDOR_AMD )
+ cp->x86_vendor != X86_VENDOR_AMD )
goto gp_fault;
break;
@@ -308,7 +306,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
* by any CPUID bit.
*/
if ( is_pv_domain(d) ||
- d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL )
+ cp->x86_vendor != X86_VENDOR_INTEL )
goto gp_fault;
break;
@@ -347,7 +345,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
bool old_cpuid_faulting = msrs->misc_features_enables.cpuid_faulting;
rsvd = ~0ull;
- if ( mp->platform_info.cpuid_faulting )
+ if ( cp->platform_info.cpuid_faulting )
rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
if ( val & rsvd )
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -100,7 +100,7 @@ static void release_compat_l4(struct vcp
unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4)
{
- const struct cpuid_policy *p = v->domain->arch.cpuid;
+ const struct cpu_policy *p = v->domain->arch.cpu_policy;
/* Discard attempts to set guest controllable bits outside of the policy. */
cr4 &= ~((p->basic.tsc ? 0 : X86_CR4_TSD) |
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -852,7 +852,7 @@ void cpuid_hypervisor_leaves(const struc
uint32_t subleaf, struct cpuid_leaf *res)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
uint32_t idx = leaf - base;
unsigned int limit = is_viridian_domain(d) ? p->hv2_limit : p->hv_limit;
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -666,7 +666,7 @@ static bool valid_xcr0(u64 xcr0)
int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
const struct xsave_hdr *hdr)
{
- const struct cpuid_policy *cp = d->arch.cpuid;
+ const struct cpu_policy *cp = d->arch.cpu_policy;
uint64_t xcr0_max =
((uint64_t)cp->xstate.xcr0_high << 32) | cp->xstate.xcr0_low;
unsigned int i;
@@ -692,7 +692,7 @@ int validate_xstate(const struct domain
int handle_xsetbv(u32 index, u64 new_bv)
{
struct vcpu *curr = current;
- const struct cpuid_policy *cp = curr->domain->arch.cpuid;
+ const struct cpu_policy *cp = curr->domain->arch.cpu_policy;
uint64_t xcr0_max =
((uint64_t)cp->xstate.xcr0_high << 32) | cp->xstate.xcr0_low;
u64 mask;
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -270,7 +270,7 @@ static inline void wrmsr_tsc_aux(uint32_
}
}
-uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);
+uint64_t msr_spec_ctrl_valid_bits(const struct cpu_policy *cp);
/* Container object for per-vCPU MSRs */
struct vcpu_msrs
--- a/xen/include/xen/lib/x86/cpu-policy.h
+++ b/xen/include/xen/lib/x86/cpu-policy.h
@@ -358,10 +358,6 @@ struct cpu_policy
uint8_t x86_vendor;
};
-/* Temporary */
-#define cpuid_policy cpu_policy
-#define msr_policy cpu_policy
-
/**
* Copy the featureset words out of a cpu_policy object.
*/