File 54c27c6d-vmx-merge-MSR-management-routines.patch of Package xen.11298
# Commit f0195f9f096cdf2dc233d4f34f96c19b44a15252
# Date 2015-01-23 17:53:01 +0100
# Author Boris Ostrovsky <boris.ostrovsky@oracle.com>
# Committer Jan Beulich <jbeulich@suse.com>
vmx: merge MSR management routines
vmx_add_host_load_msr() and vmx_add_guest_msr() share a fair amount of
code. Merge them to simplify code maintenance.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
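
For illustration, a minimal sketch of how callers see the merged interface:
the inline wrappers added below keep existing call sites unchanged, and
MSR_STAR here is purely an example MSR, not taken from this patch.

    /* Illustrative only: MSR_STAR stands in for whatever MSR a caller
     * wants auto-saved/loaded; errors come straight from vmx_add_msr(). */
    int rc = vmx_add_guest_msr(MSR_STAR);      /* vmx_add_msr(MSR_STAR, VMX_GUEST_MSR) */
    if ( rc == 0 )
        rc = vmx_add_host_load_msr(MSR_STAR);  /* vmx_add_msr(MSR_STAR, VMX_HOST_MSR) */
    if ( rc )
        return rc;                             /* -ENOMEM or -ENOSPC */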
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1226,64 +1226,62 @@ int vmx_write_guest_msr(u32 msr, u64 val
return -ESRCH;
}
 
-int vmx_add_guest_msr(u32 msr)
+int vmx_add_msr(u32 msr, int type)
{
struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ unsigned int idx, *msr_count;
+ struct vmx_msr_entry **msr_area, *msr_area_elem;
 
- if ( msr_area == NULL )
+ if ( type == VMX_GUEST_MSR )
{
- if ( (msr_area = alloc_xenheap_page()) == NULL )
+ msr_count = &curr->arch.hvm_vmx.msr_count;
+ msr_area = &curr->arch.hvm_vmx.msr_area;
+ }
+ else
+ {
+ ASSERT(type == VMX_HOST_MSR);
+ msr_count = &curr->arch.hvm_vmx.host_msr_count;
+ msr_area = &curr->arch.hvm_vmx.host_msr_area;
+ }
+
+ if ( *msr_area == NULL )
+ {
+ if ( (*msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- curr->arch.hvm_vmx.msr_area = msr_area;
- __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
- __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+
+ if ( type == VMX_GUEST_MSR )
+ {
+ __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
+ __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ }
+ else
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
}
 
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
+ for ( idx = 0; idx < *msr_count; idx++ )
+ if ( (*msr_area)[idx].index == msr )
return 0;
 
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+ if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
return -ENOSPC;
 
- msr_area[msr_count].index = msr;
- msr_area[msr_count].mbz = 0;
- msr_area[msr_count].data = 0;
- curr->arch.hvm_vmx.msr_count = ++msr_count;
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
+ msr_area_elem = *msr_area + *msr_count;
+ msr_area_elem->index = msr;
+ msr_area_elem->mbz = 0;
 
- return 0;
-}
+ ++*msr_count;
 
-int vmx_add_host_load_msr(u32 msr)
-{
- struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
-
- if ( msr_area == NULL )
+ if ( type == VMX_GUEST_MSR )
{
- if ( (msr_area = alloc_xenheap_page()) == NULL )
- return -ENOMEM;
- curr->arch.hvm_vmx.host_msr_area = msr_area;
- __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+ msr_area_elem->data = 0;
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
+ }
+ else
+ {
+ rdmsrl(msr, msr_area_elem->data);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
}
-
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
- return 0;
-
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
- return -ENOSPC;
-
- msr_area[msr_count].index = msr;
- msr_area[msr_count].mbz = 0;
- rdmsrl(msr, msr_area[msr_count].data);
- curr->arch.hvm_vmx.host_msr_count = ++msr_count;
- __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
return 0;
}
 
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -486,12 +486,15 @@ extern const unsigned int vmx_introspect
 
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
+
+#define VMX_GUEST_MSR 0
+#define VMX_HOST_MSR 1
+
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
int vmx_read_guest_msr(u32 msr, u64 *val);
int vmx_write_guest_msr(u32 msr, u64 val);
-int vmx_add_guest_msr(u32 msr);
-int vmx_add_host_load_msr(u32 msr);
+int vmx_add_msr(u32 msr, int type);
void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
@@ -501,6 +504,15 @@ void virtual_vmcs_exit(void *vvmcs);
u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding);
void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val);
 
+static inline int vmx_add_guest_msr(u32 msr)
+{
+ return vmx_add_msr(msr, VMX_GUEST_MSR);
+}
+static inline int vmx_add_host_load_msr(u32 msr)
+{
+ return vmx_add_msr(msr, VMX_HOST_MSR);
+}
+
DECLARE_PER_CPU(bool_t, vmxon);
 
#endif /* ASM_X86_HVM_VMX_VMCS_H__ */