# Commit 4611f529c0e39493a3945641cc161967a864d6b5
# Date 2018-05-03 17:35:51 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: fix return value checks of set_guest_{machinecheck,nmi}_trapbounce

Commit 0142064421 ("x86/traps: move set_guest_{machine,nmi}_trapbounce")
converted the functions' return types from int to bool without also
correcting the checks in assembly code: The ABI does not guarantee sub-
32-bit return values to be promoted to 32 bits.

Take the liberty and also adjust the number of spaces used in the compat
code, such that both code sequences end up similar (they already are in
the non-compat case).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
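
For reference (not part of the upstream diff below): a minimal C sketch of
the ABI point above, using a hypothetical trapbounce_stub() in place of
set_guest_{machinecheck,nmi}_trapbounce. Under the SysV x86-64 ABI a bool
return value only defines the low 8 bits of %rax, so an assembly caller has
to test %al rather than %eax.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Hypothetical stand-in for set_guest_nmi_trapbounce(): it returns
     * bool, so per the SysV x86-64 ABI only %al holds a defined value on
     * return; bits 8..63 of %rax may contain leftover callee data.
     */
    static bool trapbounce_stub(int pending)
    {
        return pending != 0;
    }

    int main(void)
    {
        /*
         * A hand-written assembly caller must therefore test only the
         * low byte:
         *
         *     call  trapbounce_stub
         *     test  %al, %al        # correct: only %al is defined
         *     testl %eax, %eax      # unreliable: upper 24 bits undefined
         *
         * A C caller is safe because the compiler emits the byte test.
         */
        if (trapbounce_stub(1))
            puts("trap bounce pending");
        return 0;
    }
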
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -74,9 +74,9 @@ compat_process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
jnz .Lcompat_test_guest_nmi
sti
- movb $0,VCPU_mce_pending(%rbx)
- call set_guest_machinecheck_trapbounce
- testl %eax,%eax
+ movb $0, VCPU_mce_pending(%rbx)
+ call set_guest_machinecheck_trapbounce
+ test %al, %al
jz compat_test_all_events
movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
@@ -88,11 +88,11 @@ compat_process_mce:
/* %rbx: struct vcpu */
compat_process_nmi:
testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
- jnz compat_test_guest_events
+ jnz compat_test_guest_events
sti
- movb $0,VCPU_nmi_pending(%rbx)
+ movb $0, VCPU_nmi_pending(%rbx)
call set_guest_nmi_trapbounce
- testl %eax,%eax
+ test %al, %al
jz compat_test_all_events
movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
movb %dl,VCPU_nmi_old_mask(%rbx) # iret hypercall
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -77,7 +77,7 @@ process_mce:
sti
movb $0, VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
- test %eax, %eax
+ test %al, %al
jz test_all_events
movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
movb %dl, VCPU_mce_old_mask(%rbx) # iret hypercall
@@ -93,7 +93,7 @@ process_nmi:
sti
movb $0, VCPU_nmi_pending(%rbx)
call set_guest_nmi_trapbounce
- test %eax, %eax
+ test %al, %al
jz test_all_events
movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
movb %dl, VCPU_nmi_old_mask(%rbx) # iret hypercall