# Commit 9c9384d6d8184ca6d21975ccf4e4f72b560540cc
# Date 2017-12-01 18:09:48 +0000
# Author Paul Durrant <paul.durrant@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/hvm: fix interaction between internal and external emulation

A call to handle_hvm_io_completion() is needed for completing I/O
that requires external emulation. Such completion should be requested
when hvm_vcpu_io_need_completion() returns true after hvm_emulate_one()
has completed. This is indicative of the underlying I/O emulation
having returned X86EMUL_RETRY and hence a re-emulation of the
instruction is needed to pick up the result of the I/O.

A call to handle_hvm_io_completion() is NOT needed when the underlying
I/O has not returned X86EMUL_RETRY, since there will be no result to
pick up. Hence it is bogus to request such completion when mmio_retry
is set, since this can only happen if the underlying I/O emulation has
returned X86EMUL_OKAY (meaning the I/O has completed successfully).

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
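
A minimal standalone sketch of the corrected decision, with simplified
stand-in types rather than Xen's (enum emul_rc, struct vcpu_io and
io_need_completion() are illustrative only, not the real interfaces);
only the control flow mirrors the change:

    #include <stdbool.h>
    #include <stdio.h>

    enum emul_rc { EMUL_OKAY, EMUL_RETRY };

    struct vcpu_io {
        enum emul_rc io_rc; /* outcome of the underlying I/O emulation */
        bool mmio_retry;    /* set when a repeated string insn was split */
    };

    /* Stand-in for hvm_vcpu_io_need_completion(): completion is needed
     * only when the underlying I/O returned EMUL_RETRY, i.e. the insn
     * must be re-emulated to pick up an externally emulated result. */
    static bool io_need_completion(const struct vcpu_io *vio)
    {
        return vio->io_rc == EMUL_RETRY;
    }

    int main(void)
    {
        /* mmio_retry set implies the emulation returned EMUL_OKAY, so
         * no completion is due; the old "|| vio->mmio_retry" test would
         * have requested one regardless. */
        struct vcpu_io vio = { .io_rc = EMUL_OKAY, .mmio_retry = true };

        printf("request completion: %s\n",
               io_need_completion(&vio) ? "yes" : "no");
        return 0;
    }
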
# Commit 5fcb26e69e8089e20c9168774bee681b8f5a3187
# Date 2017-12-06 12:50:23 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: don't retain emulated insn cache when exiting back to guest

vio->mmio_retry is being set when a repeated string insn is being split
up. In that case we'll exit to the guest, expecting immediate re-entry.
Interruptions, however, may be serviced by the guest before re-entry
from the repeated string insn. Any emulation needed in the course of
handling the interruption must not fetch from the internally maintained
cache.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
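
A hedged sketch of the cache handling that the switch statement below
introduces, again with simplified stand-in types (post_emulate() and
its arguments are illustrative only): the fetched instruction bytes are
retained solely in the EMUL_RETRY case and dropped in every other one,
including when mmio_retry converts an OKAY result into a retry, since
we then exit back to the guest and any interruption handled before
re-entry must not fetch stale bytes from the cache:

    #include <assert.h>
    #include <stdbool.h>
    #include <string.h>

    #define INSN_BUF_MAX 16

    enum emul_rc { EMUL_OKAY, EMUL_RETRY, EMUL_UNHANDLEABLE };

    struct vcpu_io {
        unsigned int mmio_cache_count;
        unsigned int mmio_insn_bytes;
        unsigned char mmio_insn[INSN_BUF_MAX];
    };

    enum emul_rc post_emulate(struct vcpu_io *vio, enum emul_rc rc,
                              bool mmio_retry,
                              const unsigned char *insn_buf,
                              unsigned int insn_buf_bytes)
    {
        switch ( rc )
        {
        case EMUL_OKAY:
            if ( mmio_retry )
                rc = EMUL_RETRY; /* exit to guest, expecting re-entry */
            /* fall through: not resuming in place, so drop the cache */
        default:
            vio->mmio_cache_count = 0;
            vio->mmio_insn_bytes = 0;
            break;

        case EMUL_RETRY:
            /* Resuming this very insn: keep its bytes for re-emulation
             * (the real code asserts this fits via BUILD_BUG_ON()). */
            assert(insn_buf_bytes <= sizeof(vio->mmio_insn));
            vio->mmio_insn_bytes = insn_buf_bytes;
            memcpy(vio->mmio_insn, insn_buf, insn_buf_bytes);
            break;
        }

        return rc;
    }

Note the asymmetry this creates: a retry synthesized from mmio_retry
clears the cache, while a retry coming from the emulation itself (an
in-flight external I/O) preserves the instruction bytes.
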
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1777,20 +1777,22 @@ static int _hvm_emulate_one(struct hvm_e
     else
         hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_all;
 
-    rc = x86_emulate(&hvmemul_ctxt->ctxt, ops);
-
-    if ( rc == X86EMUL_OKAY && vio->mmio_retry )
-        rc = X86EMUL_RETRY;
-    if ( rc != X86EMUL_RETRY )
+    switch ( rc = x86_emulate(&hvmemul_ctxt->ctxt, ops) )
     {
+    case X86EMUL_OKAY:
+        if ( vio->mmio_retry )
+            rc = X86EMUL_RETRY;
+        /* fall through */
+    default:
         vio->mmio_cache_count = 0;
         vio->mmio_insn_bytes = 0;
-    }
-    else
-    {
+        break;
+
+    case X86EMUL_RETRY:
         BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
         vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+        break;
     }
 
     if ( rc != X86EMUL_OKAY )
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -91,7 +91,7 @@ int handle_mmio(void)
 
     rc = hvm_emulate_one(&ctxt);
 
-    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
+    if ( hvm_vcpu_io_need_completion(vio) )
         vio->io_completion = HVMIO_mmio_completion;
     else
         vio->mmio_access = (struct npfec){};
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -111,7 +111,7 @@ void vmx_realmode_emulate_one(struct hvm
 
     rc = hvm_emulate_one(hvmemul_ctxt);
 
-    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
+    if ( hvm_vcpu_io_need_completion(vio) )
         vio->io_completion = HVMIO_realmode_completion;
 
     if ( rc == X86EMUL_UNHANDLEABLE )