File 5a4fd893-x86-entry-remove-partial-cpu_user_regs.patch of Package xen.7317

# Commit f9eb74789af77e985ae653193f3622263499f674
# Date 2018-01-05 19:57:07 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/entry: Remove support for partial cpu_user_regs frames

Save all GPRs on entry to Xen.

The entry_int82() path is via a DPL1 gate, only usable by 32bit PV guests, so
can get away with only saving the 32bit registers.  All other entrypoints can
be reached from 32 or 64bit contexts.

This is part of XSA-254.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
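
As an illustration of the resulting entry-point pattern (this mirrors the
int80_direct_trap hunk below and is not itself one of the applied hunks): each
caller now writes the trap vector into the frame itself and then invokes the
unified SAVE_ALL macro, which spills R12-R15 unconditionally except on the
compat (32bit PV) path:

        pushq $0                 /* error code slot */
        movl  $0x80, 4(%rsp)     /* caller stores the entry vector directly */
        SAVE_ALL                 /* saves all GPRs; no partial-frame marking */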

--- a/tools/tests/x86_emulator/x86_emulate.c
+++ b/tools/tests/x86_emulator/x86_emulate.c
@@ -24,7 +24,6 @@ typedef bool bool_t;
 #endif
 
 #define cpu_has_amd_erratum(nr) 0
-#define mark_regs_dirty(r) ((void)(r))
 
 #include "x86_emulate/x86_emulate.h"
 #include "x86_emulate/x86_emulate.c"
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -141,7 +141,6 @@ static void continue_idle_domain(struct
 static void continue_nonidle_domain(struct vcpu *v)
 {
     check_wakeup_from_wait();
-    mark_regs_dirty(guest_cpu_user_regs());
     reset_stack_and_jump(ret_from_intr);
 }
 
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2126,7 +2126,6 @@ static int emulate_privileged_op(struct
             goto fail;
         if ( admin_io_okay(port, op_bytes, v, regs) )
         {
-            mark_regs_dirty(regs);
             io_emul(regs);            
         }
         else
@@ -2156,7 +2155,6 @@ static int emulate_privileged_op(struct
             goto fail;
         if ( admin_io_okay(port, op_bytes, v, regs) )
         {
-            mark_regs_dirty(regs);
             io_emul(regs);            
             if ( (op_bytes == 1) && pv_post_outb_hook )
                 pv_post_outb_hook(port, regs->eax);
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -14,7 +14,8 @@
 
 ENTRY(compat_hypercall)
         pushq $0
-        SAVE_VOLATILE type=TRAP_syscall compat=1
+        movl  $TRAP_syscall, 4(%rsp)
+        SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
 
         cmpb  $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
@@ -126,7 +127,6 @@ compat_test_guest_events:
 /* %rbx: struct vcpu */
 compat_process_softirqs:
         sti
-        andl  $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
         call  do_softirq
         jmp   compat_test_all_events
 
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -126,7 +126,8 @@ ENTRY(syscall_enter)
         pushq %rcx
         pushq $0
         movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before saving */
-        SAVE_VOLATILE TRAP_syscall
+        movl  $TRAP_syscall, 4(%rsp)
+        SAVE_ALL
         GET_CURRENT(%rbx)
         movq  VCPU_domain(%rbx),%rcx
         testb $1,DOMAIN_is_32bit_pv(%rcx)
@@ -224,7 +225,6 @@ test_guest_events:
 /* %rbx: struct vcpu */
 process_softirqs:
         sti       
-        SAVE_PRESERVED
         call do_softirq
         jmp  test_all_events
 
@@ -277,7 +277,8 @@ GLOBAL(sysenter_eflags_saved)
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
         pushq $0
-        SAVE_VOLATILE TRAP_syscall
+        movl  $TRAP_syscall, 4(%rsp)
+        SAVE_ALL
         GET_CURRENT(%rbx)
         cmpb  $0,VCPU_sysenter_disables_events(%rbx)
         movq  VCPU_sysenter_addr(%rbx),%rax
@@ -294,7 +295,6 @@ UNLIKELY_END(sysenter_nt_set)
         leal  (,%rcx,TBF_INTERRUPT),%ecx
 UNLIKELY_START(z, sysenter_gpf)
         movq  VCPU_trap_ctxt(%rbx),%rsi
-        SAVE_PRESERVED
         movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
         movl  %eax,TRAPBOUNCE_error_code(%rdx)
         movq  TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
@@ -311,7 +311,8 @@ UNLIKELY_END(sysenter_gpf)
 
 ENTRY(int80_direct_trap)
         pushq $0
-        SAVE_VOLATILE 0x80
+        movl  $0x80, 4(%rsp)
+        SAVE_ALL
 
         cmpb  $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
@@ -340,7 +341,6 @@ int80_slow_path:
          * IDT entry with DPL==0.
          */
         movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
-        SAVE_PRESERVED
         movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
         /* A GPF wouldn't have incremented the instruction pointer. */
         subq  $2,UREGS_rip(%rsp)
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -81,15 +81,10 @@ static void _show_registers(
            regs->rbp, regs->rsp, regs->r8);
     printk("r9:  %016lx   r10: %016lx   r11: %016lx\n",
            regs->r9,  regs->r10, regs->r11);
-    if ( !(regs->entry_vector & TRAP_regs_partial) )
-    {
-        printk("r12: %016lx   r13: %016lx   r14: %016lx\n",
-               regs->r12, regs->r13, regs->r14);
-        printk("r15: %016lx   cr0: %016lx   cr4: %016lx\n",
-               regs->r15, crs[0], crs[4]);
-    }
-    else
-        printk("cr0: %016lx   cr4: %016lx\n", crs[0], crs[4]);
+    printk("r12: %016lx   r13: %016lx   r14: %016lx\n",
+           regs->r12, regs->r13, regs->r14);
+    printk("r15: %016lx   cr0: %016lx   cr4: %016lx\n",
+           regs->r15, crs[0], crs[4]);
     printk("cr3: %016lx   cr2: %016lx\n", crs[3], crs[2]);
     printk("fsb: %016lx   gsb: %016lx   gss: %016lx\n",
            crs[5], crs[6], crs[7]);
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -10,7 +10,6 @@
  */
 
 #include <asm/x86_emulate.h>
-#include <asm/asm_defns.h> /* mark_regs_dirty() */
 #include <asm/processor.h> /* current_cpu_info */
 #include <asm/amd.h> /* cpu_has_amd_erratum() */
 
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1402,10 +1402,10 @@ decode_register(
     case  9: p = &regs->r9;  break;
     case 10: p = &regs->r10; break;
     case 11: p = &regs->r11; break;
-    case 12: mark_regs_dirty(regs); p = &regs->r12; break;
-    case 13: mark_regs_dirty(regs); p = &regs->r13; break;
-    case 14: mark_regs_dirty(regs); p = &regs->r14; break;
-    case 15: mark_regs_dirty(regs); p = &regs->r15; break;
+    case 12: p = &regs->r12; break;
+    case 13: p = &regs->r13; break;
+    case 14: p = &regs->r14; break;
+    case 15: p = &regs->r15; break;
 #endif
     default: BUG(); p = NULL; break;
     }
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -129,7 +129,6 @@ static void __prepare_to_wait(struct wai
     unsigned long dummy;
     u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
 
-    cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
     ASSERT(wqv->esp == 0);
 
     /* Save current VCPU affinity; force wakeup on *this* CPU only. */
--- a/xen/include/asm-x86/x86_64/asm_defns.h
+++ b/xen/include/asm-x86/x86_64/asm_defns.h
@@ -3,15 +3,6 @@
 
 #include <asm/percpu.h>
 
-#ifdef CONFIG_FRAME_POINTER
-/* Indicate special exception stack frame by inverting the frame pointer. */
-#define SETUP_EXCEPTION_FRAME_POINTER(offs)     \
-        leaq  offs(%rsp),%rbp;                  \
-        notq  %rbp
-#else
-#define SETUP_EXCEPTION_FRAME_POINTER(offs)
-#endif
-
 #ifndef NDEBUG
 #define ASSERT_INTERRUPT_STATUS(x)              \
         pushf;                                  \
@@ -26,68 +17,10 @@
 #define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
 #define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
 
-/*
- * This flag is set in an exception frame when registers R12-R15 did not get
- * saved.
- */
-#define _TRAP_regs_partial 16
-#define TRAP_regs_partial  (1 << _TRAP_regs_partial)
-/*
- * This flag gets set in an exception frame when registers R12-R15 possibly
- * get modified from their originally saved values and hence need to be
- * restored even if the normal call flow would restore register values.
- *
- * The flag being set implies _TRAP_regs_partial to be unset. Restoring
- * R12-R15 thus is
- * - required when this flag is set,
- * - safe when _TRAP_regs_partial is unset.
- */
-#define _TRAP_regs_dirty   17
-#define TRAP_regs_dirty    (1 << _TRAP_regs_dirty)
-
-#define mark_regs_dirty(r) ({ \
-        struct cpu_user_regs *r__ = (r); \
-        ASSERT(!((r__)->entry_vector & TRAP_regs_partial)); \
-        r__->entry_vector |= TRAP_regs_dirty; \
-})
-
-#define SAVE_ALL                                \
-        addq  $-(UREGS_error_code-UREGS_r15), %rsp; \
-        cld;                                    \
-        movq  %rdi,UREGS_rdi(%rsp);             \
-        movq  %rsi,UREGS_rsi(%rsp);             \
-        movq  %rdx,UREGS_rdx(%rsp);             \
-        movq  %rcx,UREGS_rcx(%rsp);             \
-        movq  %rax,UREGS_rax(%rsp);             \
-        movq  %r8,UREGS_r8(%rsp);               \
-        movq  %r9,UREGS_r9(%rsp);               \
-        movq  %r10,UREGS_r10(%rsp);             \
-        movq  %r11,UREGS_r11(%rsp);             \
-        movq  %rbx,UREGS_rbx(%rsp);             \
-        movq  %rbp,UREGS_rbp(%rsp);             \
-        SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp); \
-        movq  %r12,UREGS_r12(%rsp);             \
-        movq  %r13,UREGS_r13(%rsp);             \
-        movq  %r14,UREGS_r14(%rsp);             \
-        movq  %r15,UREGS_r15(%rsp);             \
-
 #ifdef __ASSEMBLY__
 
-/*
- * Save all registers not preserved by C code or used in entry/exit code. Mark
- * the frame as partial.
- *
- * @type: exception type
- * @compat: R8-R15 don't need saving, and the frame nevertheless is complete
- */
-.macro SAVE_VOLATILE type compat=0
-.if \compat
-        movl  $\type,UREGS_entry_vector-UREGS_error_code(%rsp)
-.else
-        movl  $\type|TRAP_regs_partial,\
-              UREGS_entry_vector-UREGS_error_code(%rsp)
-.endif
-        addq  $-(UREGS_error_code-UREGS_r15),%rsp
+.macro SAVE_ALL compat=0
+        addq  $-(UREGS_error_code-UREGS_r15), %rsp
         cld
         movq  %rdi,UREGS_rdi(%rsp)
         movq  %rsi,UREGS_rsi(%rsp)
@@ -102,20 +35,17 @@
 .endif
         movq  %rbx,UREGS_rbx(%rsp)
         movq  %rbp,UREGS_rbp(%rsp)
-        SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
-.endm
-
-/*
- * Complete a frame potentially only partially saved.
- */
-.macro SAVE_PRESERVED
-        btrl  $_TRAP_regs_partial,UREGS_entry_vector(%rsp)
-        jnc   987f
+#ifdef CONFIG_FRAME_POINTER
+/* Indicate special exception stack frame by inverting the frame pointer. */
+        leaq  UREGS_rbp(%rsp), %rbp
+        notq  %rbp
+#endif
+.if !\compat
         movq  %r12,UREGS_r12(%rsp)
         movq  %r13,UREGS_r13(%rsp)
         movq  %r14,UREGS_r14(%rsp)
         movq  %r15,UREGS_r15(%rsp)
-987:
+.endif
 .endm
 
 /*
@@ -147,33 +77,14 @@
  * @compat: R8-R15 don't need reloading
  */
 .macro RESTORE_ALL adj=0 compat=0
-.if !\compat
-        testl $TRAP_regs_dirty,UREGS_entry_vector(%rsp)
-.endif
         LOAD_C_CLOBBERED \compat
 .if !\compat
-        jz    987f
         movq  UREGS_r15(%rsp),%r15
         movq  UREGS_r14(%rsp),%r14
         movq  UREGS_r13(%rsp),%r13
         movq  UREGS_r12(%rsp),%r12
-#ifndef NDEBUG
-        .subsection 1
-987:    testl $TRAP_regs_partial,UREGS_entry_vector(%rsp)
-        jnz   987f
-        cmpq  UREGS_r15(%rsp),%r15
-        jne   789f
-        cmpq  UREGS_r14(%rsp),%r14
-        jne   789f
-        cmpq  UREGS_r13(%rsp),%r13
-        jne   789f
-        cmpq  UREGS_r12(%rsp),%r12
-        je    987f
-789:    ud2
-        .subsection 0
-#endif
 .endif
-987:    movq  UREGS_rbp(%rsp),%rbp
+        movq  UREGS_rbp(%rsp),%rbp
         movq  UREGS_rbx(%rsp),%rbx
         subq  $-(UREGS_error_code-UREGS_r15+\adj), %rsp
 .endm