File xsa348-1.patch of Package xen.17645

x86: replace reset_stack_and_jump_nolp()

Move the necessary check into check_for_livepatch_work(), rather than
mostly duplicating reset_stack_and_jump() for this purpose. This is to
prevent an inflation of reset_stack_and_jump() flavors.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
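
For reference, CHECK_FOR_LIVEPATCH_WORK is the asm fragment that the unified
reset_stack_and_jump() now always emits. A sketch of its definition, taken
from the CONFIG_LIVEPATCH conditional sitting just above the
asm-x86/current.h hunk (the #else branch is visible verbatim in that hunk's
context below):

    #ifdef CONFIG_LIVEPATCH
    # define CHECK_FOR_LIVEPATCH_WORK "call check_for_livepatch_work;"
    #else
    # define CHECK_FOR_LIVEPATCH_WORK ""
    #endif

With the new guard at the top of check_for_livepatch_work(), this call turns
into a harmless early return on the paths which previously had to use the
_nolp variant (e.g. svm_do_resume(), continue_nonidle_domain() or
reinit_bsp_stack()): the function now only proceeds when the system is fully
active and the invoking vCPU belongs to the idle domain.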

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -192,7 +192,7 @@ static void noreturn continue_idle_domai
 {
     /* Idle vcpus might be attached to non-idle units! */
     if ( !is_idle_domain(v->sched_unit->domain) )
-        reset_stack_and_jump_nolp(guest_idle_loop);
+        reset_stack_and_jump(guest_idle_loop);
 
     reset_stack_and_jump(idle_loop);
 }
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1032,7 +1032,7 @@ static void noreturn svm_do_resume(struc
 
     hvm_do_resume(v);
 
-    reset_stack_and_jump_nolp(svm_asm_do_resume);
+    reset_stack_and_jump(svm_asm_do_resume);
 }
 
 void svm_vmenter_helper(const struct cpu_user_regs *regs)
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1889,7 +1889,7 @@ void vmx_do_resume(struct vcpu *v)
     if ( host_cr4 != read_cr4() )
         __vmwrite(HOST_CR4, read_cr4());
 
-    reset_stack_and_jump_nolp(vmx_asm_do_vmentry);
+    reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
 static inline unsigned long vmr(unsigned long field)
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -61,7 +61,7 @@ custom_runtime_param("pcid", parse_pcid)
 static void noreturn continue_nonidle_domain(struct vcpu *v)
 {
     check_wakeup_from_wait();
-    reset_stack_and_jump_nolp(ret_from_intr);
+    reset_stack_and_jump(ret_from_intr);
 }
 
 static int setup_compat_l4(struct vcpu *v)
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -631,7 +631,7 @@ static void __init noreturn reinit_bsp_s
     stack_base[0] = stack;
     memguard_guard_stack(stack);
 
-    reset_stack_and_jump_nolp(init_done);
+    reset_stack_and_jump(init_done);
 }
 
 /*
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -1300,6 +1300,11 @@ void check_for_livepatch_work(void)
     s_time_t timeout;
     unsigned long flags;
 
+    /* Only do any work when invoked in truly idle state. */
+    if ( system_state != SYS_STATE_active ||
+         !is_idle_domain(current->sched_unit->domain) )
+        return;
+
     /* Fast path: no work to do. */
     if ( !per_cpu(work_to_do, cpu ) )
         return;
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -129,22 +129,16 @@ unsigned long get_stack_dump_bottom (uns
 # define CHECK_FOR_LIVEPATCH_WORK ""
 #endif
 
-#define switch_stack_and_jump(fn, instr)                                \
+#define reset_stack_and_jump(fn)                                        \
     ({                                                                  \
         __asm__ __volatile__ (                                          \
             "mov %0,%%"__OP"sp;"                                        \
-            instr                                                       \
+            CHECK_FOR_LIVEPATCH_WORK                                    \
              "jmp %c1"                                                  \
             : : "r" (guest_cpu_user_regs()), "i" (fn) : "memory" );     \
         unreachable();                                                  \
     })
 
-#define reset_stack_and_jump(fn)                                        \
-    switch_stack_and_jump(fn, CHECK_FOR_LIVEPATCH_WORK)
-
-#define reset_stack_and_jump_nolp(fn)                                   \
-    switch_stack_and_jump(fn, "")
-
 /*
  * Which VCPU's state is currently running on each CPU?
  * This is not necesasrily the same as 'current' as a CPU may be