File vbox-upd07-fix-fpu-crpt.diff of Package virtualbox52

  https://www.virtualbox.org/ticket/20914
  https://www.spinics.net/lists/kernel/msg4373008.html
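
  The patch adds a SUPR0FpuBegin()/SUPR0FpuEnd() pair to the support driver
  and exports it to the R0 VMM; on Linux 4.19+ these wrap the guest-FPU
  load/restore in kernel_fpu_begin()/kernel_fpu_end() so the host kernel
  knows the FPU is in use, avoiding the host FPU state corruption seen with
  5.17.7+ kernels (see the links above). Roughly, the call pattern wired
  into CPUMR0.cpp looks like this (illustrative sketch, not part of the
  patch itself):

      bool const fCtxHook = VMMR0ThreadCtxHookIsEnabled(pVCpu);
      SUPR0FpuBegin(fCtxHook);  /* kernel_fpu_begin() + preempt_enable() on Linux >= 4.19 */
      /* ... load the guest-FPU state, run the guest, save it again ... */
      SUPR0FpuEnd(fCtxHook);    /* preempt_disable() + kernel_fpu_end() */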

--- a/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h	2023-03-07 22:13:29.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h	2023-03-07 22:16:48.528684922 +0300
@@ -166,6 +166,11 @@
 # include <asm/tlbflush.h>
 #endif
 
+/* for kernel_fpu_begin() / kernel_fpu_end() */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
+# include <asm/fpu/api.h>
+#endif
+
 /* for set_pages_x() */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
 # include <asm/set_memory.h>
--- a/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c	2023-03-07 22:13:29.000000000 +0300
+++ b/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c	2023-03-07 22:21:18.332683893 +0300
@@ -40,6 +40,7 @@
 #include <iprt/semaphore.h>
 #include <iprt/initterm.h>
 #include <iprt/process.h>
+#include <iprt/thread.h>
 #include <VBox/err.h>
 #include <iprt/mem.h>
 #include <VBox/log.h>
@@ -1437,6 +1438,61 @@
 }
 
 
+SUPR0DECL(bool) SUPR0FpuBegin(bool fCtxHook)
+{
+    RT_NOREF(fCtxHook);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) /* Going back to 4.19.0 for better coverage; we
+                             probably only need 5.17.7+ in the end. */
+    /*
+     * HACK ALERT!
+     *
+ * We'd like to use the old __kernel_fpu_begin() API, which was removed in
+ * early 2019, because we typically run with preemption enabled and have a
+     * preemption hook installed which will call kernel_fpu_end() in case we're
+     * scheduled out after getting in here.  The preemption hook is almost
+     * useless if we run with preemption disabled.
+     *
+     * For the case where the kernel does not have preemption hooks, we get here
+ * with preemption already disabled, and one more count doesn't make any
+     * difference.
+     *
+     * So, after the kernel_fpu_begin() call we undo the implicit preempt_disable()
+     * call it does, so the preemption hook can do its work and the VBox user has
+     * a more responsive system.
+     *
+     * See @bugref{10209#c12} and onwards for more details.
+     */
+    Assert(fCtxHook || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    kernel_fpu_begin();
+# if 0 /* Always do it for now for better test coverage. */
+    if (fCtxHook)
+# endif
+        preempt_enable();
+    return false; /** @todo Not sure if we have license to use any extended state, or
+                   *        if we're limited to the SSE & x87 FPU. If it's the former,
+                   *        we should return @a true and the caller can skip
+                   *        saving+restoring the host state and save some time. */
+#else
+    return false;
+#endif
+}
+
+
+SUPR0DECL(void) SUPR0FpuEnd(bool fCtxHook)
+{
+    RT_NOREF(fCtxHook);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)
+    /* HACK ALERT! See SUPR0FpuBegin for an explanation of this. */
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+# if 0 /* Always do it for now for better test coverage. */
+    if (fCtxHook)
+# endif
+        preempt_disable();
+    kernel_fpu_end();
+#endif
+}
+
+
 int VBOXCALL    supdrvOSGetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
--- a/include/VBox/sup.h	2020-07-09 19:49:50.000000000 +0300
+++ b/include/VBox/sup.h	2023-03-07 22:24:34.948683142 +0300
@@ -1951,6 +1951,26 @@
  */
 SUPR0DECL(uint32_t) SUPR0GetKernelFeatures(void);
 
+/**
+ * Notification from R0 VMM prior to loading the guest-FPU register state.
+ *
+ * @returns Whether the host-FPU register state has been saved by the host kernel.
+ * @param   fCtxHook    Whether thread-context hooks are enabled.
+ *
+ * @remarks Called with preemption disabled.
+ */
+SUPR0DECL(bool) SUPR0FpuBegin(bool fCtxHook);
+
+/**
+ * Notification from R0 VMM after saving the guest-FPU register state (and
+ * potentially restoring the host-FPU register state) in ring-0.
+ *
+ * @param   fCtxHook    Whether thread-context hooks are enabled.
+ *
+ * @remarks Called with preemption disabled.
+ */
+SUPR0DECL(void) SUPR0FpuEnd(bool fCtxHook);
+
 
 /** @name Absolute symbols
  * Take the address of these, don't try call them.
--- a/src/VBox/HostDrivers/Support/SUPDrv.cpp	2023-03-07 22:13:29.000000000 +0300
+++ b/src/VBox/HostDrivers/Support/SUPDrv.cpp	2023-03-07 22:34:59.768680757 +0300
@@ -75,6 +75,19 @@
 # define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
 #endif
 
+#if defined(RT_OS_LINUX) && !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+/* In Linux 5.18-rc1, memcpy became a wrapper which performs fortify checks
+ * before triggering the __underlying_memcpy() call. We do not pass these
+ * checks here, so bypass them for now. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
+#  define SUPDRV_MEMCPY __underlying_memcpy
+# else
+#  define SUPDRV_MEMCPY  memcpy
+# endif
+#else
+# define SUPDRV_MEMCPY  memcpy
+#endif
+
 /*
  * Logging assignments:
  *      Log     - useful stuff, like failures.
@@ -196,6 +209,8 @@
     { "SUPR0ContFree",                          (void *)(uintptr_t)SUPR0ContFree },
     { "SUPR0ChangeCR4",                         (void *)(uintptr_t)SUPR0ChangeCR4 },
     { "SUPR0EnableVTx",                         (void *)(uintptr_t)SUPR0EnableVTx },
+    { "SUPR0FpuBegin",                          (void *)(uintptr_t)SUPR0FpuBegin },
+    { "SUPR0FpuEnd",                            (void *)(uintptr_t)SUPR0FpuEnd },
     { "SUPR0SuspendVTxOnCpu",                   (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
     { "SUPR0ResumeVTxOnCpu",                    (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
     { "SUPR0GetCurrentGdtRw",                   (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
@@ -1663,7 +1678,7 @@
 
             /* execute */
             pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
-            memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
+            SUPDRV_MEMCPY(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
             pReq->Hdr.rc = VINF_SUCCESS;
             return 0;
         }
--- a/src/VBox/VMM/VMMR0/CPUMR0.cpp	2020-07-09 19:57:54.000000000 +0300
+++ b/src/VBox/VMM/VMMR0/CPUMR0.cpp	2023-03-07 22:47:27.996677901 +0300
@@ -437,6 +437,11 @@
     Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST));
     Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
 
+    /* Notify the support driver prior to loading the guest-FPU register state. */
+    SUPR0FpuBegin(VMMR0ThreadCtxHookIsEnabled(pVCpu));
+    /** @todo use return value? Currently skipping that to be on the safe side
+     *        wrt. extended state (linux). */
+
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
     {
@@ -535,6 +540,9 @@
                     cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
                 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_MANUAL_XMM_RESTORE;
             }
+
+            /* Notify the support driver after loading the host-FPU register state. */
+            SUPR0FpuEnd(VMMR0ThreadCtxHookIsEnabled(pVCpu));
         }
     }
     else