Backport of small non-invasive fixes and updates.
https://www.virtualbox.org/changeset/90161/vbox
https://www.virtualbox.org/changeset/90173/vbox
https://www.virtualbox.org/changeset/90177/vbox
https://www.virtualbox.org/changeset/87074/vbox
https://www.virtualbox.org/changeset/87356/vbox
* Plus various trivial backports cherry-picked from later 6.1.x versions.
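  These mainly address newer Linux kernels: the mmap_sem -> mmap_lock rename and
  apply_to_page_range() based page protection (5.8), the get_user_pages_remote() and
  sched_set_fifo() API changes (5.9), the set_fs()/get_fs() removal (5.10), the explicit
  linux/ethtool.h include (5.11), a larger ring-0 stack fuzz for
  CONFIG_RANDOMIZE_KSTACK_OFFSET (5.13), and the netif_rx_ni() removal (5.18).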
--- VirtualBox-5.2.44/include/VBox/err.h 2022-08-24 15:11:28.828935567 +0300
+++ VirtualBox-5.2.44_b/include/VBox/err.h 2022-08-24 15:13:03.168940346 +0300
@@ -1198,6 +1198,8 @@
/** SMAP enabled, but the AC flag was found to be clear - check the kernel
* log for details. */
#define VERR_VMM_SMAP_BUT_AC_CLEAR (-2717)
+/** Got back from vmmR0CallRing3SetJmp with the context hook still enabled. */
+#define VERR_VMM_CONTEXT_HOOK_STILL_ENABLED (-2719)
/** @} */
--- VirtualBox-5.2.44/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp 2022-08-24 15:15:46.052948598 +0300
+++ VirtualBox-5.2.44_b/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp 2022-08-24 15:16:53.036951992 +0300
@@ -266,6 +266,7 @@
case VERR_VMM_RING0_ASSERTION:
case VINF_EM_DBG_HYPER_ASSERTION:
case VERR_VMM_RING3_CALL_DISABLED:
+ case VERR_VMM_CONTEXT_HOOK_STILL_ENABLED:
{
const char *pszMsg1 = VMMR3GetRZAssertMsg1(pVM);
while (pszMsg1 && *pszMsg1 == '\n')
--- VirtualBox-5.2.44/src/VBox/VMM/VMMR0/VMMR0.cpp 2022-08-23 22:55:39.000000000 +0300
+++ VirtualBox-5.2.44_b/src/VBox/VMM/VMMR0/VMMR0.cpp 2022-08-24 15:22:17.124968412 +0300
@@ -1243,7 +1243,7 @@
pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
"Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
- rc = VERR_INVALID_STATE;
+ rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
}
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
--- VirtualBox-5.2.44/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm 2022-08-24 16:15:46.581131015 +0300
+++ VirtualBox-5.2.44_b/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm 2022-08-24 16:17:58.037137675 +0300
@@ -31,10 +31,15 @@
%define STACK_PADDING 0eeeeeeeeeeeeeeeeh
;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
+;; Update: This got worse with linux 5.13 and CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.
%ifdef VMM_R0_SWITCH_STACK
%define STACK_FUZZ_SIZE 0
%else
- %define STACK_FUZZ_SIZE 128
+ %ifdef RT_OS_LINUX
+ %define STACK_FUZZ_SIZE 384
+ %else
+ %define STACK_FUZZ_SIZE 128
+ %endif
%endif
--- a/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c 2023-03-10 18:49:56.736911580 +0300
+++ b/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c 2023-03-10 18:52:27.676915893 +0300
@@ -38,6 +38,9 @@
#include <linux/nsproxy.h>
#endif
#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0))
+# include <linux/ethtool.h>
+#endif
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/miscdevice.h>
@@ -339,8 +342,6 @@
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
-# include <linux/ethtool.h>
-
typedef struct ethtool_ops OVR_OPSTYPE;
# define OVR_OPS ethtool_ops
# define OVR_XMIT pfnStartXmit
@@ -924,7 +925,7 @@
for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
{
skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 18)
pSG->aSegs[iSeg].cb = pFrag->bv_len;
pSG->aSegs[iSeg].pv = VBOX_SKB_KMAP_FRAG(pFrag) + pFrag->bv_offset;
# else /* < KERNEL_VERSION(5, 4, 0) */
@@ -945,7 +946,7 @@
for (i = 0; i < skb_shinfo(pFragBuf)->nr_frags; i++)
{
skb_frag_t *pFrag = &skb_shinfo(pFragBuf)->frags[i];
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 18)
pSG->aSegs[iSeg].cb = pFrag->bv_len;
pSG->aSegs[iSeg].pv = VBOX_SKB_KMAP_FRAG(pFrag) + pFrag->bv_offset;
# else /* < KERNEL_VERSION(5, 4, 0) */
@@ -2342,7 +2343,13 @@
vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
Log6(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
Log6(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
+ local_bh_disable();
+ err = netif_rx(pBuf);
+ local_bh_enable();
+#else
err = netif_rx_ni(pBuf);
+#endif
if (err)
rc = RTErrConvertFromErrno(err);
}
--- a/src/VBox/VMM/VMMAll/IEMAllAImpl.asm 2020-07-09 19:57:52.000000000 +0300
+++ b/src/VBox/VMM/VMMAll/IEMAllAImpl.asm 2023-03-10 19:18:21.180960276 +0300
@@ -855,8 +855,8 @@
lock cmpxchg16b [rdi]
- mov [rsi], eax
- mov [rsi + 8], edx
+ mov [rsi], rax
+ mov [rsi + 8], rdx
IEM_SAVE_FLAGS r10, (X86_EFL_ZF), 0 ; clobbers T0+T1 (eax, r11)
pop rbx
@@ -1861,7 +1861,7 @@
fninit
fld tword [A3]
FPU_LD_FXSTATE_FCW_AND_SAFE_FSW A0
- fisttp dword [A2]
+ fisttp word [A2]
fnstsw word [A1]
--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c 2020-07-09 19:57:39.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c 2023-03-03 19:21:00.000000000 +0300
@@ -50,6 +50,10 @@
# define PAGE_READONLY_EXEC PAGE_READONLY
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+#endif
+
/*
* 2.6.29+ kernels don't work with remap_pfn_range() anymore because
* track_pfn_vma_new() is apparently not defined for non-RAM pages.
@@ -70,6 +74,21 @@
# define gfp_t unsigned
#endif
+/*
+ * Wrappers around mmap_lock/mmap_sem difference.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_lock)
+# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_lock)
+# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_lock)
+#else
+# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_sem)
+# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_sem)
+# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_sem)
+#endif
+
/*********************************************************************************************************************************
* Structures and Typedefs *
@@ -220,9 +239,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
#else
- down_write(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_WRITE(pTask->mm);
ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
- up_write(&pTask->mm->mmap_sem);
+ LNX_MM_UP_WRITE(pTask->mm);
#endif
}
else
@@ -230,9 +249,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
#else
- down_write(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_WRITE(pTask->mm);
ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
- up_write(&pTask->mm->mmap_sem);
+ LNX_MM_UP_WRITE(pTask->mm);
#endif
if ( !(ulAddr & ~PAGE_MASK)
&& (ulAddr & (uAlignment - 1)))
@@ -267,13 +286,13 @@
Assert(pTask == current); RT_NOREF_PV(pTask);
vm_munmap((unsigned long)pv, cb);
#elif defined(USE_RHEL4_MUNMAP)
- down_write(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_WRITE(pTask->mm);
do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
- up_write(&pTask->mm->mmap_sem);
+ LNX_MM_UP_WRITE(pTask->mm);
#else
- down_write(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_WRITE(pTask->mm);
do_munmap(pTask->mm, (unsigned long)pv, cb);
- up_write(&pTask->mm->mmap_sem);
+ LNX_MM_UP_WRITE(pTask->mm);
#endif
}
@@ -469,6 +488,42 @@
}
+#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+/**
+ * User data passed to the apply_to_page_range() callback.
+ */
+typedef struct LNXAPPLYPGRANGE
+{
+ /** Pointer to the memory object. */
+ PRTR0MEMOBJLNX pMemLnx;
+ /** The page protection flags to apply. */
+ pgprot_t fPg;
+} LNXAPPLYPGRANGE;
+/** Pointer to the user data. */
+typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
+/** Pointer to the const user data. */
+typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
+
+/**
+ * Callback called in apply_to_page_range().
+ *
+ * @returns Linux status code.
+ * @param pPte Pointer to the page table entry for the given address.
+ * @param uAddr The address to apply the new protection to.
+ * @param pvUser The opaque user data.
+ */
+static int rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
+{
+ PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
+ PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
+ size_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
+
+ set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
+ return 0;
+}
+#endif
+
+
/**
* Maps the allocation into ring-0.
*
@@ -591,7 +646,7 @@
size_t iPage;
Assert(pTask);
if (pTask && pTask->mm)
- down_read(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_READ(pTask->mm);
iPage = pMemLnx->cPages;
while (iPage-- > 0)
@@ -606,7 +661,7 @@
}
if (pTask && pTask->mm)
- up_read(&pTask->mm->mmap_sem);
+ LNX_MM_UP_READ(pTask->mm);
}
/* else: kernel memory - nothing to do here. */
break;
@@ -1073,7 +1128,7 @@
papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
if (papVMAs)
{
- down_read(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_READ(pTask->mm);
/*
* Get user pages.
@@ -1098,7 +1153,9 @@
*/
else
rc = get_user_pages_remote(
+# if GET_USER_PAGES_API < KERNEL_VERSION(5, 9, 0)
pTask, /* Task for fault accounting. */
+# endif
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
@@ -1158,7 +1215,7 @@
papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
}
- up_read(&pTask->mm->mmap_sem);
+ LNX_MM_UP_READ(pTask->mm);
RTMemFree(papVMAs);
@@ -1185,7 +1242,7 @@
#endif
}
- up_read(&pTask->mm->mmap_sem);
+ LNX_MM_UP_READ(pTask->mm);
RTMemFree(papVMAs);
rc = VERR_LOCK_FAILED;
@@ -1595,7 +1652,7 @@
const size_t cPages = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
size_t iPage;
- down_write(&pTask->mm->mmap_sem);
+ LNX_MM_DOWN_WRITE(pTask->mm);
rc = VINF_SUCCESS;
if (pMemLnxToMap->cPages)
@@ -1712,7 +1769,7 @@
}
#endif /* CONFIG_NUMA_BALANCING */
- up_write(&pTask->mm->mmap_sem);
+ LNX_MM_UP_WRITE(pTask->mm);
if (RT_SUCCESS(rc))
{
@@ -1744,6 +1801,23 @@
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
+#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+ if ( pMemLnx->fExecutable
+ && pMemLnx->fMappedToRing0)
+ {
+ LNXAPPLYPGRANGE Args;
+ Args.pMemLnx = pMemLnx;
+ Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
+ int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
+ rtR0MemObjLinuxApplyPageRange, (void *)&Args);
+ if (rcLnx)
+ return VERR_NOT_SUPPORTED;
+
+ return VINF_SUCCESS;
+ }
+#endif
+
NOREF(pMem);
NOREF(offSub);
NOREF(cbSub);
--- a/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h 2020-07-09 19:57:39.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h 2022-04-28 06:06:38.000000000 +0300
@@ -128,7 +128,11 @@
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/compiler.h>
-#ifndef HAVE_UNLOCKED_IOCTL /* linux/fs.h defines this */
+/* linux/fs.h defines HAVE_UNLOCKED_IOCTL from 2.6.11 till 5.9, but its meaning remains valid */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,18)
+#define HAVE_UNLOCKED_IOCTL 1
+#endif
+#if !defined(HAVE_UNLOCKED_IOCTL) && (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38))
# include <linux/smp_lock.h>
#endif
/* For the shared folders module */
--- a/src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c 2020-07-09 19:57:39.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c 2022-04-28 06:08:50.000000000 +0300
@@ -54,51 +54,48 @@
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
- /* See comment near MAX_RT_PRIO in linux/sched.h for details on
- sched_priority. */
- int iSchedClass = SCHED_NORMAL;
- struct sched_param Param = { .sched_priority = MAX_PRIO - 1 };
+ int iSchedClass = SCHED_FIFO;
+ int rc = VINF_SUCCESS;
+ struct sched_param Param = { 0 };
+
+ RT_NOREF_PV(pThread);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)
+ RT_NOREF_PV(iSchedClass);
+ RT_NOREF_PV(Param);
+#endif
switch (enmType)
{
- case RTTHREADTYPE_INFREQUENT_POLLER:
- Param.sched_priority = MAX_RT_PRIO + 5;
- break;
-
- case RTTHREADTYPE_EMULATION:
- Param.sched_priority = MAX_RT_PRIO + 4;
- break;
-
- case RTTHREADTYPE_DEFAULT:
- Param.sched_priority = MAX_RT_PRIO + 3;
- break;
-
- case RTTHREADTYPE_MSG_PUMP:
- Param.sched_priority = MAX_RT_PRIO + 2;
- break;
-
case RTTHREADTYPE_IO:
- iSchedClass = SCHED_FIFO;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+ /* Set max. priority to preempt all other threads on this CPU. */
Param.sched_priority = MAX_RT_PRIO - 1;
+#else
+ /* Effectively changes prio to 50 */
+ sched_set_fifo(current);
+#endif
break;
case RTTHREADTYPE_TIMER:
- iSchedClass = SCHED_FIFO;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
Param.sched_priority = 1; /* not 0 just in case */
+#else
+ /* Just one above SCHED_NORMAL class */
+ sched_set_fifo_low(current);
+#endif
break;
default:
- AssertMsgFailed(("enmType=%d\n", enmType));
- return VERR_INVALID_PARAMETER;
+ /* pretend success instead of VERR_NOT_SUPPORTED */
+ return rc;
}
- sched_setscheduler(current, iSchedClass, &Param);
-#else
- RT_NOREF_PV(enmType);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+ if ((sched_setscheduler(current, iSchedClass, &Param)) != 0) {
+ rc = VERR_GENERAL_FAILURE;
+ }
#endif
- RT_NOREF_PV(pThread);
- return VINF_SUCCESS;
+ return rc;
}
--- a/src/VBox/HostDrivers/VBoxPci/linux/VBoxPci-linux.c 2020-07-09 19:57:02.000000000 +0300
+++ b/src/VBox/HostDrivers/VBoxPci/linux/VBoxPci-linux.c 2022-08-16 00:29:29.000000000 +0300
@@ -369,16 +369,20 @@
static int vboxPciFileWrite(struct file* file, unsigned long long offset, unsigned char* data, unsigned int size)
{
int ret;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
mm_segment_t fs_save;
fs_save = get_fs();
set_fs(KERNEL_DS);
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
ret = kernel_write(file, data, size, &offset);
#else
ret = vfs_write(file, data, size, &offset);
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
set_fs(fs_save);
+#endif
if (ret < 0)
printk(KERN_DEBUG "vboxPciFileWrite: error %d\n", ret);
--- a/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c 2020-07-09 19:57:39.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c 2022-08-15 23:42:37.000000000 +0300
@@ -93,7 +93,7 @@
if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
return false;
-# if defined(cpu_possible)
+# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,2)) || defined(cpu_possible)
return cpu_possible(idCpu);
# else /* < 2.5.29 */
return idCpu < (RTCPUID)smp_num_cpus;
@@ -147,7 +147,7 @@
#ifdef CONFIG_SMP
if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
return false;
-# ifdef cpu_online
+# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) || defined(cpu_online)
return cpu_online(idCpu);
# else /* 2.4: */
return cpu_online_map & RT_BIT_64(idCpu);
--- a/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c 2020-07-09 19:57:39.000000000 +0300
+++ b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c 2023-03-01 22:06:02.000000000 +0300
@@ -38,7 +38,7 @@
#if (defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)) && !defined(RTMEMALLOC_EXEC_HEAP)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+# if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0))) || defined(VBOX_COMPAT_KRNL)
/**
* Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
* memory in the moduel range. This is preferrable to the exec heap below.
--- a/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c 2020-07-09 19:57:00.000000000 +0300
+++ b/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c 2022-08-19 17:52:00.000000000 +0300
@@ -755,6 +755,15 @@
RTCCUINTREG VBOXCALL supdrvOSChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+ unsigned long fSavedFlags;
+ local_irq_save(fSavedFlags);
+ RTCCUINTREG const uOld = cr4_read_shadow();
+ cr4_update_irqsoff(fOrMask, ~fAndMask); /* Same as this function, only it is not returning the old value. */
+ AssertMsg(cr4_read_shadow() == ((uOld & fAndMask) | fOrMask),
+ ("fOrMask=%#RTreg fAndMask=%#RTreg uOld=%#RTreg; new cr4=%#llx\n", fOrMask, fAndMask, uOld, cr4_read_shadow()));
+ local_irq_restore(fSavedFlags);
+#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
RTCCUINTREG uOld = this_cpu_read(cpu_tlbstate.cr4);
RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
@@ -769,6 +778,7 @@
if (uNew != uOld)
ASMSetCR4(uNew);
#endif
+#endif
return uOld;
}