File 6227866b-Arm-allow-SMCCC_ARCH_WORKAROUND_3-use.patch of Package xen.23582

# Commit c0a56ea0fd92ecb471936b7355ddbecbaea3707c
# Date 2022-03-08 16:38:02 +0000
# Author Bertrand Marquis <bertrand.marquis@arm.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
xen/arm: Allow to discover and use SMCCC_ARCH_WORKAROUND_3

Allow guests to discover whether or not SMCCC_ARCH_WORKAROUND_3 is
supported and create a fastpath in the code to handle guest requests
to perform the workaround.

The function SMCCC_ARCH_WORKAROUND_3 will be called by the guest to
flush the branch history, so we want the handling to be as fast as
possible.

As the mitigation is applied on every guest exit, we can check for the
call before saving all context and return very early.

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marquis@arm.com>
Reviewed-by: Julien Grall <julien@xen.org>
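
The hunks below only change the hypervisor side.  As a rough,
illustrative sketch of the guest-facing interface described above
(not code from Xen or from this patch), a guest might discover and
then invoke the workaround over HVC #0 roughly as follows.  The
smccc_hvc() helper is hypothetical, SMCCC version probing and the
full register clobber handling are simplified, and the function IDs
are the standard SMCCC ones (ARCH_FEATURES 0x80000001,
ARCH_WORKAROUND_3 0x80003fff):

    #include <stdint.h>
    #include <stdbool.h>

    #define SMCCC_ARCH_FEATURES_FID      0x80000001u
    #define SMCCC_ARCH_WORKAROUND_3_FID  0x80003fffu

    /* Hypothetical HVC #0 wrapper: fid in x0, one argument in x1,
     * 32-bit result read back from w0 (SMC32 calling convention). */
    static inline int32_t smccc_hvc(uint32_t fid, uint32_t arg)
    {
        register uint64_t x0 asm("x0") = fid;
        register uint64_t x1 asm("x1") = arg;

        asm volatile("hvc #0"
                     : "+r" (x0), "+r" (x1)
                     :
                     : "x2", "x3", "memory");

        return (int32_t)x0;
    }

    /* Discovery: ARCH_FEATURES returns a negative value if the
     * queried function is not implemented, >= 0 if it is. */
    static bool have_workaround_3(void)
    {
        return smccc_hvc(SMCCC_ARCH_FEATURES_FID,
                         SMCCC_ARCH_WORKAROUND_3_FID) >= 0;
    }

    /* Invocation: a single HVC which, with this patch, Xen answers
     * from the guest_sync fastpath below.  No return value. */
    static void flush_branch_history(void)
    {
        (void)smccc_hvc(SMCCC_ARCH_WORKAROUND_3_FID, 0);
    }

A guest kernel would typically issue flush_branch_history() on its
own exception entry paths, which is why the handling on the Xen side
is kept on the fastpath before the full context is saved.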

--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -338,16 +338,26 @@ guest_sync:
         cbnz    x1, guest_sync_slowpath         /* should be 0 for HVC #0 */
 
         /*
-         * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1.
-         * The workaround has already been applied on the exception
+         * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1 and
+         * ARM_SMCCC_ARCH_WORKAROUND_3.
+         * The workaround needed has already been applied on the exception
          * entry from the guest, so let's quickly get back to the guest.
          *
          * Note that eor is used because the function identifier cannot
          * be encoded as an immediate for cmp.
          */
         eor     w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
-        cbnz    w0, check_wa2
+        cbz     w0, fastpath_out_workaround
 
+        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID)
+        cbz     w0, wa2_ssbd
+
+        /* Fastpath out for ARM_SMCCC_ARCH_WORKAROUND_3 */
+        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_2_FID ^ ARM_SMCCC_ARCH_WORKAROUND_3_FID)
+        cbnz    w0, guest_sync_slowpath
+
+fastpath_out_workaround:
         /*
          * Clobber both x0 and x1 to prevent leakage. Note that thanks
          * the eor, x0 = 0.
@@ -356,10 +366,7 @@ guest_sync:
         eret
         sb
 
-check_wa2:
-        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
-        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID)
-        cbnz    w0, guest_sync_slowpath
+wa2_ssbd:
 #ifdef CONFIG_ARM_SSBD
 alternative_cb arm_enable_wa2_handling
         b       wa2_end
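
Illustrative note, not part of the patch: the eor chain in the hunk
above can be read as the following C, where each step XORs away the
previously tested function ID and XORs in the next one.  The FID
values are the standard SMCCC ones and are only stand-ins here.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the SMCCC function IDs used in the hunk above. */
    #define WA1_FID 0x80008000u
    #define WA2_FID 0x80007fffu
    #define WA3_FID 0x80003fffu

    /* C rendering of the eor chain in guest_sync: each step XORs away
     * the previously tested FID and XORs in the next one, so w0 becomes
     * zero exactly when the original value matches the FID under test.
     * This avoids cmp, whose immediate cannot encode these FIDs. */
    static const char *classify(uint32_t w0)
    {
        w0 ^= WA1_FID;
        if (w0 == 0)
            return "WORKAROUND_1 -> fastpath_out_workaround";

        w0 ^= WA1_FID ^ WA2_FID;
        if (w0 == 0)
            return "WORKAROUND_2 -> wa2_ssbd";

        w0 ^= WA2_FID ^ WA3_FID;
        if (w0 == 0)
            return "WORKAROUND_3 -> fastpath_out_workaround";

        return "other -> guest_sync_slowpath";
    }

    int main(void)
    {
        printf("%s\n", classify(WA3_FID));
        return 0;
    }
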
--- a/xen/arch/arm/vsmc.c
+++ b/xen/arch/arm/vsmc.c
@@ -124,6 +124,10 @@ static bool handle_arch(struct cpu_user_
                 break;
             }
             break;
+        case ARM_SMCCC_ARCH_WORKAROUND_3_FID:
+            if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) )
+                ret = 0;
+            break;
         }
 
         set_user_reg(regs, 0, ret);
@@ -132,6 +136,7 @@ static bool handle_arch(struct cpu_user_
     }
 
     case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
+    case ARM_SMCCC_ARCH_WORKAROUND_3_FID:
         /* No return value */
         return true;
 