File jdk-8354941-aarch32.patch of Package java-1_8_0-openjdk

--- openjdk/hotspot/src/cpu/aarch32/vm/assembler_aarch32.cpp	2025-08-25 12:58:03.747170504 +0200
+++ openjdk/hotspot/src/cpu/aarch32/vm/assembler_aarch32.cpp	2025-08-25 12:59:08.519612682 +0200
@@ -1341,7 +1341,7 @@
     }
     bool U = offset >= 0;
     assert(0 == (offset & 3), "Can only access aligned data");
-    unsigned imm8 = uabs(offset) / 4;
+    unsigned imm8 = g_uabs(offset) / 4;
     i->f(U, 23), i->rf(_base, 16), i->f(imm8, 7, 0);
   } else {
     ShouldNotReachHere();
@@ -1735,7 +1735,7 @@
 
   //Try plan B - a mov first - need to have destination that is not an arg
   assert(Rd != Rn, "Can't use imm and can't do a mov. I'm in a jam.");
-  mov_immediate(Rd, (uint32_t)uabs(imm), cond, s);
+  mov_immediate(Rd, (uint32_t)g_uabs(imm), cond, s);
   //Now do the non immediate version - copied from the immediate encodings
   {
     starti;
--- openjdk/hotspot/src/cpu/aarch32/vm/assembler_aarch32.hpp	2025-08-25 12:58:03.747239554 +0200
+++ openjdk/hotspot/src/cpu/aarch32/vm/assembler_aarch32.hpp	2025-08-25 12:59:21.028743552 +0200
@@ -264,7 +264,7 @@
 
 #define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)
 
-static inline unsigned long uabs(long n) { return uabs((jlong)n); }
+static inline unsigned long g_uabs(long n) { return g_uabs((jlong)n); }
 
 #define S_DFLT ::lsl()
 #define C_DFLT AL
@@ -1169,10 +1169,10 @@
     switch(decode) {
       case 0b010:
         // LDR, LDRB, STR, STRB
-        return uabs(offset) < (1 << 12);
+        return g_uabs(offset) < (1 << 12);
       case 0b000:
         //LDRD, LDRH, LDRSB, LDRSH, STRH, STRD
-        return uabs(offset) < (1 << 8);
+        return g_uabs(offset) < (1 << 8);
       default:
         ShouldNotReachHere();
     }
@@ -1516,7 +1516,7 @@
   // are out of range.
   static const unsigned long branch_range = NOT_DEBUG(32 * M) DEBUG_ONLY(2 * M);
   static bool reachable_from_branch_at(address branch, address target) {
-    return uabs(target - branch) < branch_range;
+    return g_uabs(target - branch) < branch_range;
   }
 
   void branch_imm_instr(int decode, address dest, Condition cond) {
@@ -2102,7 +2102,7 @@
   static bool is_valid_for_imm12(int imm);
 
   static bool is_valid_for_offset_imm(int imm, int nbits) {
-    return uabs(imm) < (1u << nbits);
+    return g_uabs(imm) < (1u << nbits);
   }
 
   static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
--- openjdk/hotspot/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp	2025-08-25 12:58:03.748987989 +0200
+++ openjdk/hotspot/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp	2025-08-25 12:59:15.501298302 +0200
@@ -89,23 +89,23 @@
       instructions = patch_oop(branch, target) / NativeInstruction::arm_insn_sz;
   } else if (0b010 == (opc >> 1)) {
     // LDR, LDRB, STR, STRB
-    Instruction_aarch32::patch(branch, 11, 0, uabs(offset));
+    Instruction_aarch32::patch(branch, 11, 0, g_uabs(offset));
     Instruction_aarch32::patch(branch, 23, 23, add);
   } else if (0b000 == (opc >> 1)) {
     // LDRH, LDRSH, LDRSB, LDRD, STRH, STRD
-    offset = uabs(offset);
+    offset = g_uabs(offset);
     Instruction_aarch32::patch(branch, 3, 0, offset & 0xf);
     Instruction_aarch32::patch(branch, 11, 8, offset >> 4);
     Instruction_aarch32::patch(branch, 23, 23, add);
   } else if (0b1101 == opc) {
     // VLDR, VSTR - NOTE VSTR(lit) is deprecated
-    offset = uabs(offset);
+    offset = g_uabs(offset);
     assert(0 == (offset & 3), "vldr, vstr can't do unaligned access");
     Instruction_aarch32::patch(branch, 7, 0, offset >> 2);
     Instruction_aarch32::patch(branch, 23, 23, add);
   } else if (0b0010 == opc) {
     // ADR
-    Instruction_aarch32::patch(branch, 11, 0, encode_imm12(uabs(offset)));
+    Instruction_aarch32::patch(branch, 11, 0, encode_imm12(g_uabs(offset)));
     Instruction_aarch32::patch(branch, 23, 22, add ? 0b10 : 0b01 );
   } else {
     ShouldNotReachHere();
@@ -2942,7 +2942,7 @@
 int MacroAssembler::ldrd(Register Rt, Register Rt2, const Address& adr, Register Rtmp, Condition cond) {
     if((0 == Rt->encoding_nocheck() % 2 &&
          (Rt->encoding_nocheck() + 1 == Rt2->encoding_nocheck())) &&
-      (uabs(adr.offset()) < (1 << 8))) {
+      (g_uabs(adr.offset()) < (1 << 8))) {
       /* Good to go with a ldrd */
       ldrd(Rt, adr, cond);
       return 0x0;
@@ -2955,7 +2955,7 @@
 int MacroAssembler::strd(Register Rt, Register Rt2, const Address& adr, Condition cond) {
     if((0 == Rt->encoding_nocheck() % 2 &&
          (Rt->encoding_nocheck() + 1 == Rt2->encoding_nocheck())) &&
-      (uabs(adr.offset()) < (1 << 8))) {
+      (g_uabs(adr.offset()) < (1 << 8))) {
       /* Good to go with a strd */
       strd(Rt, adr, cond);
     } else {
--- openjdk/hotspot/src/cpu/aarch32/vm/stubGenerator_aarch32.cpp	2025-08-25 12:58:03.752817598 +0200
+++ openjdk/hotspot/src/cpu/aarch32/vm/stubGenerator_aarch32.cpp	2025-08-25 12:59:28.974868583 +0200
@@ -654,7 +654,7 @@
   // bytes, so a caller doesn't have to mask them.
 
   void copy_memory_small(Register s, Register d, Register count, Register tmp, bool is_aligned, int step) {
-    const int granularity = uabs(step);
+    const int granularity = g_uabs(step);
     const bool gen_always = !is_aligned || (-4 < step && step < 0);
     Label halfword, done;
 
@@ -716,7 +716,7 @@
   void copy_memory(bool is_aligned, Register s, Register d,
                    Register count, Register tmp, int step) {
     const int small_copy_size = 32; // 1 copy by ldm pays off alignment efforts and push/pop of temp set
-    const int granularity = uabs(step);
+    const int granularity = g_uabs(step);
     const Register tmp2 = rscratch2;
     const Register t0 = r3;
     Label small;