File libjava-aarch64-support.diff of Package gcc48.31962
Index: boehm-gc/include/private/gcconfig.h
===================================================================
--- boehm-gc/include/private/gcconfig.h.orig
+++ boehm-gc/include/private/gcconfig.h
@@ -60,6 +60,13 @@
# endif
/* Determine the machine type: */
+# if defined(__aarch64__)
+# define AARCH64
+# if !defined(LINUX)
+# define NOSYS
+# define mach_type_known
+# endif
+# endif
# if defined(__arm__) || defined(__thumb__)
# define ARM32
# if !defined(LINUX) && !defined(NETBSD)
@@ -239,6 +246,10 @@
# define IA64
# define mach_type_known
# endif
+# if defined(LINUX) && defined(__aarch64__)
+# define AARCH64
+# define mach_type_known
+# endif
# if defined(LINUX) && defined(__arm__)
# define ARM32
# define mach_type_known
@@ -500,6 +511,7 @@
/* running Amdahl UTS4 */
/* S390 ==> 390-like machine */
/* running LINUX */
+ /* AARCH64 ==> ARM AArch64 */
/* ARM32 ==> Intel StrongARM */
/* IA64 ==> Intel IPF */
/* (e.g. Itanium) */
@@ -1833,6 +1845,32 @@
# define HEURISTIC1
# endif
+# ifdef AARCH64
+# define USE_GENERIC_PUSH_REGS
+# define CPP_WORDSZ 64
+# define MACH_TYPE "AARCH64"
+# define ALIGNMENT 8
+# ifndef HBLKSIZE
+# define HBLKSIZE 4096
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+ extern int __data_start[];
+# define DATASTART ((ptr_t)__data_start)
+ extern char _end[];
+# define DATAEND ((ptr_t)(&_end))
+# endif
+# ifdef NOSYS
+ /* __data_start is usually defined in the target linker script. */
+ extern int __data_start[];
+# define DATASTART ((ptr_t)__data_start)
+ extern void *__stack_base__;
+# define STACKBOTTOM ((ptr_t)__stack_base__)
+# endif
+# endif
+
# ifdef ARM32
# define CPP_WORDSZ 32
# define MACH_TYPE "ARM32"
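
Note: the gcconfig.h hunk above roots the collector's static-data scan in two
symbols the GNU linker provides on Linux: __data_start (first address of the
data segment) and _end (one past the end of BSS). A minimal standalone sketch
of the range DATASTART..DATAEND would cover, assuming a glibc/GNU-ld layout;
illustrative only, not part of the patch:

#include <cstdio>

extern int  __data_start[];   // first address of the data segment (GNU ld)
extern char _end[];           // one past the end of the BSS (GNU ld)

int main()
{
    // The collector would scan this range for pointers to heap objects.
    std::printf("static roots: %p .. %p (%ld bytes)\n",
                static_cast<void *>(__data_start),
                static_cast<void *>(_end),
                static_cast<long>(_end - reinterpret_cast<char *>(__data_start)));
    return 0;
}
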
Index: configure
===================================================================
--- configure.orig
+++ configure
@@ -3272,6 +3272,8 @@ esac
# Disable Java if libffi is not supported.
case "${target}" in
+ aarch64*-*-*)
+ ;;
alpha*-*-*)
;;
arm*-*-*)
Index: configure.ac
===================================================================
--- configure.ac.orig
+++ configure.ac
@@ -611,6 +611,8 @@ esac
# Disable Java if libffi is not supported.
case "${target}" in
+ aarch64*-*-*)
+ ;;
alpha*-*-*)
;;
arm*-*-*)
Index: libjava/classpath/native/fdlibm/ieeefp.h
===================================================================
--- libjava/classpath/native/fdlibm/ieeefp.h.orig
+++ libjava/classpath/native/fdlibm/ieeefp.h
@@ -4,6 +4,14 @@
#ifndef __IEEE_BIG_ENDIAN
#ifndef __IEEE_LITTLE_ENDIAN
+#ifdef __aarch64__
+#ifdef __AARCH64EB__
+#define __IEEE_BIG_ENDIAN
+#else
+#define __IEEE_LITTLE_ENDIAN
+#endif
+#endif
+
#ifdef __alpha__
#define __IEEE_LITTLE_ENDIAN
#endif
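
Note: the ieeefp.h hunk selects fdlibm's byte order from the compiler's
predefined macros. AArch64 can run either endianness, and GCC predefines
__AARCH64EB__ for big-endian targets. A small sketch of the same detection,
cross-checked against memory at run time; hypothetical test program, not
part of the patch:

#include <cstdio>

#if defined(__aarch64__)
# if defined(__AARCH64EB__)
#  define DETECTED "big-endian"
# else
#  define DETECTED "little-endian"
# endif
#else
# define DETECTED "not aarch64"
#endif

int main()
{
    // A one-byte view of a known integer reveals the actual byte order.
    unsigned int probe = 1;
    bool little = *reinterpret_cast<unsigned char *>(&probe) == 1;
    std::printf("macros: %s, memory: %s\n",
                DETECTED, little ? "little-endian" : "big-endian");
    return 0;
}
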
Index: libjava/configure.host
===================================================================
--- libjava/configure.host.orig
+++ libjava/configure.host
@@ -81,6 +81,11 @@ ATOMICSPEC=
# This case statement supports per-CPU defaults.
case "${host}" in
+ aarch64*-linux*)
+ libgcj_interpreter=yes
+ sysdeps_dir=aarch64
+ ATOMICSPEC=-fuse-atomic-builtins
+ ;;
arm*-elf)
with_libffi_default=no
PROCESS=Ecos
@@ -289,6 +294,12 @@ EOF
sysdeps_dir=i386
DIVIDESPEC=-f%{m32:no-}use-divide-subroutine
;;
+ aarch64*-linux* )
+ slow_pthread_self=no
+ can_unwind_signal=no
+ CHECKREFSPEC=-fcheck-references
+ DIVIDESPEC=-fuse-divide-subroutine
+ ;;
arm*-linux* )
slow_pthread_self=no
can_unwind_signal=no
Index: libjava/sysdep/aarch64/locks.h
===================================================================
--- /dev/null
+++ libjava/sysdep/aarch64/locks.h
@@ -0,0 +1,58 @@
+// locks.h - Thread synchronization primitives. AArch64 implementation.
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t; /* Integer type big enough for object */
+ /* address. */
+
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+ obj_addr_t old,
+ obj_addr_t new_val)
+{
+ return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+ __sync_synchronize();
+ *(addr) = new_val;
+}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+// On many architectures, the operation makes both guarantees, so the
+// implementation can be the same.
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+ obj_addr_t old,
+ obj_addr_t new_val)
+{
+ return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// AArch64 is weakly ordered, so an explicit barrier is required here.
+inline static void
+read_barrier()
+{
+ __sync_synchronize();
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+ __sync_synchronize();
+}
+#endif
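
Note: the new locks.h maps libgcj's portable synchronization primitives onto
GCC's __sync builtins, which the compiler lowers to AArch64 barrier and
atomic sequences. A sketch of how calling code might use the two primitives:
a test-and-set spinlock. The names lock_word, spin_lock and spin_unlock are
hypothetical; the snippet is illustrative only, not part of the patch:

#include <cstddef>

typedef std::size_t obj_addr_t;

inline static bool
compare_and_swap(volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
{
  // Acquire semantics, mirroring the header above.
  return __sync_bool_compare_and_swap(addr, old, new_val);
}

inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  // Full barrier before the store gives release semantics.
  __sync_synchronize();
  *addr = new_val;
}

static volatile obj_addr_t lock_word = 0;

void spin_lock()
{
  // The successful CAS acquires: the critical section cannot be
  // reordered before it.
  while (!compare_and_swap(&lock_word, 0, 1))
    ;  // spin; production code would back off or yield
}

void spin_unlock()
{
  // Release: stores from the critical section complete before the
  // lock word is cleared.
  release_set(&lock_word, 0);
}
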