File libffi-aarch64.patch of Package libjnidispatch
Index: twall-jna-3e5b84f/native/libffi/Makefile.am
===================================================================
--- twall-jna-3e5b84f.orig/native/libffi/Makefile.am
+++ twall-jna-3e5b84f/native/libffi/Makefile.am
@@ -5,6 +5,7 @@ AUTOMAKE_OPTIONS = foreign subdir-object
SUBDIRS = include testsuite man
EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
+ src/aarch64/ffi.c src/aarch64/ffitarget.h \
src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \
src/arm/ffi.c src/arm/sysv.S src/arm/trampoline.S \
src/arm/ffitarget.h src/avr32/ffi.c src/avr32/sysv.S \
@@ -144,6 +145,9 @@ endif
if POWERPC_FREEBSD
nodist_libffi_la_SOURCES += src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
endif
+if AARCH64
+nodist_libffi_la_SOURCES += src/aarch64/sysv.S src/aarch64/ffi.c
+endif
if ARM
nodist_libffi_la_SOURCES += src/arm/sysv.S src/arm/ffi.c
if FFI_EXEC_TRAMPOLINE_TABLE
Index: twall-jna-3e5b84f/native/libffi/Makefile.in
===================================================================
--- twall-jna-3e5b84f.orig/native/libffi/Makefile.in
+++ twall-jna-3e5b84f/native/libffi/Makefile.in
@@ -52,20 +52,21 @@ target_triplet = @target@
@POWERPC_AIX_TRUE@am__append_14 = src/powerpc/ffi_darwin.c src/powerpc/aix.S src/powerpc/aix_closure.S
@POWERPC_DARWIN_TRUE@am__append_15 = src/powerpc/ffi_darwin.c src/powerpc/darwin.S src/powerpc/darwin_closure.S
@POWERPC_FREEBSD_TRUE@am__append_16 = src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
-@ARM_TRUE@am__append_17 = src/arm/sysv.S src/arm/ffi.c
-@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__append_18 = src/arm/trampoline.S
-@AVR32_TRUE@am__append_19 = src/avr32/sysv.S src/avr32/ffi.c
-@LIBFFI_CRIS_TRUE@am__append_20 = src/cris/sysv.S src/cris/ffi.c
-@FRV_TRUE@am__append_21 = src/frv/eabi.S src/frv/ffi.c
-@S390_TRUE@am__append_22 = src/s390/sysv.S src/s390/ffi.c
-@X86_64_TRUE@am__append_23 = src/x86/ffi64.c src/x86/unix64.S src/x86/ffi.c src/x86/sysv.S
-@SH_TRUE@am__append_24 = src/sh/sysv.S src/sh/ffi.c
-@SH64_TRUE@am__append_25 = src/sh64/sysv.S src/sh64/ffi.c
-@PA_LINUX_TRUE@am__append_26 = src/pa/linux.S src/pa/ffi.c
-@PA_HPUX_TRUE@am__append_27 = src/pa/hpux32.S src/pa/ffi.c
+@AARCH64_TRUE@am__append_17 = src/aarch64/sysv.S src/aarch64/ffi.c
+@ARM_TRUE@am__append_18 = src/arm/sysv.S src/arm/ffi.c
+@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__append_19 = src/arm/trampoline.S
+@AVR32_TRUE@am__append_20 = src/avr32/sysv.S src/avr32/ffi.c
+@LIBFFI_CRIS_TRUE@am__append_21 = src/cris/sysv.S src/cris/ffi.c
+@FRV_TRUE@am__append_22 = src/frv/eabi.S src/frv/ffi.c
+@S390_TRUE@am__append_23 = src/s390/sysv.S src/s390/ffi.c
+@X86_64_TRUE@am__append_24 = src/x86/ffi64.c src/x86/unix64.S src/x86/ffi.c src/x86/sysv.S
+@SH_TRUE@am__append_25 = src/sh/sysv.S src/sh/ffi.c
+@SH64_TRUE@am__append_26 = src/sh64/sysv.S src/sh64/ffi.c
+@PA_LINUX_TRUE@am__append_27 = src/pa/linux.S src/pa/ffi.c
+@PA_HPUX_TRUE@am__append_28 = src/pa/hpux32.S src/pa/ffi.c
# Build debug. Define FFI_DEBUG on the commandline so that, when building with
# MSVC, it can link against the debug CRT.
-@FFI_DEBUG_TRUE@am__append_28 = -DFFI_DEBUG
+@FFI_DEBUG_TRUE@am__append_29 = -DFFI_DEBUG
subdir = .
DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(srcdir)/doc/stamp-vti \
@@ -150,18 +151,19 @@ am_libffi_la_OBJECTS = src/prep_cif.lo s
@POWERPC_FREEBSD_TRUE@am__objects_16 = src/powerpc/ffi.lo \
@POWERPC_FREEBSD_TRUE@ src/powerpc/sysv.lo \
@POWERPC_FREEBSD_TRUE@ src/powerpc/ppc_closure.lo
-@ARM_TRUE@am__objects_17 = src/arm/sysv.lo src/arm/ffi.lo
-@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__objects_18 = src/arm/trampoline.lo
-@AVR32_TRUE@am__objects_19 = src/avr32/sysv.lo src/avr32/ffi.lo
-@LIBFFI_CRIS_TRUE@am__objects_20 = src/cris/sysv.lo src/cris/ffi.lo
-@FRV_TRUE@am__objects_21 = src/frv/eabi.lo src/frv/ffi.lo
-@S390_TRUE@am__objects_22 = src/s390/sysv.lo src/s390/ffi.lo
-@X86_64_TRUE@am__objects_23 = src/x86/ffi64.lo src/x86/unix64.lo \
+@AARCH64_TRUE@am__objects_17 = src/aarch64/sysv.lo src/aarch64/ffi.lo
+@ARM_TRUE@am__objects_18 = src/arm/sysv.lo src/arm/ffi.lo
+@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__objects_19 = src/arm/trampoline.lo
+@AVR32_TRUE@am__objects_20 = src/avr32/sysv.lo src/avr32/ffi.lo
+@LIBFFI_CRIS_TRUE@am__objects_21 = src/cris/sysv.lo src/cris/ffi.lo
+@FRV_TRUE@am__objects_22 = src/frv/eabi.lo src/frv/ffi.lo
+@S390_TRUE@am__objects_23 = src/s390/sysv.lo src/s390/ffi.lo
+@X86_64_TRUE@am__objects_24 = src/x86/ffi64.lo src/x86/unix64.lo \
@X86_64_TRUE@ src/x86/ffi.lo src/x86/sysv.lo
-@SH_TRUE@am__objects_24 = src/sh/sysv.lo src/sh/ffi.lo
-@SH64_TRUE@am__objects_25 = src/sh64/sysv.lo src/sh64/ffi.lo
-@PA_LINUX_TRUE@am__objects_26 = src/pa/linux.lo src/pa/ffi.lo
-@PA_HPUX_TRUE@am__objects_27 = src/pa/hpux32.lo src/pa/ffi.lo
+@SH_TRUE@am__objects_25 = src/sh/sysv.lo src/sh/ffi.lo
+@SH64_TRUE@am__objects_26 = src/sh64/sysv.lo src/sh64/ffi.lo
+@PA_LINUX_TRUE@am__objects_27 = src/pa/linux.lo src/pa/ffi.lo
+@PA_HPUX_TRUE@am__objects_28 = src/pa/hpux32.lo src/pa/ffi.lo
nodist_libffi_la_OBJECTS = $(am__objects_1) $(am__objects_2) \
$(am__objects_3) $(am__objects_4) $(am__objects_5) \
$(am__objects_6) $(am__objects_7) $(am__objects_8) \
@@ -171,17 +173,17 @@ nodist_libffi_la_OBJECTS = $(am__objects
$(am__objects_18) $(am__objects_19) $(am__objects_20) \
$(am__objects_21) $(am__objects_22) $(am__objects_23) \
$(am__objects_24) $(am__objects_25) $(am__objects_26) \
- $(am__objects_27)
+ $(am__objects_27) $(am__objects_28)
libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) \
$(nodist_libffi_la_OBJECTS)
libffi_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(libffi_la_LDFLAGS) $(LDFLAGS) -o $@
libffi_convenience_la_LIBADD =
-am__objects_28 = src/prep_cif.lo src/types.lo src/raw_api.lo \
+am__objects_29 = src/prep_cif.lo src/types.lo src/raw_api.lo \
src/java_raw_api.lo src/closures.lo
-am_libffi_convenience_la_OBJECTS = $(am__objects_28)
-am__objects_29 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
+am_libffi_convenience_la_OBJECTS = $(am__objects_29)
+am__objects_30 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
$(am__objects_4) $(am__objects_5) $(am__objects_6) \
$(am__objects_7) $(am__objects_8) $(am__objects_9) \
$(am__objects_10) $(am__objects_11) $(am__objects_12) \
@@ -189,8 +191,9 @@ am__objects_29 = $(am__objects_1) $(am__
$(am__objects_16) $(am__objects_17) $(am__objects_18) \
$(am__objects_19) $(am__objects_20) $(am__objects_21) \
$(am__objects_22) $(am__objects_23) $(am__objects_24) \
- $(am__objects_25) $(am__objects_26) $(am__objects_27)
-nodist_libffi_convenience_la_OBJECTS = $(am__objects_29)
+ $(am__objects_25) $(am__objects_26) $(am__objects_27) \
+ $(am__objects_28)
+nodist_libffi_convenience_la_OBJECTS = $(am__objects_30)
libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \
$(nodist_libffi_convenience_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@
@@ -415,6 +418,7 @@ top_srcdir = @top_srcdir@
AUTOMAKE_OPTIONS = foreign subdir-objects
SUBDIRS = include testsuite man
EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
+ src/aarch64/ffi.c src/aarch64/ffitarget.h \
src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \
src/arm/ffi.c src/arm/sysv.S src/arm/trampoline.S \
src/arm/ffitarget.h src/avr32/ffi.c src/avr32/sysv.S \
@@ -502,10 +506,10 @@ nodist_libffi_la_SOURCES = $(am__append_
$(am__append_18) $(am__append_19) $(am__append_20) \
$(am__append_21) $(am__append_22) $(am__append_23) \
$(am__append_24) $(am__append_25) $(am__append_26) \
- $(am__append_27)
+ $(am__append_27) $(am__append_28)
libffi_convenience_la_SOURCES = $(libffi_la_SOURCES)
nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES)
-AM_CFLAGS = -g $(am__append_28)
+AM_CFLAGS = -g $(am__append_29)
libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) $(AM_LTLDFLAGS)
AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src
AM_CCASFLAGS = $(AM_CPPFLAGS)
@@ -731,6 +735,16 @@ src/powerpc/darwin.lo: src/powerpc/$(am_
src/powerpc/$(DEPDIR)/$(am__dirstamp)
src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \
src/powerpc/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/$(am__dirstamp):
+ @$(MKDIR_P) src/aarch64
+ @: > src/aarch64/$(am__dirstamp)
+src/aarch64/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/aarch64/$(DEPDIR)
+ @: > src/aarch64/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \
+ src/aarch64/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \
+ src/aarch64/$(DEPDIR)/$(am__dirstamp)
src/arm/$(am__dirstamp):
@$(MKDIR_P) src/arm
@: > src/arm/$(am__dirstamp)
@@ -822,6 +836,10 @@ libffi_convenience.la: $(libffi_convenie
mostlyclean-compile:
-rm -f *.$(OBJEXT)
+ -rm -f src/aarch64/ffi.$(OBJEXT)
+ -rm -f src/aarch64/ffi.lo
+ -rm -f src/aarch64/sysv.$(OBJEXT)
+ -rm -f src/aarch64/sysv.lo
-rm -f src/alpha/ffi.$(OBJEXT)
-rm -f src/alpha/ffi.lo
-rm -f src/alpha/osf.$(OBJEXT)
@@ -946,6 +964,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/prep_cif.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/raw_api.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/types.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/osf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/ffi.Plo@am__quote@
@@ -1052,6 +1072,7 @@ mostlyclean-libtool:
clean-libtool:
-rm -rf .libs _libs
-rm -rf src/.libs src/_libs
+ -rm -rf src/aarch64/.libs src/aarch64/_libs
-rm -rf src/alpha/.libs src/alpha/_libs
-rm -rf src/arm/.libs src/arm/_libs
-rm -rf src/avr32/.libs src/avr32/_libs
@@ -1602,6 +1623,8 @@ distclean-generic:
-rm -f doc/$(am__dirstamp)
-rm -f src/$(DEPDIR)/$(am__dirstamp)
-rm -f src/$(am__dirstamp)
+ -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/aarch64/$(am__dirstamp)
-rm -f src/alpha/$(DEPDIR)/$(am__dirstamp)
-rm -f src/alpha/$(am__dirstamp)
-rm -f src/arm/$(DEPDIR)/$(am__dirstamp)
@@ -1645,7 +1668,7 @@ clean-am: clean-aminfo clean-generic cle
distclean: distclean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
+ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-hdr distclean-libtool distclean-tags
@@ -1765,7 +1788,7 @@ installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
+ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-aminfo \
maintainer-clean-generic maintainer-clean-vti
Index: twall-jna-3e5b84f/native/libffi/configure
===================================================================
--- twall-jna-3e5b84f.orig/native/libffi/configure
+++ twall-jna-3e5b84f/native/libffi/configure
@@ -649,6 +649,8 @@ AVR32_FALSE
AVR32_TRUE
ARM_FALSE
ARM_TRUE
+AARCH64_FALSE
+AARCH64_TRUE
POWERPC_FREEBSD_FALSE
POWERPC_FREEBSD_TRUE
POWERPC_DARWIN_FALSE
@@ -13035,6 +13037,10 @@ fi
TARGETDIR="unknown"
case "$host" in
+ aarch64*-*-*)
+ TARGET=AARCH64; TARGETDIR=aarch64
+ ;;
+
alpha*-*-*)
TARGET=ALPHA; TARGETDIR=alpha;
# Support 128-bit long double, changeable via command-line switch.
@@ -13306,6 +13312,14 @@ else
POWERPC_FREEBSD_FALSE=
fi
+ if test x$TARGET = xAARCH64; then
+ AARCH64_TRUE=
+ AARCH64_FALSE='#'
+else
+ AARCH64_TRUE='#'
+ AARCH64_FALSE=
+fi
+
if test x$TARGET = xARM; then
ARM_TRUE=
ARM_FALSE='#'
@@ -14689,6 +14703,10 @@ if test -z "${POWERPC_FREEBSD_TRUE}" &&
as_fn_error $? "conditional \"POWERPC_FREEBSD\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${AARCH64_TRUE}" && test -z "${AARCH64_FALSE}"; then
+ as_fn_error $? "conditional \"AARCH64\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${ARM_TRUE}" && test -z "${ARM_FALSE}"; then
as_fn_error $? "conditional \"ARM\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
Index: twall-jna-3e5b84f/native/libffi/configure.ac
===================================================================
--- twall-jna-3e5b84f.orig/native/libffi/configure.ac
+++ twall-jna-3e5b84f/native/libffi/configure.ac
@@ -53,6 +53,10 @@ AM_CONDITIONAL(TESTSUBDIR, test -d $srcd
TARGETDIR="unknown"
case "$host" in
+ aarch64*-*-*)
+ TARGET=AARCH64; TARGETDIR=aarch64
+ ;;
+
alpha*-*-*)
TARGET=ALPHA; TARGETDIR=alpha;
# Support 128-bit long double, changeable via command-line switch.
@@ -212,6 +216,7 @@ AM_CONDITIONAL(POWERPC, test x$TARGET =
AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX)
AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN)
AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD)
+AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64)
AM_CONDITIONAL(ARM, test x$TARGET = xARM)
AM_CONDITIONAL(AVR32, test x$TARGET = xAVR32)
AM_CONDITIONAL(LIBFFI_CRIS, test x$TARGET = xLIBFFI_CRIS)
Index: twall-jna-3e5b84f/native/libffi/src/aarch64/ffi.c
===================================================================
--- /dev/null
+++ twall-jna-3e5b84f/native/libffi/src/aarch64/ffi.c
@@ -0,0 +1,1076 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include <stdio.h>
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+/* Stack alignment requirement in bytes */
+#define AARCH64_STACK_ALIGN 16
+
+#define N_X_ARG_REG 8
+#define N_V_ARG_REG 8
+
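+/* Flag set in cif->aarch64_flags when an argument or the return
+   value is allocated to a vector register; the assembler stubs in
+   sysv.S test this bit to decide whether the V registers must be
+   saved and restored around a call. */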
+#define AARCH64_FFI_WITH_V (1 << AARCH64_FFI_WITH_V_BIT)
+
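+/* One 64-bit lane of a V register, viewable as a double-word or as
+   a pair of single words; used by get_s_addr () and get_d_addr ()
+   to locate values on big- and little-endian targets. */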
+union _d
+{
+ UINT64 d;
+ UINT32 s[2];
+};
+
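+/* In-memory image of the registers saved and restored by the
+   assembler stubs: 32 X registers followed by 32 128-bit V
+   registers, matching AARCH64_CALL_CONTEXT_SIZE in ffitarget.h. */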
+struct call_context
+{
+ UINT64 x [AARCH64_N_XREG];
+ struct
+ {
+ union _d d[2];
+ } v [AARCH64_N_VREG];
+};
+
+static void *
+get_x_addr (struct call_context *context, unsigned n)
+{
+ return &context->x[n];
+}
+
+static void *
+get_s_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1].s[1];
+#else
+ return &context->v[n].d[0].s[0];
+#endif
+}
+
+static void *
+get_d_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1];
+#else
+ return &context->v[n].d[0];
+#endif
+}
+
+static void *
+get_v_addr (struct call_context *context, unsigned n)
+{
+ return &context->v[n];
+}
+
+/* Return the memory location at which a basic type would reside
+ were it to have been stored in register n. */
+
+static void *
+get_basic_type_addr (unsigned short type, struct call_context *context,
+ unsigned n)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return get_s_addr (context, n);
+ case FFI_TYPE_DOUBLE:
+ return get_d_addr (context, n);
+ case FFI_TYPE_LONGDOUBLE:
+ return get_v_addr (context, n);
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return get_x_addr (context, n);
+ default:
+ FFI_ASSERT (0);
+ return NULL;
+ }
+}
+
+/* Return the alignment width for each of the basic types. */
+
+static size_t
+get_basic_type_alignment (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return sizeof (UINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+/* Return the size in bytes for each of the basic types. */
+
+static size_t
+get_basic_type_size (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return sizeof (UINT32);
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+ case FFI_TYPE_UINT8:
+ return sizeof (UINT8);
+ case FFI_TYPE_SINT8:
+ return sizeof (SINT8);
+ case FFI_TYPE_UINT16:
+ return sizeof (UINT16);
+ case FFI_TYPE_SINT16:
+ return sizeof (SINT16);
+ case FFI_TYPE_UINT32:
+ return sizeof (UINT32);
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ return sizeof (SINT32);
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ return sizeof (UINT64);
+ case FFI_TYPE_SINT64:
+ return sizeof (SINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+extern void
+ffi_call_SYSV (unsigned (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ unsigned,
+ void (*fn)(void));
+
+extern void
+ffi_closure_SYSV (ffi_closure *);
+
+/* Test for an FFI floating point representation. */
+
+static unsigned
+is_floating_type (unsigned short type)
+{
+ return (type == FFI_TYPE_FLOAT || type == FFI_TYPE_DOUBLE
+ || type == FFI_TYPE_LONGDOUBLE);
+}
+
+/* Test for a homogeneous structure. */
+
+static unsigned short
+get_homogeneous_type (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned i;
+ unsigned short candidate_type
+ = get_homogeneous_type (ty->elements[0]);
+      for (i = 1; ty->elements[i]; i++)
+ {
+ unsigned short iteration_type = 0;
+ /* If we have a nested struct, we must find its homogeneous type.
+ If that fits with our candidate type, we are still
+ homogeneous. */
+ if (ty->elements[i]->type == FFI_TYPE_STRUCT
+ && ty->elements[i]->elements)
+ {
+ iteration_type = get_homogeneous_type (ty->elements[i]);
+ }
+ else
+ {
+ iteration_type = ty->elements[i]->type;
+ }
+
+ /* If we are not homogeneous, return FFI_TYPE_STRUCT. */
+ if (candidate_type != iteration_type)
+ return FFI_TYPE_STRUCT;
+ }
+ return candidate_type;
+ }
+
+ /* Base case, we have no more levels of nesting, so we
+ are a basic type, and so, trivially homogeneous in that type. */
+ return ty->type;
+}
+
+/* Determine the number of elements within a STRUCT.
+
+ Note, we must handle nested structs.
+
+ If ty is not a STRUCT this function will return 0. */
+
+static unsigned
+element_count (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned n;
+ unsigned elems = 0;
+ for (n = 0; ty->elements[n]; n++)
+ {
+ if (ty->elements[n]->type == FFI_TYPE_STRUCT
+ && ty->elements[n]->elements)
+ elems += element_count (ty->elements[n]);
+ else
+ elems++;
+ }
+ return elems;
+ }
+ return 0;
+}
+
+/* Test for a homogeneous floating point aggregate.
+
+ A homogeneous floating point aggregate is a homogeneous aggregate of
+   a half-, single- or double-precision floating point type with one
+ to four elements. Note that this includes nested structs of the
+ basic type. */
+
+static int
+is_hfa (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT
+ && ty->elements[0]
+ && is_floating_type (get_homogeneous_type (ty)))
+ {
+ unsigned n = element_count (ty);
+ return n >= 1 && n <= 4;
+ }
+ return 0;
+}
+
+/* Test if an ffi_type is a candidate for passing in a register.
+
+ This test does not check that sufficient registers of the
+ appropriate class are actually available, merely that IFF
+ sufficient registers are available then the argument will be passed
+ in register(s).
+
+ Note that an ffi_type that is deemed to be a register candidate
+ will always be returned in registers.
+
+ Returns 1 if a register candidate else 0. */
+
+static int
+is_register_candidate (ffi_type *ty)
+{
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ return 1;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ return 1;
+ }
+ else if (ty->size > 16)
+ {
+ /* Too large. Will be replaced with a pointer to memory. The
+ pointer MAY be passed in a register, but the value will
+ not. This test specifically fails since the argument will
+ never be passed by value in registers. */
+ return 0;
+ }
+ else
+ {
+ /* Might be passed in registers depending on the number of
+ registers required. */
+ return (ty->size + 7) / 8 < N_X_ARG_REG;
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+
+ return 0;
+}
+
+/* Test if an ffi_type argument or result is a candidate for a vector
+ register. */
+
+static int
+is_v_register_candidate (ffi_type *ty)
+{
+ return is_floating_type (ty->type)
+ || (ty->type == FFI_TYPE_STRUCT && is_hfa (ty));
+}
+
+/* Representation of the procedure call argument marshalling
+ state.
+
+ The terse state variable names match the names used in the AARCH64
+ PCS. */
+
+struct arg_state
+{
+ unsigned ngrn; /* Next general-purpose register number. */
+ unsigned nsrn; /* Next vector register number. */
+ unsigned nsaa; /* Next stack offset. */
+};
+
+/* Initialize a procedure call argument marshalling state. */
+static void
+arg_init (struct arg_state *state, unsigned call_frame_size)
+{
+ state->ngrn = 0;
+ state->nsrn = 0;
+ state->nsaa = 0;
+}
+
+/* Return the number of available consecutive core argument
+ registers. */
+
+static unsigned
+available_x (struct arg_state *state)
+{
+ return N_X_ARG_REG - state->ngrn;
+}
+
+/* Return the number of available consecutive vector argument
+ registers. */
+
+static unsigned
+available_v (struct arg_state *state)
+{
+ return N_V_ARG_REG - state->nsrn;
+}
+
+static void *
+allocate_to_x (struct call_context *context, struct arg_state *state)
+{
+  FFI_ASSERT (state->ngrn < N_X_ARG_REG);
+ return get_x_addr (context, (state->ngrn)++);
+}
+
+static void *
+allocate_to_s (struct call_context *context, struct arg_state *state)
+{
+  FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_s_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_d (struct call_context *context, struct arg_state *state)
+{
+  FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_d_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_v (struct call_context *context, struct arg_state *state)
+{
+  FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_v_addr (context, (state->nsrn)++);
+}
+
+/* Allocate an aligned slot on the stack and return a pointer to it. */
+static void *
+allocate_to_stack (struct arg_state *state, void *stack, unsigned alignment,
+ unsigned size)
+{
+ void *allocation;
+
+ /* Round up the NSAA to the larger of 8 or the natural
+ alignment of the argument's type. */
+  state->nsaa = ALIGN (state->nsaa, alignment);
+  state->nsaa = ALIGN (state->nsaa, 8);
+
+ allocation = stack + state->nsaa;
+
+ state->nsaa += size;
+ return allocation;
+}
+
+static void
+copy_basic_type (void *dest, void *source, unsigned short type)
+{
+  /* This is necessary to ensure that basic types are copied
+     sign-extended to 64 bits as libffi expects. */
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ *(float *) dest = *(float *) source;
+ break;
+ case FFI_TYPE_DOUBLE:
+ *(double *) dest = *(double *) source;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ *(long double *) dest = *(long double *) source;
+ break;
+ case FFI_TYPE_UINT8:
+ *(ffi_arg *) dest = *(UINT8 *) source;
+ break;
+ case FFI_TYPE_SINT8:
+ *(ffi_sarg *) dest = *(SINT8 *) source;
+ break;
+ case FFI_TYPE_UINT16:
+ *(ffi_arg *) dest = *(UINT16 *) source;
+ break;
+ case FFI_TYPE_SINT16:
+ *(ffi_sarg *) dest = *(SINT16 *) source;
+ break;
+ case FFI_TYPE_UINT32:
+ *(ffi_arg *) dest = *(UINT32 *) source;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ *(ffi_sarg *) dest = *(SINT32 *) source;
+ break;
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ *(ffi_arg *) dest = *(UINT64 *) source;
+ break;
+ case FFI_TYPE_SINT64:
+ *(ffi_sarg *) dest = *(SINT64 *) source;
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ }
+}
+
+static void
+copy_hfa_to_reg_or_stack (void *memory,
+ ffi_type *ty,
+ struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state)
+{
+ unsigned elems = element_count (ty);
+ if (available_v (state) < elems)
+ {
+ /* There are insufficient V registers. Further V register allocations
+ are prevented, the NSAA is adjusted (by allocate_to_stack ())
+ and the argument is copied to memory at the adjusted NSAA. */
+ state->nsrn = N_V_ARG_REG;
+ memcpy (allocate_to_stack (state, stack, ty->alignment, ty->size),
+ memory,
+ ty->size);
+ }
+ else
+ {
+      int i;
+      unsigned short type = get_homogeneous_type (ty);
+ for (i = 0; i < elems; i++)
+ {
+ void *reg = allocate_to_v (context, state);
+ copy_basic_type (reg, memory, type);
+ memory += get_basic_type_size (type);
+ }
+ }
+}
+
+/* Either allocate an appropriate register for the argument type, or if
+ none are available, allocate a stack slot and return a pointer
+ to the allocated space. */
+
+static void *
+allocate_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ unsigned short type)
+{
+ size_t alignment = get_basic_type_alignment (type);
+ size_t size = alignment;
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ /* This is the only case for which the allocated stack size
+ should not match the alignment of the type. */
+ size = sizeof (UINT32);
+ /* Fall through. */
+ case FFI_TYPE_DOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_d (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_v (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ if (state->ngrn < N_X_ARG_REG)
+ return allocate_to_x (context, state);
+ state->ngrn = N_X_ARG_REG;
+ break;
+ default:
+ FFI_ASSERT (0);
+ }
+
+ return allocate_to_stack (state, stack, alignment, size);
+}
+
+/* Copy a value to an appropriate register, or if none are
+ available, to the stack. */
+
+static void
+copy_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ void *value,
+ unsigned short type)
+{
+ copy_basic_type (
+ allocate_to_register_or_stack (context, stack, state, type),
+ value,
+ type);
+}
+
+/* Marshal the arguments from FFI representation to procedure call
+ context and stack. */
+
+static unsigned
+aarch64_prep_args (struct call_context *context, unsigned char *stack,
+ extended_cif *ecif)
+{
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(ecif->cif->bytes, 16));
+
+ for (i = 0; i < ecif->cif->nargs; i++)
+ {
+ ffi_type *ty = ecif->cif->arg_types[i];
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ /* If the argument is a basic type the argument is allocated to an
+ appropriate register, or if none are available, to the stack. */
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ copy_to_register_or_stack (context, stack, &state,
+ ecif->avalue[i], ty->type);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ copy_hfa_to_reg_or_stack (ecif->avalue[i], ty, context,
+ stack, &state);
+ }
+ else if (ty->size > 16)
+ {
+ /* If the argument is a composite type that is larger than 16
+ bytes, then the argument has been copied to memory, and
+ the argument is replaced by a pointer to the copy. */
+
+ copy_to_register_or_stack (context, stack, &state,
+ &(ecif->avalue[i]), FFI_TYPE_POINTER);
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ /* If the argument is a composite type and the size in
+ double-words is not more than the number of available
+ X registers, then the argument is copied into consecutive
+ X registers. */
+ int j;
+ for (j = 0; j < (ty->size + 7) / 8; j++)
+ {
+ memcpy (allocate_to_x (context, &state),
+ &(((UINT64 *) ecif->avalue[i])[j]),
+ sizeof (UINT64));
+ }
+ }
+ else
+ {
+ /* Otherwise, there are insufficient X registers. Further X
+ register allocations are prevented, the NSAA is adjusted
+ (by allocate_to_stack ()) and the argument is copied to
+ memory at the adjusted NSAA. */
+ state.ngrn = N_X_ARG_REG;
+
+ memcpy (allocate_to_stack (&state, stack, ty->alignment,
+ ty->size), ecif->avalue + i, ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+
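+  /* Return the cif flags; ffi_call_SYSV () tests
+     AARCH64_FFI_WITH_V_BIT in this value to decide whether the
+     vector argument registers need to be loaded and saved. */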
+ return ecif->cif->aarch64_flags;
+}
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ /* Round the stack up to a multiple of the stack alignment requirement. */
+ cif->bytes =
+ (cif->bytes + (AARCH64_STACK_ALIGN - 1)) & ~ (AARCH64_STACK_ALIGN - 1);
+
+  /* Initialize our flags. We are interested in whether this CIF will
+     touch a vector register; if so we will enable context save and
+     load for those registers, otherwise not. This is intended to be
+     friendly to lazy float context switching in the kernel. */
+ cif->aarch64_flags = 0;
+
+ if (is_v_register_candidate (cif->rtype))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < cif->nargs; i++)
+ if (is_v_register_candidate (cif->arg_types[i]))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ break;
+ }
+ }
+
+ return FFI_OK;
+}
+
+/* Call a function with the provided arguments and capture the return
+ value. */
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ extended_cif ecif;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+ ecif.rvalue = rvalue;
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ {
+ struct call_context context;
+ unsigned stack_bytes;
+
+        /* Figure out the total amount of stack space we need. The
+           call frame space above needs to be 16-byte aligned to
+           ensure correct alignment of the first object inserted in
+           that space, hence the ALIGN applied to cif->bytes. */
+ stack_bytes = ALIGN(cif->bytes, 16);
+
+ memset (&context, 0, sizeof (context));
+ if (is_register_candidate (cif->rtype))
+ {
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif, stack_bytes, fn);
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type,
+ &context, 0);
+ copy_basic_type (rvalue, addr, cif->rtype->type);
+ break;
+ }
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int j;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (j = 0; j < elems; j++)
+ {
+ void *reg = get_basic_type_addr (type, &context, j);
+ copy_basic_type (rvalue, reg, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+ unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64));
+ memcpy (rvalue, get_x_addr (&context, 0), size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (get_x_addr (&context, 8), &rvalue, sizeof (UINT64));
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif,
+ stack_bytes, fn);
+ }
+ break;
+ }
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+}
+
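+/* 12 bytes of trampoline code.  FFI_INIT_TRAMPOLINE appends three
+   64-bit literals: fn at offset 12 (loaded into x16 and branched to)
+   and the trampoline_data pair ctx/flags at offsets 20 and 28
+   (addressed via x17), giving FFI_TRAMPOLINE_SIZE of 36 bytes. */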
+static unsigned char trampoline [] =
+{ 0x70, 0x00, 0x00, 0x58, /* ldr x16, 1f */
+ 0x91, 0x00, 0x00, 0x10, /* adr x17, 2f */
+ 0x00, 0x02, 0x1f, 0xd6 /* br x16 */
+};
+
+/* Build a trampoline. */
+
+#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX,FLAGS) \
+ ({unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ UINT64 __fun = (UINT64)(FUN); \
+ UINT64 __ctx = (UINT64)(CTX); \
+ UINT64 __flags = (UINT64)(FLAGS); \
+ memcpy (__tramp, trampoline, sizeof (trampoline)); \
+ memcpy (__tramp + 12, &__fun, sizeof (__fun)); \
+ memcpy (__tramp + 20, &__ctx, sizeof (__ctx)); \
+ memcpy (__tramp + 28, &__flags, sizeof (__flags)); \
+ __clear_cache(__tramp, __tramp + FFI_TRAMPOLINE_SIZE); \
+ })
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_SYSV, codeloc,
+ cif->aarch64_flags);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* Primary handler to setup and invoke a function within a closure.
+
+ A closure when invoked enters via the assembler wrapper
+ ffi_closure_SYSV(). The wrapper allocates a call context on the
+ stack, saves the interesting registers (from the perspective of
+ the calling convention) into the context then passes control to
+ ffi_closure_SYSV_inner() passing the saved context and a pointer to
+ the stack at the point ffi_closure_SYSV() was invoked.
+
+ On the return path the assembler wrapper will reload call context
+   registers.
+
+   ffi_closure_SYSV_inner() marshals the call context into ffi value
+   descriptors, invokes the wrapped function, then marshals the return
+   value back into the call context. */
+
+void
+ffi_closure_SYSV_inner (ffi_closure *closure, struct call_context *context,
+ void *stack)
+{
+ ffi_cif *cif = closure->cif;
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ void *rvalue = NULL;
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(cif->bytes, 16));
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ ffi_type *ty = cif->arg_types[i];
+
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ avalue[i] = allocate_to_register_or_stack (context, stack,
+ &state, ty->type);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ unsigned n = element_count (ty);
+ if (available_v (&state) < n)
+ {
+ state.nsrn = N_V_ARG_REG;
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ else
+ {
+ switch (get_homogeneous_type (ty))
+ {
+ case FFI_TYPE_FLOAT:
+ {
+ /* Eeek! We need a pointer to the structure,
+ however the homogeneous float elements are
+ being passed in individual S registers,
+ therefore the structure is not represented as
+ a contiguous sequence of bytes in our saved
+ register context. We need to fake up a copy
+                       of the structure laid out in memory
+                       correctly.  The fake can be tossed once the
+                       closure function has returned, hence alloca()
+ is sufficient. */
+ int j;
+ UINT32 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_s (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+ case FFI_TYPE_DOUBLE:
+ {
+                    /* Eeek! We need a pointer to the structure,
+                       however the homogeneous double elements are
+                       being passed in individual D registers,
+                       therefore the structure is not represented as
+                       a contiguous sequence of bytes in our saved
+                       register context. We need to fake up a copy
+                       of the structure laid out in memory
+                       correctly.  The fake can be tossed once the
+                       closure function has returned, hence alloca()
+                       is sufficient. */
+ int j;
+ UINT64 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_d (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+ case FFI_TYPE_LONGDOUBLE:
+ memcpy (&avalue[i],
+ allocate_to_v (context, &state),
+ sizeof (*avalue));
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ }
+ else if (ty->size > 16)
+ {
+ /* Replace Composite type of size greater than 16 with a
+ pointer. */
+ memcpy (&avalue[i],
+ allocate_to_register_or_stack (context, stack,
+ &state, FFI_TYPE_POINTER),
+ sizeof (avalue[i]));
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ avalue[i] = get_x_addr (context, state.ngrn);
+ state.ngrn += (ty->size + 7) / 8;
+ }
+ else
+ {
+ state.ngrn = N_X_ARG_REG;
+
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+
+ /* Figure out where the return value will be passed, either in
+ registers or in a memory block allocated by the caller and passed
+ in x8. */
+
+ if (is_register_candidate (cif->rtype))
+ {
+ /* Register candidates are *always* returned in registers. */
+
+      /* Allocate a scratchpad for the return value; we will let the
+         callee scribble the result into the scratchpad, then move the
+         contents into the appropriate return value location for the
+         calling convention. */
+ rvalue = alloca (cif->rtype->size);
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+
+ /* Copy the return value into the call context so that it is returned
+ as expected to our caller. */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type, context, 0);
+ copy_basic_type (addr, rvalue, cif->rtype->type);
+ break;
+ }
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int i;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (i = 0; i < elems; i++)
+ {
+ void *reg = get_basic_type_addr (type, context, i);
+ copy_basic_type (reg, rvalue, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+              unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64));
+ memcpy (get_x_addr (context, 0), rvalue, size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (&rvalue, get_x_addr (context, 8), sizeof (UINT64));
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+ }
+}
+
Index: twall-jna-3e5b84f/native/libffi/src/aarch64/ffitarget.h
===================================================================
--- /dev/null
+++ twall-jna-3e5b84f/native/libffi/src/aarch64/ffitarget.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi
+ {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+ } ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
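+/* 12 bytes of code followed by three 64-bit literals (fn, ctx,
+   flags); see FFI_INIT_TRAMPOLINE in src/aarch64/ffi.c. */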
+#define FFI_TRAMPOLINE_SIZE 36
+#define FFI_NATIVE_RAW_API 0
+
+/* ---- Internal ---- */
+
+
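+/* Per-cif flag word computed by ffi_prep_cif_machdep (); records
+   whether any argument or the return value uses a V register. */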
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_flags
+
+#define AARCH64_FFI_WITH_V_BIT 0
+
+#define AARCH64_N_XREG 32
+#define AARCH64_N_VREG 32
+#define AARCH64_CALL_CONTEXT_SIZE (AARCH64_N_XREG * 8 + AARCH64_N_VREG * 16)
+
+#endif
Index: twall-jna-3e5b84f/native/libffi/src/aarch64/sysv.S
===================================================================
--- /dev/null
+++ twall-jna-3e5b84f/native/libffi/src/aarch64/sysv.S
@@ -0,0 +1,307 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
+#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
+#define cfi_restore(reg) .cfi_restore reg
+#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg
+
+ .text
+ .globl ffi_call_SYSV
+ .type ffi_call_SYSV, #function
+
+/* ffi_call_SYSV()
+
+ Create a stack frame, setup an argument context, call the callee
+ and extract the result.
+
+   The maximum required argument stack size is provided;
+   ffi_call_SYSV() allocates that stack space, then calls the
+   prepare_fn to populate the register context and stack.  The
+   argument passing registers are loaded from the register
+   context and the callee is called; on return the result
+   passing registers are saved back to the context.  Our caller
+   will extract the return value from the final state of the
+   saved register context.
+
+ Prototype:
+
+ extern unsigned
+ ffi_call_SYSV (void (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ unsigned required_stack_size,
+ void (*fn)(void));
+
+ Therefore on entry we have:
+
+ x0 prepare_fn
+ x1 &context
+ x2 &ecif
+ x3 bytes
+ x4 fn
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x24
+ saved x23
+ saved x22
+ sp' -> saved x21
+ ...
+ sp -> (constructed callee stack arguments)
+ ==
+
+ Voila! */
+
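+/* Frame space for the four callee-saved temporaries x21-x24. */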
+#define ffi_call_SYSV_FS (8 * 4)
+
+ .cfi_startproc
+ffi_call_SYSV:
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+ cfi_def_cfa_register (x29)
+ sub sp, sp, #ffi_call_SYSV_FS
+
+ stp x21, x22, [sp, 0]
+ cfi_rel_offset (x21, 0 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x22, 8 - ffi_call_SYSV_FS)
+
+ stp x23, x24, [sp, 16]
+ cfi_rel_offset (x23, 16 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x24, 24 - ffi_call_SYSV_FS)
+
+ mov x21, x1
+ mov x22, x2
+ mov x24, x4
+
+	/* Allocate the stack space for the actual arguments; many
+	   arguments will be passed in registers, but we assume the
+	   worst case and allocate sufficient stack for ALL of
+	   the arguments. */
+ sub sp, sp, x3
+
+ /* unsigned (*prepare_fn) (struct call_context *context,
+ unsigned char *stack, extended_cif *ecif);
+ */
+ mov x23, x0
+ mov x0, x1
+ mov x1, sp
+ /* x2 already in place */
+ blr x23
+
+ /* Preserve the flags returned. */
+ mov x23, x0
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the vector argument passing registers. */
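+	/* The V register bank starts at byte offset 8 * AARCH64_N_XREG
+	   (= 256) in the call_context, after the 32 X registers. */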
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the core argument passing registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+
+ /* Don't forget x8 which may be holding the address of a return buffer.
+ */
+ ldr x8, [x21, #8*8]
+
+ blr x24
+
+ /* Save the core argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+
+ /* Note nothing useful ever comes back in x8! */
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the vector argument passing registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+ /* All done, unwind our stack frame. */
+ ldp x21, x22, [x29, # - ffi_call_SYSV_FS]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ ldp x23, x24, [x29, # - ffi_call_SYSV_FS + 16]
+ cfi_restore (x23)
+ cfi_restore (x24)
+
+ mov sp, x29
+ cfi_def_cfa_register (sp)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+
+ .cfi_endproc
+ .size ffi_call_SYSV, .-ffi_call_SYSV
+
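+/* Frame space for the saved x21/x22 pair plus the call context. */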
+#define ffi_closure_SYSV_FS (8 * 2 + AARCH64_CALL_CONTEXT_SIZE)
+
+/* ffi_closure_SYSV
+
+ Closure invocation glue. This is the low level code invoked directly by
+ the closure trampoline to setup and call a closure.
+
+   On entry x17 points to a struct trampoline_data; x16 has been
+   clobbered and all other registers are preserved.
+
+   We allocate a call context and save the argument passing registers,
+   then invoke the generic C ffi_closure_SYSV_inner() function to do all
+   the real work; on return we load the result passing registers back from
+   the call context.
+
+ On entry
+
+ extern void
+ ffi_closure_SYSV (struct trampoline_data *);
+
+ struct trampoline_data
+ {
+ UINT64 *ffi_closure;
+ UINT64 flags;
+ };
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x22
+ saved x21
+ ...
+ sp -> call_context
+ ==
+
+ Voila! */
+
+ .text
+ .globl ffi_closure_SYSV
+ .cfi_startproc
+ffi_closure_SYSV:
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+
+ sub sp, sp, #ffi_closure_SYSV_FS
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+
+ stp x21, x22, [x29, #-16]
+ cfi_rel_offset (x21, 0)
+ cfi_rel_offset (x22, 8)
+
+ /* Load x21 with &call_context. */
+ mov x21, sp
+ /* Preserve our struct trampoline_data * */
+ mov x22, x17
+
+ /* Save the rest of the argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+ /* Don't forget we may have been given a result scratch pad address.
+ */
+ str x8, [x21, #64]
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+	/* Load &ffi_closure. */
+ ldr x0, [x22, #0]
+ mov x1, x21
+ /* Compute the location of the stack at the point that the
+ trampoline was called. */
+ add x2, x29, #16
+
+ bl ffi_closure_SYSV_inner
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the result passing vector registers. */
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the result passing core registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+	/* Note nothing useful is returned in x8. */
+
+ /* We are done, unwind our frame. */
+ ldp x21, x22, [x29, #-16]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ mov sp, x29
+ cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+ .cfi_endproc
+ .size ffi_closure_SYSV, .-ffi_closure_SYSV