Enable java for aarch64
author: schwab <schwab@138bc75d-0d04-0410-961f-82ee72b054a4>
Tue, 16 Apr 2013 08:49:51 +0000 (08:49 +0000)
committer: schwab <schwab@138bc75d-0d04-0410-961f-82ee72b054a4>
Tue, 16 Apr 2013 08:49:51 +0000 (08:49 +0000)
* configure.ac (aarch64-*-*): Don't disable java.
* configure: Regenerate.

libjava/:
* configure.host: Add support for aarch64.
* sysdep/aarch64/locks.h: New file.

libjava/classpath/:
* native/fdlibm/ieeefp.h: Add support for aarch64.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@197997 138bc75d-0d04-0410-961f-82ee72b054a4

ChangeLog
configure
configure.ac
libjava/ChangeLog
libjava/classpath/ChangeLog.gcj
libjava/classpath/native/fdlibm/ieeefp.h
libjava/configure.host
libjava/sysdep/aarch64/locks.h [new file with mode: 0644]

index 2c74fe5..965e227 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-04-16  Andreas Schwab  <schwab@suse.de>
+
+       * configure.ac (aarch64-*-*): Don't disable java.
+       * configure: Regenerate.
+
 2013-04-10  Chung-Ju Wu  <jasonwucj@gmail.com>
 
        * MAINTAINERS (Write After Approval): Add myself.
index d809535..e161cad 100755 (executable)
--- a/configure
+++ b/configure
@@ -3272,6 +3272,8 @@ esac
 
 # Disable Java if libffi is not supported.
 case "${target}" in
+  aarch64-*-*)
+    ;;
   alpha*-*-*)
     ;;
   arm*-*-*)
index 48ec1aa..bec489f 100644 (file)
@@ -611,6 +611,8 @@ esac
 
 # Disable Java if libffi is not supported.
 case "${target}" in
+  aarch64-*-*)
+    ;;
   alpha*-*-*)
     ;;
   arm*-*-*)
index 7ad315e..ba3bd57 100644 (file)
@@ -1,3 +1,8 @@
+2013-04-16  Andreas Schwab  <schwab@suse.de>
+
+       * configure.host: Add support for aarch64.
+       * sysdep/aarch64/locks.h: New file.
+
 2013-03-12  Andrew John Hughes  <gnu.andrew@redhat.com>
 
        * include/posix-threads.h:
index 62105e2..6395554 100644 (file)
@@ -1,3 +1,7 @@
+2013-04-16  Andreas Schwab  <schwab@suse.de>
+
+       * native/fdlibm/ieeefp.h: Add support for aarch64.
+
 2013-02-21  Jakub Jelinek  <jakub@redhat.com>
 
        PR bootstrap/56258
index c230bbb..7ef2ae7 100644 (file)
@@ -4,6 +4,14 @@
 #ifndef __IEEE_BIG_ENDIAN
 #ifndef __IEEE_LITTLE_ENDIAN
 
+#ifdef __aarch64__
+#ifdef __AARCH64EB__
+#define __IEEE_BIG_ENDIAN
+#else
+#define __IEEE_LITTLE_ENDIAN
+#endif
+#endif
+
 #ifdef __alpha__
 #define __IEEE_LITTLE_ENDIAN
 #endif
index 0c3b41c..f2d1bb5 100644 (file)
@@ -81,6 +81,11 @@ ATOMICSPEC=
 
 # This case statement supports per-CPU defaults.
 case "${host}" in
+  aarch64*-linux*)
+       libgcj_interpreter=yes
+       sysdeps_dir=aarch64
+       ATOMICSPEC=-fuse-atomic-builtins
+       ;;
   arm*-elf)
        with_libffi_default=no
        PROCESS=Ecos
@@ -289,6 +294,12 @@ EOF
        sysdeps_dir=i386
        DIVIDESPEC=-f%{m32:no-}use-divide-subroutine
        ;;
+  aarch64*-linux* )
+       slow_pthread_self=no
+       can_unwind_signal=no
+       CHECKREFSPEC=-fcheck-references
+       DIVIDESPEC=-fuse-divide-subroutine
+       ;;
   arm*-linux* )
        slow_pthread_self=no
        can_unwind_signal=no
diff --git a/libjava/sysdep/aarch64/locks.h b/libjava/sysdep/aarch64/locks.h
new file mode 100644 (file)
index 0000000..f91473d
--- /dev/null
@@ -0,0 +1,57 @@
+// locks.h - Thread synchronization primitives. AArch64 implementation.
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;     /* Integer type big enough for object   */
+                               /* address.                             */
+
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment (enforced here via a full __sync_synchronize barrier).
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __sync_synchronize();
+  *addr = new_val;
+}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+// On many architectures, the operation makes both guarantees, so the
+// implementation can be the same.
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                        obj_addr_t old,
+                        obj_addr_t new_val)
+{
+  return __sync_bool_compare_and_swap(addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+inline static void
+read_barrier()
+{
+  __sync_synchronize();
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  __sync_synchronize();
+}
+#endif