OSDN Git Service

* os_dep.c, include/private/gc_locks.h: Import CRIS port by Hans-Peter Nilsson.
[pf3gnuchains/gcc-fork.git] / boehm-gc / include / private / gc_locks.h
index b9ff0cf..46605ca 100644 (file)
@@ -43,6 +43,7 @@
  *   
  */  
 # ifdef THREADS
+   void GC_noop1 GC_PROTO((word));
 #  ifdef PCR_OBSOLETE  /* Faster, but broken with multiple lwp's       */
 #    include  "th/PCR_Th.h"
 #    include  "th/PCR_ThCrSec.h"
@@ -75,7 +76,7 @@
 #    define LOCK() RT0u__inCritical++
 #    define UNLOCK() RT0u__inCritical--
 #  endif
-#  ifdef SOLARIS_THREADS
+#  ifdef GC_SOLARIS_THREADS
 #    include <thread.h>
 #    include <signal.h>
      extern mutex_t GC_allocate_ml;
 #      define GC_TEST_AND_SET_DEFINED
 #    endif
 #    if defined(IA64)
+#     include <ia64intrin.h>
        inline static int GC_test_and_set(volatile unsigned int *addr) {
-         long oldval, n = 1;
-         __asm__ __volatile__("xchg4 %0=%1,%2"
-               : "=r"(oldval), "=m"(*addr)
-               : "r"(n), "1"(*addr) : "memory");
-         return oldval;
+         return __sync_lock_test_and_set(addr, 1);
        }
 #      define GC_TEST_AND_SET_DEFINED
-       /* Should this handle post-increment addressing?? */
        inline static void GC_clear(volatile unsigned int *addr) {
-        __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
+         *addr = 0;
        }
 #      define GC_CLEAR_DEFINED
 #    endif
 #    if defined(POWERPC)
         inline static int GC_test_and_set(volatile unsigned int *addr) {
           int oldval;
-          int temp = 1; // locked value
+          int temp = 1; /* locked value */
 
           __asm__ __volatile__(
-               "1:\tlwarx %0,0,%3\n"   // load and reserve
-               "\tcmpwi %0, 0\n"       // if load is
-               "\tbne 2f\n"            //   non-zero, return already set
-               "\tstwcx. %2,0,%1\n"    // else store conditional
-               "\tbne- 1b\n"           // retry if lost reservation
-               "2:\t\n"                // oldval is zero if we set
+               "1:\tlwarx %0,0,%3\n"   /* load and reserve               */
+               "\tcmpwi %0, 0\n"       /* if load is                     */
+               "\tbne 2f\n"            /*   non-zero, return already set */
+               "\tstwcx. %2,0,%1\n"    /* else store conditional         */
+               "\tbne- 1b\n"           /* retry if lost reservation      */
+               "\tsync\n"              /* import barrier                 */
+               "2:\t\n"                /* oldval is zero if we set       */
               : "=&r"(oldval), "=p"(addr)
               : "r"(temp), "1"(addr)
-              : "memory");
-          return (int)oldval;
+              : "cr0","memory");
+          return oldval;
         }
 #       define GC_TEST_AND_SET_DEFINED
         inline static void GC_clear(volatile unsigned int *addr) {
-         __asm__ __volatile__("eieio" ::: "memory");
+         __asm__ __volatile__("eieio" : : : "memory");
           *(addr) = 0;
         }
 #       define GC_CLEAR_DEFINED
                              "       bne %2,2f\n"
                              "       xor %0,%3,%0\n"
                              "       stl_c %0,%1\n"
+#      ifdef __ELF__
                              "       beq %0,3f\n"
+#      else
+                             "       beq %0,1b\n"
+#      endif
                              "       mb\n"
                              "2:\n"
+#      ifdef __ELF__
                              ".section .text2,\"ax\"\n"
                              "3:     br 1b\n"
                              ".previous"
+#      endif
                              :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                              :"Ir" (1), "m" (*addr)
                             :"memory");
           return oldvalue;
         }
 #       define GC_TEST_AND_SET_DEFINED
-        /* Should probably also define GC_clear, since it needs        */
-        /* a memory barrier ??                                 */
+        inline static void GC_clear(volatile unsigned int *addr) {
+          __asm__ __volatile__("mb" : : : "memory");
+          *(addr) = 0;
+        }
+#       define GC_CLEAR_DEFINED
 #    endif /* ALPHA */
 #    ifdef ARM32
         inline static int GC_test_and_set(volatile unsigned int *addr) {
         }
 #       define GC_TEST_AND_SET_DEFINED
 #    endif /* ARM32 */
+#    ifdef CRIS
+        inline static int GC_test_and_set(volatile unsigned int *addr) {
+         /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h.        */
+         /* Included with Hans-Peter Nilsson's permission.             */
+         register unsigned long int ret;
+
+         /* Note the use of a dummy output of *addr to expose the write.
+          * The memory barrier is to stop *other* writes being moved past
+          * this code.
+          */
+           __asm__ __volatile__("clearf\n"
+                                "0:\n\t"
+                                "movu.b [%2],%0\n\t"
+                                "ax\n\t"
+                                "move.b %3,[%2]\n\t"
+                                "bwf 0b\n\t"
+                                "clearf"
+                                : "=&r" (ret), "=m" (*addr)
+                                : "r" (addr), "r" ((int) 1), "m" (*addr)
+                                : "memory");
+           return ret;
+        }
+#       define GC_TEST_AND_SET_DEFINED
+#    endif /* CRIS */
+#    ifdef S390
+       inline static int GC_test_and_set(volatile unsigned int *addr) {
+         int ret;
+         __asm__ __volatile__ (
+          "     l     %0,0(%2)\n"
+          "0:   cs    %0,%1,0(%2)\n"
+          "     jl    0b"
+          : "=&d" (ret)
+          : "d" (1), "a" (addr)
+          : "cc", "memory");
+         return ret;
+       }
+#    endif
 #  endif /* __GNUC__ */
 #  if (defined(ALPHA) && !defined(__GNUC__))
-#    define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
+#    ifndef OSF1
+       --> We currently assume that if gcc is not used, we are
+       --> running under Tru64.
+#    endif
+#    include <machine/builtins.h>
+#    include <c_asm.h>
+#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
 #    define GC_TEST_AND_SET_DEFINED
+#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
+#    define GC_CLEAR_DEFINED
 #  endif
 #  if defined(MSWIN32)
 #    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
 #    define GC_TEST_AND_SET_DEFINED
 #  endif
 #  ifdef MIPS
-#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
+#    ifdef LINUX
+#      include <sys/tas.h>
+#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
+#      define GC_TEST_AND_SET_DEFINED
+#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
-#        define GC_test_and_set(addr, v) test_and_set(addr,v)
+#       ifdef __GNUC__
+#          define GC_test_and_set(addr) _test_and_set((void *)addr,1)
+#       else
+#          define GC_test_and_set(addr) test_and_set((void *)addr,1)
+#       endif
 #    else
-#       define GC_test_and_set(addr, v) __test_and_set(addr,v)
+#       define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
 #       define GC_clear(addr) __lock_release(addr);
 #       define GC_CLEAR_DEFINED
 #    endif
 #    define GC_TEST_AND_SET_DEFINED
 #  endif /* MIPS */
+#  if defined(_AIX)
+#    include <sys/atomic_op.h>
+#    if (defined(_POWER) || defined(_POWERPC)) 
+#      if defined(__GNUC__)  
+         inline static void GC_memsync() {
+           __asm__ __volatile__ ("sync" : : : "memory");
+         }
+#      else
+#        ifndef inline
+#          define inline __inline
+#        endif
+#        pragma mc_func GC_memsync { \
+           "7c0004ac" /* sync (same opcode used for dcs)*/ \
+         }
+#      endif
+#    else 
+#    error dont know how to memsync
+#    endif
+     inline static int GC_test_and_set(volatile unsigned int * addr) {
+          int oldvalue = 0;
+          if (compare_and_swap((void *)addr, &oldvalue, 1)) {
+            GC_memsync();
+            return 0;
+          } else return 1;
+     }
+#    define GC_TEST_AND_SET_DEFINED
+     inline static void GC_clear(volatile unsigned int *addr) {
+          GC_memsync();
+          *(addr) = 0;
+     }
+#    define GC_CLEAR_DEFINED
+
+#  endif
 #  if 0 /* defined(HP_PA) */
      /* The official recommendation seems to be to not use ldcw from   */
      /* user mode.  Since multithreaded incremental collection doesn't */
      /* "set" means 0 and "clear" means 1 here.                */
 #    define GC_test_and_set(addr) !GC_test_and_clear(addr);
 #    define GC_TEST_AND_SET_DEFINED
-#    define GC_clear(addr) GC_noop1(addr); *(volatile unsigned int *)addr = 1;
+#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
 #    define GC_CLEAR_DEFINED
 #  endif
 #    define USE_PTHREAD_LOCKS
 #  endif
 
-#  if defined(LINUX_THREADS) || defined(OSF1_THREADS) \
-      || defined(HPUX_THREADS)
+#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
+      && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
 #    define NO_THREAD (pthread_t)(-1)
 #    include <pthread.h>
 #    if defined(PARALLEL_MARK) 
          {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-               : "=m"(*(addr)), "=r"(result)
-               : "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
+               : "+m"(*(addr)), "=r"(result)
+               : "r" (new_val), "a"(old) : "memory");
           return (GC_bool) result;
          }
 #      endif /* !GENERIC_COMPARE_AND_SWAP */
-       inline static void GC_memory_write_barrier()
+       inline static void GC_memory_barrier()
        {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
          __asm__ __volatile__("" : : : "memory");
        }
 #     endif /* I386 */
+
+#     if defined(POWERPC)
+#      if !defined(GENERIC_COMPARE_AND_SWAP)
+        /* Returns TRUE if the comparison succeeded. */
+        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+            GC_word old, GC_word new_val) 
+        {
+            int result, dummy;
+            __asm__ __volatile__(
+                "1:\tlwarx %0,0,%5\n"
+                  "\tcmpw %0,%4\n"
+                  "\tbne  2f\n"
+                  "\tstwcx. %3,0,%2\n"
+                  "\tbne- 1b\n"
+                  "\tsync\n"
+                  "\tli %1, 1\n"
+                  "\tb 3f\n"
+                "2:\tli %1, 0\n"
+                "3:\t\n"
+                :  "=&r" (dummy), "=r" (result), "=p" (addr)
+                :  "r" (new_val), "r" (old), "2"(addr)
+                : "cr0","memory");
+            return (GC_bool) result;
+        }
+#      endif /* !GENERIC_COMPARE_AND_SWAP */
+        inline static void GC_memory_barrier()
+        {
+            __asm__ __volatile__("sync" : : : "memory");
+        }
+#     endif /* POWERPC */
+
 #     if defined(IA64)
 #      if !defined(GENERIC_COMPARE_AND_SWAP)
          inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
-                                                      GC_word old, GC_word new_val) 
+                                                      GC_word old,
+                                                      GC_word new_val) 
         {
-         unsigned long oldval;
-         __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
-               : "=r"(oldval), "=m"(*addr)
-               : "r"(new_val), "1"(*addr), "r"(old) : "memory");
-         return (oldval == old);
+          return __sync_bool_compare_and_swap (addr, old, new_val);
          }
 #      endif /* !GENERIC_COMPARE_AND_SWAP */
 #      if 0
        /* Shouldn't be needed; we use volatile stores instead. */
-        inline static void GC_memory_write_barrier()
+        inline static void GC_memory_barrier()
         {
-          __asm__ __volatile__("mf" : : : "memory");
+          __sync_synchronize ();
         }
 #      endif /* 0 */
 #     endif /* IA64 */
+#     if defined(ALPHA)
+#      if !defined(GENERIC_COMPARE_AND_SWAP)
+#        if defined(__GNUC__)
+           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                        GC_word old, GC_word new_val) 
+          {
+            unsigned long was_equal;
+             unsigned long temp;
+
+             __asm__ __volatile__(
+                             "1:     ldq_l %0,%1\n"
+                             "       cmpeq %0,%4,%2\n"
+                            "       mov %3,%0\n"
+                             "       beq %2,2f\n"
+                             "       stq_c %0,%1\n"
+                             "       beq %0,1b\n"
+                             "2:\n"
+                             "       mb\n"
+                             :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
+                             : "r" (new_val), "Ir" (old)
+                            :"memory");
+             return was_equal;
+           }
+#        else /* !__GNUC__ */
+           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                        GC_word old, GC_word new_val) 
+         {
+           return __CMP_STORE_QUAD(addr, old, new_val, addr);
+          }
+#        endif /* !__GNUC__ */
+#      endif /* !GENERIC_COMPARE_AND_SWAP */
+#      ifdef __GNUC__
+         inline static void GC_memory_barrier()
+         {
+           __asm__ __volatile__("mb" : : : "memory");
+         }
+#      else
+#       define GC_memory_barrier() asm("mb")
+#      endif /* !__GNUC__ */
+#     endif /* ALPHA */
+#     if defined(S390)
+#      if !defined(GENERIC_COMPARE_AND_SWAP)
+         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                         GC_word old, GC_word new_val)
+         {
+           int retval;
+           __asm__ __volatile__ (
+#            ifndef __s390x__
+               "     cs  %1,%2,0(%3)\n"
+#            else
+               "     csg %1,%2,0(%3)\n"
+#            endif
+             "     ipm %0\n"
+             "     srl %0,28\n"
+             : "=&d" (retval), "+d" (old)
+             : "d" (new_val), "a" (addr)
+             : "cc", "memory");
+           return retval == 0;
+         }
+#      endif
+#     endif
 #     if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns the original value of *addr.        */
         inline static GC_word GC_atomic_add(volatile GC_word *addr,
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
 #      else /* !GC_ASSERTIONS */
+#        if defined(NO_PTHREAD_TRYLOCK)
+#          define LOCK() GC_lock();
+#        else /* !defined(NO_PTHREAD_TRYLOCK) */
 #        define LOCK() \
           { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
+#        endif
 #        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
 #      endif /* !GC_ASSERTIONS */
 #   endif /* USE_PTHREAD_LOCKS */
 #   ifdef GC_ASSERTIONS
       extern pthread_t GC_mark_lock_holder;
 #   endif
-#  endif /* LINUX_THREADS || OSF1_THREADS  || HPUX_THREADS */
-#  if defined(IRIX_THREADS)
+#  endif /* GC_PTHREADS with linux_threads.c implementation */
+#  if defined(GC_IRIX_THREADS)
 #    include <pthread.h>
      /* This probably should never be included, but I can't test       */
      /* on Irix anymore.                                               */
 #    include <mutex.h>
 
-     extern unsigned long GC_allocate_lock;
+     extern volatile unsigned int GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)         */
        /* POSIX scheduling rules are subject to convoys in high contention     */
        /* applications.  This is basically a spin lock.                        */
 #    define NO_THREAD (pthread_t)(-1)
 #    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
 #    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
 #    define UNLOCK() GC_clear(&GC_allocate_lock);
      extern VOLATILE GC_bool GC_collecting;
 #    define ENTER_GC() \
                    GC_collecting = 1; \
                }
 #    define EXIT_GC() GC_collecting = 0;
-#  endif /* IRIX_THREADS */
-#  ifdef WIN32_THREADS
-#    include <windows.h>
-     GC_API CRITICAL_SECTION GC_allocate_ml;
-#    define LOCK() EnterCriticalSection(&GC_allocate_ml);
-#    define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+#  endif /* GC_IRIX_THREADS */
+#  if defined(GC_WIN32_THREADS)
+#    if defined(GC_PTHREADS)
+#      include <pthread.h>
+       extern pthread_mutex_t GC_allocate_ml;
+#      define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
+#      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+#    else
+#      include <windows.h>
+       GC_API CRITICAL_SECTION GC_allocate_ml;
+#      define LOCK() EnterCriticalSection(&GC_allocate_ml);
+#      define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+#    endif
 #  endif
 #  ifndef SET_LOCK_HOLDER
 #      define SET_LOCK_HOLDER()