usr/src/linux-headers-2.6.17-6/include/asm-ia64/spinlock.h  |  C/C++ Source or Header  |  2006-08-11

#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *    David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define __raw_spin_lock_init(x)            ((x)->lock = 0)

#ifdef ASM_SUPPORTED
/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

static inline void
__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
{
    register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
    /* don't use brl on Itanium... */
    asm volatile ("{\n\t"
              "  mov ar.ccv = r0\n\t"
              "  mov r28 = ip\n\t"
              "  mov r30 = 1;;\n\t"
              "}\n\t"
              "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
              "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
              "cmp4.ne p14, p0 = r30, r0\n\t"
              "mov b6 = r29;;\n\t"
              "mov r27=%2\n\t"
              "(p14) br.cond.spnt.many b6"
              : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
    asm volatile ("{\n\t"
              "  mov ar.ccv = r0\n\t"
              "  mov r28 = ip\n\t"
              "  mov r30 = 1;;\n\t"
              "}\n\t"
              "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
              "cmp4.ne p14, p0 = r30, r0\n\t"
              "mov r27=%2\n\t"
              "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
              : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#else
# ifdef CONFIG_ITANIUM
    /* don't use brl on Itanium... */
    /* mis-declare, so we get the entry-point, not its function descriptor: */
    asm volatile ("mov r30 = 1\n\t"
              "mov r27=%2\n\t"
              "mov ar.ccv = r0;;\n\t"
              "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
              "movl r29 = ia64_spinlock_contention;;\n\t"
              "cmp4.ne p14, p0 = r30, r0\n\t"
              "mov b6 = r29;;\n\t"
              "(p14) br.call.spnt.many b6 = b6"
              : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
    asm volatile ("mov r30 = 1\n\t"
              "mov r27=%2\n\t"
              "mov ar.ccv = r0;;\n\t"
              "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
              "cmp4.ne p14, p0 = r30, r0\n\t"
              "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
              : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#endif
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
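
/*
 * Note: the flags argument is handed to the contention routine in r27
 * (the "mov r27=%2" above); presumably this is the caller's saved psr,
 * so that interrupts can be re-enabled while spinning on a contended
 * lock.  Plain __raw_spin_lock() passes 0, i.e. no saved interrupt
 * state to restore while waiting.
 */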

/* Unlock by doing an ordered store and releasing the cacheline with nta */
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
    barrier();
    asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
}

#else /* !ASM_SUPPORTED */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_lock(x)                                \
do {                                            \
    __u32 *ia64_spinlock_ptr = (__u32 *) (x);                    \
    __u64 ia64_spinlock_val;                            \
    ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);            \
    if (unlikely(ia64_spinlock_val)) {                        \
        do {                                    \
            while (*ia64_spinlock_ptr)                    \
                ia64_barrier();                        \
            ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);    \
        } while (ia64_spinlock_val);                        \
    }                                        \
} while (0)
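
/*
 * The C fallback __raw_spin_lock() above is a test-and-test-and-set loop:
 * try the atomic cmpxchg once, and on failure spin with plain reads until
 * the lock word reads 0 before retrying the cmpxchg.  Spinning with loads
 * keeps the cacheline in a shared state instead of bouncing it around with
 * repeated atomic operations.
 */
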
#define __raw_spin_unlock(x)    do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
#endif /* !ASM_SUPPORTED */

#define __raw_spin_is_locked(x)        ((x)->lock != 0)
#define __raw_spin_trylock(x)        (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define __raw_spin_unlock_wait(lock) \
    do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

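/*
 * Reader-writer locks.  The 32-bit lock word is assumed (per the
 * raw_rwlock_t definition in asm/spinlock_types.h) to hold a 31-bit
 * read_counter in bits 0..30 and a write_lock flag in bit 31.  A value
 * >= 0 therefore means "no writer", and 0 means "no writer and no
 * readers".
 */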
#define __raw_read_can_lock(rw)        (*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)    (*(volatile int *)(rw) == 0)

#define __raw_read_lock(rw)                                \
do {                                            \
    raw_rwlock_t *__read_lock_ptr = (rw);                        \
                                            \
    while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {        \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);            \
        while (*(volatile int *)__read_lock_ptr < 0)                \
            cpu_relax();                            \
    }                                        \
} while (0)
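
/*
 * A reader takes the lock with an atomic fetchadd-with-acquire of +1.  If
 * the result is negative, a writer holds (or is taking) the lock: the
 * reader backs out with a fetchadd of -1 and then spins with plain reads
 * until the word becomes non-negative before retrying.
 */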

#define __raw_read_unlock(rw)                    \
do {                                \
    raw_rwlock_t *__read_lock_ptr = (rw);            \
    ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);    \
} while (0)

#ifdef ASM_SUPPORTED
#define __raw_write_lock(rw)                            \
do {                                        \
     __asm__ __volatile__ (                            \
        "mov ar.ccv = r0\n"                        \
        "dep r29 = -1, r0, 31, 1;;\n"                    \
        "1:\n"                                \
        "ld4 r2 = [%0];;\n"                        \
        "cmp4.eq p0,p7 = r0,r2\n"                    \
        "(p7) br.cond.spnt.few 1b \n"                    \
        "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"            \
        "cmp4.eq p0,p7 = r0, r2\n"                    \
        "(p7) br.cond.spnt.few 1b;;\n"                    \
        :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");        \
} while (0)
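
/*
 * The writer first spins with plain ld4 until the lock word reads 0 (no
 * readers, no writer), then tries to cmpxchg in 0x80000000 (the value
 * built by "dep r29 = -1, r0, 31, 1", i.e. bit 31 set) against an
 * expected value of 0.  If the cmpxchg finds anything other than 0, it
 * goes back to spinning.
 */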

#define __raw_write_trylock(rw)                            \
({                                        \
    register long result;                            \
                                        \
    __asm__ __volatile__ (                            \
        "mov ar.ccv = r0\n"                        \
        "dep r29 = -1, r0, 31, 1;;\n"                    \
        "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                \
        : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");        \
    (result == 0);                                \
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
    u8 *y = (u8 *)x;
    barrier();
    asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}
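
/*
 * The write unlock stores a single zero byte, with release semantics, at
 * offset 3 of the lock word.  On the little-endian byte order used by
 * Linux/ia64 that byte holds bit 31 (the write_lock flag) plus the top
 * bits of read_counter, which in practice are always zero.  Clearing just
 * that byte drops the write_lock bit; presumably a full 4-byte store is
 * avoided because it could wipe out a concurrent reader's fetchadd
 * increment in the low bytes.
 */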

#else /* !ASM_SUPPORTED */

#define __raw_write_lock(l)                                \
({                                            \
    __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);            \
    __u32 *ia64_write_lock_ptr = (__u32 *) (l);                    \
    do {                                        \
        while (*ia64_write_lock_ptr)                        \
            ia64_barrier();                            \
        ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);    \
    } while (ia64_val);                                \
})

#define __raw_write_trylock(rw)                        \
({                                    \
    __u64 ia64_val;                            \
    __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);            \
    ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);    \
    (ia64_val == 0);                        \
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
    barrier();
    x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

static inline int __raw_read_trylock(raw_rwlock_t *x)
{
    union {
        raw_rwlock_t lock;
        __u32 word;
    } old, new;
    old.lock = new.lock = *x;
    old.lock.write_lock = new.lock.write_lock = 0;
    ++new.lock.read_counter;
    return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
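
/*
 * Read trylock: snapshot the lock, force write_lock to 0 in both the
 * expected ("old") and desired ("new") images, bump read_counter in "new",
 * and cmpxchg the whole 32-bit word.  The exchange can only succeed if no
 * writer held the lock at snapshot time and no other CPU changed the word
 * in between; otherwise the trylock simply reports failure.
 */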

#endif /* _ASM_IA64_SPINLOCK_H */