linux-headers-2.6.17-6 / include/asm-x86_64/mutex.h  (C header, 2006-08-11, 3.1 KB, 114 lines)

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#define __mutex_fastpath_lock(v, fail_fn)                               \
do {                                                                    \
        unsigned long dummy;                                            \
                                                                        \
        typecheck(atomic_t *, v);                                       \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK_PREFIX "   decl (%%rdi)    \n"                     \
                        "   js 2f       \n"                             \
                        "1:             \n"                             \
                                                                        \
                LOCK_SECTION_START("")                                  \
                        "2: call "#fail_fn"    \n"                      \
                        "   jmp 1b      \n"                             \
                LOCK_SECTION_END                                        \
                                                                        \
                : "=D" (dummy)                                          \
                : "D" (v)                                               \
                : "rax", "rsi", "rdx", "rcx",                           \
                  "r8", "r9", "r10", "r11", "memory");                  \
} while (0)
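
/*
 * Note on the asm constraints above: the counter pointer is pinned in %rdi,
 * which is also the first-argument register of the x86-64 calling convention,
 * so the out-of-line (LOCK_SECTION) path can call <fail_fn> without shuffling
 * arguments.  The remaining caller-saved registers (rax, rcx, rdx, rsi,
 * r8-r11) are listed as clobbers because <fail_fn> is an ordinary C function;
 * the callee-saved registers rbx, rbp and r12-r15 survive the call and need
 * no mention.  The "memory" clobber covers the update of *v itself, and the
 * LOCK_SECTION placement keeps the uncontended path straight-line code.
 */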

/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
                             int fastcall (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else
                return 0;
}
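
/*
 * Illustrative use (not part of this header): the generic mutex layer is
 * expected to pair this helper with its interruptible lock slowpath, roughly
 *
 *      return __mutex_fastpath_lock_retval(&lock->count,
 *                                  __mutex_lock_interruptible_slowpath);
 *
 * so the caller sees 0 on an uncontended acquire and otherwise whatever the
 * slowpath returns (e.g. -EINTR).  The slowpath name above is recalled from
 * the same-era generic code and is meant as an example only.
 */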

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#define __mutex_fastpath_unlock(v, fail_fn)                             \
do {                                                                    \
        unsigned long dummy;                                            \
                                                                        \
        typecheck(atomic_t *, v);                                       \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK_PREFIX "   incl (%%rdi)    \n"                     \
                        "   jle 2f      \n"                             \
                        "1:             \n"                             \
                                                                        \
                LOCK_SECTION_START("")                                  \
                        "2: call "#fail_fn"    \n"                      \
                        "   jmp 1b      \n"                             \
                LOCK_SECTION_END                                        \
                                                                        \
                : "=D" (dummy)                                          \
                : "D" (v)                                               \
                : "rax", "rsi", "rdx", "rcx",                           \
                  "r8", "r9", "r10", "r11", "memory");                  \
} while (0)

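/*
 * A failed unlock fastpath above leaves the count at or below zero, i.e.
 * still marked locked, so the generic unlock slowpath has to set the count
 * back to 1 itself; returning 1 here tells it to do that.  (An xchg-based
 * fastpath, which releases the lock unconditionally, would return 0.)
 */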
#define __mutex_slowpath_needs_to_unlock()      1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        else
                return 0;
}

#endif
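
The header only provides the architecture fastpaths; the slowpaths and the
counting convention (1 = unlocked, 0 = locked, negative = locked with waiters)
live in the generic mutex code.  The standalone userspace sketch below models
that counting scheme with C11 atomics so the fastpath/slowpath split can be
compiled and run outside the kernel (cc -std=c11 -pthread).  It is
illustrative only: the toy_* names are invented here, the slowpath spins and
yields instead of putting the caller to sleep on a wait list, and no fairness
or debug checking is modelled.

/* Userspace model of the decrement/increment mutex fastpath protocol. */
#include <stdatomic.h>
#include <sched.h>
#include <pthread.h>
#include <stdio.h>

struct toy_mutex {
        atomic_int count;               /* 1 unlocked, <= 0 locked */
};

static void toy_mutex_init(struct toy_mutex *m)
{
        atomic_init(&m->count, 1);
}

static void toy_lock_slowpath(struct toy_mutex *m)
{
        /* Stand-in for the real sleeping slowpath: retry moving 1 -> 0. */
        int expected = 1;

        while (!atomic_compare_exchange_weak(&m->count, &expected, 0)) {
                expected = 1;
                sched_yield();
        }
}

static void toy_mutex_lock(struct toy_mutex *m)
{
        /* Fastpath: one atomic decrement, like the LOCK decl above. */
        if (atomic_fetch_sub(&m->count, 1) - 1 < 0)
                toy_lock_slowpath(m);
}

static void toy_mutex_unlock(struct toy_mutex *m)
{
        /* Fastpath: one atomic increment, like the LOCK incl above. */
        if (atomic_fetch_add(&m->count, 1) + 1 <= 0) {
                /*
                 * Contended: hand the lock back fully, mirroring the
                 * __mutex_slowpath_needs_to_unlock() == 1 behaviour.
                 */
                atomic_store(&m->count, 1);
        }
}

static struct toy_mutex lock;
static long counter;

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                toy_mutex_lock(&lock);
                counter++;
                toy_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        toy_mutex_init(&lock);
        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("counter = %ld (expected 200000)\n", counter);
        return 0;
}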