include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only
Author: Joe Perches <joe@perches.com>
Sun, 23 Mar 2008 08:02:53 +0000 (01:02 -0700)
Committer: Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:25 +0000 (17:41 +0200)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/asm-x86/mutex_32.h

index 9a6b3da..73e928e 100644 (file)
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)                          \
-do {                                                                   \
-       unsigned int dummy;                                             \
-                                                                       \
-       typecheck(atomic_t *, count);                                   \
+#define __mutex_fastpath_lock(count, fail_fn)                  \
+do {                                                           \
+       unsigned int dummy;                                     \
+                                                               \
+       typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
-                                                                       \
-       __asm__ __volatile__(                                           \
-               LOCK_PREFIX "   decl (%%eax)    \n"                     \
-                       "   jns 1f              \n"                     \
-                       "   call "#fail_fn"     \n"                     \
-                       "1:                     \n"                     \
-                                                                       \
-               :"=a" (dummy)                                           \
-               : "a" (count)                                           \
-               : "memory", "ecx", "edx");                              \
+                                                               \
+       asm volatile(LOCK_PREFIX "   decl (%%eax)\n"            \
+                    "   jns 1f \n"                             \
+                    "   call " #fail_fn "\n"                   \
+                    "1:\n"                                     \
+                    : "=a" (dummy)                             \
+                    : "a" (count)                              \
+                    : "memory", "ecx", "edx");                 \
 } while (0)
 
 
@@ -50,8 +48,8 @@ do {                                                                  \
  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+                                              int (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)                                \
-do {                                                                   \
-       unsigned int dummy;                                             \
-                                                                       \
-       typecheck(atomic_t *, count);                                   \
+#define __mutex_fastpath_unlock(count, fail_fn)                        \
+do {                                                           \
+       unsigned int dummy;                                     \
+                                                               \
+       typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
-                                                                       \
-       __asm__ __volatile__(                                           \
-               LOCK_PREFIX "   incl (%%eax)    \n"                     \
-                       "   jg  1f              \n"                     \
-                       "   call "#fail_fn"     \n"                     \
-                       "1:                     \n"                     \
-                                                                       \
-               :"=a" (dummy)                                           \
-               : "a" (count)                                           \
-               : "memory", "ecx", "edx");                              \
+                                                               \
+       asm volatile(LOCK_PREFIX "   incl (%%eax)\n"            \
+                    "   jg     1f\n"                           \
+                    "   call " #fail_fn "\n"                   \
+                    "1:\n"                                     \
+                    : "=a" (dummy)                             \
+                    : "a" (count)                              \
+                    : "memory", "ecx", "edx");                 \
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock()     1
@@ -104,8 +100,8 @@ do {                                                                        \
  * Additionally, if the value was < 0 originally, this function must not leave
  * it to 0 on failure.
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+                                          int (*fail_fn)(atomic_t *))
 {
        /*
         * We have two variants here. The cmpxchg based one is the best one