/*
 * kernel/locking/rtmutex.c
 *
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                              is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
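
/*
 * A compiled-out sketch of the encoding described above: the owner
 * task_struct pointer and the "has waiters" bit share one word, with
 * RT_MUTEX_HAS_WAITERS (1UL, from rtmutex_common.h) in bit 0. These
 * helpers are illustrative duplicates of the rtmutex_common.h
 * accessors, not part of the implementation.
 */
#if 0
static inline struct task_struct *example_owner(struct rt_mutex *lock)
{
        /* Mask off bit 0 to recover the plain task pointer. */
        return (struct task_struct *)
                ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static inline bool example_has_waiters(struct rt_mutex *lock)
{
        /* Bit 0 alone encodes the "lock has waiters" state. */
        return (unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS;
}
#endif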

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture
 * supports cmpxchg and there is no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock(&lock->wait_lock);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *                                      mark_rt_mutex_waiters(lock);
         *                                      acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *                                      enqueue_waiter();
         *                                      unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
        return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock(&lock->wait_lock);
        return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return (left->task->dl.deadline < right->task->dl.deadline);

        return 0;
}
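
/*
 * A compiled-out sketch of the ordering rt_mutex_waiter_less()
 * implements: a lower ->prio wins (kernel priority numbering), and two
 * deadline waiters fall back to the earlier absolute deadline. The
 * waiter variables here are hypothetical.
 */
#if 0
static void example_waiter_order(struct rt_mutex_waiter *a,
                                 struct rt_mutex_waiter *b)
{
        /* An RT waiter at prio 10 beats one at prio 20. */
        a->prio = 10;
        b->prio = 20;
        BUG_ON(!rt_mutex_waiter_less(a, b));

        /* Two DL waiters with equal prio: the earlier deadline wins. */
        a->prio = b->prio = -1;         /* dl_prio() range */
        a->task->dl.deadline = 100;
        b->task->dl.deadline = 200;
        BUG_ON(!rt_mutex_waiter_less(a, b));
}
#endif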

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        if (lock->waiters_leftmost == &waiter->tree_entry)
                lock->waiters_leftmost = rb_next(&waiter->tree_entry);

        rb_erase(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                task->pi_waiters_leftmost = &waiter->pi_tree_entry;

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return NULL;

        return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
        if (!task_has_pi_waiters(task))
                return 0;

        return task_top_pi_waiter(task)->task->prio <= newprio;
}
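
/*
 * A compiled-out sketch of how a sched_setscheduler()-style caller
 * might consult rt_mutex_check_prio(). The function below is
 * hypothetical; only rt_mutex_check_prio() and rt_mutex_setprio()
 * are real.
 */
#if 0
static void example_sched_setscheduler(struct task_struct *p, int newprio)
{
        /*
         * Nonzero: the top pi waiter's priority is at least as high
         * (numerically <=) as newprio, so the boosted ->prio must be
         * left alone and only the task's normal parameters change.
         */
        if (rt_mutex_check_prio(p, newprio))
                return;         /* keep the boost in effect */

        rt_mutex_setprio(p, newprio);
}
#endif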

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio || dl_prio(prio))
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain. The limit is
 * tunable at runtime via the kernel.max_lock_depth sysctl:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:       the task owning the mutex (owner) for which a chain walk is
 *              probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
 *              things for a task that has just got its priority adjusted, and
 *              is waiting on a mutex)
 * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
 *              we dropped its pi_lock. Is never dereferenced, only used for
 *              comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *              its priority to the mutex owner (can be NULL in the case
 *              depicted above or if the top waiter has gone away and we
 *              are actually deboosting the owner)
 * @top_task:   the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }
 retry:
        /*
         * The task cannot go away as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task.
                 */
                if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
                        goto out_unlock_pi;
        }

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /*
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);

        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        /*
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        /*
         * We reached the end of the lock chain. Stop right here. No
         * point in going back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait list if the
 *          callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
{
        unsigned long flags;

        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
         * other tasks which try to modify @lock into the slow path
         * and they serialize on @lock->wait_lock.
         *
         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
         * as explained at the top of this file if and only if:
         *
         * - There is a lock owner. The caller must fixup the
         *   transient state if it does a trylock or leaves the lock
         *   function due to a signal or timeout.
         *
         * - @task acquires the lock and there are no other
         *   waiters. This is undone in rt_mutex_set_owner(@task) at
         *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);

        /*
         * If @lock has an owner, give up.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * If @waiter != NULL, @task has already enqueued the waiter
         * into @lock waiter list. If @waiter == NULL then this is a
         * trylock attempt.
         */
        if (waiter) {
                /*
                 * If waiter is not the highest priority waiter of
                 * @lock, give up.
                 */
                if (waiter != rt_mutex_top_waiter(lock))
                        return 0;

                /*
                 * We can acquire the lock. Remove the waiter from the
                 * lock waiters list.
                 */
                rt_mutex_dequeue(lock, waiter);

        } else {
                /*
                 * If the lock has waiters already we check whether @task is
                 * eligible to take over the lock.
                 *
                 * If there are no other waiters, @task can acquire
                 * the lock.  @task->pi_blocked_on is NULL, so it does
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        /*
                         * If @task->prio is greater than or equal to
                         * the top waiter priority (kernel view),
                         * @task lost.
                         */
                        if (task->prio >= rt_mutex_top_waiter(lock)->prio)
                                return 0;

                        /*
                         * The current top waiter stays enqueued. We
                         * don't have to change anything in the lock
                         * waiters order.
                         */
                } else {
                        /*
                         * No waiters. Take the lock without the
                         * pi_lock dance. @task->pi_blocked_on is NULL
                         * and we have no waiters to enqueue in @task's
                         * pi waiters list.
                         */
                        goto takeit;
                }
        }

        /*
         * Clear @task->pi_blocked_on. Requires protection by
         * @task->pi_lock. Redundant operation for the @waiter == NULL
         * case, but conditionals are more expensive than a redundant
         * store.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
         * other waiters exist we have to insert the highest priority
         * waiter into @task->pi_waiters list.
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        /*
         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
         * are still waiters or clears it.
         */
        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
        unsigned long flags;

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other waiter would detect the deadlock and return
         * -EDEADLK, which is wrong, as the other waiter is not in a
         * deadlock situation.
         */
        if (owner == task)
                return -EDEADLK;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
                                         next_lock, waiter, task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        rt_mutex_dequeue_pi(current, waiter);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * It's safe to dereference waiter as it cannot go away as
         * long as we hold lock->wait_lock. The waiter task needs to
         * acquire it in order to dequeue the waiter.
         */
        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, after having just failed
 * try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                rt_mutex_dequeue_pi(owner, waiter);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(owner, next);
                }
                __rt_mutex_adjust_prio(owner);

                /* Store the lock on which owner is blocked or NULL */
                next_lock = task_blocked_on_lock(owner);

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority change.
 *
 * Called from sched_setscheduler()
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || (waiter->prio == task->prio &&
                        !dl_prio(task->prio))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:                the rt_mutex to take
 * @state:               the state the task should block in (TASK_INTERRUPTIBLE
 *                       or TASK_UNINTERRUPTIBLE)
 * @timeout:             the pre-initialized and started timer, or NULL for none
 * @waiter:              the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                     struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Yell loudly and stop the task right here.
         */
        rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret;

        /*
         * If the lock already has an owner we fail to get the lock.
         * This can be done without taking the @lock->wait_lock as
         * it is only being read, and this is a trylock anyway.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The mutex currently has no owner. Lock the wait lock and
         * try to acquire the lock.
         */
        raw_spin_lock(&lock->wait_lock);

        ret = try_to_take_rt_mutex(lock, current, NULL);

        /*
         * try_to_take_rt_mutex() sets the lock waiters bit
         * unconditionally. Clean this up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         * foo->lock->owner = NULL;
         *                      rtmutex_lock(foo->lock);   <- fast path
         *                      free = atomic_dec_and_test(foo->refcnt);
         *                      rtmutex_unlock(foo->lock); <- fast path
         *                      if (free)
         *                              kfree(foo);
         * raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *      owner = rt_mutex_owner(lock);
         *      clear_rt_mutex_waiters(lock);
         *      raw_spin_unlock(&lock->wait_lock);
         *      if (cmpxchg(&lock->owner, owner, 0) == owner)
         *              return;
         *      goto retry;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *      lock->owner = NULL;
         *      raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock) == true)
                        return;
                /* Relock the rtmutex and try again */
                raw_spin_lock(&lock->wait_lock);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         */
        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
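
/*
 * A compiled-out usage sketch: protecting shared data with an rt_mutex
 * from a context that may sleep. The structure and its fields are
 * hypothetical; only the rt_mutex calls are real.
 */
#if 0
struct example_dev {
        struct rt_mutex lock;
        int counter;
};

static void example_update(struct example_dev *dev)
{
        rt_mutex_lock(&dev->lock);      /* may sleep; pi-boosts the owner */
        dev->counter++;
        rt_mutex_unlock(&dev->lock);    /* undoes any pi boost */
}
#endif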

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
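
/*
 * A compiled-out sketch of a timed acquisition, following the pattern
 * the futex code uses with a stack-allocated hrtimer_sleeper: the
 * caller sets the absolute expiry and rt_mutex_slowlock() starts the
 * timer. The function name and clock choice here are hypothetical.
 */
#if 0
static int example_timed_lock(struct rt_mutex *lock, ktime_t abs_time)
{
        struct hrtimer_sleeper timeout;
        int ret;

        hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
                              HRTIMER_MODE_ABS);
        hrtimer_init_sleeper(&timeout, current);
        hrtimer_set_expires(&timeout.timer, abs_time);

        /* 0, -EINTR or -ETIMEDOUT; deadlock detection off */
        ret = rt_mutex_timed_lock(lock, &timeout, 0);

        destroy_hrtimer_on_stack(&timeout.timer);
        return ret;
}
#endif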

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
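
/*
 * A compiled-out sketch of trylock use from a path that must not
 * block: like mutex_trylock(), it returns 1 on success and 0 on
 * contention, so failure just means "try again later". The function
 * below is hypothetical.
 */
#if 0
static bool example_try_update(struct rt_mutex *lock, int *val)
{
        if (!rt_mutex_trylock(lock))
                return false;   /* contended; caller retries later */

        (*val)++;
        rt_mutex_unlock(lock);
        return true;
}
#endif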

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT;
        lock->waiters_leftmost = NULL;

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
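
/*
 * A compiled-out sketch of initialization: rt_mutex_init() is the
 * usual wrapper from <linux/rtmutex.h> (it supplies a lock name for
 * the debug build), built on the __rt_mutex_init() exported above.
 * The embedding structure is hypothetical.
 */
#if 0
struct example_ctx {
        struct rt_mutex lock;
};

static void example_setup(struct example_ctx *ctx)
{
        rt_mutex_init(&ctx->lock);      /* must not be locked yet */
}
#endif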

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be unlocked
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}
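
/*
 * A compiled-out sketch of how the two proxy-lock halves pair up in
 * the FUTEX_REQUEUE_PI flow: the requeueing task starts the
 * acquisition on the waiter's behalf, and the woken waiter finishes
 * it. All names except the two rt_mutex calls are hypothetical.
 */
#if 0
/* Requeueing side; waiter_task is not running here: */
static int example_requeue_side(struct rt_mutex *pi_mutex,
                                struct rt_mutex_waiter *rt_waiter,
                                struct task_struct *waiter_task)
{
        /* 1: got the lock for waiter_task, wake it up; 0: it blocked */
        return rt_mutex_start_proxy_lock(pi_mutex, rt_waiter,
                                         waiter_task, 0);
}

/* Woken waiter side: */
static int example_waiter_side(struct rt_mutex *pi_mutex,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *rt_waiter)
{
        return rt_mutex_finish_proxy_lock(pi_mutex, to, rt_waiter, 0);
}
#endif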

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, NULL if none. The hrtimer should
 *                      already have been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}