1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/net/sunrpc/sched.c
4  *
5  * Scheduling for synchronous and asynchronous RPC requests.
6  *
7  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
8  *
9  * TCP NFS related read + write fixes
10  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11  */
12
13 #include <linux/module.h>
14
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/mempool.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/mutex.h>
22 #include <linux/freezer.h>
23 #include <linux/sched/mm.h>
24
25 #include <linux/sunrpc/clnt.h>
26 #include <linux/sunrpc/metrics.h>
27
28 #include "sunrpc.h"
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/sunrpc.h>
32
33 /*
34  * RPC slabs and memory pools
35  */
36 #define RPC_BUFFER_MAXSIZE      (2048)
37 #define RPC_BUFFER_POOLSIZE     (8)
38 #define RPC_TASK_POOLSIZE       (8)
39 static struct kmem_cache        *rpc_task_slabp __read_mostly;
40 static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
41 static mempool_t        *rpc_task_mempool __read_mostly;
42 static mempool_t        *rpc_buffer_mempool __read_mostly;
43
44 static void                     rpc_async_schedule(struct work_struct *);
45 static void                      rpc_release_task(struct rpc_task *task);
46 static void __rpc_queue_timer_fn(struct work_struct *);
47
48 /*
49  * RPC tasks sit here while waiting for conditions to improve.
50  */
51 static struct rpc_wait_queue delay_queue;
52
53 /*
54  * rpciod-related stuff
55  */
56 struct workqueue_struct *rpciod_workqueue __read_mostly;
57 struct workqueue_struct *xprtiod_workqueue __read_mostly;
58 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
59
60 unsigned long
61 rpc_task_timeout(const struct rpc_task *task)
62 {
63         unsigned long timeout = READ_ONCE(task->tk_timeout);
64
65         if (timeout != 0) {
66                 unsigned long now = jiffies;
67                 if (time_before(now, timeout))
68                         return timeout - now;
69         }
70         return 0;
71 }
72 EXPORT_SYMBOL_GPL(rpc_task_timeout);
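
/*
 * Example (illustrative sketch, not part of this file): a caller can use
 * rpc_task_timeout() to find out how many jiffies remain before a queued
 * task's timer fires; the helper name below is hypothetical.
 *
 *	static void example_log_time_left(const struct rpc_task *task)
 *	{
 *		unsigned long left = rpc_task_timeout(task);
 *
 *		if (left)
 *			pr_debug("task expires in %u ms\n",
 *				 jiffies_to_msecs(left));
 *	}
 */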
73
74 /*
75  * Disable the timer for a given RPC task. Should be called with
76  * queue->lock held in order to avoid races with the queue's
77  * delayed-work handler, __rpc_queue_timer_fn().
78  */
79 static void
80 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
81 {
82         if (list_empty(&task->u.tk_wait.timer_list))
83                 return;
84         task->tk_timeout = 0;
85         list_del(&task->u.tk_wait.timer_list);
86         if (list_empty(&queue->timer_list.list))
87                 cancel_delayed_work(&queue->timer_list.dwork);
88 }
89
90 static void
91 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
92 {
93         unsigned long now = jiffies;
94         queue->timer_list.expires = expires;
95         if (time_before_eq(expires, now))
96                 expires = 0;
97         else
98                 expires -= now;
99         mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
100 }
101
102 /*
103  * Set up a timer for the current task.
104  */
105 static void
106 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
107                 unsigned long timeout)
108 {
109         task->tk_timeout = timeout;
110         if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
111                 rpc_set_queue_timer(queue, timeout);
112         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
113 }
114
115 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
116 {
117         if (queue->priority != priority) {
118                 queue->priority = priority;
119                 queue->nr = 1U << priority;
120         }
121 }
122
123 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
124 {
125         rpc_set_waitqueue_priority(queue, queue->maxpriority);
126 }
127
128 /*
129  * Add a request to a queue list
130  */
131 static void
132 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
133 {
134         struct rpc_task *t;
135
136         list_for_each_entry(t, q, u.tk_wait.list) {
137                 if (t->tk_owner == task->tk_owner) {
138                         list_add_tail(&task->u.tk_wait.links,
139                                         &t->u.tk_wait.links);
140                         /* Cache the queue head in task->u.tk_wait.list */
141                         task->u.tk_wait.list.next = q;
142                         task->u.tk_wait.list.prev = NULL;
143                         return;
144                 }
145         }
146         INIT_LIST_HEAD(&task->u.tk_wait.links);
147         list_add_tail(&task->u.tk_wait.list, q);
148 }
149
150 /*
151  * Remove request from a queue list
152  */
153 static void
154 __rpc_list_dequeue_task(struct rpc_task *task)
155 {
156         struct list_head *q;
157         struct rpc_task *t;
158
159         if (task->u.tk_wait.list.prev == NULL) {
160                 list_del(&task->u.tk_wait.links);
161                 return;
162         }
163         if (!list_empty(&task->u.tk_wait.links)) {
164                 t = list_first_entry(&task->u.tk_wait.links,
165                                 struct rpc_task,
166                                 u.tk_wait.links);
167                 /* Assume __rpc_list_enqueue_task() cached the queue head */
168                 q = t->u.tk_wait.list.next;
169                 list_add_tail(&t->u.tk_wait.list, q);
170                 list_del(&task->u.tk_wait.links);
171         }
172         list_del(&task->u.tk_wait.list);
173 }
174
175 /*
176  * Add new request to a priority queue.
177  */
178 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
179                 struct rpc_task *task,
180                 unsigned char queue_priority)
181 {
182         if (unlikely(queue_priority > queue->maxpriority))
183                 queue_priority = queue->maxpriority;
184         __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
185 }
186
187 /*
188  * Add new request to wait queue.
189  */
190 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
191                 struct rpc_task *task,
192                 unsigned char queue_priority)
193 {
194         INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
195         if (RPC_IS_PRIORITY(queue))
196                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
197         else
198                 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
199         task->tk_waitqueue = queue;
200         queue->qlen++;
201         /* barrier matches the read in rpc_wake_up_task_queue_locked() */
202         smp_wmb();
203         rpc_set_queued(task);
204 }
205
206 /*
207  * Remove request from a priority queue.
208  */
209 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
210 {
211         __rpc_list_dequeue_task(task);
212 }
213
214 /*
215  * Remove request from queue.
216  * Note: must be called with spin lock held.
217  */
218 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
219 {
220         __rpc_disable_timer(queue, task);
221         if (RPC_IS_PRIORITY(queue))
222                 __rpc_remove_wait_queue_priority(task);
223         else
224                 list_del(&task->u.tk_wait.list);
225         queue->qlen--;
226 }
227
228 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
229 {
230         int i;
231
232         spin_lock_init(&queue->lock);
233         for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
234                 INIT_LIST_HEAD(&queue->tasks[i]);
235         queue->maxpriority = nr_queues - 1;
236         rpc_reset_waitqueue_priority(queue);
237         queue->qlen = 0;
238         queue->timer_list.expires = 0;
239         INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
240         INIT_LIST_HEAD(&queue->timer_list.list);
241         rpc_assign_waitqueue_name(queue, qname);
242 }
243
244 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
245 {
246         __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
247 }
248 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
249
250 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
251 {
252         __rpc_init_priority_wait_queue(queue, qname, 1);
253 }
254 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
255
256 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
257 {
258         cancel_delayed_work_sync(&queue->timer_list.dwork);
259 }
260 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
261
262 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
263 {
264         freezable_schedule_unsafe();
265         if (signal_pending_state(mode, current))
266                 return -ERESTARTSYS;
267         return 0;
268 }
269
270 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
271 static void rpc_task_set_debuginfo(struct rpc_task *task)
272 {
273         static atomic_t rpc_pid;
274
275         task->tk_pid = atomic_inc_return(&rpc_pid);
276 }
277 #else
278 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
279 {
280 }
281 #endif
282
283 static void rpc_set_active(struct rpc_task *task)
284 {
285         rpc_task_set_debuginfo(task);
286         set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
287         trace_rpc_task_begin(task, NULL);
288 }
289
290 /*
291  * Mark an RPC call as having completed by clearing the 'active' bit
292  * and then waking up all tasks that were sleeping.
293  */
294 static int rpc_complete_task(struct rpc_task *task)
295 {
296         void *m = &task->tk_runstate;
297         wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
298         struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
299         unsigned long flags;
300         int ret;
301
302         trace_rpc_task_complete(task, NULL);
303
304         spin_lock_irqsave(&wq->lock, flags);
305         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
306         ret = atomic_dec_and_test(&task->tk_count);
307         if (waitqueue_active(wq))
308                 __wake_up_locked_key(wq, TASK_NORMAL, &k);
309         spin_unlock_irqrestore(&wq->lock, flags);
310         return ret;
311 }
312
313 /*
314  * Allow callers to wait for completion of an RPC call
315  *
316  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
317  * to enforce taking of the wq->lock and hence avoid races with
318  * rpc_complete_task().
319  */
320 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
321 {
322         if (action == NULL)
323                 action = rpc_wait_bit_killable;
324         return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
325                         action, TASK_KILLABLE);
326 }
327 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
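
/*
 * Example (illustrative sketch, not part of this file): a caller that has
 * kicked off a task and holds a reference can block until the task has
 * completed and then drop that reference.  rpc_wait_for_completion_task()
 * is the sched.h wrapper that passes a NULL action.
 *
 *	static int example_wait_and_put(struct rpc_task *task)
 *	{
 *		int status = rpc_wait_for_completion_task(task);
 *
 *		if (status == 0)
 *			status = task->tk_status;
 *		rpc_put_task(task);
 *		return status;
 *	}
 */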
328
329 /*
330  * Make an RPC task runnable.
331  *
332  * Note: If the task is ASYNC, and is being made runnable after sitting on an
333  * rpc_wait_queue, this must be called with the queue spinlock held to protect
334  * the wait queue operation.
335  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
336  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
337  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
338  * the RPC_TASK_RUNNING flag.
339  */
340 static void rpc_make_runnable(struct workqueue_struct *wq,
341                 struct rpc_task *task)
342 {
343         bool need_wakeup = !rpc_test_and_set_running(task);
344
345         rpc_clear_queued(task);
346         if (!need_wakeup)
347                 return;
348         if (RPC_IS_ASYNC(task)) {
349                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
350                 queue_work(wq, &task->u.tk_work);
351         } else
352                 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
353 }
354
355 /*
356  * Prepare for sleeping on a wait queue.
357  * By always appending tasks to the list we ensure FIFO behavior.
358  * NB: An RPC task will only receive interrupt-driven events as long
359  * as it's on a wait queue.
360  */
361 static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
362                 struct rpc_task *task,
363                 unsigned char queue_priority)
364 {
365         trace_rpc_task_sleep(task, q);
366
367         __rpc_add_wait_queue(q, task, queue_priority);
368 }
369
370 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
371                 struct rpc_task *task,
372                 unsigned char queue_priority)
373 {
374         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
375                 return;
376         __rpc_do_sleep_on_priority(q, task, queue_priority);
377 }
378
379 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
380                 struct rpc_task *task, unsigned long timeout,
381                 unsigned char queue_priority)
382 {
383         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
384                 return;
385         if (time_is_after_jiffies(timeout)) {
386                 __rpc_do_sleep_on_priority(q, task, queue_priority);
387                 __rpc_add_timer(q, task, timeout);
388         } else
389                 task->tk_status = -ETIMEDOUT;
390 }
391
392 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
393 {
394         if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
395                 task->tk_callback = action;
396 }
397
398 static bool rpc_sleep_check_activated(struct rpc_task *task)
399 {
400         /* We shouldn't ever put an inactive task to sleep */
401         if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
402                 task->tk_status = -EIO;
403                 rpc_put_task_async(task);
404                 return false;
405         }
406         return true;
407 }
408
409 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
410                                 rpc_action action, unsigned long timeout)
411 {
412         if (!rpc_sleep_check_activated(task))
413                 return;
414
415         rpc_set_tk_callback(task, action);
416
417         /*
418          * Protect the queue operations.
419          */
420         spin_lock(&q->lock);
421         __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
422         spin_unlock(&q->lock);
423 }
424 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
425
426 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
427                                 rpc_action action)
428 {
429         if (!rpc_sleep_check_activated(task))
430                 return;
431
432         rpc_set_tk_callback(task, action);
433
434         WARN_ON_ONCE(task->tk_timeout != 0);
435         /*
436          * Protect the queue operations.
437          */
438         spin_lock(&q->lock);
439         __rpc_sleep_on_priority(q, task, task->tk_priority);
440         spin_unlock(&q->lock);
441 }
442 EXPORT_SYMBOL_GPL(rpc_sleep_on);
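
/*
 * Example (illustrative sketch, not part of this file): a state-machine
 * step that parks the task on a wait queue until a resource is available,
 * paired with the wake-up issued by whoever releases the resource.  The
 * queue, predicate and action names are hypothetical.
 *
 *	static void example_wait_for_slot(struct rpc_task *task)
 *	{
 *		task->tk_action = example_use_slot;
 *		if (!example_slot_available())
 *			rpc_sleep_on(&example_slot_queue, task, NULL);
 *	}
 *
 *	static void example_release_slot(void)
 *	{
 *		rpc_wake_up_next(&example_slot_queue);
 *	}
 */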
443
444 void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
445                 struct rpc_task *task, unsigned long timeout, int priority)
446 {
447         if (!rpc_sleep_check_activated(task))
448                 return;
449
450         priority -= RPC_PRIORITY_LOW;
451         /*
452          * Protect the queue operations.
453          */
454         spin_lock(&q->lock);
455         __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
456         spin_unlock(&q->lock);
457 }
458 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
459
460 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
461                 int priority)
462 {
463         if (!rpc_sleep_check_activated(task))
464                 return;
465
466         WARN_ON_ONCE(task->tk_timeout != 0);
467         priority -= RPC_PRIORITY_LOW;
468         /*
469          * Protect the queue operations.
470          */
471         spin_lock(&q->lock);
472         __rpc_sleep_on_priority(q, task, priority);
473         spin_unlock(&q->lock);
474 }
475 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
476
477 /**
478  * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
479  * @wq: workqueue on which to run task
480  * @queue: wait queue
481  * @task: task to be woken up
482  *
483  * Caller must hold queue->lock; the queued flag is cleared by rpc_make_runnable().
484  */
485 static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
486                 struct rpc_wait_queue *queue,
487                 struct rpc_task *task)
488 {
489         /* Has the task been executed yet? If not, we cannot wake it up! */
490         if (!RPC_IS_ACTIVATED(task)) {
491                 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
492                 return;
493         }
494
495         trace_rpc_task_wakeup(task, queue);
496
497         __rpc_remove_wait_queue(queue, task);
498
499         rpc_make_runnable(wq, task);
500 }
501
502 /*
503  * Wake up a queued task while the queue lock is being held
504  */
505 static struct rpc_task *
506 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
507                 struct rpc_wait_queue *queue, struct rpc_task *task,
508                 bool (*action)(struct rpc_task *, void *), void *data)
509 {
510         if (RPC_IS_QUEUED(task)) {
511                 smp_rmb();
512                 if (task->tk_waitqueue == queue) {
513                         if (action == NULL || action(task, data)) {
514                                 __rpc_do_wake_up_task_on_wq(wq, queue, task);
515                                 return task;
516                         }
517                 }
518         }
519         return NULL;
520 }
521
522 /*
523  * Wake up a queued task while the queue lock is being held
524  */
525 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
526                                           struct rpc_task *task)
527 {
528         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
529                                                    task, NULL, NULL);
530 }
531
532 /*
533  * Wake up a task on a specific queue
534  */
535 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
536 {
537         if (!RPC_IS_QUEUED(task))
538                 return;
539         spin_lock(&queue->lock);
540         rpc_wake_up_task_queue_locked(queue, task);
541         spin_unlock(&queue->lock);
542 }
543 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
544
545 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
546 {
547         task->tk_status = *(int *)status;
548         return true;
549 }
550
551 static void
552 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
553                 struct rpc_task *task, int status)
554 {
555         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
556                         task, rpc_task_action_set_status, &status);
557 }
558
559 /**
560  * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
561  * @queue: pointer to rpc_wait_queue
562  * @task: pointer to rpc_task
563  * @status: integer error value
564  *
565  * If @task is queued on @queue, then it is woken up, and @task->tk_status is
566  * set to the value of @status.
567  */
568 void
569 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
570                 struct rpc_task *task, int status)
571 {
572         if (!RPC_IS_QUEUED(task))
573                 return;
574         spin_lock(&queue->lock);
575         rpc_wake_up_task_queue_set_status_locked(queue, task, status);
576         spin_unlock(&queue->lock);
577 }
578
579 /*
580  * Wake up the next task on a priority queue.
581  */
582 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
583 {
584         struct list_head *q;
585         struct rpc_task *task;
586
587         /*
588          * Service the privileged queue.
589          */
590         q = &queue->tasks[RPC_NR_PRIORITY - 1];
591         if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
592                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
593                 goto out;
594         }
595
596         /*
597          * Service a batch of tasks from a single owner.
598          */
599         q = &queue->tasks[queue->priority];
600         if (!list_empty(q) && queue->nr) {
601                 queue->nr--;
602                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
603                 goto out;
604         }
605
606         /*
607          * Service the next queue.
608          */
609         do {
610                 if (q == &queue->tasks[0])
611                         q = &queue->tasks[queue->maxpriority];
612                 else
613                         q = q - 1;
614                 if (!list_empty(q)) {
615                         task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
616                         goto new_queue;
617                 }
618         } while (q != &queue->tasks[queue->priority]);
619
620         rpc_reset_waitqueue_priority(queue);
621         return NULL;
622
623 new_queue:
624         rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
625 out:
626         return task;
627 }
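
/*
 * Worked example (illustrative): with maxpriority == 3 and
 * queue->priority == 1, the function above first checks tasks[3] (the
 * privileged bucket), then serves up to queue->nr entries from tasks[1],
 * and if that bucket is empty or the batch is used up it scans tasks[0],
 * tasks[3], tasks[2] and finally tasks[1] again before resetting the
 * queue priority and returning NULL.
 */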
628
629 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
630 {
631         if (RPC_IS_PRIORITY(queue))
632                 return __rpc_find_next_queued_priority(queue);
633         if (!list_empty(&queue->tasks[0]))
634                 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
635         return NULL;
636 }
637
638 /*
639  * Wake up the first task on the wait queue.
640  */
641 struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
642                 struct rpc_wait_queue *queue,
643                 bool (*func)(struct rpc_task *, void *), void *data)
644 {
645         struct rpc_task *task = NULL;
646
647         spin_lock(&queue->lock);
648         task = __rpc_find_next_queued(queue);
649         if (task != NULL)
650                 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
651                                 task, func, data);
652         spin_unlock(&queue->lock);
653
654         return task;
655 }
656
657 /*
658  * Wake up the first task on the wait queue.
659  */
660 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
661                 bool (*func)(struct rpc_task *, void *), void *data)
662 {
663         return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
664 }
665 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
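
/*
 * Example (illustrative sketch, not part of this file): the @func argument
 * lets a caller wake only tasks that satisfy some condition.  The predicate
 * below, which matches on the task owner, is hypothetical.
 *
 *	static bool example_match_owner(struct rpc_task *task, void *data)
 *	{
 *		return task->tk_owner == *(pid_t *)data;
 *	}
 *
 *	static void example_wake_one_for(struct rpc_wait_queue *q, pid_t owner)
 *	{
 *		rpc_wake_up_first(q, example_match_owner, &owner);
 *	}
 */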
666
667 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
668 {
669         return true;
670 }
671
672 /*
673  * Wake up the next task on the wait queue.
674  */
675 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
676 {
677         return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
678 }
679 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
680
681 /**
682  * rpc_wake_up_locked - wake up all rpc_tasks
683  * @queue: rpc_wait_queue on which the tasks are sleeping
684  *
685  */
686 static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
687 {
688         struct rpc_task *task;
689
690         for (;;) {
691                 task = __rpc_find_next_queued(queue);
692                 if (task == NULL)
693                         break;
694                 rpc_wake_up_task_queue_locked(queue, task);
695         }
696 }
697
698 /**
699  * rpc_wake_up - wake up all rpc_tasks
700  * @queue: rpc_wait_queue on which the tasks are sleeping
701  *
702  * Grabs queue->lock
703  */
704 void rpc_wake_up(struct rpc_wait_queue *queue)
705 {
706         spin_lock(&queue->lock);
707         rpc_wake_up_locked(queue);
708         spin_unlock(&queue->lock);
709 }
710 EXPORT_SYMBOL_GPL(rpc_wake_up);
711
712 /**
713  * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
714  * @queue: rpc_wait_queue on which the tasks are sleeping
715  * @status: status value to set
716  */
717 static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
718 {
719         struct rpc_task *task;
720
721         for (;;) {
722                 task = __rpc_find_next_queued(queue);
723                 if (task == NULL)
724                         break;
725                 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
726         }
727 }
728
729 /**
730  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
731  * @queue: rpc_wait_queue on which the tasks are sleeping
732  * @status: status value to set
733  *
734  * Grabs queue->lock
735  */
736 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
737 {
738         spin_lock(&queue->lock);
739         rpc_wake_up_status_locked(queue, status);
740         spin_unlock(&queue->lock);
741 }
742 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
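
/*
 * Example (illustrative sketch, not part of this file): a transport being
 * torn down can fail every task sleeping on one of its queues in a single
 * call; the wrapper name is hypothetical.
 *
 *	static void example_fail_all_waiters(struct rpc_wait_queue *q)
 *	{
 *		rpc_wake_up_status(q, -ENOTCONN);
 *	}
 */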
743
744 static void __rpc_queue_timer_fn(struct work_struct *work)
745 {
746         struct rpc_wait_queue *queue = container_of(work,
747                         struct rpc_wait_queue,
748                         timer_list.dwork.work);
749         struct rpc_task *task, *n;
750         unsigned long expires, now, timeo;
751
752         spin_lock(&queue->lock);
753         expires = now = jiffies;
754         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
755                 timeo = task->tk_timeout;
756                 if (time_after_eq(now, timeo)) {
757                         trace_rpc_task_timeout(task, task->tk_action);
758                         task->tk_status = -ETIMEDOUT;
759                         rpc_wake_up_task_queue_locked(queue, task);
760                         continue;
761                 }
762                 if (expires == now || time_after(expires, timeo))
763                         expires = timeo;
764         }
765         if (!list_empty(&queue->timer_list.list))
766                 rpc_set_queue_timer(queue, expires);
767         spin_unlock(&queue->lock);
768 }
769
770 static void __rpc_atrun(struct rpc_task *task)
771 {
772         if (task->tk_status == -ETIMEDOUT)
773                 task->tk_status = 0;
774 }
775
776 /*
777  * Run a task at a later time
778  */
779 void rpc_delay(struct rpc_task *task, unsigned long delay)
780 {
781         rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
782 }
783 EXPORT_SYMBOL_GPL(rpc_delay);
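
/*
 * Example (illustrative sketch, not part of this file): a completion
 * callback that treats -EAGAIN as transient and reruns the call after a
 * quarter of a second rather than failing it.  rpc_restart_call() is the
 * clnt.c helper that rewinds the state machine; the callback itself is
 * hypothetical.
 *
 *	static void example_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status == -EAGAIN) {
 *			task->tk_status = 0;
 *			rpc_delay(task, HZ >> 2);
 *			rpc_restart_call(task);
 *		}
 *	}
 */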
784
785 /*
786  * Helper to call task->tk_ops->rpc_call_prepare
787  */
788 void rpc_prepare_task(struct rpc_task *task)
789 {
790         task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
791 }
792
793 static void
794 rpc_init_task_statistics(struct rpc_task *task)
795 {
796         /* Initialize retry counters */
797         task->tk_garb_retry = 2;
798         task->tk_cred_retry = 2;
799         task->tk_rebind_retry = 2;
800
801         /* starting timestamp */
802         task->tk_start = ktime_get();
803 }
804
805 static void
806 rpc_reset_task_statistics(struct rpc_task *task)
807 {
808         task->tk_timeouts = 0;
809         task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
810         rpc_init_task_statistics(task);
811 }
812
813 /*
814  * Helper that calls task->tk_ops->rpc_call_done if it exists
815  */
816 void rpc_exit_task(struct rpc_task *task)
817 {
818         trace_rpc_task_end(task, task->tk_action);
819         task->tk_action = NULL;
820         if (task->tk_ops->rpc_count_stats)
821                 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
822         else if (task->tk_client)
823                 rpc_count_iostats(task, task->tk_client->cl_metrics);
824         if (task->tk_ops->rpc_call_done != NULL) {
825                 task->tk_ops->rpc_call_done(task, task->tk_calldata);
826                 if (task->tk_action != NULL) {
827                         /* Always release the RPC slot and buffer memory */
828                         xprt_release(task);
829                         rpc_reset_task_statistics(task);
830                 }
831         }
832 }
833
834 void rpc_signal_task(struct rpc_task *task)
835 {
836         struct rpc_wait_queue *queue;
837
838         if (!RPC_IS_ACTIVATED(task))
839                 return;
840
841         trace_rpc_task_signalled(task, task->tk_action);
842         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
843         smp_mb__after_atomic();
844         queue = READ_ONCE(task->tk_waitqueue);
845         if (queue)
846                 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
847 }
848
849 void rpc_exit(struct rpc_task *task, int status)
850 {
851         task->tk_status = status;
852         task->tk_action = rpc_exit_task;
853         rpc_wake_up_queued_task(task->tk_waitqueue, task);
854 }
855 EXPORT_SYMBOL_GPL(rpc_exit);
856
857 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
858 {
859         if (ops->rpc_release != NULL)
860                 ops->rpc_release(calldata);
861 }
862
863 /*
864  * This is the RPC `scheduler' (or rather, the finite state machine).
865  */
866 static void __rpc_execute(struct rpc_task *task)
867 {
868         struct rpc_wait_queue *queue;
869         int task_is_async = RPC_IS_ASYNC(task);
870         int status = 0;
871
872         WARN_ON_ONCE(RPC_IS_QUEUED(task));
873         if (RPC_IS_QUEUED(task))
874                 return;
875
876         for (;;) {
877                 void (*do_action)(struct rpc_task *);
878
879                 /*
880                  * Perform the next FSM step or a pending callback.
881                  *
882                  * tk_action may be NULL if the task has been killed.
883                  * In particular, note that rpc_killall_tasks may
884                  * do this at any time, so beware when dereferencing.
885                  */
886                 do_action = task->tk_action;
887                 if (task->tk_callback) {
888                         do_action = task->tk_callback;
889                         task->tk_callback = NULL;
890                 }
891                 if (!do_action)
892                         break;
893                 trace_rpc_task_run_action(task, do_action);
894                 do_action(task);
895
896                 /*
897                  * Lockless check for whether task is sleeping or not.
898                  */
899                 if (!RPC_IS_QUEUED(task))
900                         continue;
901
902                 /*
903                  * Signalled tasks should exit rather than sleep.
904                  */
905                 if (RPC_SIGNALLED(task)) {
906                         task->tk_rpc_status = -ERESTARTSYS;
907                         rpc_exit(task, -ERESTARTSYS);
908                 }
909
910                 /*
911                  * The queue->lock protects against races with
912                  * rpc_make_runnable().
913                  *
914                  * Note that once we clear RPC_TASK_RUNNING on an asynchronous
915                  * rpc_task, rpc_make_runnable() can assign it to a
916                  * different workqueue. We therefore cannot assume that the
917                  * rpc_task pointer can still safely be dereferenced.
918                  */
919                 queue = task->tk_waitqueue;
920                 spin_lock(&queue->lock);
921                 if (!RPC_IS_QUEUED(task)) {
922                         spin_unlock(&queue->lock);
923                         continue;
924                 }
925                 rpc_clear_running(task);
926                 spin_unlock(&queue->lock);
927                 if (task_is_async)
928                         return;
929
930                 /* sync task: sleep here */
931                 trace_rpc_task_sync_sleep(task, task->tk_action);
932                 status = out_of_line_wait_on_bit(&task->tk_runstate,
933                                 RPC_TASK_QUEUED, rpc_wait_bit_killable,
934                                 TASK_KILLABLE);
935                 if (status < 0) {
936                         /*
937                          * When a sync task receives a signal, it exits with
938                          * -ERESTARTSYS. In order to catch any callbacks that
939                          * clean up after sleeping on some queue, we don't
940                          * break the loop here, but go around once more.
941                          */
942                         trace_rpc_task_signalled(task, task->tk_action);
943                         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
944                         task->tk_rpc_status = -ERESTARTSYS;
945                         rpc_exit(task, -ERESTARTSYS);
946                 }
947                 trace_rpc_task_sync_wake(task, task->tk_action);
948         }
949
950         /* Release all resources associated with the task */
951         rpc_release_task(task);
952 }
953
954 /*
955  * User-visible entry point to the scheduler.
956  *
957  * This may be called recursively if e.g. an async NFS task updates
958  * the attributes and finds that dirty pages must be flushed.
959  * NOTE: Upon exit of this function the task is guaranteed to be
960  *       released. In particular note that rpc_release_task() will have
961  *       been called, so your task memory may have been freed.
962  */
963 void rpc_execute(struct rpc_task *task)
964 {
965         bool is_async = RPC_IS_ASYNC(task);
966
967         rpc_set_active(task);
968         rpc_make_runnable(rpciod_workqueue, task);
969         if (!is_async) {
970                 unsigned int pflags = memalloc_nofs_save();
971                 __rpc_execute(task);
972                 memalloc_nofs_restore(pflags);
973         }
974 }
975
976 static void rpc_async_schedule(struct work_struct *work)
977 {
978         unsigned int pflags = memalloc_nofs_save();
979
980         __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
981         memalloc_nofs_restore(pflags);
982 }
983
984 /**
985  * rpc_malloc - allocate RPC buffer resources
986  * @task: RPC task
987  *
988  * A single memory region is allocated, which is split between the
989  * RPC call and RPC reply that this task is being used for. When
990  * this RPC is retired, the memory is released by calling rpc_free.
991  *
992  * To prevent rpciod from hanging, this allocator never sleeps for
993  * asynchronous tasks, returning -ENOMEM and suppressing allocation
994  * warnings if the request cannot be serviced immediately. The caller
995  * can arrange to sleep in a way that is safe for rpciod.
996  *
997  * Most requests are 'small' (under 2KiB) and can be serviced from a
998  * mempool, ensuring that NFS reads and writes can always proceed,
999  * and that there is good locality of reference for these buffers.
1000  */
1001 int rpc_malloc(struct rpc_task *task)
1002 {
1003         struct rpc_rqst *rqst = task->tk_rqstp;
1004         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1005         struct rpc_buffer *buf;
1006         gfp_t gfp = GFP_NOFS;
1007
1008         if (RPC_IS_ASYNC(task))
1009                 gfp = GFP_NOWAIT | __GFP_NOWARN;
1010         if (RPC_IS_SWAPPER(task))
1011                 gfp |= __GFP_MEMALLOC;
1012
1013         size += sizeof(struct rpc_buffer);
1014         if (size <= RPC_BUFFER_MAXSIZE)
1015                 buf = mempool_alloc(rpc_buffer_mempool, gfp);
1016         else
1017                 buf = kmalloc(size, gfp);
1018
1019         if (!buf)
1020                 return -ENOMEM;
1021
1022         buf->len = size;
1023         rqst->rq_buffer = buf->data;
1024         rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1025         return 0;
1026 }
1027 EXPORT_SYMBOL_GPL(rpc_malloc);
1028
1029 /**
1030  * rpc_free - free RPC buffer resources allocated via rpc_malloc
1031  * @task: RPC task
1032  *
1033  */
1034 void rpc_free(struct rpc_task *task)
1035 {
1036         void *buffer = task->tk_rqstp->rq_buffer;
1037         size_t size;
1038         struct rpc_buffer *buf;
1039
1040         buf = container_of(buffer, struct rpc_buffer, data);
1041         size = buf->len;
1042
1043         if (size <= RPC_BUFFER_MAXSIZE)
1044                 mempool_free(buf, rpc_buffer_mempool);
1045         else
1046                 kfree(buf);
1047 }
1048 EXPORT_SYMBOL_GPL(rpc_free);
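
/*
 * Example (illustrative sketch, not part of this file): transports normally
 * plug these two helpers straight into their rpc_xprt_ops so that call and
 * reply buffers come out of the shared slab/mempool.  The ops table below
 * is an abbreviated, hypothetical fragment with the other callbacks elided.
 *
 *	static const struct rpc_xprt_ops example_xprt_ops = {
 *		.buf_alloc	= rpc_malloc,
 *		.buf_free	= rpc_free,
 *	};
 */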
1049
1050 /*
1051  * Creation and deletion of RPC task structures
1052  */
1053 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1054 {
1055         memset(task, 0, sizeof(*task));
1056         atomic_set(&task->tk_count, 1);
1057         task->tk_flags  = task_setup_data->flags;
1058         task->tk_ops = task_setup_data->callback_ops;
1059         task->tk_calldata = task_setup_data->callback_data;
1060         INIT_LIST_HEAD(&task->tk_task);
1061
1062         task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1063         task->tk_owner = current->tgid;
1064
1065         /* Initialize workqueue for async tasks */
1066         task->tk_workqueue = task_setup_data->workqueue;
1067
1068         task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1069                         xprt_get(task_setup_data->rpc_xprt));
1070
1071         task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1072
1073         if (task->tk_ops->rpc_call_prepare != NULL)
1074                 task->tk_action = rpc_prepare_task;
1075
1076         rpc_init_task_statistics(task);
1077 }
1078
1079 static struct rpc_task *
1080 rpc_alloc_task(void)
1081 {
1082         return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
1083 }
1084
1085 /*
1086  * Create a new task for the specified client.
1087  */
1088 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1089 {
1090         struct rpc_task *task = setup_data->task;
1091         unsigned short flags = 0;
1092
1093         if (task == NULL) {
1094                 task = rpc_alloc_task();
1095                 flags = RPC_TASK_DYNAMIC;
1096         }
1097
1098         rpc_init_task(task, setup_data);
1099         task->tk_flags |= flags;
1100         return task;
1101 }
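
/*
 * Example (illustrative sketch, not part of this file): callers normally go
 * through rpc_run_task() in clnt.c, which fills in a struct rpc_task_setup
 * along these lines before handing it to rpc_new_task() and rpc_execute().
 * The client, message, ops and calldata below are placeholders.
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &example_call_ops,
 *		.callback_data	= calldata,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_new_task(&setup);
 */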
1102
1103 /*
1104  * rpc_free_task - release rpc task and perform cleanups
1105  *
1106  * Note that we free up the rpc_task _after_ rpc_release_calldata()
1107  * in order to work around a workqueue dependency issue.
1108  *
1109  * Tejun Heo states:
1110  * "Workqueue currently considers two work items to be the same if they're
1111  * on the same address and won't execute them concurrently - ie. it
1112  * makes a work item which is queued again while being executed wait
1113  * for the previous execution to complete.
1114  *
1115  * If a work function frees the work item, and then waits for an event
1116  * which should be performed by another work item and *that* work item
1117  * recycles the freed work item, it can create a false dependency loop.
1118  * There really is no reliable way to detect this short of verifying
1119  * every memory free."
1120  *
1121  */
1122 static void rpc_free_task(struct rpc_task *task)
1123 {
1124         unsigned short tk_flags = task->tk_flags;
1125
1126         put_rpccred(task->tk_op_cred);
1127         rpc_release_calldata(task->tk_ops, task->tk_calldata);
1128
1129         if (tk_flags & RPC_TASK_DYNAMIC)
1130                 mempool_free(task, rpc_task_mempool);
1131 }
1132
1133 static void rpc_async_release(struct work_struct *work)
1134 {
1135         unsigned int pflags = memalloc_nofs_save();
1136
1137         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1138         memalloc_nofs_restore(pflags);
1139 }
1140
1141 static void rpc_release_resources_task(struct rpc_task *task)
1142 {
1143         xprt_release(task);
1144         if (task->tk_msg.rpc_cred) {
1145                 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1146                         put_cred(task->tk_msg.rpc_cred);
1147                 task->tk_msg.rpc_cred = NULL;
1148         }
1149         rpc_task_release_client(task);
1150 }
1151
1152 static void rpc_final_put_task(struct rpc_task *task,
1153                 struct workqueue_struct *q)
1154 {
1155         if (q != NULL) {
1156                 INIT_WORK(&task->u.tk_work, rpc_async_release);
1157                 queue_work(q, &task->u.tk_work);
1158         } else
1159                 rpc_free_task(task);
1160 }
1161
1162 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1163 {
1164         if (atomic_dec_and_test(&task->tk_count)) {
1165                 rpc_release_resources_task(task);
1166                 rpc_final_put_task(task, q);
1167         }
1168 }
1169
1170 void rpc_put_task(struct rpc_task *task)
1171 {
1172         rpc_do_put_task(task, NULL);
1173 }
1174 EXPORT_SYMBOL_GPL(rpc_put_task);
1175
1176 void rpc_put_task_async(struct rpc_task *task)
1177 {
1178         rpc_do_put_task(task, task->tk_workqueue);
1179 }
1180 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1181
1182 static void rpc_release_task(struct rpc_task *task)
1183 {
1184         WARN_ON_ONCE(RPC_IS_QUEUED(task));
1185
1186         rpc_release_resources_task(task);
1187
1188         /*
1189          * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1190          * so it should be safe to use task->tk_count as a test for whether
1191          * or not any other processes still hold references to our rpc_task.
1192          */
1193         if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1194                 /* Wake up anyone who may be waiting for task completion */
1195                 if (!rpc_complete_task(task))
1196                         return;
1197         } else {
1198                 if (!atomic_dec_and_test(&task->tk_count))
1199                         return;
1200         }
1201         rpc_final_put_task(task, task->tk_workqueue);
1202 }
1203
1204 int rpciod_up(void)
1205 {
1206         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1207 }
1208
1209 void rpciod_down(void)
1210 {
1211         module_put(THIS_MODULE);
1212 }
1213
1214 /*
1215  * Start up the rpciod workqueue.
1216  */
1217 static int rpciod_start(void)
1218 {
1219         struct workqueue_struct *wq;
1220
1221         /*
1222          * Create the rpciod thread and wait for it to start.
1223          */
1224         wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1225         if (!wq)
1226                 goto out_failed;
1227         rpciod_workqueue = wq;
1228         /* Note: highpri because network receive is latency sensitive */
1229         wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1230         if (!wq)
1231                 goto free_rpciod;
1232         xprtiod_workqueue = wq;
1233         return 1;
1234 free_rpciod:
1235         wq = rpciod_workqueue;
1236         rpciod_workqueue = NULL;
1237         destroy_workqueue(wq);
1238 out_failed:
1239         return 0;
1240 }
1241
1242 static void rpciod_stop(void)
1243 {
1244         struct workqueue_struct *wq = NULL;
1245
1246         if (rpciod_workqueue == NULL)
1247                 return;
1248
1249         wq = rpciod_workqueue;
1250         rpciod_workqueue = NULL;
1251         destroy_workqueue(wq);
1252         wq = xprtiod_workqueue;
1253         xprtiod_workqueue = NULL;
1254         destroy_workqueue(wq);
1255 }
1256
1257 void
1258 rpc_destroy_mempool(void)
1259 {
1260         rpciod_stop();
1261         mempool_destroy(rpc_buffer_mempool);
1262         mempool_destroy(rpc_task_mempool);
1263         kmem_cache_destroy(rpc_task_slabp);
1264         kmem_cache_destroy(rpc_buffer_slabp);
1265         rpc_destroy_wait_queue(&delay_queue);
1266 }
1267
1268 int
1269 rpc_init_mempool(void)
1270 {
1271         /*
1272          * The following is not strictly a mempool initialisation,
1273          * but there is no harm in doing it here
1274          */
1275         rpc_init_wait_queue(&delay_queue, "delayq");
1276         if (!rpciod_start())
1277                 goto err_nomem;
1278
1279         rpc_task_slabp = kmem_cache_create("rpc_tasks",
1280                                              sizeof(struct rpc_task),
1281                                              0, SLAB_HWCACHE_ALIGN,
1282                                              NULL);
1283         if (!rpc_task_slabp)
1284                 goto err_nomem;
1285         rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1286                                              RPC_BUFFER_MAXSIZE,
1287                                              0, SLAB_HWCACHE_ALIGN,
1288                                              NULL);
1289         if (!rpc_buffer_slabp)
1290                 goto err_nomem;
1291         rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1292                                                     rpc_task_slabp);
1293         if (!rpc_task_mempool)
1294                 goto err_nomem;
1295         rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1296                                                       rpc_buffer_slabp);
1297         if (!rpc_buffer_mempool)
1298                 goto err_nomem;
1299         return 0;
1300 err_nomem:
1301         rpc_destroy_mempool();
1302         return -ENOMEM;
1303 }