1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/net/sunrpc/sched.c
4  *
5  * Scheduling for synchronous and asynchronous RPC requests.
6  *
7  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
8  *
9  * TCP NFS related read + write fixes
10  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11  */
12
13 #include <linux/module.h>
14
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/mempool.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/mutex.h>
22 #include <linux/freezer.h>
23 #include <linux/sched/mm.h>
24
25 #include <linux/sunrpc/clnt.h>
26 #include <linux/sunrpc/metrics.h>
27
28 #include "sunrpc.h"
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/sunrpc.h>
32
33 /*
34  * RPC slabs and memory pools
35  */
36 #define RPC_BUFFER_MAXSIZE      (2048)
37 #define RPC_BUFFER_POOLSIZE     (8)
38 #define RPC_TASK_POOLSIZE       (8)
39 static struct kmem_cache        *rpc_task_slabp __read_mostly;
40 static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
41 static mempool_t        *rpc_task_mempool __read_mostly;
42 static mempool_t        *rpc_buffer_mempool __read_mostly;
43
44 static void                     rpc_async_schedule(struct work_struct *);
45 static void                     rpc_release_task(struct rpc_task *task);
46 static void __rpc_queue_timer_fn(struct work_struct *);
47
48 /*
49  * RPC tasks sit here while waiting for conditions to improve.
50  */
51 static struct rpc_wait_queue delay_queue;
52
53 /*
54  * rpciod-related stuff
55  */
56 struct workqueue_struct *rpciod_workqueue __read_mostly;
57 struct workqueue_struct *xprtiod_workqueue __read_mostly;
58 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
59
60 gfp_t rpc_task_gfp_mask(void)
61 {
62         if (current->flags & PF_WQ_WORKER)
63                 return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
64         return GFP_KERNEL;
65 }
66 EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
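/*
 * Example of how this mask is typically consumed by allocations made on
 * behalf of a task (an illustrative sketch; the surrounding caller is
 * hypothetical):
 *
 *	gfp_t gfp = rpc_task_gfp_mask();
 *	void *p = kmalloc(len, gfp);
 *	if (!p)
 *		return -ENOMEM;
 *
 * When running from an rpciod worker (PF_WQ_WORKER is set), the returned
 * mask forbids hard retries and failure warnings, so the workqueue never
 * stalls waiting on memory reclaim.
 */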
67
68 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
69 {
70         if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
71                 return true;
72         return false;
73 }
74
75 unsigned long
76 rpc_task_timeout(const struct rpc_task *task)
77 {
78         unsigned long timeout = READ_ONCE(task->tk_timeout);
79
80         if (timeout != 0) {
81                 unsigned long now = jiffies;
82                 if (time_before(now, timeout))
83                         return timeout - now;
84         }
85         return 0;
86 }
87 EXPORT_SYMBOL_GPL(rpc_task_timeout);
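/*
 * Example of a caller acting on the remaining time (an illustrative
 * sketch; the helpers named below are hypothetical):
 *
 *	unsigned long left = rpc_task_timeout(task);
 *
 *	if (left != 0)
 *		foo_rearm_timer(jiffies + left);
 *	else
 *		foo_handle_expiry(task);
 *
 * A return value of 0 means either that no timeout was set or that it
 * has already expired.
 */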
88
89 /*
90  * Disable the timer for a given RPC task. Should be called while
91  * holding queue->lock in order to avoid races with
92  * __rpc_queue_timer_fn().
93  */
94 static void
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
96 {
97         if (list_empty(&task->u.tk_wait.timer_list))
98                 return;
99         task->tk_timeout = 0;
100         list_del(&task->u.tk_wait.timer_list);
101         if (list_empty(&queue->timer_list.list))
102                 cancel_delayed_work(&queue->timer_list.dwork);
103 }
104
105 static void
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
107 {
108         unsigned long now = jiffies;
109         queue->timer_list.expires = expires;
110         if (time_before_eq(expires, now))
111                 expires = 0;
112         else
113                 expires -= now;
114         mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
115 }
116
117 /*
118  * Set up a timer for the current task.
119  */
120 static void
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
122                 unsigned long timeout)
123 {
124         task->tk_timeout = timeout;
125         if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
126                 rpc_set_queue_timer(queue, timeout);
127         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
128 }
129
130 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
131 {
132         if (queue->priority != priority) {
133                 queue->priority = priority;
134                 queue->nr = 1U << priority;
135         }
136 }
137
138 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
139 {
140         rpc_set_waitqueue_priority(queue, queue->maxpriority);
141 }
142
143 /*
144  * Add a request to a queue list
145  */
146 static void
147 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
148 {
149         struct rpc_task *t;
150
151         list_for_each_entry(t, q, u.tk_wait.list) {
152                 if (t->tk_owner == task->tk_owner) {
153                         list_add_tail(&task->u.tk_wait.links,
154                                         &t->u.tk_wait.links);
155                         /* Cache the queue head in task->u.tk_wait.list */
156                         task->u.tk_wait.list.next = q;
157                         task->u.tk_wait.list.prev = NULL;
158                         return;
159                 }
160         }
161         INIT_LIST_HEAD(&task->u.tk_wait.links);
162         list_add_tail(&task->u.tk_wait.list, q);
163 }
164
165 /*
166  * Remove request from a queue list
167  */
168 static void
169 __rpc_list_dequeue_task(struct rpc_task *task)
170 {
171         struct list_head *q;
172         struct rpc_task *t;
173
174         if (task->u.tk_wait.list.prev == NULL) {
175                 list_del(&task->u.tk_wait.links);
176                 return;
177         }
178         if (!list_empty(&task->u.tk_wait.links)) {
179                 t = list_first_entry(&task->u.tk_wait.links,
180                                 struct rpc_task,
181                                 u.tk_wait.links);
182                 /* Assume __rpc_list_enqueue_task() cached the queue head */
183                 q = t->u.tk_wait.list.next;
184                 list_add_tail(&t->u.tk_wait.list, q);
185                 list_del(&task->u.tk_wait.links);
186         }
187         list_del(&task->u.tk_wait.list);
188 }
189
190 /*
191  * Add new request to a priority queue.
192  */
193 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
194                 struct rpc_task *task,
195                 unsigned char queue_priority)
196 {
197         if (unlikely(queue_priority > queue->maxpriority))
198                 queue_priority = queue->maxpriority;
199         __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
200 }
201
202 /*
203  * Add new request to wait queue.
204  */
205 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
206                 struct rpc_task *task,
207                 unsigned char queue_priority)
208 {
209         INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
210         if (RPC_IS_PRIORITY(queue))
211                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
212         else
213                 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
214         task->tk_waitqueue = queue;
215         queue->qlen++;
216         /* barrier matches the read in rpc_wake_up_task_queue_locked() */
217         smp_wmb();
218         rpc_set_queued(task);
219 }
220
221 /*
222  * Remove request from a priority queue.
223  */
224 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
225 {
226         __rpc_list_dequeue_task(task);
227 }
228
229 /*
230  * Remove request from queue.
231  * Note: must be called with spin lock held.
232  */
233 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
234 {
235         __rpc_disable_timer(queue, task);
236         if (RPC_IS_PRIORITY(queue))
237                 __rpc_remove_wait_queue_priority(task);
238         else
239                 list_del(&task->u.tk_wait.list);
240         queue->qlen--;
241 }
242
243 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
244 {
245         int i;
246
247         spin_lock_init(&queue->lock);
248         for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
249                 INIT_LIST_HEAD(&queue->tasks[i]);
250         queue->maxpriority = nr_queues - 1;
251         rpc_reset_waitqueue_priority(queue);
252         queue->qlen = 0;
253         queue->timer_list.expires = 0;
254         INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
255         INIT_LIST_HEAD(&queue->timer_list.list);
256         rpc_assign_waitqueue_name(queue, qname);
257 }
258
259 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
260 {
261         __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
262 }
263 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
264
265 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
266 {
267         __rpc_init_priority_wait_queue(queue, qname, 1);
268 }
269 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
270
271 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
272 {
273         cancel_delayed_work_sync(&queue->timer_list.dwork);
274 }
275 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
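/*
 * Typical wait queue lifecycle (an illustrative sketch; the "foo" names
 * are hypothetical):
 *
 *	static struct rpc_wait_queue foo_queue;
 *
 *	rpc_init_wait_queue(&foo_queue, "fooq");
 *	...
 *	rpc_sleep_on(&foo_queue, task, NULL);		from a tk_action callback
 *	...
 *	rpc_wake_up_queued_task(&foo_queue, task);	or rpc_wake_up()
 *	...
 *	rpc_destroy_wait_queue(&foo_queue);		cancels the queue timer work
 */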
276
277 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
278 {
279         schedule();
280         if (signal_pending_state(mode, current))
281                 return -ERESTARTSYS;
282         return 0;
283 }
284
285 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
286 static void rpc_task_set_debuginfo(struct rpc_task *task)
287 {
288         struct rpc_clnt *clnt = task->tk_client;
289
290         /* Might be a task carrying a reverse-direction operation */
291         if (!clnt) {
292                 static atomic_t rpc_pid;
293
294                 task->tk_pid = atomic_inc_return(&rpc_pid);
295                 return;
296         }
297
298         task->tk_pid = atomic_inc_return(&clnt->cl_pid);
299 }
300 #else
301 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
302 {
303 }
304 #endif
305
306 static void rpc_set_active(struct rpc_task *task)
307 {
308         rpc_task_set_debuginfo(task);
309         set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
310         trace_rpc_task_begin(task, NULL);
311 }
312
313 /*
314  * Mark an RPC call as having completed by clearing the 'active' bit
315  * and then waking up all tasks that were sleeping.
316  */
317 static int rpc_complete_task(struct rpc_task *task)
318 {
319         void *m = &task->tk_runstate;
320         wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
321         struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
322         unsigned long flags;
323         int ret;
324
325         trace_rpc_task_complete(task, NULL);
326
327         spin_lock_irqsave(&wq->lock, flags);
328         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
329         ret = atomic_dec_and_test(&task->tk_count);
330         if (waitqueue_active(wq))
331                 __wake_up_locked_key(wq, TASK_NORMAL, &k);
332         spin_unlock_irqrestore(&wq->lock, flags);
333         return ret;
334 }
335
336 /*
337  * Allow callers to wait for completion of an RPC call
338  *
339  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
340  * to enforce taking of the wq->lock and hence avoid races with
341  * rpc_complete_task().
342  */
343 int rpc_wait_for_completion_task(struct rpc_task *task)
344 {
345         return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
346                         rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
347 }
348 EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);
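/*
 * Example of a caller waiting for an RPC_TASK_ASYNC task to finish (an
 * illustrative sketch; rpc_run_task() lives in clnt.c and takes the extra
 * task reference that makes the final rpc_put_task() safe):
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	if (rpc_wait_for_completion_task(task) < 0)
 *		...a fatal signal interrupted the wait...
 *	status = task->tk_status;
 *	rpc_put_task(task);
 */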
349
350 /*
351  * Make an RPC task runnable.
352  *
353  * Note: If the task is ASYNC, and is being made runnable after sitting on an
354  * rpc_wait_queue, this must be called with the queue spinlock held to protect
355  * the wait queue operation.
356  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
357  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
358  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
359  * the RPC_TASK_RUNNING flag.
360  */
361 static void rpc_make_runnable(struct workqueue_struct *wq,
362                 struct rpc_task *task)
363 {
364         bool need_wakeup = !rpc_test_and_set_running(task);
365
366         rpc_clear_queued(task);
367         if (!need_wakeup)
368                 return;
369         if (RPC_IS_ASYNC(task)) {
370                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
371                 queue_work(wq, &task->u.tk_work);
372         } else
373                 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
374 }
375
376 /*
377  * Prepare for sleeping on a wait queue.
378  * By always appending tasks to the list we ensure FIFO behavior.
379  * NB: An RPC task will only receive interrupt-driven events as long
380  * as it's on a wait queue.
381  */
382 static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
383                 struct rpc_task *task,
384                 unsigned char queue_priority)
385 {
386         trace_rpc_task_sleep(task, q);
387
388         __rpc_add_wait_queue(q, task, queue_priority);
389 }
390
391 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
392                 struct rpc_task *task,
393                 unsigned char queue_priority)
394 {
395         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
396                 return;
397         __rpc_do_sleep_on_priority(q, task, queue_priority);
398 }
399
400 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
401                 struct rpc_task *task, unsigned long timeout,
402                 unsigned char queue_priority)
403 {
404         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
405                 return;
406         if (time_is_after_jiffies(timeout)) {
407                 __rpc_do_sleep_on_priority(q, task, queue_priority);
408                 __rpc_add_timer(q, task, timeout);
409         } else
410                 task->tk_status = -ETIMEDOUT;
411 }
412
413 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
414 {
415         if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
416                 task->tk_callback = action;
417 }
418
419 static bool rpc_sleep_check_activated(struct rpc_task *task)
420 {
421         /* We shouldn't ever put an inactive task to sleep */
422         if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
423                 task->tk_status = -EIO;
424                 rpc_put_task_async(task);
425                 return false;
426         }
427         return true;
428 }
429
430 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
431                                 rpc_action action, unsigned long timeout)
432 {
433         if (!rpc_sleep_check_activated(task))
434                 return;
435
436         rpc_set_tk_callback(task, action);
437
438         /*
439          * Protect the queue operations.
440          */
441         spin_lock(&q->lock);
442         __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
443         spin_unlock(&q->lock);
444 }
445 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
446
447 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
448                                 rpc_action action)
449 {
450         if (!rpc_sleep_check_activated(task))
451                 return;
452
453         rpc_set_tk_callback(task, action);
454
455         WARN_ON_ONCE(task->tk_timeout != 0);
456         /*
457          * Protect the queue operations.
458          */
459         spin_lock(&q->lock);
460         __rpc_sleep_on_priority(q, task, task->tk_priority);
461         spin_unlock(&q->lock);
462 }
463 EXPORT_SYMBOL_GPL(rpc_sleep_on);
464
465 void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
466                 struct rpc_task *task, unsigned long timeout, int priority)
467 {
468         if (!rpc_sleep_check_activated(task))
469                 return;
470
471         priority -= RPC_PRIORITY_LOW;
472         /*
473          * Protect the queue operations.
474          */
475         spin_lock(&q->lock);
476         __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
477         spin_unlock(&q->lock);
478 }
479 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
480
481 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
482                 int priority)
483 {
484         if (!rpc_sleep_check_activated(task))
485                 return;
486
487         WARN_ON_ONCE(task->tk_timeout != 0);
488         priority -= RPC_PRIORITY_LOW;
489         /*
490          * Protect the queue operations.
491          */
492         spin_lock(&q->lock);
493         __rpc_sleep_on_priority(q, task, priority);
494         spin_unlock(&q->lock);
495 }
496 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
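/*
 * Example of the sleep/wake pairing as seen from a task's state machine
 * (an illustrative sketch; the queue, the resource test and the actions
 * are hypothetical):
 *
 *	static void foo_action(struct rpc_task *task)
 *	{
 *		if (!foo_resource_available()) {
 *			rpc_sleep_on(&foo_queue, task, NULL);
 *			return;
 *		}
 *		task->tk_action = foo_next_action;
 *	}
 *
 * Some other context later calls rpc_wake_up_queued_task(&foo_queue, task)
 * (or rpc_wake_up(&foo_queue)), after which __rpc_execute() resumes the
 * task and re-runs its tk_action.
 */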
497
498 /**
499  * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
500  * @wq: workqueue on which to run task
501  * @queue: wait queue
502  * @task: task to be woken up
503  *
504  * Caller must hold queue->lock, and have cleared the task queued flag.
505  */
506 static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
507                 struct rpc_wait_queue *queue,
508                 struct rpc_task *task)
509 {
510         /* Has the task been executed yet? If not, we cannot wake it up! */
511         if (!RPC_IS_ACTIVATED(task)) {
512                 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
513                 return;
514         }
515
516         trace_rpc_task_wakeup(task, queue);
517
518         __rpc_remove_wait_queue(queue, task);
519
520         rpc_make_runnable(wq, task);
521 }
522
523 /*
524  * Wake up a queued task while the queue lock is being held
525  */
526 static struct rpc_task *
527 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
528                 struct rpc_wait_queue *queue, struct rpc_task *task,
529                 bool (*action)(struct rpc_task *, void *), void *data)
530 {
531         if (RPC_IS_QUEUED(task)) {
532                 smp_rmb();
533                 if (task->tk_waitqueue == queue) {
534                         if (action == NULL || action(task, data)) {
535                                 __rpc_do_wake_up_task_on_wq(wq, queue, task);
536                                 return task;
537                         }
538                 }
539         }
540         return NULL;
541 }
542
543 /*
544  * Wake up a queued task while the queue lock is being held
545  */
546 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
547                                           struct rpc_task *task)
548 {
549         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
550                                                    task, NULL, NULL);
551 }
552
553 /*
554  * Wake up a task on a specific queue
555  */
556 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
557 {
558         if (!RPC_IS_QUEUED(task))
559                 return;
560         spin_lock(&queue->lock);
561         rpc_wake_up_task_queue_locked(queue, task);
562         spin_unlock(&queue->lock);
563 }
564 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
565
566 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
567 {
568         task->tk_status = *(int *)status;
569         return true;
570 }
571
572 static void
573 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
574                 struct rpc_task *task, int status)
575 {
576         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
577                         task, rpc_task_action_set_status, &status);
578 }
579
580 /**
581  * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
582  * @queue: pointer to rpc_wait_queue
583  * @task: pointer to rpc_task
584  * @status: integer error value
585  *
586  * If @task is queued on @queue, then it is woken up, and @task->tk_status is
587  * set to the value of @status.
588  */
589 void
590 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
591                 struct rpc_task *task, int status)
592 {
593         if (!RPC_IS_QUEUED(task))
594                 return;
595         spin_lock(&queue->lock);
596         rpc_wake_up_task_queue_set_status_locked(queue, task, status);
597         spin_unlock(&queue->lock);
598 }
599
600 /*
601  * Wake up the next task on a priority queue.
602  */
603 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
604 {
605         struct list_head *q;
606         struct rpc_task *task;
607
608         /*
609          * Service the privileged queue.
610          */
611         q = &queue->tasks[RPC_NR_PRIORITY - 1];
612         if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
613                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
614                 goto out;
615         }
616
617         /*
618          * Service a batch of tasks from a single owner.
619          */
620         q = &queue->tasks[queue->priority];
621         if (!list_empty(q) && queue->nr) {
622                 queue->nr--;
623                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
624                 goto out;
625         }
626
627         /*
628          * Service the next queue.
629          */
630         do {
631                 if (q == &queue->tasks[0])
632                         q = &queue->tasks[queue->maxpriority];
633                 else
634                         q = q - 1;
635                 if (!list_empty(q)) {
636                         task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
637                         goto new_queue;
638                 }
639         } while (q != &queue->tasks[queue->priority]);
640
641         rpc_reset_waitqueue_priority(queue);
642         return NULL;
643
644 new_queue:
645         rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
646 out:
647         return task;
648 }
649
650 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
651 {
652         if (RPC_IS_PRIORITY(queue))
653                 return __rpc_find_next_queued_priority(queue);
654         if (!list_empty(&queue->tasks[0]))
655                 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
656         return NULL;
657 }
658
659 /*
660  * Wake up the first task on the wait queue.
661  */
662 struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
663                 struct rpc_wait_queue *queue,
664                 bool (*func)(struct rpc_task *, void *), void *data)
665 {
666         struct rpc_task *task = NULL;
667
668         spin_lock(&queue->lock);
669         task = __rpc_find_next_queued(queue);
670         if (task != NULL)
671                 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
672                                 task, func, data);
673         spin_unlock(&queue->lock);
674
675         return task;
676 }
677
678 /*
679  * Wake up the first task on the wait queue.
680  */
681 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
682                 bool (*func)(struct rpc_task *, void *), void *data)
683 {
684         return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
685 }
686 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
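/*
 * Example of a filtered wakeup (an illustrative sketch; the predicate and
 * its data are hypothetical):
 *
 *	static bool foo_may_run(struct rpc_task *task, void *data)
 *	{
 *		return foo_grant_slot(task, data);
 *	}
 *
 *	...
 *	rpc_wake_up_first(&foo_queue, foo_may_run, &foo_data);
 *
 * Only the first queued candidate is tested: if the predicate returns
 * true the task is dequeued and made runnable, otherwise it stays asleep
 * and NULL is returned.
 */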
687
688 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
689 {
690         return true;
691 }
692
693 /*
694  * Wake up the next task on the wait queue.
695  */
696 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
697 {
698         return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
699 }
700 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
701
702 /**
703  * rpc_wake_up_locked - wake up all rpc_tasks
704  * @queue: rpc_wait_queue on which the tasks are sleeping
705  *
706  */
707 static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
708 {
709         struct rpc_task *task;
710
711         for (;;) {
712                 task = __rpc_find_next_queued(queue);
713                 if (task == NULL)
714                         break;
715                 rpc_wake_up_task_queue_locked(queue, task);
716         }
717 }
718
719 /**
720  * rpc_wake_up - wake up all rpc_tasks
721  * @queue: rpc_wait_queue on which the tasks are sleeping
722  *
723  * Grabs queue->lock
724  */
725 void rpc_wake_up(struct rpc_wait_queue *queue)
726 {
727         spin_lock(&queue->lock);
728         rpc_wake_up_locked(queue);
729         spin_unlock(&queue->lock);
730 }
731 EXPORT_SYMBOL_GPL(rpc_wake_up);
732
733 /**
734  * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
735  * @queue: rpc_wait_queue on which the tasks are sleeping
736  * @status: status value to set
737  */
738 static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
739 {
740         struct rpc_task *task;
741
742         for (;;) {
743                 task = __rpc_find_next_queued(queue);
744                 if (task == NULL)
745                         break;
746                 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
747         }
748 }
749
750 /**
751  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
752  * @queue: rpc_wait_queue on which the tasks are sleeping
753  * @status: status value to set
754  *
755  * Grabs queue->lock
756  */
757 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
758 {
759         spin_lock(&queue->lock);
760         rpc_wake_up_status_locked(queue, status);
761         spin_unlock(&queue->lock);
762 }
763 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
764
765 static void __rpc_queue_timer_fn(struct work_struct *work)
766 {
767         struct rpc_wait_queue *queue = container_of(work,
768                         struct rpc_wait_queue,
769                         timer_list.dwork.work);
770         struct rpc_task *task, *n;
771         unsigned long expires, now, timeo;
772
773         spin_lock(&queue->lock);
774         expires = now = jiffies;
775         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
776                 timeo = task->tk_timeout;
777                 if (time_after_eq(now, timeo)) {
778                         trace_rpc_task_timeout(task, task->tk_action);
779                         task->tk_status = -ETIMEDOUT;
780                         rpc_wake_up_task_queue_locked(queue, task);
781                         continue;
782                 }
783                 if (expires == now || time_after(expires, timeo))
784                         expires = timeo;
785         }
786         if (!list_empty(&queue->timer_list.list))
787                 rpc_set_queue_timer(queue, expires);
788         spin_unlock(&queue->lock);
789 }
790
791 static void __rpc_atrun(struct rpc_task *task)
792 {
793         if (task->tk_status == -ETIMEDOUT)
794                 task->tk_status = 0;
795 }
796
797 /*
798  * Run a task at a later time
799  */
800 void rpc_delay(struct rpc_task *task, unsigned long delay)
801 {
802         rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
803 }
804 EXPORT_SYMBOL_GPL(rpc_delay);
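/*
 * Example of the usual retry-after-delay pattern in a tk_action callback
 * (an illustrative sketch; the retry action is hypothetical):
 *
 *	if (task->tk_status == -EAGAIN) {
 *		task->tk_status = 0;
 *		task->tk_action = foo_retry_action;
 *		rpc_delay(task, HZ >> 4);
 *		return;
 *	}
 *
 * The task sleeps on delay_queue; when the delay expires, __rpc_atrun()
 * clears the -ETIMEDOUT status and the retry action runs.
 */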
805
806 /*
807  * Helper to call task->tk_ops->rpc_call_prepare
808  */
809 void rpc_prepare_task(struct rpc_task *task)
810 {
811         task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
812 }
813
814 static void
815 rpc_init_task_statistics(struct rpc_task *task)
816 {
817         /* Initialize retry counters */
818         task->tk_garb_retry = 2;
819         task->tk_cred_retry = 2;
820
821         /* starting timestamp */
822         task->tk_start = ktime_get();
823 }
824
825 static void
826 rpc_reset_task_statistics(struct rpc_task *task)
827 {
828         task->tk_timeouts = 0;
829         task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
830         rpc_init_task_statistics(task);
831 }
832
833 /*
834  * Helper that calls task->tk_ops->rpc_call_done if it exists
835  */
836 void rpc_exit_task(struct rpc_task *task)
837 {
838         trace_rpc_task_end(task, task->tk_action);
839         task->tk_action = NULL;
840         if (task->tk_ops->rpc_count_stats)
841                 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
842         else if (task->tk_client)
843                 rpc_count_iostats(task, task->tk_client->cl_metrics);
844         if (task->tk_ops->rpc_call_done != NULL) {
845                 trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
846                 task->tk_ops->rpc_call_done(task, task->tk_calldata);
847                 if (task->tk_action != NULL) {
848                         /* Always release the RPC slot and buffer memory */
849                         xprt_release(task);
850                         rpc_reset_task_statistics(task);
851                 }
852         }
853 }
854
855 void rpc_signal_task(struct rpc_task *task)
856 {
857         struct rpc_wait_queue *queue;
858
859         if (!RPC_IS_ACTIVATED(task))
860                 return;
861
862         if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
863                 return;
864         trace_rpc_task_signalled(task, task->tk_action);
865         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
866         smp_mb__after_atomic();
867         queue = READ_ONCE(task->tk_waitqueue);
868         if (queue)
869                 rpc_wake_up_queued_task(queue, task);
870 }
871
872 void rpc_task_try_cancel(struct rpc_task *task, int error)
873 {
874         struct rpc_wait_queue *queue;
875
876         if (!rpc_task_set_rpc_status(task, error))
877                 return;
878         queue = READ_ONCE(task->tk_waitqueue);
879         if (queue)
880                 rpc_wake_up_queued_task(queue, task);
881 }
882
883 void rpc_exit(struct rpc_task *task, int status)
884 {
885         task->tk_status = status;
886         task->tk_action = rpc_exit_task;
887         rpc_wake_up_queued_task(task->tk_waitqueue, task);
888 }
889 EXPORT_SYMBOL_GPL(rpc_exit);
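/*
 * Example of aborting a task from one of its callbacks (an illustrative
 * sketch; the error test is hypothetical):
 *
 *	if (foo_fatal_error(task)) {
 *		rpc_exit(task, -EIO);
 *		return;
 *	}
 *
 * Pointing tk_action at rpc_exit_task() ensures that the next step the
 * task executes is its rpc_call_done()/cleanup path.
 */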
890
891 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
892 {
893         if (ops->rpc_release != NULL)
894                 ops->rpc_release(calldata);
895 }
896
897 static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
898 {
899         if (!xprt)
900                 return false;
901         if (!atomic_read(&xprt->swapper))
902                 return false;
903         return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
904 }
905
906 /*
907  * This is the RPC `scheduler' (or rather, the finite state machine).
908  */
909 static void __rpc_execute(struct rpc_task *task)
910 {
911         struct rpc_wait_queue *queue;
912         int task_is_async = RPC_IS_ASYNC(task);
913         int status = 0;
914         unsigned long pflags = current->flags;
915
916         WARN_ON_ONCE(RPC_IS_QUEUED(task));
917         if (RPC_IS_QUEUED(task))
918                 return;
919
920         for (;;) {
921                 void (*do_action)(struct rpc_task *);
922
923                 /*
924                  * Perform the next FSM step or a pending callback.
925                  *
926                  * tk_action may be NULL if the task has been killed.
927                  */
928                 do_action = task->tk_action;
929                 /* Tasks with an RPC error status should exit */
930                 if (do_action != rpc_exit_task &&
931                     (status = READ_ONCE(task->tk_rpc_status)) != 0) {
932                         task->tk_status = status;
933                         if (do_action != NULL)
934                                 do_action = rpc_exit_task;
935                 }
936                 /* Callbacks override all actions */
937                 if (task->tk_callback) {
938                         do_action = task->tk_callback;
939                         task->tk_callback = NULL;
940                 }
941                 if (!do_action)
942                         break;
943                 if (RPC_IS_SWAPPER(task) ||
944                     xprt_needs_memalloc(task->tk_xprt, task))
945                         current->flags |= PF_MEMALLOC;
946
947                 trace_rpc_task_run_action(task, do_action);
948                 do_action(task);
949
950                 /*
951                  * Lockless check for whether task is sleeping or not.
952                  */
953                 if (!RPC_IS_QUEUED(task)) {
954                         cond_resched();
955                         continue;
956                 }
957
958                 /*
959                  * The queue->lock protects against races with
960                  * rpc_make_runnable().
961                  *
962                  * Note that once we clear RPC_TASK_RUNNING on an asynchronous
963                  * rpc_task, rpc_make_runnable() can assign it to a
964  * different workqueue. After that point we therefore cannot assume
965  * that the rpc_task pointer can still be safely dereferenced.
966                  */
967                 queue = task->tk_waitqueue;
968                 spin_lock(&queue->lock);
969                 if (!RPC_IS_QUEUED(task)) {
970                         spin_unlock(&queue->lock);
971                         continue;
972                 }
973                 /* Wake up any task that has an exit status */
974                 if (READ_ONCE(task->tk_rpc_status) != 0) {
975                         rpc_wake_up_task_queue_locked(queue, task);
976                         spin_unlock(&queue->lock);
977                         continue;
978                 }
979                 rpc_clear_running(task);
980                 spin_unlock(&queue->lock);
981                 if (task_is_async)
982                         goto out;
983
984                 /* sync task: sleep here */
985                 trace_rpc_task_sync_sleep(task, task->tk_action);
986                 status = out_of_line_wait_on_bit(&task->tk_runstate,
987                                 RPC_TASK_QUEUED, rpc_wait_bit_killable,
988                                 TASK_KILLABLE|TASK_FREEZABLE);
989                 if (status < 0) {
990                         /*
991                          * When a sync task receives a signal, it exits with
992                          * -ERESTARTSYS. In order to catch any callbacks that
993                          * clean up after sleeping on some queue, we don't
994                          * break the loop here, but go around once more.
995                          */
996                         rpc_signal_task(task);
997                 }
998                 trace_rpc_task_sync_wake(task, task->tk_action);
999         }
1000
1001         /* Release all resources associated with the task */
1002         rpc_release_task(task);
1003 out:
1004         current_restore_flags(pflags, PF_MEMALLOC);
1005 }
1006
1007 /*
1008  * User-visible entry point to the scheduler.
1009  *
1010  * This may be called recursively if e.g. an async NFS task updates
1011  * the attributes and finds that dirty pages must be flushed.
1012  * NOTE: Upon exit of this function the task is guaranteed to be
1013  *       released. In particular note that tk_release() will have
1014  *       been called, so your task memory may have been freed.
1015  */
1016 void rpc_execute(struct rpc_task *task)
1017 {
1018         bool is_async = RPC_IS_ASYNC(task);
1019
1020         rpc_set_active(task);
1021         rpc_make_runnable(rpciod_workqueue, task);
1022         if (!is_async) {
1023                 unsigned int pflags = memalloc_nofs_save();
1024                 __rpc_execute(task);
1025                 memalloc_nofs_restore(pflags);
1026         }
1027 }
1028
1029 static void rpc_async_schedule(struct work_struct *work)
1030 {
1031         unsigned int pflags = memalloc_nofs_save();
1032
1033         __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
1034         memalloc_nofs_restore(pflags);
1035 }
1036
1037 /**
1038  * rpc_malloc - allocate RPC buffer resources
1039  * @task: RPC task
1040  *
1041  * A single memory region is allocated, which is split between the
1042  * RPC call and RPC reply that this task is being used for. When
1043  * this RPC is retired, the memory is released by calling rpc_free.
1044  *
1045  * To prevent rpciod from hanging, this allocator never sleeps,
1046  * returning -ENOMEM and suppressing allocation warnings if the request cannot
1047  * be serviced immediately. The caller can arrange to sleep in a
1048  * way that is safe for rpciod.
1049  *
1050  * Most requests are 'small' (under 2KiB) and can be serviced from a
1051  * mempool, ensuring that NFS reads and writes can always proceed,
1052  * and that there is good locality of reference for these buffers.
1053  */
1054 int rpc_malloc(struct rpc_task *task)
1055 {
1056         struct rpc_rqst *rqst = task->tk_rqstp;
1057         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1058         struct rpc_buffer *buf;
1059         gfp_t gfp = rpc_task_gfp_mask();
1060
1061         size += sizeof(struct rpc_buffer);
1062         if (size <= RPC_BUFFER_MAXSIZE) {
1063                 buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
1064                 /* Reach for the mempool if dynamic allocation fails */
1065                 if (!buf && RPC_IS_ASYNC(task))
1066                         buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
1067         } else
1068                 buf = kmalloc(size, gfp);
1069
1070         if (!buf)
1071                 return -ENOMEM;
1072
1073         buf->len = size;
1074         rqst->rq_buffer = buf->data;
1075         rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1076         return 0;
1077 }
1078 EXPORT_SYMBOL_GPL(rpc_malloc);
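/*
 * Layout of the buffer set up by rpc_malloc(), as implied by the code
 * above:
 *
 *	+------------+------------------------+------------------------+
 *	| rpc_buffer | call buffer            | reply buffer           |
 *	| header     | rq_callsize bytes      | rq_rcvsize bytes       |
 *	+------------+------------------------+------------------------+
 *	             ^ rqst->rq_buffer        ^ rqst->rq_rbuffer
 */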
1079
1080 /**
1081  * rpc_free - free RPC buffer resources allocated via rpc_malloc
1082  * @task: RPC task
1083  *
1084  */
1085 void rpc_free(struct rpc_task *task)
1086 {
1087         void *buffer = task->tk_rqstp->rq_buffer;
1088         size_t size;
1089         struct rpc_buffer *buf;
1090
1091         buf = container_of(buffer, struct rpc_buffer, data);
1092         size = buf->len;
1093
1094         if (size <= RPC_BUFFER_MAXSIZE)
1095                 mempool_free(buf, rpc_buffer_mempool);
1096         else
1097                 kfree(buf);
1098 }
1099 EXPORT_SYMBOL_GPL(rpc_free);
1100
1101 /*
1102  * Creation and deletion of RPC task structures
1103  */
1104 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1105 {
1106         memset(task, 0, sizeof(*task));
1107         atomic_set(&task->tk_count, 1);
1108         task->tk_flags  = task_setup_data->flags;
1109         task->tk_ops = task_setup_data->callback_ops;
1110         task->tk_calldata = task_setup_data->callback_data;
1111         INIT_LIST_HEAD(&task->tk_task);
1112
1113         task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1114         task->tk_owner = current->tgid;
1115
1116         /* Initialize workqueue for async tasks */
1117         task->tk_workqueue = task_setup_data->workqueue;
1118
1119         task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1120                         xprt_get(task_setup_data->rpc_xprt));
1121
1122         task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1123
1124         if (task->tk_ops->rpc_call_prepare != NULL)
1125                 task->tk_action = rpc_prepare_task;
1126
1127         rpc_init_task_statistics(task);
1128 }
1129
1130 static struct rpc_task *rpc_alloc_task(void)
1131 {
1132         struct rpc_task *task;
1133
1134         task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
1135         if (task)
1136                 return task;
1137         return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
1138 }
1139
1140 /*
1141  * Create a new task for the specified client.
1142  */
1143 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1144 {
1145         struct rpc_task *task = setup_data->task;
1146         unsigned short flags = 0;
1147
1148         if (task == NULL) {
1149                 task = rpc_alloc_task();
1150                 if (task == NULL) {
1151                         rpc_release_calldata(setup_data->callback_ops,
1152                                              setup_data->callback_data);
1153                         return ERR_PTR(-ENOMEM);
1154                 }
1155                 flags = RPC_TASK_DYNAMIC;
1156         }
1157
1158         rpc_init_task(task, setup_data);
1159         task->tk_flags |= flags;
1160         return task;
1161 }
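/*
 * Example of creating and running a dynamically allocated task (an
 * illustrative sketch of the pattern used by rpc_run_task() in clnt.c;
 * "foo_call_ops" is hypothetical and client/message binding is elided):
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.callback_ops	= &foo_call_ops,
 *		.callback_data	= foo_data,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_new_task(&setup);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	atomic_inc(&task->tk_count);	caller keeps a reference
 *	rpc_execute(task);
 *	...
 *	rpc_put_task(task);		drops the caller's reference
 */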
1162
1163 /*
1164  * rpc_free_task - release rpc task and perform cleanups
1165  *
1166  * Note that we free up the rpc_task _after_ rpc_release_calldata()
1167  * in order to work around a workqueue dependency issue.
1168  *
1169  * Tejun Heo states:
1170  * "Workqueue currently considers two work items to be the same if they're
1171  * on the same address and won't execute them concurrently - ie. it
1172  * makes a work item which is queued again while being executed wait
1173  * for the previous execution to complete.
1174  *
1175  * If a work function frees the work item, and then waits for an event
1176  * which should be performed by another work item and *that* work item
1177  * recycles the freed work item, it can create a false dependency loop.
1178  * There really is no reliable way to detect this short of verifying
1179  * every memory free."
1180  *
1181  */
1182 static void rpc_free_task(struct rpc_task *task)
1183 {
1184         unsigned short tk_flags = task->tk_flags;
1185
1186         put_rpccred(task->tk_op_cred);
1187         rpc_release_calldata(task->tk_ops, task->tk_calldata);
1188
1189         if (tk_flags & RPC_TASK_DYNAMIC)
1190                 mempool_free(task, rpc_task_mempool);
1191 }
1192
1193 static void rpc_async_release(struct work_struct *work)
1194 {
1195         unsigned int pflags = memalloc_nofs_save();
1196
1197         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1198         memalloc_nofs_restore(pflags);
1199 }
1200
1201 static void rpc_release_resources_task(struct rpc_task *task)
1202 {
1203         xprt_release(task);
1204         if (task->tk_msg.rpc_cred) {
1205                 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1206                         put_cred(task->tk_msg.rpc_cred);
1207                 task->tk_msg.rpc_cred = NULL;
1208         }
1209         rpc_task_release_client(task);
1210 }
1211
1212 static void rpc_final_put_task(struct rpc_task *task,
1213                 struct workqueue_struct *q)
1214 {
1215         if (q != NULL) {
1216                 INIT_WORK(&task->u.tk_work, rpc_async_release);
1217                 queue_work(q, &task->u.tk_work);
1218         } else
1219                 rpc_free_task(task);
1220 }
1221
1222 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1223 {
1224         if (atomic_dec_and_test(&task->tk_count)) {
1225                 rpc_release_resources_task(task);
1226                 rpc_final_put_task(task, q);
1227         }
1228 }
1229
1230 void rpc_put_task(struct rpc_task *task)
1231 {
1232         rpc_do_put_task(task, NULL);
1233 }
1234 EXPORT_SYMBOL_GPL(rpc_put_task);
1235
1236 void rpc_put_task_async(struct rpc_task *task)
1237 {
1238         rpc_do_put_task(task, task->tk_workqueue);
1239 }
1240 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1241
1242 static void rpc_release_task(struct rpc_task *task)
1243 {
1244         WARN_ON_ONCE(RPC_IS_QUEUED(task));
1245
1246         rpc_release_resources_task(task);
1247
1248         /*
1249          * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1250          * so it should be safe to use task->tk_count as a test for whether
1251          * or not any other processes still hold references to our rpc_task.
1252          */
1253         if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1254                 /* Wake up anyone who may be waiting for task completion */
1255                 if (!rpc_complete_task(task))
1256                         return;
1257         } else {
1258                 if (!atomic_dec_and_test(&task->tk_count))
1259                         return;
1260         }
1261         rpc_final_put_task(task, task->tk_workqueue);
1262 }
1263
1264 int rpciod_up(void)
1265 {
1266         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1267 }
1268
1269 void rpciod_down(void)
1270 {
1271         module_put(THIS_MODULE);
1272 }
1273
1274 /*
1275  * Start up the rpciod workqueue.
1276  */
1277 static int rpciod_start(void)
1278 {
1279         struct workqueue_struct *wq;
1280
1281         /*
1282          * Create the rpciod thread and wait for it to start.
1283          */
1284         wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1285         if (!wq)
1286                 goto out_failed;
1287         rpciod_workqueue = wq;
1288         wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
1289         if (!wq)
1290                 goto free_rpciod;
1291         xprtiod_workqueue = wq;
1292         return 1;
1293 free_rpciod:
1294         wq = rpciod_workqueue;
1295         rpciod_workqueue = NULL;
1296         destroy_workqueue(wq);
1297 out_failed:
1298         return 0;
1299 }
1300
1301 static void rpciod_stop(void)
1302 {
1303         struct workqueue_struct *wq = NULL;
1304
1305         if (rpciod_workqueue == NULL)
1306                 return;
1307
1308         wq = rpciod_workqueue;
1309         rpciod_workqueue = NULL;
1310         destroy_workqueue(wq);
1311         wq = xprtiod_workqueue;
1312         xprtiod_workqueue = NULL;
1313         destroy_workqueue(wq);
1314 }
1315
1316 void
1317 rpc_destroy_mempool(void)
1318 {
1319         rpciod_stop();
1320         mempool_destroy(rpc_buffer_mempool);
1321         mempool_destroy(rpc_task_mempool);
1322         kmem_cache_destroy(rpc_task_slabp);
1323         kmem_cache_destroy(rpc_buffer_slabp);
1324         rpc_destroy_wait_queue(&delay_queue);
1325 }
1326
1327 int
1328 rpc_init_mempool(void)
1329 {
1330         /*
1331          * The following is not strictly a mempool initialisation,
1332          * but there is no harm in doing it here
1333          */
1334         rpc_init_wait_queue(&delay_queue, "delayq");
1335         if (!rpciod_start())
1336                 goto err_nomem;
1337
1338         rpc_task_slabp = kmem_cache_create("rpc_tasks",
1339                                              sizeof(struct rpc_task),
1340                                              0, SLAB_HWCACHE_ALIGN,
1341                                              NULL);
1342         if (!rpc_task_slabp)
1343                 goto err_nomem;
1344         rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1345                                              RPC_BUFFER_MAXSIZE,
1346                                              0, SLAB_HWCACHE_ALIGN,
1347                                              NULL);
1348         if (!rpc_buffer_slabp)
1349                 goto err_nomem;
1350         rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1351                                                     rpc_task_slabp);
1352         if (!rpc_task_mempool)
1353                 goto err_nomem;
1354         rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1355                                                       rpc_buffer_slabp);
1356         if (!rpc_buffer_mempool)
1357                 goto err_nomem;
1358         return 0;
1359 err_nomem:
1360         rpc_destroy_mempool();
1361         return -ENOMEM;
1362 }