// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)

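/*
 * Illustrative arithmetic (assuming 4 KiB pages; PAGE_SIZE varies by
 * architecture): each preallocated page holds PAGE_SIZE / 128 = 32 notes,
 * so a queue sized for 256 notes is backed by 8 pages.
 */
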
/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
        spin_lock_bh(&wqueue->lock);
        if (unlikely(wqueue->defunct)) {
                spin_unlock_bh(&wqueue->lock);
                return false;
        }
        return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
        spin_unlock_bh(&wqueue->lock);
}

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                         struct pipe_buffer *buf)
{
        struct watch_queue *wqueue = (struct watch_queue *)buf->private;
        struct page *page;
        unsigned int bit;

        /* We need to work out which note within the page this refers to, but
         * the note might have been maximum size, so merely ANDing the offset
         * off doesn't work.  OTOH, the note must've been more than zero size.
         */
        bit = buf->offset + buf->len;
        if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
                bit -= WATCH_QUEUE_NOTE_SIZE;
        bit /= WATCH_QUEUE_NOTE_SIZE;

        page = buf->page;
        bit += page->index;

        set_bit(bit, wqueue->notes_bitmap);
        generic_pipe_buf_release(pipe, buf);
}
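
/*
 * Worked example (illustrative only): a maximum-sized 128-byte note at page
 * offset 0 gives buf->offset + buf->len = 128, which is note-aligned, so we
 * step back by WATCH_QUEUE_NOTE_SIZE before dividing: bit = 0.  A 24-byte
 * note at offset 128 gives 152, which is not aligned, and 152 / 128
 * truncates to bit 1.  page->index then biases the bit into the right part
 * of the whole-queue bitmap.
 */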

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
        .release        = watch_queue_pipe_buf_release,
        .try_steal      = watch_queue_pipe_buf_try_steal,
        .get            = generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
                                  struct watch_notification *n)
{
        void *p;
        struct pipe_inode_info *pipe = wqueue->pipe;
        struct pipe_buffer *buf;
        struct page *page;
        unsigned int head, tail, mask, note, offset, len;
        bool done = false;

        if (!pipe)
                return false;

        spin_lock_irq(&pipe->rd_wait.lock);

        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
        if (pipe_full(head, tail, pipe->ring_size))
                goto lost;

        note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
        if (note >= wqueue->nr_notes)
                goto lost;

        page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
        offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
        get_page(page);
        len = n->info & WATCH_INFO_LENGTH;
        p = kmap_atomic(page);
        memcpy(p + offset, n, len);
        kunmap_atomic(p);

        buf = &pipe->bufs[head & mask];
        buf->page = page;
        buf->private = (unsigned long)wqueue;
        buf->ops = &watch_queue_pipe_buf_ops;
        buf->offset = offset;
        buf->len = len;
        buf->flags = PIPE_BUF_FLAG_WHOLE;
        smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

        if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
                spin_unlock_irq(&pipe->rd_wait.lock);
                BUG();
        }
        wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
        done = true;

out:
        spin_unlock_irq(&pipe->rd_wait.lock);
        if (done)
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        return done;

lost:
        buf = &pipe->bufs[(head - 1) & mask];
        buf->flags |= PIPE_BUF_FLAG_LOSS;
        goto out;
}
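
/*
 * Note on the "lost" path above: it only marks the most recently committed
 * buffer with PIPE_BUF_FLAG_LOSS.  The flag is consumed by pipe_read() (see
 * fs/pipe.c), which reports the drop to the reader as a synthesised
 * WATCH_TYPE_META / WATCH_META_LOSS_NOTIFICATION record.
 */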

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
                                      const struct watch_notification *n)
{
        const struct watch_type_filter *wt;
        unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
        unsigned int st_index = n->subtype / st_bits;
        unsigned int st_bit = 1U << (n->subtype % st_bits);
        int i;

        if (!test_bit(n->type, wf->type_filter))
                return false;

        for (i = 0; i < wf->nr_filters; i++) {
                wt = &wf->filters[i];
                if (n->type == wt->type &&
                    (wt->subtype_filter[st_index] & st_bit) &&
                    (n->info & wt->info_mask) == wt->info_filter)
                        return true;
        }

        return false; /* If there is a filter, the default is to reject. */
}
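
/*
 * Worked example (illustrative only): subtype_filter[] entries are 32-bit
 * words, so a notification with subtype 67 is checked against st_index =
 * 67 / 32 = 2 and st_bit = 1 << (67 % 32) = 1 << 3, i.e. bit 3 of
 * subtype_filter[2].  Since watch_queue_set_filter() below only copies in
 * subtype_filter[0], the higher words stay zero and subtypes 0-31 are what
 * userspace can effectively select on.
 */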

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
                               struct watch_notification *n,
                               const struct cred *cred,
                               u64 id)
{
        const struct watch_filter *wf;
        struct watch_queue *wqueue;
        struct watch *watch;

        if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
                WARN_ON(1);
                return;
        }

        rcu_read_lock();

        hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
                if (watch->id != id)
                        continue;
                n->info &= ~WATCH_INFO_ID;
                n->info |= watch->info_id;

                wqueue = rcu_dereference(watch->queue);
                wf = rcu_dereference(wqueue->filter);
                if (wf && !filter_watch_notification(wf, n))
                        continue;

                if (security_post_notification(watch->cred, cred, n) < 0)
                        continue;

                if (lock_wqueue(wqueue)) {
                        post_one_notification(wqueue, n);
                        unlock_wqueue(wqueue);
                }
        }

        rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
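
/*
 * A minimal caller sketch (illustrative; modelled on the removal
 * notification that remove_watch_from_object() posts below).  The poster
 * fills in a struct watch_notification, encodes the record length into
 * n.info with watch_sizeof(), and posts against a watch ID (0 here is just
 * a placeholder):
 *
 *	struct watch_notification n = {
 *		.type	 = WATCH_TYPE_META,
 *		.subtype = WATCH_META_REMOVAL_NOTIFICATION,
 *		.info	 = watch_sizeof(n),
 *	};
 *
 *	__post_watch_notification(wlist, &n, current_cred(), 0);
 *
 * Real callers typically embed the watch_notification at the head of a
 * larger, type-specific record.
 */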

/*
 * Allocate enough pages to preallocate space for the requested number of
 * notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
        struct watch_queue *wqueue = pipe->watch_queue;
        struct page **pages;
        unsigned long *bitmap;
        unsigned long user_bufs;
        int ret, i, nr_pages;

        if (!wqueue)
                return -ENODEV;
        if (wqueue->notes)
                return -EBUSY;

        if (nr_notes < 1 ||
            nr_notes > 512) /* TODO: choose a better hard limit */
                return -EINVAL;

        nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
        nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
        user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

        if (nr_pages > pipe->max_usage &&
            (too_many_pipe_buffers_hard(user_bufs) ||
             too_many_pipe_buffers_soft(user_bufs)) &&
            pipe_is_unprivileged_user()) {
                ret = -EPERM;
                goto error;
        }

        nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
        ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
        if (ret < 0)
                goto error;

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto error;

        for (i = 0; i < nr_pages; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto error_p;
                pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
        }

        bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
        if (!bitmap)
                goto error_p;

        bitmap_fill(bitmap, nr_notes);
        wqueue->notes = pages;
        wqueue->notes_bitmap = bitmap;
        wqueue->nr_pages = nr_pages;
        wqueue->nr_notes = nr_notes;
        return 0;

error_p:
        while (--i >= 0)
                __free_page(pages[i]);
        kfree(pages);
error:
        (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
        return ret;
}
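
/*
 * Userspace counterpart (sketch following Documentation/core-api/
 * watch_queue.rst): the queue is created by opening a pipe with
 * O_NOTIFICATION_PIPE and then sized with the IOC_WATCH_QUEUE_SET_SIZE
 * ioctl, which lands here:
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_NOTIFICATION_PIPE) == -1)
 *		err(1, "pipe2");
 *	if (ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256) == -1)
 *		err(1, "ioctl");
 *
 * 256 is only an example; any value in the 1..512 range checked above will
 * do, and it is rounded up to a whole number of pages' worth of notes.
 */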

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
                            struct watch_notification_filter __user *_filter)
{
        struct watch_notification_type_filter *tf;
        struct watch_notification_filter filter;
        struct watch_type_filter *q;
        struct watch_filter *wfilter;
        struct watch_queue *wqueue = pipe->watch_queue;
        int ret, nr_filter = 0, i;

        if (!wqueue)
                return -ENODEV;

        if (!_filter) {
                /* Remove the old filter */
                wfilter = NULL;
                goto set;
        }

        /* Grab the user's filter specification */
        if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
                return -EFAULT;
        if (filter.nr_filters == 0 ||
            filter.nr_filters > 16 ||
            filter.__reserved != 0)
                return -EINVAL;

        tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
        if (IS_ERR(tf))
                return PTR_ERR(tf);

        ret = -EINVAL;
        for (i = 0; i < filter.nr_filters; i++) {
                if ((tf[i].info_filter & ~tf[i].info_mask) ||
                    tf[i].info_mask & WATCH_INFO_LENGTH)
                        goto err_filter;
                /* Ignore any unknown types */
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;
                nr_filter++;
        }

        /* Now we need to build the internal filter from only the relevant
         * user-specified filters.
         */
        ret = -ENOMEM;
        wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
        if (!wfilter)
                goto err_filter;
        wfilter->nr_filters = nr_filter;

        q = wfilter->filters;
        for (i = 0; i < filter.nr_filters; i++) {
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;

                q->type                 = tf[i].type;
                q->info_filter          = tf[i].info_filter;
                q->info_mask            = tf[i].info_mask;
                q->subtype_filter[0]    = tf[i].subtype_filter[0];
                __set_bit(q->type, wfilter->type_filter);
                q++;
        }

        kfree(tf);
set:
        pipe_lock(pipe);
        wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
                                      lockdep_is_held(&pipe->mutex));
        pipe_unlock(pipe);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        return 0;

err_filter:
        kfree(tf);
        return ret;
}
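
/*
 * Userspace counterpart (sketch adapted from
 * Documentation/core-api/watch_queue.rst): a filter is installed with the
 * IOC_WATCH_QUEUE_SET_FILTER ioctl.  This example admits key-change
 * notifications of any subtype.  Note that .filters is a flexible array
 * member, so initialising it in place like this relies on a GCC extension;
 * a portable caller would build the buffer on the heap:
 *
 *	struct watch_notification_filter filter = {
 *		.nr_filters = 1,
 *		.filters = {
 *			[0] = {
 *				.type               = WATCH_TYPE_KEY_NOTIFY,
 *				.subtype_filter[0]  = UINT_MAX,
 *			},
 *		},
 *	};
 *
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, &filter);
 */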

static void __put_watch_queue(struct kref *kref)
{
        struct watch_queue *wqueue =
                container_of(kref, struct watch_queue, usage);
        struct watch_filter *wfilter;
        int i;

        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
        kfree(wqueue->notes);
        bitmap_free(wqueue->notes_bitmap);

        wfilter = rcu_access_pointer(wqueue->filter);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
        kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
        struct watch *watch = container_of(rcu, struct watch, rcu);

        put_watch_queue(rcu_access_pointer(watch->queue));
        atomic_dec(&watch->cred->user->nr_watches);
        put_cred(watch->cred);
        kfree(watch);
}

static void __put_watch(struct kref *kref)
{
        struct watch *watch = container_of(kref, struct watch, usage);

        call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
        kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
        kref_init(&watch->usage);
        INIT_HLIST_NODE(&watch->list_node);
        INIT_HLIST_NODE(&watch->queue_node);
        rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
        const struct cred *cred;
        struct watch *w;

        hlist_for_each_entry(w, &wlist->watchers, list_node) {
                struct watch_queue *wq = rcu_access_pointer(w->queue);
                if (wqueue == wq && watch->id == w->id)
                        return -EBUSY;
        }

        cred = current_cred();
        if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
                atomic_dec(&cred->user->nr_watches);
                return -EAGAIN;
        }

        watch->cred = get_cred(cred);
        rcu_assign_pointer(watch->watch_list, wlist);

        kref_get(&wqueue->usage);
        kref_get(&watch->usage);
        hlist_add_head(&watch->queue_node, &wqueue->watches);
        hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
        return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched.  @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
        struct watch_queue *wqueue;
        int ret = -ENOENT;

        rcu_read_lock();

        wqueue = rcu_access_pointer(watch->queue);
        if (lock_wqueue(wqueue)) {
                spin_lock(&wlist->lock);
                ret = add_one_watch(watch, wlist, wqueue);
                spin_unlock(&wlist->lock);
                unlock_wqueue(wqueue);
        }

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
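
/*
 * Kernel-side usage sketch (hypothetical names, loosely modelled on the
 * keyrings watch code): the watcher allocates a struct watch, binds it to a
 * queue with init_watch(), chooses its ID fields and then attaches it to
 * the watched object's list:
 *
 *	struct watch *watch;
 *	int ret;
 *
 *	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *	if (!watch)
 *		return -ENOMEM;
 *
 *	init_watch(watch, wqueue);
 *	watch->id	= object_id;		// hypothetical object ID
 *	watch->info_id	= (u32)tag << WATCH_INFO_ID__SHIFT;
 *
 *	ret = add_watch_to_object(watch, &object->watchers);
 *
 * On error the caller still owns the watch and must dispose of it.
 */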

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
                             u64 id, bool all)
{
        struct watch_notification_removal n;
        struct watch_queue *wqueue;
        struct watch *watch;
        int ret = -EBADSLT;

        rcu_read_lock();

again:
        spin_lock(&wlist->lock);
        hlist_for_each_entry(watch, &wlist->watchers, list_node) {
                if (all ||
                    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
                        goto found;
        }
        spin_unlock(&wlist->lock);
        goto out;

found:
        ret = 0;
        hlist_del_init_rcu(&watch->list_node);
        rcu_assign_pointer(watch->watch_list, NULL);
        spin_unlock(&wlist->lock);

        /* We now own the reference on watch that used to belong to wlist. */

        n.watch.type = WATCH_TYPE_META;
        n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
        n.watch.info = watch->info_id | watch_sizeof(n.watch);
        n.id = id;
        if (id != 0)
                n.watch.info = watch->info_id | watch_sizeof(n);

        wqueue = rcu_dereference(watch->queue);

        if (lock_wqueue(wqueue)) {
                post_one_notification(wqueue, &n.watch);

                if (!hlist_unhashed(&watch->queue_node)) {
                        hlist_del_init_rcu(&watch->queue_node);
                        put_watch(watch);
                }

                unlock_wqueue(wqueue);
        }

        if (wlist->release_watch) {
                void (*release_watch)(struct watch *);

                release_watch = wlist->release_watch;
                rcu_read_unlock();
                (*release_watch)(watch);
                rcu_read_lock();
        }
        put_watch(watch);

        if (all && !hlist_empty(&wlist->watchers))
                goto again;
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
        struct watch_list *wlist;
        struct watch *watch;
        bool release;

        rcu_read_lock();
        spin_lock_bh(&wqueue->lock);

        /* Prevent new notifications from being stored. */
        wqueue->defunct = true;

        while (!hlist_empty(&wqueue->watches)) {
                watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
                hlist_del_init_rcu(&watch->queue_node);
                /* We now own a ref on the watch. */
                spin_unlock_bh(&wqueue->lock);

                /* We can't do the next bit under the queue lock as we need to
                 * get the list lock - which would cause a deadlock if someone
                 * was removing from the opposite direction at the same time or
                 * posting a notification.
                 */
                wlist = rcu_dereference(watch->watch_list);
                if (wlist) {
                        void (*release_watch)(struct watch *);

                        spin_lock(&wlist->lock);

                        release = !hlist_unhashed(&watch->list_node);
                        if (release) {
                                hlist_del_init_rcu(&watch->list_node);
                                rcu_assign_pointer(watch->watch_list, NULL);

                                /* We now own a second ref on the watch. */
                        }

                        release_watch = wlist->release_watch;
                        spin_unlock(&wlist->lock);

                        if (release) {
                                if (release_watch) {
                                        rcu_read_unlock();
                                        /* This might need to call dput(), so
                                         * we have to drop all the locks.
                                         */
                                        (*release_watch)(watch);
                                        rcu_read_lock();
                                }
                                put_watch(watch);
                        }
                }

                put_watch(watch);
                spin_lock_bh(&wqueue->lock);
        }

        spin_unlock_bh(&wqueue->lock);
        rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
        struct pipe_inode_info *pipe;
        struct watch_queue *wqueue = ERR_PTR(-EINVAL);
        struct fd f;

        f = fdget(fd);
        if (f.file) {
                pipe = get_pipe_info(f.file, false);
                if (pipe && pipe->watch_queue) {
                        wqueue = pipe->watch_queue;
                        kref_get(&wqueue->usage);
                }
                fdput(f);
        }

        return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
        struct watch_queue *wqueue;

        wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
        if (!wqueue)
                return -ENOMEM;

        wqueue->pipe = pipe;
        kref_init(&wqueue->usage);
        spin_lock_init(&wqueue->lock);
        INIT_HLIST_HEAD(&wqueue->watches);

        pipe->watch_queue = wqueue;
        return 0;
}