// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
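
/*
 * Each preallocated page is carved into fixed 128-byte note slots, so with
 * 4KiB pages (an illustrative assumption; the code only relies on PAGE_SIZE)
 * there are 4096 / 128 = 32 notes per page.  A notification record,
 * including its header, must therefore fit within 128 bytes.
 */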

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
        spin_lock_bh(&wqueue->lock);
        if (unlikely(!wqueue->pipe)) {
                spin_unlock_bh(&wqueue->lock);
                return false;
        }
        return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
        spin_unlock_bh(&wqueue->lock);
}
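
/*
 * Typical posting pattern (a sketch; it mirrors __post_watch_notification()
 * below):
 *
 *        rcu_read_lock();
 *        wqueue = rcu_dereference(watch->queue);
 *        if (lock_wqueue(wqueue)) {
 *                post_one_notification(wqueue, n);
 *                unlock_wqueue(wqueue);
 *        }
 *        rcu_read_unlock();
 */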

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                         struct pipe_buffer *buf)
{
        struct watch_queue *wqueue = (struct watch_queue *)buf->private;
        struct page *page;
        unsigned int bit;

        /* We need to work out which note within the page this refers to, but
         * the note might have been maximum size, so merely ANDing the offset
         * off doesn't work.  OTOH, the note must've been more than zero size.
         */
        bit = buf->offset + buf->len;
        if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
                bit -= WATCH_QUEUE_NOTE_SIZE;
        bit /= WATCH_QUEUE_NOTE_SIZE;
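        /* e.g. a note at offset 256 with len 128 ends at 384, a multiple of
         * the note size, so we step back to 256 and divide to get note slot
         * 2 within this page.
         */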

        page = buf->page;
        bit += page->index;

        set_bit(bit, wqueue->notes_bitmap);
        generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
        .release        = watch_queue_pipe_buf_release,
        .try_steal      = watch_queue_pipe_buf_try_steal,
        .get            = generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
                                  struct watch_notification *n)
{
        void *p;
        struct pipe_inode_info *pipe = wqueue->pipe;
        struct pipe_buffer *buf;
        struct page *page;
        unsigned int head, tail, mask, note, offset, len;
        bool done = false;

        spin_lock_irq(&pipe->rd_wait.lock);

        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
        if (pipe_full(head, tail, pipe->ring_size))
                goto lost;

        note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
        if (note >= wqueue->nr_notes)
                goto lost;

        page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
        offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
        get_page(page);
        len = n->info & WATCH_INFO_LENGTH;
        p = kmap_atomic(page);
        memcpy(p + offset, n, len);
        kunmap_atomic(p);

        buf = &pipe->bufs[head & mask];
        buf->page = page;
        buf->private = (unsigned long)wqueue;
        buf->ops = &watch_queue_pipe_buf_ops;
        buf->offset = offset;
        buf->len = len;
        buf->flags = PIPE_BUF_FLAG_WHOLE;
        smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

        if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
                spin_unlock_irq(&pipe->rd_wait.lock);
                BUG();
        }
        wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
        done = true;

out:
        spin_unlock_irq(&pipe->rd_wait.lock);
        if (done)
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        return done;

lost:
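        /* The ring was full or no note slot was free: mark the most recently
         * posted buffer so the reader is told that at least one notification
         * was lost at this point in the stream.
         */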
        buf = &pipe->bufs[(head - 1) & mask];
        buf->flags |= PIPE_BUF_FLAG_LOSS;
        goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
                                      const struct watch_notification *n)
{
        const struct watch_type_filter *wt;
        unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
        unsigned int st_index = n->subtype / st_bits;
        unsigned int st_bit = 1U << (n->subtype % st_bits);
        int i;

        if (!test_bit(n->type, wf->type_filter))
                return false;

        for (i = 0; i < wf->nr_filters; i++) {
                wt = &wf->filters[i];
                if (n->type == wt->type &&
                    (wt->subtype_filter[st_index] & st_bit) &&
                    (n->info & wt->info_mask) == wt->info_filter)
                        return true;
        }

        return false; /* If there is a filter, the default is to reject. */
}
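
/*
 * For example, with 32-bit filter words a notification of subtype 37 is
 * tested against bit 5 of subtype_filter[1] (37 / 32 = 1, 37 % 32 = 5).
 * A notification passes if its type bit is set in the type filter and at
 * least one per-type rule matches both the subtype and the masked info bits.
 */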

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH
 * and is given in bytes (see watch_sizeof()).
 */
void __post_watch_notification(struct watch_list *wlist,
                               struct watch_notification *n,
                               const struct cred *cred,
                               u64 id)
{
        const struct watch_filter *wf;
        struct watch_queue *wqueue;
        struct watch *watch;

        if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
                WARN_ON(1);
                return;
        }

        rcu_read_lock();

        hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
                if (watch->id != id)
                        continue;
                n->info &= ~WATCH_INFO_ID;
                n->info |= watch->info_id;

                wqueue = rcu_dereference(watch->queue);
                wf = rcu_dereference(wqueue->filter);
                if (wf && !filter_watch_notification(wf, n))
                        continue;

                if (security_post_notification(watch->cred, cred, n) < 0)
                        continue;

                if (lock_wqueue(wqueue)) {
                        post_one_notification(wqueue, n);
                        unlock_wqueue(wqueue);
                }
        }

        rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
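
/*
 * Illustrative caller (a sketch, not taken from this file): a subsystem that
 * embeds a watch_list in a watched object might raise an event roughly like
 * this, using the post_watch_notification() wrapper from
 * <linux/watch_queue.h>; a real user picks its own type and subtype:
 *
 *        struct watch_notification n = {
 *                .type    = WATCH_TYPE_META,
 *                .subtype = 0,
 *                .info    = watch_sizeof(n),
 *        };
 *
 *        post_watch_notification(wlist, &n, current_cred(), 0);
 */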

/*
 * Allocate sufficient pages to preallocate buffers for the requested
 * number of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
        struct watch_queue *wqueue = pipe->watch_queue;
        struct page **pages;
        unsigned long *bitmap;
        unsigned long user_bufs;
        int ret, i, nr_pages;

        if (!wqueue)
                return -ENODEV;
        if (wqueue->notes)
                return -EBUSY;

        if (nr_notes < 1 ||
            nr_notes > 512) /* TODO: choose a better hard limit */
                return -EINVAL;

        nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
        nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
        user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

        if (nr_pages > pipe->max_usage &&
            (too_many_pipe_buffers_hard(user_bufs) ||
             too_many_pipe_buffers_soft(user_bufs)) &&
            pipe_is_unprivileged_user()) {
                ret = -EPERM;
                goto error;
        }

        nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
        ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
        if (ret < 0)
                goto error;

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto error;

        for (i = 0; i < nr_pages; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto error_p;
                pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
        }

        bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
        if (!bitmap)
                goto error_p;

        bitmap_fill(bitmap, nr_notes);
        wqueue->notes = pages;
        wqueue->notes_bitmap = bitmap;
        wqueue->nr_pages = nr_pages;
        wqueue->nr_notes = nr_notes;
        return 0;

error_p:
        while (--i >= 0)
                __free_page(pages[i]);
        kfree(pages);
error:
        (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
        return ret;
}
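
/*
 * Sizing example: asking for 100 notes with 4KiB pages (an illustrative
 * assumption) needs ceil(100 / 32) = 4 pages, which rounds the allocation
 * back up to 4 * 32 = 128 notes, and the pipe ring is resized to the next
 * power of two, 128 slots.  Userspace reaches this via the pipe ioctl
 * IOC_WATCH_QUEUE_SET_SIZE (see samples/watch_queue/).
 */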

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
                            struct watch_notification_filter __user *_filter)
{
        struct watch_notification_type_filter *tf;
        struct watch_notification_filter filter;
        struct watch_type_filter *q;
        struct watch_filter *wfilter;
        struct watch_queue *wqueue = pipe->watch_queue;
        int ret, nr_filter = 0, i;

        if (!wqueue)
                return -ENODEV;

        if (!_filter) {
                /* Remove the old filter */
                wfilter = NULL;
                goto set;
        }

        /* Grab the user's filter specification */
        if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
                return -EFAULT;
        if (filter.nr_filters == 0 ||
            filter.nr_filters > 16 ||
            filter.__reserved != 0)
                return -EINVAL;

        tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
        if (IS_ERR(tf))
                return PTR_ERR(tf);

        ret = -EINVAL;
        for (i = 0; i < filter.nr_filters; i++) {
                if ((tf[i].info_filter & ~tf[i].info_mask) ||
                    tf[i].info_mask & WATCH_INFO_LENGTH)
                        goto err_filter;
                /* Ignore any unknown types */
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;
                nr_filter++;
        }

        /* Now we need to build the internal filter from only the relevant
         * user-specified filters.
         */
        ret = -ENOMEM;
        wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
        if (!wfilter)
                goto err_filter;
        wfilter->nr_filters = nr_filter;

        q = wfilter->filters;
        for (i = 0; i < filter.nr_filters; i++) {
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;

                q->type                 = tf[i].type;
                q->info_filter          = tf[i].info_filter;
                q->info_mask            = tf[i].info_mask;
                q->subtype_filter[0]    = tf[i].subtype_filter[0];
                __set_bit(q->type, wfilter->type_filter);
                q++;
        }

        kfree(tf);
set:
        pipe_lock(pipe);
        wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
                                      lockdep_is_held(&pipe->mutex));
        pipe_unlock(pipe);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        return 0;

err_filter:
        kfree(tf);
        return ret;
}
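
/*
 * A minimal userspace sketch of installing a filter (field values are
 * illustrative; see samples/watch_queue/watch_test.c):
 *
 *        struct watch_notification_filter *f;
 *
 *        f = calloc(1, sizeof(*f) + sizeof(f->filters[0]));
 *        f->nr_filters = 1;
 *        f->filters[0].type = WATCH_TYPE_META;
 *        f->filters[0].subtype_filter[0] = ~0U;
 *        ioctl(pipefd, IOC_WATCH_QUEUE_SET_FILTER, f);
 */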

static void __put_watch_queue(struct kref *kref)
{
        struct watch_queue *wqueue =
                container_of(kref, struct watch_queue, usage);
        struct watch_filter *wfilter;
        int i;

        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
        kfree(wqueue->notes);
        bitmap_free(wqueue->notes_bitmap);

        wfilter = rcu_access_pointer(wqueue->filter);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
        kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
        struct watch *watch = container_of(rcu, struct watch, rcu);

        put_watch_queue(rcu_access_pointer(watch->queue));
        atomic_dec(&watch->cred->user->nr_watches);
        put_cred(watch->cred);
        kfree(watch);
}

static void __put_watch(struct kref *kref)
{
        struct watch *watch = container_of(kref, struct watch, usage);

        call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
        kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
        kref_init(&watch->usage);
        INIT_HLIST_NODE(&watch->list_node);
        INIT_HLIST_NODE(&watch->queue_node);
        rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
        const struct cred *cred;
        struct watch *w;

        hlist_for_each_entry(w, &wlist->watchers, list_node) {
                struct watch_queue *wq = rcu_access_pointer(w->queue);
                if (wqueue == wq && watch->id == w->id)
                        return -EBUSY;
        }

        cred = current_cred();
        if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
                atomic_dec(&cred->user->nr_watches);
                return -EAGAIN;
        }

        watch->cred = get_cred(cred);
        rcu_assign_pointer(watch->watch_list, wlist);

        kref_get(&wqueue->usage);
        kref_get(&watch->usage);
        hlist_add_head(&watch->queue_node, &wqueue->watches);
        hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
        return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched.  @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
        struct watch_queue *wqueue;
        int ret = -ENOENT;

        rcu_read_lock();

        wqueue = rcu_access_pointer(watch->queue);
        if (lock_wqueue(wqueue)) {
                spin_lock(&wlist->lock);
                ret = add_one_watch(watch, wlist, wqueue);
                spin_unlock(&wlist->lock);
                unlock_wqueue(wqueue);
        }

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
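
/*
 * Typical caller pattern (a sketch; real users such as the key subsystem
 * differ in detail).  "obj" and its watchers_lock are hypothetical:
 *
 *        watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *        if (!watch)
 *                return -ENOMEM;
 *
 *        init_watch(watch, wqueue);
 *        watch->id      = (unsigned long)obj;
 *        watch->info_id = (u32)tag << WATCH_INFO_ID__SHIFT;
 *
 *        spin_lock(&obj->watchers_lock);
 *        ret = add_watch_to_object(watch, &obj->watchers);
 *        spin_unlock(&obj->watchers_lock);
 */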

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
                             u64 id, bool all)
{
        struct watch_notification_removal n;
        struct watch_queue *wqueue;
        struct watch *watch;
        int ret = -EBADSLT;

        rcu_read_lock();

again:
        spin_lock(&wlist->lock);
        hlist_for_each_entry(watch, &wlist->watchers, list_node) {
                if (all ||
                    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
                        goto found;
        }
        spin_unlock(&wlist->lock);
        goto out;

found:
        ret = 0;
        hlist_del_init_rcu(&watch->list_node);
        rcu_assign_pointer(watch->watch_list, NULL);
        spin_unlock(&wlist->lock);

        /* We now own the reference on watch that used to belong to wlist. */

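        /* The removal record carries the 64-bit cookie in n.id only when a
         * specific watch was targeted (id != 0); the record length stored in
         * n.watch.info is sized accordingly.
         */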
        n.watch.type = WATCH_TYPE_META;
        n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
        n.watch.info = watch->info_id | watch_sizeof(n.watch);
        n.id = id;
        if (id != 0)
                n.watch.info = watch->info_id | watch_sizeof(n);

        wqueue = rcu_dereference(watch->queue);

        if (lock_wqueue(wqueue)) {
                post_one_notification(wqueue, &n.watch);

                if (!hlist_unhashed(&watch->queue_node)) {
                        hlist_del_init_rcu(&watch->queue_node);
                        put_watch(watch);
                }

                unlock_wqueue(wqueue);
        }

        if (wlist->release_watch) {
                void (*release_watch)(struct watch *);

                release_watch = wlist->release_watch;
                rcu_read_unlock();
                (*release_watch)(watch);
                rcu_read_lock();
        }
        put_watch(watch);

        if (all && !hlist_empty(&wlist->watchers))
                goto again;
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
        struct watch_list *wlist;
        struct watch *watch;
        bool release;

        rcu_read_lock();
        spin_lock_bh(&wqueue->lock);

        /*
         * This pipe can be freed by callers like free_pipe_info().
         * Removing this reference also prevents new notifications.
         */
        wqueue->pipe = NULL;

        while (!hlist_empty(&wqueue->watches)) {
                watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
                hlist_del_init_rcu(&watch->queue_node);
                /* We now own a ref on the watch. */
                spin_unlock_bh(&wqueue->lock);

                /* We can't do the next bit under the queue lock as we need to
                 * get the list lock - which would cause a deadlock if someone
                 * was removing from the opposite direction at the same time or
                 * posting a notification.
                 */
                wlist = rcu_dereference(watch->watch_list);
                if (wlist) {
                        void (*release_watch)(struct watch *);

                        spin_lock(&wlist->lock);

                        release = !hlist_unhashed(&watch->list_node);
                        if (release) {
                                hlist_del_init_rcu(&watch->list_node);
                                rcu_assign_pointer(watch->watch_list, NULL);

                                /* We now own a second ref on the watch. */
                        }

                        release_watch = wlist->release_watch;
                        spin_unlock(&wlist->lock);

                        if (release) {
                                if (release_watch) {
                                        rcu_read_unlock();
                                        /* This might need to call dput(), so
                                         * we have to drop all the locks.
                                         */
                                        (*release_watch)(watch);
                                        rcu_read_lock();
                                }
                                put_watch(watch);
                        }
                }

                put_watch(watch);
                spin_lock_bh(&wqueue->lock);
        }

        spin_unlock_bh(&wqueue->lock);
        rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
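 *
 * The fd must refer to a pipe created with O_NOTIFICATION_PIPE; otherwise an
 * ERR_PTR is returned.  On success a ref is taken on the queue that the
 * caller must drop with put_watch_queue().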
 */
struct watch_queue *get_watch_queue(int fd)
{
        struct pipe_inode_info *pipe;
        struct watch_queue *wqueue = ERR_PTR(-EINVAL);
        struct fd f;

        f = fdget(fd);
        if (f.file) {
                pipe = get_pipe_info(f.file, false);
                if (pipe && pipe->watch_queue) {
                        wqueue = pipe->watch_queue;
                        kref_get(&wqueue->usage);
                }
                fdput(f);
        }

        return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
        struct watch_queue *wqueue;

        wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
        if (!wqueue)
                return -ENOMEM;

        wqueue->pipe = pipe;
        kref_init(&wqueue->usage);
        spin_lock_init(&wqueue->lock);
        INIT_HLIST_HEAD(&wqueue->watches);

        pipe->watch_queue = wqueue;
        return 0;
}
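
/*
 * End-to-end userspace sketch of setting up a notification pipe (constants
 * as in Documentation/core-api/watch_queue.rst; error handling omitted):
 *
 *        int fds[2];
 *
 *        pipe2(fds, O_NOTIFICATION_PIPE);
 *        ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
 *        // then read(fds[0], ...) to consume watch_notification records
 */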