// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
        /*
         * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
         * then these are classic provided buffers and ->buf_list is used.
         */
        union {
                struct list_head buf_list;
                struct {
                        struct page **buf_pages;
                        struct io_uring_buf_ring *buf_ring;
                };
        };
        /* buffer group ID */
        __u16 bgid;

        /* below is for ring provided buffers */
        __u16 buf_nr_pages;
        __u16 nr_entries;
        __u16 head;
        __u16 mask;
};
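
/*
 * Sketch, not an upstream helper: as the comment in io_buffer_list states,
 * ->buf_nr_pages decides which union member is live. A hypothetical
 * predicate making that rule explicit might look like this:
 */
static inline bool io_buffer_list_is_ring(struct io_buffer_list *bl)
{
        return bl->buf_nr_pages != 0;
}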

struct io_buffer {
        struct list_head list;
        __u64 addr;     /* userspace address of the buffer */
        __u32 len;      /* buffer length */
        __u16 bid;      /* buffer ID */
        __u16 bgid;     /* buffer group ID */
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
                              unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
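
/*
 * Descriptive note, not upstream text: the two hooks below back the
 * IORING_REGISTER_PBUF_RING and IORING_UNREGISTER_PBUF_RING registration
 * opcodes; 'arg' points at a user copy of struct io_uring_buf_reg.
 */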
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
         * the flag and hence ensure that bl->head doesn't get incremented.
         * If the tail has already been incremented, hang on to it.
         * The exception is partial I/O: in that case we should increment
         * bl->head to monopolize the buffer.
         */
        if (req->buf_list) {
                if (req->flags & REQ_F_PARTIAL_IO) {
                        /*
                         * If we end up here, then the io_uring_lock has
                         * been kept held since we retrieved the buffer.
                         * For the io-wq case, we already cleared
                         * req->buf_list when the buffer was retrieved,
                         * hence it cannot be set here for that case.
                         */
                        req->buf_list->head++;
                        req->buf_list = NULL;
                } else {
                        req->buf_index = req->buf_list->bgid;
                        req->flags &= ~REQ_F_BUFFER_RING;
                }
        }
}
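
/*
 * Descriptive note, not upstream text: io_do_buffer_select() below returns
 * true when the request opted in to buffer selection (REQ_F_BUFFER_SELECT)
 * and no buffer has been picked yet, neither a legacy provided buffer nor
 * a ring-provided one.
 */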
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return false;
        return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
        /*
         * READV uses fields in `struct io_rw` (len/addr) to stash the selected
         * buffer data. However, if that buffer is recycled, the original request
         * data stored in addr is lost. Therefore forbid recycling for now.
         */
        if (req->opcode == IORING_OP_READV)
                return;

        if (req->flags & REQ_F_BUFFER_SELECTED)
                io_kbuf_recycle_legacy(req, issue_flags);
        if (req->flags & REQ_F_BUFFER_RING)
                io_kbuf_recycle_ring(req);
}
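
/*
 * Illustrative call site, a sketch rather than code from this file
 * (do_some_io() is a stand-in for an opcode handler's issue path): a
 * handler that selected a buffer but must retry recycles it first, so
 * the buffer returns to the pool before the request is re-issued:
 *
 *        ret = do_some_io(req);
 *        if (ret == -EAGAIN) {
 *                io_kbuf_recycle(req, issue_flags);
 *                return -EAGAIN;
 *        }
 */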

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
                                              struct list_head *list)
{
        unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

        if (req->flags & REQ_F_BUFFER_RING) {
                if (req->buf_list) {
                        req->buf_index = req->buf_list->bgid;
                        req->buf_list->head++;
                }
                req->flags &= ~REQ_F_BUFFER_RING;
        } else {
                req->buf_index = req->kbuf->bgid;
                list_add(&req->kbuf->list, list);
                req->flags &= ~REQ_F_BUFFER_SELECTED;
        }

        return ret;
}
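
/*
 * For reference, a sketch of the uapi contract rather than kernel code:
 * userspace recovers the buffer ID from a completion by reversing the
 * encoding above ('cqe' is a struct io_uring_cqe):
 *
 *        if (cqe->flags & IORING_CQE_F_BUFFER)
 *                bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */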

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
        lockdep_assert_held(&req->ctx->completion_lock);

        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return 0;
        return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}
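
/*
 * Descriptive note, not upstream text: io_put_kbuf_comp() above serves
 * callers that already hold ->completion_lock, parking legacy buffers on
 * ctx->io_buffers_comp for reuse; io_put_kbuf() below defers the locking
 * decision to __io_put_kbuf() based on issue_flags.
 */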

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                                       unsigned issue_flags)
{
        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return 0;
        return __io_put_kbuf(req, issue_flags);
}
#endif