drivers/infiniband/core/umem_dmabuf.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */
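
/*
 * Overview (inferred from the code in this file): dma-buf backed
 * userspace memory regions (struct ib_umem_dmabuf) for the RDMA core.
 * This covers importing a dma-buf file descriptor, mapping and
 * unmapping its pages for device DMA, and a pinned variant for
 * devices that cannot handle move notifications.
 */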

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

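/**
 * ib_umem_dmabuf_map_pages - map the dma-buf pages for device access
 * @umem_dmabuf: umem to map
 *
 * Map the attached dma-buf and trim the resulting sg list in place to
 * the page-aligned [address, address + length) window of the umem,
 * then wait on the exporter's fences so the page contents are up to
 * date before the device uses them. Must be called with the dma-buf's
 * reservation lock held. Returns 0 on success or a negative errno.
 */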
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned long start, end, cur = 0;
        unsigned int nmap = 0;
        long ret;
        int i;

        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (umem_dmabuf->sgt)
                goto wait_fence;

        sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* modify the sg list in-place to match umem address and length */

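        /*
         * Example (assuming 4 KiB pages): for address = 0x1800 and
         * length = 0x2000, start = 0x1000 and end = 0x4000. The first
         * sg entry overlapping [start, end) has its DMA address
         * advanced and its length reduced by (start - cur); the last
         * overlapping entry is shortened by (cur + sg_dma_len - end).
         * The offset and trim are recorded so unmap can undo them.
         */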
        start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
        end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
                    PAGE_SIZE);
        for_each_sgtable_dma_sg(sgt, sg, i) {
                if (start < cur + sg_dma_len(sg) && cur < end)
                        nmap++;
                if (cur <= start && start < cur + sg_dma_len(sg)) {
                        unsigned long offset = start - cur;

                        umem_dmabuf->first_sg = sg;
                        umem_dmabuf->first_sg_offset = offset;
                        sg_dma_address(sg) += offset;
                        sg_dma_len(sg) -= offset;
                        cur += offset;
                }
                if (cur < end && end <= cur + sg_dma_len(sg)) {
                        unsigned long trim = cur + sg_dma_len(sg) - end;

                        umem_dmabuf->last_sg = sg;
                        umem_dmabuf->last_sg_trim = trim;
                        sg_dma_len(sg) -= trim;
                        break;
                }
                cur += sg_dma_len(sg);
        }

        umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
        umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
        umem_dmabuf->sgt = sgt;

wait_fence:
        /*
         * Although the sg list is valid now, the content of the pages
         * may not be up to date. Wait for the exporter to finish
         * the migration.
         */
        ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
                                    DMA_RESV_USAGE_KERNEL,
                                    false, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
        if (ret == 0)
                return -ETIMEDOUT;
        return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

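/**
 * ib_umem_dmabuf_unmap_pages - release the device mapping
 * @umem_dmabuf: umem to unmap
 *
 * Undo the first/last sg adjustments made by ib_umem_dmabuf_map_pages()
 * and unmap the attachment. A no-op if the pages were never mapped.
 * Must be called with the dma-buf's reservation lock held.
 */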
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (!umem_dmabuf->sgt)
                return;

        /* restore the original sg list */
        if (umem_dmabuf->first_sg) {
                sg_dma_address(umem_dmabuf->first_sg) -=
                        umem_dmabuf->first_sg_offset;
                sg_dma_len(umem_dmabuf->first_sg) +=
                        umem_dmabuf->first_sg_offset;
                umem_dmabuf->first_sg = NULL;
                umem_dmabuf->first_sg_offset = 0;
        }
        if (umem_dmabuf->last_sg) {
                sg_dma_len(umem_dmabuf->last_sg) +=
                        umem_dmabuf->last_sg_trim;
                umem_dmabuf->last_sg = NULL;
                umem_dmabuf->last_sg_trim = 0;
        }

        dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
                                 DMA_BIDIRECTIONAL);

        umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

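/**
 * ib_umem_dmabuf_get - create an ib_umem from a dma-buf fd
 * @device: IB device the umem is for
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf to import
 * @access: IB_ACCESS_* flags for the region
 * @ops: importer callbacks; ops->move_notify is required so the caller
 *       can react when the exporter relocates the buffer
 *
 * Takes a reference on the dma-buf and creates a dynamic attachment.
 * Returns the new umem on success or an ERR_PTR on failure.
 */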
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops)
{
        struct dma_buf *dmabuf;
        struct ib_umem_dmabuf *umem_dmabuf;
        struct ib_umem *umem;
        unsigned long end;
        struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

        if (check_add_overflow(offset, (unsigned long)size, &end))
                return ret;

        if (unlikely(!ops || !ops->move_notify))
                return ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_CAST(dmabuf);

        if (dmabuf->size < end)
                goto out_release_dmabuf;

        umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
        if (!umem_dmabuf) {
                ret = ERR_PTR(-ENOMEM);
                goto out_release_dmabuf;
        }

        umem = &umem_dmabuf->umem;
        umem->ibdev = device;
        umem->length = size;
        umem->address = offset;
        umem->writable = ib_access_writable(access);
        umem->is_dmabuf = 1;

        if (!ib_umem_num_pages(umem))
                goto out_free_umem;

        umem_dmabuf->attach = dma_buf_dynamic_attach(
                                        dmabuf,
                                        device->dma_device,
                                        ops,
                                        umem_dmabuf);
        if (IS_ERR(umem_dmabuf->attach)) {
                ret = ERR_CAST(umem_dmabuf->attach);
                goto out_free_umem;
        }
        return umem_dmabuf;

out_free_umem:
        kfree(umem_dmabuf);

out_release_dmabuf:
        dma_buf_put(dmabuf);
        return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
        struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

        ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
                               "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
        .allow_peer2peer = true,
        .move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

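/**
 * ib_umem_dmabuf_get_pinned - get a pinned dma-buf backed umem
 * @device: IB device the umem is for
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf to import
 * @access: IB_ACCESS_* flags for the region
 *
 * Like ib_umem_dmabuf_get(), but pins the buffer and maps it up front,
 * for devices that cannot handle move_notify invalidations. The pages
 * stay mapped until ib_umem_dmabuf_release().
 */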
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
                                                 unsigned long offset,
                                                 size_t size, int fd,
                                                 int access)
{
        struct ib_umem_dmabuf *umem_dmabuf;
        int err;

        umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
                                         &ib_umem_dmabuf_attach_pinned_ops);
        if (IS_ERR(umem_dmabuf))
                return umem_dmabuf;

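        /*
         * Take the reservation lock across the pin and the initial
         * mapping so the exporter cannot move the buffer in between.
         */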
        dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
        err = dma_buf_pin(umem_dmabuf->attach);
        if (err)
                goto err_release;
        umem_dmabuf->pinned = 1;

        err = ib_umem_dmabuf_map_pages(umem_dmabuf);
        if (err)
                goto err_unpin;
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

        return umem_dmabuf;

err_unpin:
        dma_buf_unpin(umem_dmabuf->attach);
err_release:
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
        ib_umem_release(&umem_dmabuf->umem);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

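/**
 * ib_umem_dmabuf_release - free a dma-buf backed umem
 * @umem_dmabuf: umem to release
 *
 * Unmap (and unpin, if pinned) the pages, detach from the dma-buf,
 * and drop the dma-buf reference taken at creation.
 */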
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

        dma_resv_lock(dmabuf->resv, NULL);
        ib_umem_dmabuf_unmap_pages(umem_dmabuf);
        if (umem_dmabuf->pinned)
                dma_buf_unpin(umem_dmabuf->attach);
        dma_resv_unlock(dmabuf->resv);

        dma_buf_detach(dmabuf, umem_dmabuf->attach);
        dma_buf_put(dmabuf);
        kfree(umem_dmabuf);
}