/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;
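
	/* Pick a pool based on the size of the mapping: requests of up
	 * to RDS_MR_8K_MSG_SIZE pages are served from the 8K pool,
	 * anything larger from the 1M pool.
	 */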
	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;
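
	/* If a tenth of the pool is already sitting dirty (unmapped but
	 * not yet reclaimed), kick the flush worker to start recycling.
	 */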
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of the pools is reaching its upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
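
	/* Allocate the FMR itself with full local and remote access so
	 * the region can serve RDMA reads, writes and atomics from the
	 * peer.
	 */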
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
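
/* DMA-map @sg and point the FMR's page list at it.  On success the old
 * mapping is torn down and the MR's bookkeeping updated; on failure the
 * scatterlist is unmapped again and a negative errno is returned.
 */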
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
			  struct rds_ib_mr *ibmr, struct scatterlist *sg,
			  unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;
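
	/* First pass: verify that the mapped scatterlist describes one
	 * physically contiguous, page-aligned region.  Only the first
	 * entry may start mid-page and only the last may end mid-page;
	 * anything else cannot be expressed as an FMR page list.
	 */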
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_array_node(page_cnt, sizeof(u64), GFP_ATOMIC,
				       rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}
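
	/* Second pass: expand the scatterlist into a flat array of
	 * page-aligned DMA addresses for ib_map_phys_fmr().
	 */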
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success: the MR was remapped, so the old mapping can safely
	 * be torn down.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
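
/* Allocate an FMR from the appropriate pool and map @sg into it,
 * returning the rkey for the new mapping through @key.  On error the
 * MR is returned to the pool and an ERR_PTR() is handed back.
 */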
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key = fmr->fmr->rkey;
	else {
		rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}
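
/* Invalidate a batch of FMRs queued for flushing: hand all of them to
 * ib_unmap_fmr() in one call, unpin their pages, then destroy MRs
 * until @goal is met, plus any MR that has hit its remap limit.
 */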
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mrs onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}
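
/* Return an MR to its pool: MRs that have exhausted their remap budget
 * go on the drop list to be destroyed, the rest on the free list for
 * reuse.
 */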
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}