/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem_odp.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

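/*
 * Translation descriptor lists updated through UMR must be aligned to
 * MLX5_UMR_ALIGN bytes; mlx5_alloc_priv_descs() below over-allocates so the
 * list handed to the device can be aligned accordingly.
 */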
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);

static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (acc & IB_ACCESS_RELAXED_ORDERING) {
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);

		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
		    (MLX5_CAP_GEN(dev->mdev,
				  relaxed_ordering_read_pci_enabled) &&
		     pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
	}

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

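/*
 * The low 8 bits of an mkey are a variant that is cycled on every
 * allocation, so reuse of the same mkey index yields a different mkey value
 * and stale references become detectable.
 */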
static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	*mkey = key;
}

static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
			       struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
	int ret;

	assign_mkey_variant(dev, &mkey->key, in);
	ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
	if (!ret)
		init_waitqueue_head(&mkey->wait);

	return ret;
}

static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
	struct mlx5_ib_dev *dev = async_create->ent->dev;
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);

	MLX5_SET(create_mkey_in, async_create->in, opcode,
		 MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, &async_create->mkey, async_create->in);
	return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
				async_create->out, outlen, create_mkey_callback,
				&async_create->cb_work);
}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}

static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
{
	if (status == -ENXIO) /* core driver is not available */
		return;

	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
	if (status != -EREMOTEIO) /* driver specific failure */
		return;

	/* Failed in FW, print cmd out failure details */
	mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}

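/*
 * Reserve a slot in ent->mkeys at index ent->reserved, optionally storing
 * to_store there when nothing else is pending. Entries [0, stored) hold
 * ready mkeys; [stored, reserved) are XA_ZERO_ENTRY placeholders for
 * creations still in flight. Returns -EAGAIN when limit_pendings is set and
 * more than MAX_PENDING_REG_MR creations are already outstanding.
 */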
static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
			    void *to_store)
{
	XA_STATE(xas, &ent->mkeys, 0);
	void *curr;

	if (limit_pendings &&
	    (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
		return -EAGAIN;

	while (1) {
		/*
		 * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
		 * doesn't transparently unlock. Instead we set the xas index to
		 * the current value of reserved every iteration.
		 */
		xas_set(&xas, ent->reserved);
		curr = xas_load(&xas);
		if (!curr) {
			if (to_store && ent->stored == ent->reserved)
				xas_store(&xas, to_store);
			else
				xas_store(&xas, XA_ZERO_ENTRY);
			if (xas_valid(&xas)) {
				ent->reserved++;
				if (to_store) {
					if (ent->stored != ent->reserved)
						__xa_store(&ent->mkeys,
							   ent->stored,
							   to_store,
							   GFP_KERNEL);
					ent->stored++;
					queue_adjust_cache_locked(ent);
					WRITE_ONCE(ent->dev->cache.last_add,
						   jiffies);
				}
			}
		}
		xa_unlock_irq(&ent->mkeys);

		/*
		 * Notice xas_nomem() must always be called as it cleans
		 * up any cached allocation.
		 */
		if (!xas_nomem(&xas, GFP_KERNEL))
			break;
		xa_lock_irq(&ent->mkeys);
	}
	xa_lock_irq(&ent->mkeys);
	if (xas_error(&xas))
		return xas_error(&xas);
	if (WARN_ON(curr))
		return -EINVAL;
	return 0;
}

static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
		     void *to_store)
{
	int ret;

	xa_lock_irq(&ent->mkeys);
	ret = push_mkey_locked(ent, limit_pendings, to_store);
	xa_unlock_irq(&ent->mkeys);
	return ret;
}

static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
{
	void *old;

	ent->reserved--;
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
}

static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
{
	void *old;

	old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
	WARN_ON(old);
	ent->stored++;
}

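/*
 * Remove the most recently stored mkey, preserving the invariant that the
 * placeholders for pending creations stay at the top of the xarray.
 */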
static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
{
	void *old, *xa_mkey;

	ent->stored--;
	ent->reserved--;

	if (ent->stored == ent->reserved) {
		xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
		WARN_ON(!xa_mkey);
		return (u32)xa_to_value(xa_mkey);
	}

	xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
			     GFP_KERNEL);
	WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
	return (u32)xa_to_value(xa_mkey);
}

static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5r_async_create_mkey *mkey_out =
		container_of(context, struct mlx5r_async_create_mkey, cb_work);
	struct mlx5_cache_ent *ent = mkey_out->ent;
	struct mlx5_ib_dev *dev = ent->dev;
	unsigned long flags;

	if (status) {
		create_mkey_warn(dev, status, mkey_out->out);
		kfree(mkey_out);
		xa_lock_irqsave(&ent->mkeys, flags);
		undo_push_reserve_mkey(ent);
		WRITE_ONCE(dev->fill_delay, 1);
		xa_unlock_irqrestore(&ent->mkeys, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mkey_out->mkey |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
	WRITE_ONCE(dev->cache.last_add, jiffies);

	xa_lock_irqsave(&ent->mkeys, flags);
	push_to_reserved(ent, mkey_out->mkey);
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	xa_unlock_irqrestore(&ent->mkeys, flags);
	kfree(mkey_out);
}

static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
{
	int ret = 0;

	switch (access_mode) {
	case MLX5_MKC_ACCESS_MODE_MTT:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
						   sizeof(struct mlx5_mtt));
		break;
	case MLX5_MKC_ACCESS_MODE_KSM:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
						   sizeof(struct mlx5_klm));
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}

static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (ent->rb_key.access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_mkc_octo_size(ent->rb_key.access_mode,
				   ent->rb_key.ndescs));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
}

/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	struct mlx5r_async_create_mkey *async_create;
	void *mkc;
	int err = 0;
	int i;

	for (i = 0; i < num; i++) {
		async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
				       GFP_KERNEL);
		if (!async_create)
			return -ENOMEM;
		mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
				   memory_key_mkey_entry);
		set_cache_mkc(ent, mkc);
		async_create->ent = ent;

		err = push_mkey(ent, true, NULL);
		if (err)
			goto free_async_create;

		err = mlx5_ib_create_mkey_cb(async_create);
		if (err) {
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			goto err_undo_reserve;
		}
	}

	return 0;

err_undo_reserve:
	xa_lock_irq(&ent->mkeys);
	undo_push_reserve_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
free_async_create:
	kfree(async_create);
	return err;
}

/* Synchronously create a MR in the cache */
static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_cache_mkc(ent, mkc);

	err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
	if (err)
		goto free_in;

	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
free_in:
	kfree(in);
	return err;
}

static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	u32 mkey;

	lockdep_assert_held(&ent->mkeys.xa_lock);
	if (!ent->stored)
		return;
	mkey = pop_stored_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
	mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
	xa_lock_irq(&ent->mkeys);
}

static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
	__acquires(&ent->mkeys) __releases(&ent->mkeys)
{
	int err;

	lockdep_assert_held(&ent->mkeys.xa_lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->reserved)
			return 0;
		if (target > ent->reserved) {
			u32 todo = target - ent->reserved;

			xa_unlock_irq(&ent->mkeys);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			xa_lock_irq(&ent->mkeys);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests, however we
	 * cannot free MRs that are in use. Compute the target value for stored
	 * mkeys.
	 */
	xa_lock_irq(&ent->mkeys);
	if (target < ent->in_use) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - ent->in_use;
	if (target < ent->limit || target > ent->limit*2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	xa_unlock_irq(&ent->mkeys);

	return count;

err_unlock:
	xa_unlock_irq(&ent->mkeys);
	return err;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to high water mark implied by
	 * the limit.
	 */
	xa_lock_irq(&ent->mkeys);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	xa_unlock_irq(&ent->mkeys);
	if (err)
		return err;
	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

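/*
 * True if any cache entry is below its low water mark, i.e. an async refill
 * is either queued or about to be queued somewhere in the cache.
 */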
static bool someone_adding(struct mlx5_mkey_cache *cache)
{
	struct mlx5_cache_ent *ent;
	struct rb_node *node;
	bool ret;

	mutex_lock(&cache->rb_lock);
	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		ret = ent->stored < ent->limit;
		xa_unlock_irq(&ent->mkeys);
		if (ret) {
			mutex_unlock(&cache->rb_lock);
			return true;
		}
	}
	mutex_unlock(&cache->rb_lock);
	return false;
}

/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis, once the low water mark is hit it is
 * refilled up to the high mark (e.g. with limit = 16, refill starts when
 * stored drops below 16 and continues until reserved reaches 32).
 */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->mkeys.xa_lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
		return;
	if (ent->stored < ent->limit) {
		ent->fill_to_high_water = true;
		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	} else if (ent->fill_to_high_water &&
		   ent->reserved < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	} else if (ent->stored == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->stored > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->stored != ent->reserved)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	}
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mkey_cache *cache = &dev->cache;
	int err;

	xa_lock_irq(&ent->mkeys);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		xa_unlock_irq(&ent->mkeys);
		err = add_keys(ent, 1);
		xa_lock_irq(&ent->mkeys);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if there are pending MRs, so we
			 * will be rescheduled when storing them. The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"add keys command failed, err %d\n",
					err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->stored > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr() logic is performed as garbage
		 * collection task. Such task is intended to be run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_cache_mr() and postpone
		 * the garbage collection work to try to run in next cycle, in
		 * order to free CPU resources to other tasks.
		 */
		xa_unlock_irq(&ent->mkeys);
		need_delay = need_resched() || someone_adding(cache) ||
			     !time_after(jiffies,
					 READ_ONCE(cache->last_add) + 300 * HZ);
		xa_lock_irq(&ent->mkeys);
		if (ent->disabled)
			goto out;
		if (need_delay) {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
			goto out;
		}
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	xa_unlock_irq(&ent->mkeys);
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
			     struct mlx5r_cache_rb_key key2)
{
	int res;

	res = key1.ats - key2.ats;
	if (res)
		return res;

	res = key1.access_mode - key2.access_mode;
	if (res)
		return res;

	res = key1.access_flags - key2.access_flags;
	if (res)
		return res;

	/*
	 * keep ndescs the last in the compare table since the find function
	 * searches for an exact match on all properties and only closest
	 * match in case of ndescs.
	 */
	return key1.ndescs - key2.ndescs;
}

static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
				 struct mlx5_cache_ent *ent)
{
	struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
	struct mlx5_cache_ent *cur;
	int cmp;

	/* Figure out where to put new node */
	while (*new) {
		cur = rb_entry(*new, struct mlx5_cache_ent, node);
		parent = *new;
		cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
		if (cmp > 0)
			new = &((*new)->rb_left);
		if (cmp < 0)
			new = &((*new)->rb_right);
		if (cmp == 0) {
			mutex_unlock(&cache->rb_lock);
			return -EEXIST;
		}
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&ent->node, parent, new);
	rb_insert_color(&ent->node, &cache->rb_root);

	return 0;
}

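/*
 * Find the entry whose ats/access_mode/access_flags match exactly and whose
 * ndescs is the smallest value >= the requested one; NULL when only entries
 * with different properties exist.
 */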
static struct mlx5_cache_ent *
mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
			   struct mlx5r_cache_rb_key rb_key)
{
	struct rb_node *node = dev->cache.rb_root.rb_node;
	struct mlx5_cache_ent *cur, *smallest = NULL;
	int cmp;

	/*
	 * Find the smallest ent with order >= requested_order.
	 */
	while (node) {
		cur = rb_entry(node, struct mlx5_cache_ent, node);
		cmp = cache_ent_key_cmp(cur->rb_key, rb_key);
		if (cmp > 0) {
			smallest = cur;
			node = node->rb_left;
		}
		if (cmp < 0)
			node = node->rb_right;
		if (cmp == 0)
			return cur;
	}

	return (smallest &&
		smallest->rb_key.access_mode == rb_key.access_mode &&
		smallest->rb_key.access_flags == rb_key.access_flags &&
		smallest->rb_key.ats == rb_key.ats) ?
		       smallest :
		       NULL;
}

static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
					       struct mlx5_cache_ent *ent,
					       int access_flags)
{
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	xa_lock_irq(&ent->mkeys);
	ent->in_use++;

	if (!ent->stored) {
		queue_adjust_cache_locked(ent);
		ent->miss++;
		xa_unlock_irq(&ent->mkeys);
		err = create_cache_mkey(ent, &mr->mmkey.key);
		if (err) {
			xa_lock_irq(&ent->mkeys);
			ent->in_use--;
			xa_unlock_irq(&ent->mkeys);
			kfree(mr);
			return ERR_PTR(err);
		}
	} else {
		mr->mmkey.key = pop_stored_mkey(ent);
		queue_adjust_cache_locked(ent);
		xa_unlock_irq(&ent->mkeys);
	}
	mr->mmkey.cache_ent = ent;
	mr->mmkey.type = MLX5_MKEY_MR;
	init_waitqueue_head(&mr->mmkey.wait);
	return mr;
}

static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
					 int access_flags)
{
	int ret = 0;

	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		ret |= IB_ACCESS_REMOTE_ATOMIC;

	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		ret |= IB_ACCESS_RELAXED_ORDERING;

	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		ret |= IB_ACCESS_RELAXED_ORDERING;

	return ret;
}

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs)
{
	struct mlx5r_cache_rb_key rb_key = {
		.ndescs = ndescs,
		.access_mode = access_mode,
		.access_flags = get_unchangeable_access_flags(dev, access_flags)
	};
	struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);

	if (!ent)
		return ERR_PTR(-EOPNOTSUPP);

	return _mlx5_mr_cache_alloc(dev, ent, access_flags);
}

static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
{
	u32 mkey;

	cancel_delayed_work(&ent->dwork);
	xa_lock_irq(&ent->mkeys);
	while (ent->stored) {
		mkey = pop_stored_mkey(ent);
		xa_unlock_irq(&ent->mkeys);
		mlx5_core_destroy_mkey(dev->mdev, mkey);
		xa_lock_irq(&ent->mkeys);
	}
	xa_unlock_irq(&ent->mkeys);
}

static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.fs_root);
	dev->cache.fs_root = NULL;
}

static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
					    struct mlx5_cache_ent *ent)
{
	int order = order_base_2(ent->rb_key.ndescs);
	struct dentry *dir;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
		order = MLX5_IMR_KSM_CACHE_ENTRY + 2;

	sprintf(ent->name, "%d", order);
	dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
	debugfs_create_file("size", 0600, dir, ent, &size_fops);
	debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
	debugfs_create_ulong("cur", 0400, dir, &ent->stored);
	debugfs_create_u32("miss", 0600, dir, &ent->miss);
}

static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev);
	struct mlx5_mkey_cache *cache = &dev->cache;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->fs_root = debugfs_create_dir("mr_cache", dbg_root);
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}

struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry)
{
	struct mlx5_cache_ent *ent;
	int order;
	int ret;

	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
	ent->rb_key = rb_key;
	ent->dev = dev;
	ent->is_tmp = !persistent_entry;

	INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

	ret = mlx5_cache_ent_insert(&dev->cache, ent);
	if (ret) {
		kfree(ent);
		return ERR_PTR(ret);
	}

	if (persistent_entry) {
		if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
			order = MLX5_IMR_KSM_CACHE_ENTRY;
		else
			order = order_base_2(rb_key.ndescs) - 2;

		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
		    mlx5r_umr_can_load_pas(dev, 0))
			ent->limit = dev->mdev->profile.mr_cache[order].limit;
		else
			ent->limit = 0;

		mlx5_mkey_cache_debugfs_add_ent(dev, ent);
	} else {
		mod_delayed_work(ent->dev->cache.wq,
				 &ent->dev->cache.remove_ent_dwork,
				 msecs_to_jiffies(30 * 1000));
	}

	return ent;
}

static void remove_ent_work_func(struct work_struct *work)
{
	struct mlx5_mkey_cache *cache;
	struct mlx5_cache_ent *ent;
	struct rb_node *cur;

	cache = container_of(work, struct mlx5_mkey_cache,
			     remove_ent_dwork.work);
	mutex_lock(&cache->rb_lock);
	cur = rb_last(&cache->rb_root);
	while (cur) {
		ent = rb_entry(cur, struct mlx5_cache_ent, node);
		cur = rb_prev(cur);
		mutex_unlock(&cache->rb_lock);

		xa_lock_irq(&ent->mkeys);
		if (!ent->is_tmp) {
			xa_unlock_irq(&ent->mkeys);
			mutex_lock(&cache->rb_lock);
			continue;
		}
		xa_unlock_irq(&ent->mkeys);

		clean_keys(ent->dev, ent);
		mutex_lock(&cache->rb_lock);
	}
	mutex_unlock(&cache->rb_lock);
}

int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mkey_cache *cache = &dev->cache;
	struct rb_root *root = &dev->cache.rb_root;
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
	};
	struct mlx5_cache_ent *ent;
	struct rb_node *node;
	int ret;
	int i;

	mutex_init(&dev->slow_path_mutex);
	mutex_init(&dev->cache.rb_lock);
	dev->cache.rb_root = RB_ROOT;
	INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	mlx5_mkey_cache_debugfs_init(dev);
	mutex_lock(&cache->rb_lock);
	for (i = 0; i <= mkey_cache_max_order(dev); i++) {
		rb_key.ndescs = 1 << (i + 2);
		ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
		if (IS_ERR(ent)) {
			ret = PTR_ERR(ent);
			goto err;
		}
	}

	ret = mlx5_odp_init_mkey_cache(dev);
	if (ret)
		goto err;

	mutex_unlock(&cache->rb_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		queue_adjust_cache_locked(ent);
		xa_unlock_irq(&ent->mkeys);
	}

	return 0;

err:
	mutex_unlock(&cache->rb_lock);
	mlx5_mkey_cache_debugfs_cleanup(dev);
	mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
	return ret;
}

void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
	struct rb_root *root = &dev->cache.rb_root;
	struct mlx5_cache_ent *ent;
	struct rb_node *node;

	if (!dev->cache.wq)
		return;

	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
	mutex_lock(&dev->cache.rb_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		ent->disabled = true;
		xa_unlock_irq(&ent->mkeys);
		cancel_delayed_work_sync(&ent->dwork);
	}

	mlx5_mkey_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	node = rb_first(root);
	while (node) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		node = rb_next(node);
		clean_keys(dev, ent);
		rb_erase(&ent->node, root);
		kfree(ent);
	}
	mutex_unlock(&dev->cache.rb_lock);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
				      pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

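/*
 * Translation entries are 8 bytes each but are counted in 16-byte octowords,
 * so two page entries fit per octoword; round the page count up. E.g. 5
 * pages need (5 + 1) / 2 = 3 octowords.
 */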
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MKEY_CACHE_LAST_STD_ENTRY;
	return MLX5_MAX_UMR_SHIFT;
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags, u64 iova)
{
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->ibmr.device = &dev->ib_dev;
	mr->ibmr.iova = iova;
	mr->access_flags = access_flags;
}

static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}

static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
	};
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);

	rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
	rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
	rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
	ent = mkey_cache_ent_from_rb_key(dev, rb_key);
	/*
	 * If the MR can't come from the cache then synchronously create an uncached
	 * one.
	 */
	if (!ent) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
		mutex_unlock(&dev->slow_path_mutex);
		if (IS_ERR(mr))
			return mr;
		mr->mmkey.rb_key = rb_key;
		return mr;
	}

	mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->page_shift = order_base_2(page_size);
	set_mr_fields(dev, mr, umem->length, access_flags, iova);

	return mr;
}

/*
 * Create an mkey directly with a FW command, bypassing the mkey cache. If
 * populate is set the PAS list is built and the MR is created enabled;
 * otherwise the mkey is left free so the translations can be loaded later
 * via UMR.
 */
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	if (!page_size)
		return ERR_PTR(-EINVAL);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;
	mr->page_shift = order_base_2(page_size);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) *
			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, umem->length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(iova, umem->length, mr->page_shift));
	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
	if (mlx5_umem_needs_ats(dev, umem, access_flags))
		MLX5_SET(mkc, mkc, ma_translation_mode, 1);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(iova, umem->length, mr->page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
	mr->umem = umem;
	set_mr_fields(dev, mr, umem->length, access_flags, iova);
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);
err_1:
	kfree(mr);
	return ERR_PTR(err);
}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, length, acc, start_addr);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
				    u64 iova, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	int err;

	xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
	if (xlt_with_umr) {
		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
	} else {
		unsigned int page_size = mlx5_umem_find_best_pgsz(
			umem, mkc, log_page_size, 0, iova);

		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, true);
		mutex_unlock(&dev->slow_path_mutex);
	}
	if (IS_ERR(mr)) {
		ib_umem_release(umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

	if (xlt_with_umr) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go ahead
		 * and configure it again via UMR while enabling it.
		 */
		err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
		if (err) {
			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
			return ERR_PTR(err);
		}
	}
	return &mr->ibmr;
}

static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 iova, int access_flags,
					struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
	if (err)
		return ERR_PTR(err);
	if (!start && length == U64_MAX) {
		if (iova != 0)
			return ERR_PTR(-EINVAL);
		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	/* ODP requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
			      &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&odp->umem);
		return ERR_CAST(mr);
	}
	xa_init(&mr->implicit_children);

	odp->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_odp_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, iova, length, access_flags);

	if (access_flags & IB_ACCESS_ON_DEMAND)
		return create_user_odp_mr(pd, start, length, iova, access_flags,
					  udata);
	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);
	return create_real_mr(pd, umem, iova, access_flags);
}

static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
	struct mlx5_ib_mr *mr = umem_dmabuf->private;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
	.allow_peer2peer = 1,
	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};

struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev,
		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
		    offset, virt_addr, length, fd, access_flags);

	/* dmabuf requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
					 access_flags,
					 &mlx5_ib_dmabuf_attach_ops);
	if (IS_ERR(umem_dmabuf)) {
		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
			    PTR_ERR(umem_dmabuf));
		return ERR_CAST(umem_dmabuf);
	}

	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
				access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&umem_dmabuf->umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
	umem_dmabuf->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_dmabuf_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

/*
 * True if the change in access flags can be done via UMR, only some access
 * flags can be updated.
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
		return false;
	return mlx5r_umr_can_reconfig(dev, current_access_flags,
				      target_access_flags);
}

static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

	/* We only track the allocated sizes of MRs from the cache */
	if (!mr->mmkey.cache_ent)
		return false;
	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
		return false;

	*page_size =
		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
	if (WARN_ON(!*page_size))
		return false;
	return (mr->mmkey.cache_ent->rb_key.ndescs) >=
	       ib_umem_num_dma_blocks(new_umem, *page_size);
}

static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
	struct ib_umem *old_umem = mr->umem;
	int err;

	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensure the change is atomic relative to any use of the
	 * MR.
	 */
	err = mlx5r_umr_revoke_mr(mr);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		mr->ibmr.pd = pd;
		upd_flags |= MLX5_IB_UPD_XLT_PD;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		mr->access_flags = access_flags;
		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
	}

	mr->ibmr.iova = iova;
	mr->ibmr.length = new_umem->length;
	mr->page_shift = order_base_2(page_size);
	mr->umem = new_umem;
	err = mlx5r_umr_update_mr_pas(mr, upd_flags);
	if (err) {
		/*
		 * The MR is revoked at this point so there is no issue to free
		 * new_umem.
		 */
		mr->umem = old_umem;
		return err;
	}

	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
	ib_umem_release(old_umem);
	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
	return 0;
}

struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 iova, int new_access_flags,
				    struct ib_pd *new_pd,
				    struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(
		dev,
		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		start, iova, length, new_access_flags);

	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
		return ERR_PTR(-EOPNOTSUPP);

	if (!(flags & IB_MR_REREG_ACCESS))
		new_access_flags = mr->access_flags;
	if (!(flags & IB_MR_REREG_PD))
		new_pd = ib_mr->pd;

	if (!(flags & IB_MR_REREG_TRANS)) {
		struct ib_umem *umem;

		/* Fast path for PD/access change */
		if (can_use_umr_rereg_access(dev, mr->access_flags,
					     new_access_flags)) {
			err = mlx5r_umr_rereg_pd_access(mr, new_pd,
							new_access_flags);
			if (err)
				return ERR_PTR(err);
			return NULL;
		}
		/* DM or ODP MR's don't have a normal umem so we can't re-use it */
		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
			goto recreate;

		/*
		 * Only one active MR can refer to a umem at one time, revoke
		 * the old MR before assigning the umem to the new one.
		 */
		err = mlx5r_umr_revoke_mr(mr);
		if (err)
			return ERR_PTR(err);
		umem = mr->umem;
		mr->umem = NULL;
		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

		return create_real_mr(new_pd, umem, mr->ibmr.iova,
				      new_access_flags);
	}

	/*
	 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
	 * but the logic around releasing the umem is different
	 */
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
		struct ib_umem *new_umem;
		unsigned long page_size;

		new_umem = ib_umem_get(&dev->ib_dev, start, length,
				       new_access_flags);
		if (IS_ERR(new_umem))
			return ERR_CAST(new_umem);

		/* Fast path for PAS change */
		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
					  &page_size)) {
			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
					    new_umem, iova, page_size);
			if (err) {
				ib_umem_release(new_umem);
				return ERR_PTR(err);
			}
			return NULL;
		}
		return create_real_mr(new_pd, new_umem, iova, new_access_flags);
	}

	/*
	 * Everything else has no state we can preserve, just create a new MR
	 * from scratch
	 */
recreate:
	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
				   new_access_flags, udata);
}

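/*
 * Allocate the descriptor list with enough headroom that the portion mapped
 * for the device can be aligned up to MLX5_UMR_ALIGN.
 */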
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct device *ddev = &dev->mdev->pdev->dev;
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
	if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
		int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));

		add_size = min_t(int, end - size, add_size);
	}

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (!mr->umem && mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;
		struct mlx5_ib_dev *dev = to_mdev(device);

		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
				 DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

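/*
 * On deregistration, return a cache-eligible mkey to the entry matching its
 * rb_key, creating a temporary (non-persistent) entry when no exact match
 * exists yet.
 */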
static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mr *mr)
{
	struct mlx5_mkey_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int ret;

	if (mr->mmkey.cache_ent) {
		xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
		mr->mmkey.cache_ent->in_use--;
		goto end;
	}

	mutex_lock(&cache->rb_lock);
	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
	if (ent) {
		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
			if (ent->disabled) {
				mutex_unlock(&cache->rb_lock);
				return -EOPNOTSUPP;
			}
			mr->mmkey.cache_ent = ent;
			xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
			mutex_unlock(&cache->rb_lock);
			goto end;
		}
	}

	ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
	mutex_unlock(&cache->rb_lock);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	mr->mmkey.cache_ent = ent;
	xa_lock_irq(&mr->mmkey.cache_ent->mkeys);

end:
	ret = push_mkey_locked(mr->mmkey.cache_ent, false,
			       xa_mk_value(mr->mmkey.key));
	xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
	return ret;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	int rc;

	/*
	 * Any async use of the mr must hold the refcount, once the refcount
	 * goes to zero no other thread, such as ODP page faults, prefetch, any
	 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
	 */
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    refcount_read(&mr->mmkey.usecount) != 0 &&
	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
		mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			   mr->sig, NULL, GFP_KERNEL);

		if (mr->mtt_mr) {
			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->mtt_mr = NULL;
		}
		if (mr->klm_mr) {
			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->klm_mr = NULL;
		}

		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	/* Stop DMA */
	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
		if (mlx5r_umr_revoke_mr(mr) ||
		    cache_ent_find_and_store(dev, mr))
			mr->mmkey.cache_ent = NULL;

	if (!mr->mmkey.cache_ent) {
		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
		if (rc)
			return rc;
	}

	if (mr->umem) {
		bool is_odp = is_odp_mr(mr);

		if (!is_odp)
			atomic_sub(ib_umem_num_pages(mr->umem),
				   &dev->mdev->priv.reg_pages);
		ib_umem_release(mr->umem);
		if (is_odp)
			mlx5_ib_free_odp_mr(mr);
	}

	if (!mr->mmkey.cache_ent)
		mlx5_free_priv_descs(mr);

	kfree(mr);
	return 0;
}

static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* This is only used from the kernel, so setting the PD is OK. */
	set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}

static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}

static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}

static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}

static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}

int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	unsigned int ndescs;
	u32 *in = NULL;
	void *mkc;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->mmkey.ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
	kfree(in);
	return err;
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
		/*
		 * pagefault_single_data_segment() may be accessing mmw
		 * if the user bound an ODP MR to this MW.
		 */
		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);

	return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

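/*
 * Fast path for integrity MRs: a single data SGE (and at most one metadata
 * SGE) can be addressed directly through local_dma_lkey, so no descriptor
 * list and no UMR are needed.
 */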
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->mmkey.ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->mmkey.ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}

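/*
 * ib_sg_to_pages() callbacks: append one page address to the descriptor
 * list, marked read/write enabled. The _pi variant accounts for metadata
 * descriptors that follow the data descriptors.
 */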
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals to first data page address + size of data pages +
		 * metadata offset at the first metadata page
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->mmkey.ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we register
		 * also the gaps between the end of the data and the start of
		 * the metadata (the sig MR will verify that the HW will access
		 * to right addresses). This mapping is safe because we use
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}

static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}

int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->mmkey.ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fallback to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
	 * descriptors and fallback to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially in high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is zero-based memory region */
	if (pi_mr) {
		ibmr->iova = 0;
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	} else {
		ibmr->iova = mr->data_iova;
		ibmr->sig_attrs->meta_length = mr->meta_length;
	}

	mr->pi_mr = pi_mr;
	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->mmkey.ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}