/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
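/*
 * Illustrative usage sketch (not part of the original header): every message
 * is prefixed with the device name, function, line and pid, so a hypothetical
 * call such as
 *
 *	mlx5_ib_warn(dev, "cmd failed, status %d\n", status);
 *
 * would print e.g. "mlx5_0:create_cq:123:(pid 4711): cmd failed, status -5".
 */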
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
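/*
 * Example (illustrative): field_avail(struct mlx5_ib_create_qp, uidx, inlen)
 * evaluates to true only when a user command buffer of inlen bytes is large
 * enough to fully contain the uidx field.  This is how optional trailing ABI
 * fields are detected in get_qp_user_index()/get_srq_user_index() below.
 */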
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
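/*
 * The mmap offset encodes a command in its high bits: the bits above
 * MLX5_IB_MMAP_CMD_SHIFT select the enum mlx5_ib_mmap_cmd value.  A minimal
 * decoding sketch (the helper name is illustrative, not part of this header;
 * pgoff would be vma->vm_pgoff in a real .mmap handler):
 */
static inline unsigned long mlx5_ib_mmap_cmd_example(unsigned long pgoff)
{
	return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}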
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};
struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;

	unsigned long		upd_xlt_page;
	/* protect ODP/KSM */
	struct mutex		upd_xlt_page_mutex;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
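/*
 * The to_*() helpers in this header all follow the same container_of()
 * pattern: the uverbs core hands the driver an embedded struct (here
 * ib_ucontext) and the driver recovers its private wrapper.  A usage sketch,
 * as it might appear in a verbs entry point (illustrative only):
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibcontext);
 *
 *	mutex_lock(&ctx->db_page_mutex);
 *	...
 *	mutex_unlock(&ctx->db_page_mutex);
 */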
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/removing flow rules.
	 * Only a single addition/removal of a flow steering rule can be
	 * in progress at a time.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver.
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64
#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
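/*
 * Illustrative sketch (not a definitive call site): a first-time population
 * of an MR's translation table would combine the zap and enable bits, e.g.
 *
 *	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ENABLE);
 *
 * mlx5_ib_update_xlt() is declared later in this header; the argument values
 * here are assumptions for the example only.
 */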
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
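/*
 * Usage sketch (illustrative): a caller inside the driver would request the
 * QP1-source behaviour through the standard create_flags field, e.g.
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 *
 * keeping the reserved flag value itself out of common code.
 */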
struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			wqe_shift;
	int			offset;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
};
struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};
struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};
struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};
struct mlx5_ib_rss_qp {
	u32	tirn;
};
struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};
struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};
struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};
struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg	*bfreg;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	struct mlx5_bf		bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};
struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO                          = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
};
struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd			*pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
};
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf	*resize_buf;
	struct ib_umem		*resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};
struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem		*umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem		*umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			npages;
	struct mlx5_ib_dev	*dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx	*sig;
	int			live;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr	*parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t	q_leaf_free;
};
struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};
struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry		*fsize;
	struct dentry		*fcur;
	struct dentry		*fmiss;
	struct dentry		*flimit;

	struct mlx5_ib_dev	*dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
	struct completion	compl;
};
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};
struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};
struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u16 set_id;
};
struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
};
struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		next_port;
	enum ib_port_state last_port_state;
};
struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};
struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};
enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev	*dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	struct mlx5_ib_dbg_cc_params	*dbg_cc_params;

	/* protect the user_td */
	struct mutex		lb_mutex;
	u32			user_td;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
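/*
 * Example (illustrative): convert_access(IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ) yields MLX5_PERM_LOCAL_WRITE |
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ; local read permission is
 * always granted regardless of the requested IB access flags.
 */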
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
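/*
 * Typical caller pattern (a sketch, not the driver's exact code):
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 *
 * i.e. any flag outside IGNORE_OVERRUN/TIMESTAMP_COMPLETION rejects the
 * CQ creation request.
 */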
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
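/*
 * Note on the checks above (applies to the SRQ variant below as well): with
 * CQE version 0 the uidx field must either be absent from the command or
 * hold the MLX5_IB_DEFAULT_UIDX sentinel; with CQE version 1 it must be
 * present.  Any other combination is rejected with -EINVAL, and
 * verify_assign_uidx() then validates the value itself.
 */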
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}
static inline int get_num_uars(struct mlx5_ib_dev *dev,
			       struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}
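/*
 * Worked example (illustrative numbers): on hardware that supports 4K UARs
 * and with a userspace library that enables them (lib_uar_4k set), each
 * system page holds MLX5_UARS_IN_PAGE UARs, so get_num_uars() scales
 * num_sys_pages accordingly; otherwise each system page counts as one UAR.
 */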
#endif /* MLX5_IB_H */