/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_MAIN_H
#define IRDMA_MAIN_H

#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include <net/netevent.h>
#include <net/ip6_route.h>
#include <net/secure_seq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/kthread.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc.h>
#include <crypto/hash.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/irdma-abi.h>
extern struct auxiliary_driver i40iw_auxiliary_drv;

#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2

#define IRDMA_ARP_ADD 1
#define IRDMA_ARP_DELETE 2
#define IRDMA_ARP_RESOLVE 3

#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2

#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048

#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
#define IW_TX_TIMEOUT (6 * HZ)
#define IW_FIRST_QPN 1

#define IW_SW_CONTEXT_ALIGN 1024

#define MAX_DPC_ITERATIONS 128

#define IRDMA_EVENT_TIMEOUT 50000
#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
#define IRDMA_RST_TIMEOUT_HZ 4

#define IRDMA_NO_QSET 0xffff

#define IW_CFG_FPM_QP_COUNT 32768
#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3

#define IRDMA_Q_TYPE_PE_AEQ 0x80
#define IRDMA_Q_INVALID_IDX 0xffff
#define IRDMA_REM_ENDPOINT_TRK_QPID 3

#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800

#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IRDMA_ROCE_CWND_DEFAULT 0x400
#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E

#define IRDMA_FLUSH_SQ BIT(0)
#define IRDMA_FLUSH_RQ BIT(1)
#define IRDMA_REFLUSH BIT(2)
#define IRDMA_FLUSH_WAIT BIT(3)

#define IRDMA_IRQ_NAME_STR_LEN (64)
enum init_completion_state {
	CEQ0_CREATED, /* Last state of probe */
	IP_ADDR_REGISTERED, /* Last state of open */

struct irdma_rsrc_limits {

struct irdma_cqp_err_info {

struct irdma_cqp_compl_info {

struct irdma_cqp_request {
	struct cqp_cmds_info info;
	wait_queue_head_t waitq;
	struct list_head list;
	void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
	struct irdma_cqp_compl_info compl_info;
	bool request_done; /* READ/WRITE_ONCE macros operate on it */

	struct irdma_sc_cqp sc_cqp;
	spinlock_t req_lock; /* protect CQP request list */
	spinlock_t compl_lock; /* protect CQP completion processing */
	wait_queue_head_t waitq;
	wait_queue_head_t remove_wq;
	struct irdma_dma_mem sq;
	struct irdma_dma_mem host_ctx;
	struct irdma_cqp_request *cqp_requests;
	struct list_head cqp_avail_reqs;
	struct list_head cqp_pending_reqs;

	struct irdma_sc_cq sc_cq;
	struct irdma_dma_mem mem_cq;
	struct irdma_dma_mem shadow_area;

	struct irdma_sc_ceq sc_ceq;
	struct irdma_dma_mem mem;
	struct irdma_pci_f *rf;
	struct tasklet_struct dpc_tasklet;
	spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */

	struct irdma_sc_aeq sc_aeq;
	struct irdma_dma_mem mem;
	struct irdma_pble_alloc palloc;

struct irdma_arp_entry {
	u8 mac_addr[ETH_ALEN];

struct irdma_msix_vector {
	char name[IRDMA_IRQ_NAME_STR_LEN];

struct irdma_mc_table_info {

struct mc_table_list {
	struct list_head list;
	struct irdma_mc_table_info mc_info;
	struct irdma_mcast_grp_info mc_grp_ctx;

struct irdma_qv_info {
	u32 v_idx; /* msix_vector */

struct irdma_qvlist_info {
	struct irdma_qv_info qv_info[1];

struct irdma_gen_ops {
	void (*request_reset)(struct irdma_pci_f *rf);
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);

	enum irdma_protocol_used protocol_used;
	unsigned long *allocated_ws_nodes;
	unsigned long *allocated_qps;
	unsigned long *allocated_cqs;
	unsigned long *allocated_mrs;
	unsigned long *allocated_pds;
	unsigned long *allocated_mcgs;
	unsigned long *allocated_ahs;
	unsigned long *allocated_arps;
	enum init_completion_state init_state;
	struct irdma_sc_dev sc_dev;
	struct pci_dev *pcidev;
	struct irdma_cqp cqp;
	struct irdma_ccq ccq;
	struct irdma_aeq aeq;
	struct irdma_ceq *ceqlist;
	struct irdma_hmc_pble_rsrc *pble_rsrc;
	struct irdma_arp_entry *arp_table;
	spinlock_t arp_lock; /* protect ARP table access */
	spinlock_t rsrc_lock; /* protect HW resource array access */
	spinlock_t qptable_lock; /* protect QP table access */
	struct irdma_qp **qp_table;
	spinlock_t qh_list_lock; /* protect mc_qht_list */
	struct mc_table_list mc_qht_list;
	struct irdma_msix_vector *iw_msixtbl;
	struct irdma_qvlist_info *iw_qvlist;
	struct tasklet_struct dpc_tasklet;
	struct msix_entry *msix_entries;
	struct irdma_dma_mem obj_mem;
	struct irdma_dma_mem obj_next;
	wait_queue_head_t vchnl_waitq;
	struct workqueue_struct *cqp_cmpl_wq;
	struct work_struct cqp_cmpl_work;
	struct irdma_sc_vsi default_vsi;
	struct irdma_gen_ops gen_ops;
	struct irdma_device *iwdev;

struct irdma_device {
	struct ib_device ibdev;
	struct irdma_pci_f *rf;
	struct net_device *netdev;
	struct workqueue_struct *cleanup_wq;
	struct irdma_sc_vsi vsi;
	struct irdma_cm_core cm_core;
	DECLARE_HASHTABLE(ah_hash_tbl, 8);
	struct mutex ah_tbl_lock; /* protect AH hash table access */
	u16 mac_ip_table_idx;
	bool roce_dcqcn_en:1;
	bool dcb_vlan_mode:1;
	enum init_completion_state init_state;
	wait_queue_head_t suspend_wq;
static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct irdma_device, ibdev);
}

static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}

static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct irdma_user_mmap_entry,
			    rdma_entry);
}

static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct irdma_pd, ibpd);
}

static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
{
	return container_of(ibah, struct irdma_ah, ibah);
}

static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct irdma_mr, ibmr);
}

static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct irdma_mr, ibmw);
}

static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct irdma_cq, ibcq);
}

static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct irdma_qp, ibqp);
}

static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
{
	return container_of(dev, struct irdma_pci_f, sc_dev);
}
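/*
 * Illustrative sketch (not part of the driver): inside a verbs callback that
 * receives a struct ib_qp *ibqp, the container_of() helpers above recover the
 * irdma wrappers, e.g.:
 *
 *	struct irdma_qp *iwqp = to_iwqp(ibqp);
 *	struct irdma_device *iwdev = to_iwdev(ibqp->device);
 *	struct irdma_pci_f *rf = iwdev->rf;
 *
 * dev_to_rf() covers the case where only the embedded struct irdma_sc_dev is
 * available.
 */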
/**
 * irdma_alloc_rsrc - allocate a resource
 * @rf: RDMA PCI function
 * @rsrc_array: resource bit array
 * @max_rsrc: maximum resource number
 * @req_rsrc_num: allocated resource number
 * @next: next free id
 */
static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
				   unsigned long *rsrc_array, u32 max_rsrc,
				   u32 *req_rsrc_num, u32 *next)
{
	u32 rsrc_num;
	unsigned long flags;

	spin_lock_irqsave(&rf->rsrc_lock, flags);
	rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
	if (rsrc_num >= max_rsrc) {
		rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
		if (rsrc_num >= max_rsrc) {
			spin_unlock_irqrestore(&rf->rsrc_lock, flags);
			ibdev_dbg(&rf->iwdev->ibdev,
				  "ERR: resource [%d] allocation failed\n",
				  rsrc_num);
			return -EOVERFLOW;
		}
	}
	__set_bit(rsrc_num, rsrc_array);
	*next = rsrc_num + 1;
	if (*next == max_rsrc)
		*next = 0;
	*req_rsrc_num = rsrc_num;
	spin_unlock_irqrestore(&rf->rsrc_lock, flags);

	return 0;
}
/**
 * irdma_free_rsrc - free a resource
 * @rf: RDMA PCI function
 * @rsrc_array: resource array for the resource_num
 * @rsrc_num: resource number to free
 */
static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
				   unsigned long *rsrc_array, u32 rsrc_num)
{
	unsigned long flags;

	spin_lock_irqsave(&rf->rsrc_lock, flags);
	__clear_bit(rsrc_num, rsrc_array);
	spin_unlock_irqrestore(&rf->rsrc_lock, flags);
}
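/*
 * Illustrative sketch (not part of the driver): callers hand irdma_alloc_rsrc()
 * one of the rf->allocated_* bitmaps along with its limit and next-free hint.
 * The rf->max_cq and rf->next_cq fields used below are assumed names that do
 * not appear in this excerpt:
 *
 *	u32 cq_num;
 *	int err;
 *
 *	err = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq,
 *			       &cq_num, &rf->next_cq);
 *	if (err)
 *		return err;
 *	(use cq_num ...)
 *	irdma_free_rsrc(rf, rf->allocated_cqs, cq_num);
 */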
int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
int irdma_rt_init_hw(struct irdma_device *iwdev,
		     struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev);
void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
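/*
 * Illustrative sketch (not part of the driver): flush_mask is a bitwise OR of
 * the IRDMA_FLUSH_* / IRDMA_REFLUSH flags defined above, e.g. flushing both
 * work queues of an errored QP and waiting for completion ('iwqp' is an
 * assumed struct irdma_qp * in scope):
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			 IRDMA_FLUSH_WAIT);
 */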
void irdma_manage_arp_cache(struct irdma_pci_f *rf,
			    const unsigned char *mac_addr,
			    u32 *ip_addr, bool ipv4, u32 action);
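/*
 * Illustrative sketch (not part of the driver): the inetaddr notifier path
 * mirrors a newly added IPv4 address into the HW ARP cache roughly like this
 * ('iwdev' and 'ifa' are assumed locals, not defined here):
 *
 *	u32 local_ipaddr = ntohl(ifa->ifa_address);
 *
 *	irdma_manage_arp_cache(iwdev->rf, iwdev->netdev->dev_addr,
 *			       &local_ipaddr, true, IRDMA_ARP_ADD);
 */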
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
		     struct irdma_apbvt_entry *entry);
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
							   bool wait);
void irdma_free_cqp_request(struct irdma_cqp *cqp,
			    struct irdma_cqp_request *cqp_request);
void irdma_put_cqp_request(struct irdma_cqp *cqp,
			   struct irdma_cqp_request *cqp_request);
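/*
 * Illustrative sketch (not part of the driver) of the typical CQP request
 * life cycle built from the helpers above; the op-specific setup of cqp_info
 * is omitted:
 *
 *	struct irdma_cqp_request *cqp_request;
 *	struct cqp_cmds_info *cqp_info;
 *	int status;
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	if (!cqp_request)
 *		return -ENOMEM;
 *
 *	cqp_info = &cqp_request->info;
 *	(fill cqp_info with the command and its op-specific parameters here)
 *	status = irdma_handle_cqp_op(rf, cqp_request);
 *	irdma_put_cqp_request(&rf->cqp, cqp_request);
 */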
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
void irdma_port_ibevent(struct irdma_device *iwdev);
void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
			u16 maj_err_code, u16 min_err_code);
int irdma_handle_cqp_op(struct irdma_pci_f *rf,
			struct irdma_cqp_request *cqp_request);
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
		       struct irdma_modify_qp_info *info, bool wait);
int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
		       enum irdma_quad_entry_type etype,
		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
		       bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
			 u8 term_len);
int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
				      u16 rem_port, u32 *rem_addr, u16 loc_port,
				      u32 *loc_addr, u16 vlan_id);
int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
			struct irdma_qp_flush_info *info, bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
		  struct irdma_gen_ae_info *info, bool wait);
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
				int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
		    bool wait,
		    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
		    void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
			 void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr);
int irdma_net_event(struct notifier_block *notifier, unsigned long event,
		    void *ptr);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
#endif /* IRDMA_MAIN_H */