/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#include "hw_channel.h"
/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

	TRI_STATE_UNKNOWN = -1,
/* Number of entries in the hardware indirection table; must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
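
/* Illustrative note (not part of the hardware interface): because the table
 * size is a power of 2, a receive hash can be folded into a table slot with a
 * mask instead of a modulo, e.g. in the style of:
 *
 *	slot = rx_hash & MANA_INDIRECT_TABLE_MASK;
 *	rxq_idx = apc->indir_table[slot];
 *
 * where apc is a struct mana_port_context (declared later in this file).
 */
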
/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256
/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11
struct mana_stats_rx {
	struct u64_stats_sync syncp;

struct mana_stats_tx {
	u64 tso_inner_packets;
	struct u64_stats_sync syncp;
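
	/* Usage sketch (standard kernel u64_stats pattern, shown here for
	 * context only): a writer bumps these counters between
	 * u64_stats_update_begin()/u64_stats_update_end() on syncp, e.g.:
	 *
	 *	u64_stats_update_begin(&stats->syncp);
	 *	stats->tso_inner_packets++;
	 *	u64_stats_update_end(&stats->syncp);
	 *
	 * and readers pair u64_stats_fetch_begin()/u64_stats_fetch_retry().
	 */
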
	struct gdma_queue *gdma_sq;

	struct net_device *ndev;

	/* SKBs sent to the HW for which we are awaiting completion CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;

/* DMA mappings of the skb linear data and frags */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
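
/* Usage sketch (assumed TX-path convention, shown for illustration only):
 * reserving MANA_HEADROOM in front of the packet data lets the driver keep
 * its per-packet DMA bookkeeping in the skb headroom, e.g.:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	ash->size[0] = len;
 */
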
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,

struct mana_tx_short_oob {
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;

	u32 trans_off : 10; /* Transport header offset */

	u32 short_vp_offset : 8;
struct mana_tx_long_oob {
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;

	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;

	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
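
/* Illustrative sketch (not taken from this header): the hash type reported in
 * the RX completion can be mapped to the kernel's skb hash levels roughly as:
 *
 *	if (hash_type & MANA_HASH_L4)
 *		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
 *	else if (hash_type & MANA_HASH_L3)
 *		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
 *
 * hash_type and hash_value stand for fields of the RX completion OOB.
 */
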
struct mana_rxcomp_perpkt_info {

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;

	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

#define CQE_POLLING_BUFFER 512
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to this queue.
	 * Valid (and must be non-NULL) only when type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to this queue.
	 * Valid (and must be non-NULL) only when type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	struct napi_struct napi;
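
	/* Dispatch sketch (illustrative only; process_rx_cqe()/process_tx_cqe()
	 * are hypothetical names): a completion handler can use the type field
	 * above to pick the right per-queue context, e.g.:
	 *
	 *	if (cq->type == MANA_CQ_TYPE_RX)
	 *		process_rx_cqe(cq->rxq, cqe);
	 *	else
	 *		process_tx_cqe(cq->txq, cqe);
	 */
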
struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer going to be sent as part of the work request. */
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
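
	/* Posting sketch (assumed call site, shown for illustration only):
	 * the fields above are typically filled in one call, e.g.:
	 *
	 *	err = mana_gd_post_work_request(rxq->gdma_rq, &oob->wqe_req,
	 *					&oob->wqe_inf);
	 *
	 * where oob is this mana_recv_buf_oob and rxq is its owning mana_rxq.
	 */
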
#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
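
/* Sizing note (illustrative): MANA_XDP_MTU_MAX restates the per-page budget.
 * A received frame of pkt_len bytes fits in one PAGE_SIZE buffer together
 * with the XDP headroom and the shared-info/padding overhead only if:
 *
 *	pkt_len + XDP_PACKET_HEADROOM + MANA_RXBUF_PAD <= PAGE_SIZE
 */
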
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */

	/* Index of RQ in the vPort, not gdma receive queue id */

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */

	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
	struct mana_cq tx_cq;

	mana_handle_t tx_object;

struct mana_ethtool_stats {
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;

	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;

struct mana_context {
	struct gdma_dev *gdma_dev;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;

	struct mana_tx_qp *tx_qp;
	/* Indirection table for RX & TX. The values are queue indices */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	u32 rxbpre_alloc_size;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex serializing access to vport_use_count */
	struct mutex vport_mutex;

	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);
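
/* Call sketch (assumed usage, for illustration only): after rewriting
 * apc->indir_table and/or apc->hashkey, the new RSS settings can be pushed to
 * the hardware with something like:
 *
 *	err = mana_config_rss(apc, apc->rss_state, true, true);
 */
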
int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
struct mana_obj_spec {

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;
/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 num_indirection_ent;

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u8 short_form_allowed;

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 cq_moderation_ctx_id;

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t wq_obj;

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;

struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;

struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	/* rx errors/discards */
	u64 discard_rx_nowqe;
	u64 err_rx_vport_disabled;

	/* rx bytes/packets */
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;

	u64 err_tx_gf_disabled;
	u64 err_tx_vport_disabled;
	u64 err_tx_inval_vport_offset_pkt;
	u64 err_tx_vlan_enforcement;
	u64 err_tx_ethtype_enforcement;
	u64 err_tx_SA_enforecement;
	u64 err_tx_SQPDID_enforcement;
	u64 err_tx_CQPDID_enforcement;
	u64 err_tx_mtu_violation;
	u64 err_tx_inval_oob;

	/* tx bytes/packets */
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u8 update_default_rxobj;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u8 is_pf_default_vport;
	u8 allow_all_ether_types;

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002

#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100

#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
	0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
	0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000

#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000

#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
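
/* Request sketch (illustrative; the req_stats field name is an assumption
 * about the elided body of struct mana_query_gf_stat_req): the flags above are
 * OR'ed into the request to select which counters the response reports, e.g.:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES;
 */
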
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
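
/* Illustrative helper (a sketch with a hypothetical name, not part of the
 * upstream driver): pick the TX OOB format based on whether the vPort offset
 * fits in the 8-bit short_vp_offset field of struct mana_tx_short_oob.
 */
static inline enum mana_tx_pkt_format mana_pick_pkt_fmt(u32 vp_offset)
{
	return vp_offset > MANA_SHORT_VPORT_OFFSET_MAX ?
	       MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT;
}
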
struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
int mana_create_wq_obj(struct mana_port_context *apc,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
void mana_uncfg_vport(struct mana_port_context *apc);