1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
3 * Copyright(c) 2020 Intel Corporation.
8 * This file contains HFI1 support for IPOIB functionality
14 #include <linux/types.h>
15 #include <linux/stddef.h>
16 #include <linux/atomic.h>
17 #include <linux/netdevice.h>
18 #include <linux/slab.h>
19 #include <linux/skbuff.h>
20 #include <linux/list.h>
21 #include <linux/if_infiniband.h>
27 #include <rdma/ib_verbs.h>
/* Shift applied when folding flow entropy bits into the flow label/hash. */
#define HFI1_IPOIB_ENTROPY_SHIFT 24

/* Maximum length of a txreq resource name string. */
#define HFI1_IPOIB_TXREQ_NAME_LEN 32

/* Length of the 20-byte IPoIB pseudo (link-layer address) header. */
#define HFI1_IPOIB_PSEUDO_LEN 20
/* Length of the 4-byte IPoIB encapsulation header. */
#define HFI1_IPOIB_ENCAP_LEN 4

/* Forward declaration; full definition below. */
struct hfi1_ipoib_dev_priv;
/*
 * Packed per-flow identity; compared against a queue's cached value to
 * detect a flow change that requires flushing the tx list (see
 * hfi1_ipoib_txq::flow).
 */
union hfi1_ipoib_flow {
} __attribute__((__packed__));
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9b ib headers
 * @sdma_status: status returned by sdma engine
 * @complete: non-zero implies complete
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 */
	struct sdma_txreq txreq;
	struct hfi1_sdma_header *sdma_hdr;
	struct hfi1_ipoib_dev_priv *priv;
	struct hfi1_ipoib_txq *txq;
 * struct hfi1_ipoib_circ_buf - List of items to be processed
 * @items: ring of items each a power of two size
 * @max_items: max items + 1 that the ring can contain
 * @shift: log2 of size for getting txreq
 * @sent_txreqs: count of txreqs posted to sdma
 * @stops: count of stops of queue
 * @ring_full: ring has been filled
 * @no_desc: descriptor shortage seen
 * @complete_txreqs: count of txreqs completed by sdma
 */
struct hfi1_ipoib_circ_buf {
	/*
	 * Producer cache line: sent_txreqs is advanced by the xmit path
	 * when a txreq is posted to sdma.
	 * NOTE(review): the original label here said "consumer", which
	 * contradicts the kernel-doc above ("posted to sdma" is the
	 * producer side) — corrected.
	 */
	u64 ____cacheline_aligned_in_smp sent_txreqs;
	/*
	 * Consumer cache line: complete_txreqs is advanced as sdma
	 * completions are reaped (napi callback side).
	 */
	u64 ____cacheline_aligned_in_smp complete_txreqs;
 * struct hfi1_ipoib_txq - IPOIB per Tx queue information
 * @priv: private pointer
 * @sde: sdma engine used by this queue
 * @tx_list: tx request list
 * @sent_txreqs: count of txreqs posted to sdma
 * @flow: tracks when list needs to be flushed for a flow change
 * @q_idx: ipoib Tx queue index
 * @pkts_sent: indicator packets have been sent from this queue
 * @wait: iowait structure
 * @napi: tx napi context (embedded, not a pointer)
 * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
 */
struct hfi1_ipoib_txq {
	struct napi_struct napi;
	struct hfi1_ipoib_dev_priv *priv;
	struct sdma_engine *sde;
	struct list_head tx_list;
	union hfi1_ipoib_flow flow;
	/* own cache line(s): shared between producer and consumer sides */
	struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring;
/*
 * Per-device IPoIB private state; embedded in struct hfi1_ipoib_rdma_netdev
 * and reached via hfi1_ipoib_priv().
 */
struct hfi1_ipoib_dev_priv {
	struct hfi1_devdata *dd;	/* hfi1 device data */
	struct net_device *netdev;	/* associated network device */
	struct ib_device *device;	/* underlying ib device */
	struct hfi1_ipoib_txq *txqs;	/* tx queue state; presumably one per tx queue — confirm */
	const struct net_device_ops *netdev_ops;
/* hfi1 ipoib rdma netdev's private data structure */
struct hfi1_ipoib_rdma_netdev {
	/*
	 * Keep @rn first: hfi1_ipoib_priv() casts the result of
	 * netdev_priv() to this struct, so the layout must start with rn
	 * followed by the device private data.
	 */
	struct rdma_netdev rn;  /* keep this first */
	/* followed by device private data */
	struct hfi1_ipoib_dev_priv dev_priv;
/*
 * hfi1_ipoib_priv - return the hfi1 ipoib private data for @dev.
 *
 * Relies on struct hfi1_ipoib_rdma_netdev being the netdev's private
 * area, with dev_priv embedded after rn (see "keep this first" above
 * that struct's rn member).
 */
static inline struct hfi1_ipoib_dev_priv *
hfi1_ipoib_priv(const struct net_device *dev)
	return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
/* Queue an skb for transmission on @dev toward @address. */
int hfi1_ipoib_send(struct net_device *dev,
		    struct ib_ah *address,

/* Allocate / tear down per-device transmit resources. */
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);

/* Set up / tear down receive queue handling for @dev. */
int hfi1_ipoib_rxq_init(struct net_device *dev);
void hfi1_ipoib_rxq_deinit(struct net_device *dev);

/* Enable/disable the tx napi contexts for @dev. */
void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
void hfi1_ipoib_napi_tx_disable(struct net_device *dev);

/* Prepare an sk_buff wrapping @size bytes of received @data from @rxq. */
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
				       int size, void *data);

/* rdma_netdev get_params hook: report ipoib netdev allocation parameters. */
int hfi1_ipoib_rn_get_params(struct ib_device *device,
			     enum rdma_netdev_t type,
			     struct rdma_netdev_alloc_params *params);

/* ndo_tx_timeout handler for tx queue index @q. */
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
171 #endif /* _IPOIB_H */