// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "enic.h"
29 int enic_get_vnic_config(struct enic *enic)
31 struct vnic_enet_config *c = &enic->config;
34 err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
36 dev_err(enic_get_dev(enic),
37 "Error getting MAC addr, %d\n", err);
41 #define GET_CONFIG(m) \
43 err = vnic_dev_spec(enic->vdev, \
44 offsetof(struct vnic_enet_config, m), \
45 sizeof(c->m), &c->m); \
47 dev_err(enic_get_dev(enic), \
48 "Error getting %s, %d\n", #m, err); \
54 GET_CONFIG(wq_desc_count);
55 GET_CONFIG(rq_desc_count);
57 GET_CONFIG(intr_timer_type);
58 GET_CONFIG(intr_mode);
59 GET_CONFIG(intr_timer_usec);
64 min_t(u32, ENIC_MAX_WQ_DESCS,
65 max_t(u32, ENIC_MIN_WQ_DESCS,
67 c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
70 min_t(u32, ENIC_MAX_RQ_DESCS,
71 max_t(u32, ENIC_MIN_RQ_DESCS,
73 c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
77 c->mtu = min_t(u16, ENIC_MAX_MTU,
78 max_t(u16, ENIC_MIN_MTU,
81 c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
82 vnic_dev_get_intr_coal_timer_max(enic->vdev));
84 dev_info(enic_get_dev(enic),
85 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
86 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
88 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
89 "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
90 "loopback tag 0x%04x\n",
91 ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
92 ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
93 ENIC_SETTING(enic, TSO) ? "yes" : "no",
94 ENIC_SETTING(enic, LRO) ? "yes" : "no",
95 ENIC_SETTING(enic, RSS) ? "yes" : "no",
96 c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
97 c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
98 c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
100 c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
101 c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
109 int enic_add_vlan(struct enic *enic, u16 vlanid)
111 u64 a0 = vlanid, a1 = 0;
115 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
117 dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
122 int enic_del_vlan(struct enic *enic, u16 vlanid)
124 u64 a0 = vlanid, a1 = 0;
128 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
130 dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
135 int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
136 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
139 enum vnic_devcmd_cmd cmd = CMD_NIC_CFG;
144 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
145 rss_hash_type, rss_hash_bits, rss_base_cpu,
146 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
151 if (rss_hash_type & (NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 |
152 NIC_CFG_RSS_HASH_TYPE_UDP_IPV6))
153 cmd = CMD_NIC_CFG_CHK;
155 return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
158 int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
160 u64 a0 = (u64)key_pa, a1 = len;
163 return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
166 int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
168 u64 a0 = (u64)cpu_pa, a1 = len;
171 return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
174 void enic_free_vnic_resources(struct enic *enic)
178 for (i = 0; i < enic->wq_count; i++)
179 vnic_wq_free(&enic->wq[i]);
180 for (i = 0; i < enic->rq_count; i++)
181 vnic_rq_free(&enic->rq[i]);
182 for (i = 0; i < enic->cq_count; i++)
183 vnic_cq_free(&enic->cq[i]);
184 for (i = 0; i < enic->intr_count; i++)
185 vnic_intr_free(&enic->intr[i]);
188 void enic_get_res_counts(struct enic *enic)
190 enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
191 enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
192 enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
193 enic->intr_count = vnic_dev_get_res_count(enic->vdev,
196 dev_info(enic_get_dev(enic),
197 "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
198 enic->wq_count, enic->rq_count,
199 enic->cq_count, enic->intr_count);
202 void enic_init_vnic_resources(struct enic *enic)
204 enum vnic_dev_intr_mode intr_mode;
205 unsigned int mask_on_assertion;
206 unsigned int interrupt_offset;
207 unsigned int error_interrupt_enable;
208 unsigned int error_interrupt_offset;
209 unsigned int cq_index;
212 intr_mode = vnic_dev_get_intr_mode(enic->vdev);
214 /* Init RQ/WQ resources.
216 * RQ[0 - n-1] point to CQ[0 - n-1]
217 * WQ[0 - m-1] point to CQ[n - n+m-1]
219 * Error interrupt is not enabled for MSI.
223 case VNIC_DEV_INTR_MODE_INTX:
224 case VNIC_DEV_INTR_MODE_MSIX:
225 error_interrupt_enable = 1;
226 error_interrupt_offset = enic->intr_count - 2;
229 error_interrupt_enable = 0;
230 error_interrupt_offset = 0;
234 for (i = 0; i < enic->rq_count; i++) {
236 vnic_rq_init(&enic->rq[i],
238 error_interrupt_enable,
239 error_interrupt_offset);
242 for (i = 0; i < enic->wq_count; i++) {
243 cq_index = enic->rq_count + i;
244 vnic_wq_init(&enic->wq[i],
246 error_interrupt_enable,
247 error_interrupt_offset);
252 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
253 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
256 for (i = 0; i < enic->cq_count; i++) {
259 case VNIC_DEV_INTR_MODE_MSIX:
260 interrupt_offset = i;
263 interrupt_offset = 0;
267 vnic_cq_init(&enic->cq[i],
268 0 /* flow_control_enable */,
269 1 /* color_enable */,
272 1 /* cq_tail_color */,
273 1 /* interrupt_enable */,
274 1 /* cq_entry_enable */,
275 0 /* cq_message_enable */,
277 0 /* cq_message_addr */);
280 /* Init INTR resources
282 * mask_on_assertion is not used for INTx due to the level-
283 * triggered nature of INTx
287 case VNIC_DEV_INTR_MODE_MSI:
288 case VNIC_DEV_INTR_MODE_MSIX:
289 mask_on_assertion = 1;
292 mask_on_assertion = 0;
296 for (i = 0; i < enic->intr_count; i++) {
297 vnic_intr_init(&enic->intr[i],
298 enic->config.intr_timer_usec,
299 enic->config.intr_timer_type,
304 int enic_alloc_vnic_resources(struct enic *enic)
306 enum vnic_dev_intr_mode intr_mode;
310 intr_mode = vnic_dev_get_intr_mode(enic->vdev);
312 dev_info(enic_get_dev(enic), "vNIC resources used: "
313 "wq %d rq %d cq %d intr %d intr mode %s\n",
314 enic->wq_count, enic->rq_count,
315 enic->cq_count, enic->intr_count,
316 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
317 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
318 intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
321 /* Allocate queue resources
324 for (i = 0; i < enic->wq_count; i++) {
325 err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
326 enic->config.wq_desc_count,
327 sizeof(struct wq_enet_desc));
329 goto err_out_cleanup;
332 for (i = 0; i < enic->rq_count; i++) {
333 err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
334 enic->config.rq_desc_count,
335 sizeof(struct rq_enet_desc));
337 goto err_out_cleanup;
340 for (i = 0; i < enic->cq_count; i++) {
341 if (i < enic->rq_count)
342 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
343 enic->config.rq_desc_count,
344 sizeof(struct cq_enet_rq_desc));
346 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
347 enic->config.wq_desc_count,
348 sizeof(struct cq_enet_wq_desc));
350 goto err_out_cleanup;
353 for (i = 0; i < enic->intr_count; i++) {
354 err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
356 goto err_out_cleanup;
359 /* Hook remaining resource
362 enic->legacy_pba = vnic_dev_get_res(enic->vdev,
363 RES_TYPE_INTR_PBA_LEGACY, 0);
364 if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
365 dev_err(enic_get_dev(enic),
366 "Failed to hook legacy pba resource\n");
368 goto err_out_cleanup;
374 enic_free_vnic_resources(enic);